]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.1-3.19.3-201503302150.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.1-3.19.3-201503302150.patch
CommitLineData
6181b738
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index a311db8..415b28c 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 176d4fe..6eabd3c 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1191,6 +1191,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2283,6 +2290,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2584,6 +2595,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
375diff --git a/Makefile b/Makefile
376index 713bf26..9ceae96 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,72 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
452+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
453+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
454+ifeq ($(KBUILD_EXTMOD),)
455+gcc-plugins:
456+ $(Q)$(MAKE) $(build)=tools/gcc
457+else
458+gcc-plugins: ;
459+endif
460+else
461+gcc-plugins:
462+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
463+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
464+else
465+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
466+endif
467+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
468+endif
469+endif
470+
471 ifdef CONFIG_READABLE_ASM
472 # Disable optimizations that make assembler listings hard to read.
473 # reorder blocks reorders the control in the function
474@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
475 else
476 KBUILD_CFLAGS += -g
477 endif
478-KBUILD_AFLAGS += -Wa,-gdwarf-2
479+KBUILD_AFLAGS += -Wa,--gdwarf-2
480 endif
481 ifdef CONFIG_DEBUG_INFO_DWARF4
482 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
483@@ -879,7 +947,7 @@ export mod_sign_cmd
484
485
486 ifeq ($(KBUILD_EXTMOD),)
487-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
488+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
489
490 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
491 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
492@@ -926,6 +994,8 @@ endif
493
494 # The actual objects are generated when descending,
495 # make sure no implicit rule kicks in
496+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
497+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
498 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499
500 # Handle descending into subdirectories listed in $(vmlinux-dirs)
501@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
502 # Error messages still appears in the original language
503
504 PHONY += $(vmlinux-dirs)
505-$(vmlinux-dirs): prepare scripts
506+$(vmlinux-dirs): gcc-plugins prepare scripts
507 $(Q)$(MAKE) $(build)=$@
508
509 define filechk_kernel.release
510@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
511
512 archprepare: archheaders archscripts prepare1 scripts_basic
513
514+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
515+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
516 prepare0: archprepare FORCE
517 $(Q)$(MAKE) $(build)=.
518
519 # All the preparing..
520+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
521 prepare: prepare0
522
523 # Generate some files
524@@ -1095,6 +1168,8 @@ all: modules
525 # using awk while concatenating to the final file.
526
527 PHONY += modules
528+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
529+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
530 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
531 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
532 @$(kecho) ' Building modules, stage 2.';
533@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
534
535 # Target to prepare building external modules
536 PHONY += modules_prepare
537-modules_prepare: prepare scripts
538+modules_prepare: gcc-plugins prepare scripts
539
540 # Target to install modules
541 PHONY += modules_install
542@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
543 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
544 signing_key.priv signing_key.x509 x509.genkey \
545 extra_certificates signing_key.x509.keyid \
546- signing_key.x509.signer
547+ signing_key.x509.signer \
548+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
549+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
550+ tools/gcc/randomize_layout_seed.h
551
552 # clean - Delete most, but leave enough to build external modules
553 #
554@@ -1215,7 +1293,7 @@ distclean: mrproper
555 @find $(srctree) $(RCS_FIND_IGNORE) \
556 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
557 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
558- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
559+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
560 -type f -print | xargs rm -f
561
562
563@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
564 $(module-dirs): crmodverdir $(objtree)/Module.symvers
565 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
566
567+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
568+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
569 modules: $(module-dirs)
570 @$(kecho) ' Building modules, stage 2.';
571 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
572@@ -1521,17 +1601,21 @@ else
573 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
574 endif
575
576-%.s: %.c prepare scripts FORCE
577+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
578+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
579+%.s: %.c gcc-plugins prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581 %.i: %.c prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583-%.o: %.c prepare scripts FORCE
584+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
585+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
586+%.o: %.c gcc-plugins prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588 %.lst: %.c prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.s: %.S prepare scripts FORCE
591+%.s: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593-%.o: %.S prepare scripts FORCE
594+%.o: %.S gcc-plugins prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596 %.symtypes: %.c prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598@@ -1543,11 +1627,15 @@ endif
599 $(build)=$(build-dir)
600 # Make sure the latest headers are built for Documentation
601 Documentation/: headers_install
602-%/: prepare scripts FORCE
603+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
604+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
605+%/: gcc-plugins prepare scripts FORCE
606 $(cmd_crmodverdir)
607 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
608 $(build)=$(build-dir)
609-%.ko: prepare scripts FORCE
610+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
611+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
612+%.ko: gcc-plugins prepare scripts FORCE
613 $(cmd_crmodverdir)
614 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
615 $(build)=$(build-dir) $(@:.ko=.o)
616diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
617index 8f8eafb..3405f46 100644
618--- a/arch/alpha/include/asm/atomic.h
619+++ b/arch/alpha/include/asm/atomic.h
620@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
621 #define atomic_dec(v) atomic_sub(1,(v))
622 #define atomic64_dec(v) atomic64_sub(1,(v))
623
624+#define atomic64_read_unchecked(v) atomic64_read(v)
625+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
626+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
627+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
628+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
629+#define atomic64_inc_unchecked(v) atomic64_inc(v)
630+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
631+#define atomic64_dec_unchecked(v) atomic64_dec(v)
632+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
633+
634 #endif /* _ALPHA_ATOMIC_H */
635diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
636index ad368a9..fbe0f25 100644
637--- a/arch/alpha/include/asm/cache.h
638+++ b/arch/alpha/include/asm/cache.h
639@@ -4,19 +4,19 @@
640 #ifndef __ARCH_ALPHA_CACHE_H
641 #define __ARCH_ALPHA_CACHE_H
642
643+#include <linux/const.h>
644
645 /* Bytes per L1 (data) cache line. */
646 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
647-# define L1_CACHE_BYTES 64
648 # define L1_CACHE_SHIFT 6
649 #else
650 /* Both EV4 and EV5 are write-through, read-allocate,
651 direct-mapped, physical.
652 */
653-# define L1_CACHE_BYTES 32
654 # define L1_CACHE_SHIFT 5
655 #endif
656
657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
658 #define SMP_CACHE_BYTES L1_CACHE_BYTES
659
660 #endif
661diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
662index 968d999..d36b2df 100644
663--- a/arch/alpha/include/asm/elf.h
664+++ b/arch/alpha/include/asm/elf.h
665@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
666
667 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
668
669+#ifdef CONFIG_PAX_ASLR
670+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
671+
672+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
673+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
674+#endif
675+
676 /* $0 is set by ld.so to a pointer to a function which might be
677 registered using atexit. This provides a mean for the dynamic
678 linker to call DT_FINI functions for shared libraries that have
679diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
680index aab14a0..b4fa3e7 100644
681--- a/arch/alpha/include/asm/pgalloc.h
682+++ b/arch/alpha/include/asm/pgalloc.h
683@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
684 pgd_set(pgd, pmd);
685 }
686
687+static inline void
688+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
689+{
690+ pgd_populate(mm, pgd, pmd);
691+}
692+
693 extern pgd_t *pgd_alloc(struct mm_struct *mm);
694
695 static inline void
696diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
697index d8f9b7e..f6222fa 100644
698--- a/arch/alpha/include/asm/pgtable.h
699+++ b/arch/alpha/include/asm/pgtable.h
700@@ -102,6 +102,17 @@ struct vm_area_struct;
701 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
702 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
703 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
704+
705+#ifdef CONFIG_PAX_PAGEEXEC
706+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
707+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
708+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
709+#else
710+# define PAGE_SHARED_NOEXEC PAGE_SHARED
711+# define PAGE_COPY_NOEXEC PAGE_COPY
712+# define PAGE_READONLY_NOEXEC PAGE_READONLY
713+#endif
714+
715 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
716
717 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
718diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
719index 2fd00b7..cfd5069 100644
720--- a/arch/alpha/kernel/module.c
721+++ b/arch/alpha/kernel/module.c
722@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
723
724 /* The small sections were sorted to the end of the segment.
725 The following should definitely cover them. */
726- gp = (u64)me->module_core + me->core_size - 0x8000;
727+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
728 got = sechdrs[me->arch.gotsecindex].sh_addr;
729
730 for (i = 0; i < n; i++) {
731diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
732index e51f578..16c64a3 100644
733--- a/arch/alpha/kernel/osf_sys.c
734+++ b/arch/alpha/kernel/osf_sys.c
735@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
736 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
737
738 static unsigned long
739-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
740- unsigned long limit)
741+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
742+ unsigned long limit, unsigned long flags)
743 {
744 struct vm_unmapped_area_info info;
745+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
746
747 info.flags = 0;
748 info.length = len;
749@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
750 info.high_limit = limit;
751 info.align_mask = 0;
752 info.align_offset = 0;
753+ info.threadstack_offset = offset;
754 return vm_unmapped_area(&info);
755 }
756
757@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
758 merely specific addresses, but regions of memory -- perhaps
759 this feature should be incorporated into all ports? */
760
761+#ifdef CONFIG_PAX_RANDMMAP
762+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
763+#endif
764+
765 if (addr) {
766- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
767+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
768 if (addr != (unsigned long) -ENOMEM)
769 return addr;
770 }
771
772 /* Next, try allocating at TASK_UNMAPPED_BASE. */
773- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
774- len, limit);
775+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
776+
777 if (addr != (unsigned long) -ENOMEM)
778 return addr;
779
780 /* Finally, try allocating in low memory. */
781- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
782+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
783
784 return addr;
785 }
786diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
787index 9d0ac09..479a962 100644
788--- a/arch/alpha/mm/fault.c
789+++ b/arch/alpha/mm/fault.c
790@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
791 __reload_thread(pcb);
792 }
793
794+#ifdef CONFIG_PAX_PAGEEXEC
795+/*
796+ * PaX: decide what to do with offenders (regs->pc = fault address)
797+ *
798+ * returns 1 when task should be killed
799+ * 2 when patched PLT trampoline was detected
800+ * 3 when unpatched PLT trampoline was detected
801+ */
802+static int pax_handle_fetch_fault(struct pt_regs *regs)
803+{
804+
805+#ifdef CONFIG_PAX_EMUPLT
806+ int err;
807+
808+ do { /* PaX: patched PLT emulation #1 */
809+ unsigned int ldah, ldq, jmp;
810+
811+ err = get_user(ldah, (unsigned int *)regs->pc);
812+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
813+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
814+
815+ if (err)
816+ break;
817+
818+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
819+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
820+ jmp == 0x6BFB0000U)
821+ {
822+ unsigned long r27, addr;
823+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
824+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
825+
826+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
827+ err = get_user(r27, (unsigned long *)addr);
828+ if (err)
829+ break;
830+
831+ regs->r27 = r27;
832+ regs->pc = r27;
833+ return 2;
834+ }
835+ } while (0);
836+
837+ do { /* PaX: patched PLT emulation #2 */
838+ unsigned int ldah, lda, br;
839+
840+ err = get_user(ldah, (unsigned int *)regs->pc);
841+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
842+ err |= get_user(br, (unsigned int *)(regs->pc+8));
843+
844+ if (err)
845+ break;
846+
847+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
848+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
849+ (br & 0xFFE00000U) == 0xC3E00000U)
850+ {
851+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
852+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
853+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
854+
855+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
856+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
857+ return 2;
858+ }
859+ } while (0);
860+
861+ do { /* PaX: unpatched PLT emulation */
862+ unsigned int br;
863+
864+ err = get_user(br, (unsigned int *)regs->pc);
865+
866+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
867+ unsigned int br2, ldq, nop, jmp;
868+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
869+
870+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
871+ err = get_user(br2, (unsigned int *)addr);
872+ err |= get_user(ldq, (unsigned int *)(addr+4));
873+ err |= get_user(nop, (unsigned int *)(addr+8));
874+ err |= get_user(jmp, (unsigned int *)(addr+12));
875+ err |= get_user(resolver, (unsigned long *)(addr+16));
876+
877+ if (err)
878+ break;
879+
880+ if (br2 == 0xC3600000U &&
881+ ldq == 0xA77B000CU &&
882+ nop == 0x47FF041FU &&
883+ jmp == 0x6B7B0000U)
884+ {
885+ regs->r28 = regs->pc+4;
886+ regs->r27 = addr+16;
887+ regs->pc = resolver;
888+ return 3;
889+ }
890+ }
891+ } while (0);
892+#endif
893+
894+ return 1;
895+}
896+
897+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
898+{
899+ unsigned long i;
900+
901+ printk(KERN_ERR "PAX: bytes at PC: ");
902+ for (i = 0; i < 5; i++) {
903+ unsigned int c;
904+ if (get_user(c, (unsigned int *)pc+i))
905+ printk(KERN_CONT "???????? ");
906+ else
907+ printk(KERN_CONT "%08x ", c);
908+ }
909+ printk("\n");
910+}
911+#endif
912
913 /*
914 * This routine handles page faults. It determines the address,
915@@ -133,8 +251,29 @@ retry:
916 good_area:
917 si_code = SEGV_ACCERR;
918 if (cause < 0) {
919- if (!(vma->vm_flags & VM_EXEC))
920+ if (!(vma->vm_flags & VM_EXEC)) {
921+
922+#ifdef CONFIG_PAX_PAGEEXEC
923+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
924+ goto bad_area;
925+
926+ up_read(&mm->mmap_sem);
927+ switch (pax_handle_fetch_fault(regs)) {
928+
929+#ifdef CONFIG_PAX_EMUPLT
930+ case 2:
931+ case 3:
932+ return;
933+#endif
934+
935+ }
936+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
937+ do_group_exit(SIGKILL);
938+#else
939 goto bad_area;
940+#endif
941+
942+ }
943 } else if (!cause) {
944 /* Allow reads even for write-only mappings */
945 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
946diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
947index 97d07ed..2931f2b 100644
948--- a/arch/arm/Kconfig
949+++ b/arch/arm/Kconfig
950@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
951
952 config UACCESS_WITH_MEMCPY
953 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
954- depends on MMU
955+ depends on MMU && !PAX_MEMORY_UDEREF
956 default y if CPU_FEROCEON
957 help
958 Implement faster copy_to_user and clear_user methods for CPU
959@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
960 config KEXEC
961 bool "Kexec system call (EXPERIMENTAL)"
962 depends on (!SMP || PM_SLEEP_SMP)
963+ depends on !GRKERNSEC_KMEM
964 help
965 kexec is a system call that implements the ability to shutdown your
966 current kernel, and to start another kernel. It is like a reboot
967diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
968index e22c119..eaa807d 100644
969--- a/arch/arm/include/asm/atomic.h
970+++ b/arch/arm/include/asm/atomic.h
971@@ -18,17 +18,41 @@
972 #include <asm/barrier.h>
973 #include <asm/cmpxchg.h>
974
975+#ifdef CONFIG_GENERIC_ATOMIC64
976+#include <asm-generic/atomic64.h>
977+#endif
978+
979 #define ATOMIC_INIT(i) { (i) }
980
981 #ifdef __KERNEL__
982
983+#ifdef CONFIG_THUMB2_KERNEL
984+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
985+#else
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
987+#endif
988+
989+#define _ASM_EXTABLE(from, to) \
990+" .pushsection __ex_table,\"a\"\n"\
991+" .align 3\n" \
992+" .long " #from ", " #to"\n" \
993+" .popsection"
994+
995 /*
996 * On ARM, ordinary assignment (str instruction) doesn't clear the local
997 * strex/ldrex monitor on some implementations. The reason we can use it for
998 * atomic_set() is the clrex or dummy strex done on every exception return.
999 */
1000 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1001+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1002+{
1003+ return ACCESS_ONCE(v->counter);
1004+}
1005 #define atomic_set(v,i) (((v)->counter) = (i))
1006+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1007+{
1008+ v->counter = i;
1009+}
1010
1011 #if __LINUX_ARM_ARCH__ >= 6
1012
1013@@ -38,26 +62,50 @@
1014 * to ensure that the update happens.
1015 */
1016
1017-#define ATOMIC_OP(op, c_op, asm_op) \
1018-static inline void atomic_##op(int i, atomic_t *v) \
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+#define __OVERFLOW_POST \
1021+ " bvc 3f\n" \
1022+ "2: " REFCOUNT_TRAP_INSN "\n"\
1023+ "3:\n"
1024+#define __OVERFLOW_POST_RETURN \
1025+ " bvc 3f\n" \
1026+" mov %0, %1\n" \
1027+ "2: " REFCOUNT_TRAP_INSN "\n"\
1028+ "3:\n"
1029+#define __OVERFLOW_EXTABLE \
1030+ "4:\n" \
1031+ _ASM_EXTABLE(2b, 4b)
1032+#else
1033+#define __OVERFLOW_POST
1034+#define __OVERFLOW_POST_RETURN
1035+#define __OVERFLOW_EXTABLE
1036+#endif
1037+
1038+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1039+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1040 { \
1041 unsigned long tmp; \
1042 int result; \
1043 \
1044 prefetchw(&v->counter); \
1045- __asm__ __volatile__("@ atomic_" #op "\n" \
1046+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1047 "1: ldrex %0, [%3]\n" \
1048 " " #asm_op " %0, %0, %4\n" \
1049+ post_op \
1050 " strex %1, %0, [%3]\n" \
1051 " teq %1, #0\n" \
1052-" bne 1b" \
1053+" bne 1b\n" \
1054+ extable \
1055 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1056 : "r" (&v->counter), "Ir" (i) \
1057 : "cc"); \
1058 } \
1059
1060-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1061-static inline int atomic_##op##_return(int i, atomic_t *v) \
1062+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
1063+ __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1064+
1065+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1066+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1067 { \
1068 unsigned long tmp; \
1069 int result; \
1070@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1071 smp_mb(); \
1072 prefetchw(&v->counter); \
1073 \
1074- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1075+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1076 "1: ldrex %0, [%3]\n" \
1077 " " #asm_op " %0, %0, %4\n" \
1078+ post_op \
1079 " strex %1, %0, [%3]\n" \
1080 " teq %1, #0\n" \
1081-" bne 1b" \
1082+" bne 1b\n" \
1083+ extable \
1084 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1085 : "r" (&v->counter), "Ir" (i) \
1086 : "cc"); \
1087@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1088 return result; \
1089 }
1090
1091+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
1092+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1093+
1094 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1095 {
1096 int oldval;
1097@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 __asm__ __volatile__ ("@ atomic_add_unless\n"
1099 "1: ldrex %0, [%4]\n"
1100 " teq %0, %5\n"
1101-" beq 2f\n"
1102-" add %1, %0, %6\n"
1103+" beq 4f\n"
1104+" adds %1, %0, %6\n"
1105+
1106+#ifdef CONFIG_PAX_REFCOUNT
1107+" bvc 3f\n"
1108+"2: " REFCOUNT_TRAP_INSN "\n"
1109+"3:\n"
1110+#endif
1111+
1112 " strex %2, %1, [%4]\n"
1113 " teq %2, #0\n"
1114 " bne 1b\n"
1115-"2:"
1116+"4:"
1117+
1118+#ifdef CONFIG_PAX_REFCOUNT
1119+ _ASM_EXTABLE(2b, 4b)
1120+#endif
1121+
1122 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1123 : "r" (&v->counter), "r" (u), "r" (a)
1124 : "cc");
1125@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1126 return oldval;
1127 }
1128
1129+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1130+{
1131+ unsigned long oldval, res;
1132+
1133+ smp_mb();
1134+
1135+ do {
1136+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1137+ "ldrex %1, [%3]\n"
1138+ "mov %0, #0\n"
1139+ "teq %1, %4\n"
1140+ "strexeq %0, %5, [%3]\n"
1141+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1142+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1143+ : "cc");
1144+ } while (res);
1145+
1146+ smp_mb();
1147+
1148+ return oldval;
1149+}
1150+
1151 #else /* ARM_ARCH_6 */
1152
1153 #ifdef CONFIG_SMP
1154 #error SMP not supported on pre-ARMv6 CPUs
1155 #endif
1156
1157-#define ATOMIC_OP(op, c_op, asm_op) \
1158-static inline void atomic_##op(int i, atomic_t *v) \
1159+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1160+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1161 { \
1162 unsigned long flags; \
1163 \
1164@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1165 raw_local_irq_restore(flags); \
1166 } \
1167
1168-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1169-static inline int atomic_##op##_return(int i, atomic_t *v) \
1170+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1171+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1172+
1173+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1174+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1175 { \
1176 unsigned long flags; \
1177 int val; \
1178@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1179 return val; \
1180 }
1181
1182+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1183+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1184+
1185 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 {
1187 int ret;
1188@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1189 return ret;
1190 }
1191
1192+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1193+{
1194+ return atomic_cmpxchg((atomic_t *)v, old, new);
1195+}
1196+
1197 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1198 {
1199 int c, old;
1200@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1201
1202 #undef ATOMIC_OPS
1203 #undef ATOMIC_OP_RETURN
1204+#undef __ATOMIC_OP_RETURN
1205 #undef ATOMIC_OP
1206+#undef __ATOMIC_OP
1207
1208 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1209+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1210+{
1211+ return xchg(&v->counter, new);
1212+}
1213
1214 #define atomic_inc(v) atomic_add(1, v)
1215+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1216+{
1217+ atomic_add_unchecked(1, v);
1218+}
1219 #define atomic_dec(v) atomic_sub(1, v)
1220+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1221+{
1222+ atomic_sub_unchecked(1, v);
1223+}
1224
1225 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1226+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1227+{
1228+ return atomic_add_return_unchecked(1, v) == 0;
1229+}
1230 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1231 #define atomic_inc_return(v) (atomic_add_return(1, v))
1232+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1233+{
1234+ return atomic_add_return_unchecked(1, v);
1235+}
1236 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1237 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1238
1239@@ -216,6 +336,14 @@ typedef struct {
1240 long long counter;
1241 } atomic64_t;
1242
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+typedef struct {
1245+ long long counter;
1246+} atomic64_unchecked_t;
1247+#else
1248+typedef atomic64_t atomic64_unchecked_t;
1249+#endif
1250+
1251 #define ATOMIC64_INIT(i) { (i) }
1252
1253 #ifdef CONFIG_ARM_LPAE
1254@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1255 return result;
1256 }
1257
1258+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1259+{
1260+ long long result;
1261+
1262+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1263+" ldrd %0, %H0, [%1]"
1264+ : "=&r" (result)
1265+ : "r" (&v->counter), "Qo" (v->counter)
1266+ );
1267+
1268+ return result;
1269+}
1270+
1271 static inline void atomic64_set(atomic64_t *v, long long i)
1272 {
1273 __asm__ __volatile__("@ atomic64_set\n"
1274@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1275 : "r" (&v->counter), "r" (i)
1276 );
1277 }
1278+
1279+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1280+{
1281+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1282+" strd %2, %H2, [%1]"
1283+ : "=Qo" (v->counter)
1284+ : "r" (&v->counter), "r" (i)
1285+ );
1286+}
1287 #else
1288 static inline long long atomic64_read(const atomic64_t *v)
1289 {
1290@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1291 return result;
1292 }
1293
1294+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1295+{
1296+ long long result;
1297+
1298+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1299+" ldrexd %0, %H0, [%1]"
1300+ : "=&r" (result)
1301+ : "r" (&v->counter), "Qo" (v->counter)
1302+ );
1303+
1304+ return result;
1305+}
1306+
1307 static inline void atomic64_set(atomic64_t *v, long long i)
1308 {
1309 long long tmp;
1310@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1311 : "r" (&v->counter), "r" (i)
1312 : "cc");
1313 }
1314+
1315+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1316+{
1317+ long long tmp;
1318+
1319+ prefetchw(&v->counter);
1320+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1321+"1: ldrexd %0, %H0, [%2]\n"
1322+" strexd %0, %3, %H3, [%2]\n"
1323+" teq %0, #0\n"
1324+" bne 1b"
1325+ : "=&r" (tmp), "=Qo" (v->counter)
1326+ : "r" (&v->counter), "r" (i)
1327+ : "cc");
1328+}
1329 #endif
1330
1331-#define ATOMIC64_OP(op, op1, op2) \
1332-static inline void atomic64_##op(long long i, atomic64_t *v) \
1333+#undef __OVERFLOW_POST_RETURN
1334+#define __OVERFLOW_POST_RETURN \
1335+ " bvc 3f\n" \
1336+" mov %0, %1\n" \
1337+" mov %H0, %H1\n" \
1338+ "2: " REFCOUNT_TRAP_INSN "\n"\
1339+ "3:\n"
1340+
1341+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1342+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1343 { \
1344 long long result; \
1345 unsigned long tmp; \
1346 \
1347 prefetchw(&v->counter); \
1348- __asm__ __volatile__("@ atomic64_" #op "\n" \
1349+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1350 "1: ldrexd %0, %H0, [%3]\n" \
1351 " " #op1 " %Q0, %Q0, %Q4\n" \
1352 " " #op2 " %R0, %R0, %R4\n" \
1353+ post_op \
1354 " strexd %1, %0, %H0, [%3]\n" \
1355 " teq %1, #0\n" \
1356-" bne 1b" \
1357+" bne 1b\n" \
1358+ extable \
1359 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1360 : "r" (&v->counter), "r" (i) \
1361 : "cc"); \
1362 } \
1363
1364-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1365-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1366+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
1367+ __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1368+
1369+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1370+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1371 { \
1372 long long result; \
1373 unsigned long tmp; \
1374@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1375 smp_mb(); \
1376 prefetchw(&v->counter); \
1377 \
1378- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1379+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1380 "1: ldrexd %0, %H0, [%3]\n" \
1381 " " #op1 " %Q0, %Q0, %Q4\n" \
1382 " " #op2 " %R0, %R0, %R4\n" \
1383+ post_op \
1384 " strexd %1, %0, %H0, [%3]\n" \
1385 " teq %1, #0\n" \
1386-" bne 1b" \
1387+" bne 1b\n" \
1388+ extable \
1389 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1390 : "r" (&v->counter), "r" (i) \
1391 : "cc"); \
1392@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1393 return result; \
1394 }
1395
1396+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
1397+ __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1398+
1399 #define ATOMIC64_OPS(op, op1, op2) \
1400 ATOMIC64_OP(op, op1, op2) \
1401 ATOMIC64_OP_RETURN(op, op1, op2)
1402@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1403
1404 #undef ATOMIC64_OPS
1405 #undef ATOMIC64_OP_RETURN
1406+#undef __ATOMIC64_OP_RETURN
1407 #undef ATOMIC64_OP
1408+#undef __ATOMIC64_OP
1409+#undef __OVERFLOW_EXTABLE
1410+#undef __OVERFLOW_POST_RETURN
1411+#undef __OVERFLOW_POST
1412
1413 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1414 long long new)
1415@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 return oldval;
1417 }
1418
1419+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1420+ long long new)
1421+{
1422+ long long oldval;
1423+ unsigned long res;
1424+
1425+ smp_mb();
1426+
1427+ do {
1428+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1429+ "ldrexd %1, %H1, [%3]\n"
1430+ "mov %0, #0\n"
1431+ "teq %1, %4\n"
1432+ "teqeq %H1, %H4\n"
1433+ "strexdeq %0, %5, %H5, [%3]"
1434+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1435+ : "r" (&ptr->counter), "r" (old), "r" (new)
1436+ : "cc");
1437+ } while (res);
1438+
1439+ smp_mb();
1440+
1441+ return oldval;
1442+}
1443+
1444 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 {
1446 long long result;
1447@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1448 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1449 {
1450 long long result;
1451- unsigned long tmp;
1452+ u64 tmp;
1453
1454 smp_mb();
1455 prefetchw(&v->counter);
1456
1457 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1458-"1: ldrexd %0, %H0, [%3]\n"
1459-" subs %Q0, %Q0, #1\n"
1460-" sbc %R0, %R0, #0\n"
1461+"1: ldrexd %1, %H1, [%3]\n"
1462+" subs %Q0, %Q1, #1\n"
1463+" sbcs %R0, %R1, #0\n"
1464+
1465+#ifdef CONFIG_PAX_REFCOUNT
1466+" bvc 3f\n"
1467+" mov %Q0, %Q1\n"
1468+" mov %R0, %R1\n"
1469+"2: " REFCOUNT_TRAP_INSN "\n"
1470+"3:\n"
1471+#endif
1472+
1473 " teq %R0, #0\n"
1474-" bmi 2f\n"
1475+" bmi 4f\n"
1476 " strexd %1, %0, %H0, [%3]\n"
1477 " teq %1, #0\n"
1478 " bne 1b\n"
1479-"2:"
1480+"4:\n"
1481+
1482+#ifdef CONFIG_PAX_REFCOUNT
1483+ _ASM_EXTABLE(2b, 4b)
1484+#endif
1485+
1486 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1487 : "r" (&v->counter)
1488 : "cc");
1489@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1490 " teq %0, %5\n"
1491 " teqeq %H0, %H5\n"
1492 " moveq %1, #0\n"
1493-" beq 2f\n"
1494+" beq 4f\n"
1495 " adds %Q0, %Q0, %Q6\n"
1496-" adc %R0, %R0, %R6\n"
1497+" adcs %R0, %R0, %R6\n"
1498+
1499+#ifdef CONFIG_PAX_REFCOUNT
1500+" bvc 3f\n"
1501+"2: " REFCOUNT_TRAP_INSN "\n"
1502+"3:\n"
1503+#endif
1504+
1505 " strexd %2, %0, %H0, [%4]\n"
1506 " teq %2, #0\n"
1507 " bne 1b\n"
1508-"2:"
1509+"4:\n"
1510+
1511+#ifdef CONFIG_PAX_REFCOUNT
1512+ _ASM_EXTABLE(2b, 4b)
1513+#endif
1514+
1515 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1516 : "r" (&v->counter), "r" (u), "r" (a)
1517 : "cc");
1518@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1519
1520 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1521 #define atomic64_inc(v) atomic64_add(1LL, (v))
1522+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1523 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1524+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1525 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1526 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1527 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1528+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1529 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1530 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1531 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1532diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1533index d2f81e6..3c4dba5 100644
1534--- a/arch/arm/include/asm/barrier.h
1535+++ b/arch/arm/include/asm/barrier.h
1536@@ -67,7 +67,7 @@
1537 do { \
1538 compiletime_assert_atomic_type(*p); \
1539 smp_mb(); \
1540- ACCESS_ONCE(*p) = (v); \
1541+ ACCESS_ONCE_RW(*p) = (v); \
1542 } while (0)
1543
1544 #define smp_load_acquire(p) \
1545diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1546index 75fe66b..ba3dee4 100644
1547--- a/arch/arm/include/asm/cache.h
1548+++ b/arch/arm/include/asm/cache.h
1549@@ -4,8 +4,10 @@
1550 #ifndef __ASMARM_CACHE_H
1551 #define __ASMARM_CACHE_H
1552
1553+#include <linux/const.h>
1554+
1555 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1558
1559 /*
1560 * Memory returned by kmalloc() may be used for DMA, so we must make
1561@@ -24,5 +26,6 @@
1562 #endif
1563
1564 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1565+#define __read_only __attribute__ ((__section__(".data..read_only")))
1566
1567 #endif
1568diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1569index 2d46862..a35415b 100644
1570--- a/arch/arm/include/asm/cacheflush.h
1571+++ b/arch/arm/include/asm/cacheflush.h
1572@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1573 void (*dma_unmap_area)(const void *, size_t, int);
1574
1575 void (*dma_flush_range)(const void *, const void *);
1576-};
1577+} __no_const;
1578
1579 /*
1580 * Select the calling method
1581diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1582index 5233151..87a71fa 100644
1583--- a/arch/arm/include/asm/checksum.h
1584+++ b/arch/arm/include/asm/checksum.h
1585@@ -37,7 +37,19 @@ __wsum
1586 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1587
1588 __wsum
1589-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1590+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1591+
1592+static inline __wsum
1593+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1594+{
1595+ __wsum ret;
1596+ pax_open_userland();
1597+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1598+ pax_close_userland();
1599+ return ret;
1600+}
1601+
1602+
1603
1604 /*
1605 * Fold a partial checksum without adding pseudo headers
1606diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1607index abb2c37..96db950 100644
1608--- a/arch/arm/include/asm/cmpxchg.h
1609+++ b/arch/arm/include/asm/cmpxchg.h
1610@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1611
1612 #define xchg(ptr,x) \
1613 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1614+#define xchg_unchecked(ptr,x) \
1615+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616
1617 #include <asm-generic/cmpxchg-local.h>
1618
1619diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1620index 6ddbe44..b5e38b1a 100644
1621--- a/arch/arm/include/asm/domain.h
1622+++ b/arch/arm/include/asm/domain.h
1623@@ -48,18 +48,37 @@
1624 * Domain types
1625 */
1626 #define DOMAIN_NOACCESS 0
1627-#define DOMAIN_CLIENT 1
1628 #ifdef CONFIG_CPU_USE_DOMAINS
1629+#define DOMAIN_USERCLIENT 1
1630+#define DOMAIN_KERNELCLIENT 1
1631 #define DOMAIN_MANAGER 3
1632+#define DOMAIN_VECTORS DOMAIN_USER
1633 #else
1634+
1635+#ifdef CONFIG_PAX_KERNEXEC
1636 #define DOMAIN_MANAGER 1
1637+#define DOMAIN_KERNEXEC 3
1638+#else
1639+#define DOMAIN_MANAGER 1
1640+#endif
1641+
1642+#ifdef CONFIG_PAX_MEMORY_UDEREF
1643+#define DOMAIN_USERCLIENT 0
1644+#define DOMAIN_UDEREF 1
1645+#define DOMAIN_VECTORS DOMAIN_KERNEL
1646+#else
1647+#define DOMAIN_USERCLIENT 1
1648+#define DOMAIN_VECTORS DOMAIN_USER
1649+#endif
1650+#define DOMAIN_KERNELCLIENT 1
1651+
1652 #endif
1653
1654 #define domain_val(dom,type) ((type) << (2*(dom)))
1655
1656 #ifndef __ASSEMBLY__
1657
1658-#ifdef CONFIG_CPU_USE_DOMAINS
1659+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1660 static inline void set_domain(unsigned val)
1661 {
1662 asm volatile(
1663@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1664 isb();
1665 }
1666
1667-#define modify_domain(dom,type) \
1668- do { \
1669- struct thread_info *thread = current_thread_info(); \
1670- unsigned int domain = thread->cpu_domain; \
1671- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1672- thread->cpu_domain = domain | domain_val(dom, type); \
1673- set_domain(thread->cpu_domain); \
1674- } while (0)
1675-
1676+extern void modify_domain(unsigned int dom, unsigned int type);
1677 #else
1678 static inline void set_domain(unsigned val) { }
1679 static inline void modify_domain(unsigned dom, unsigned type) { }
1680diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1681index afb9caf..9a0bac0 100644
1682--- a/arch/arm/include/asm/elf.h
1683+++ b/arch/arm/include/asm/elf.h
1684@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1685 the loader. We need to make sure that it is out of the way of the program
1686 that it will "exec", and that there is sufficient room for the brk. */
1687
1688-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1689+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1690+
1691+#ifdef CONFIG_PAX_ASLR
1692+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1693+
1694+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1695+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1696+#endif
1697
1698 /* When the program starts, a1 contains a pointer to a function to be
1699 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1700@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1701 extern void elf_set_personality(const struct elf32_hdr *);
1702 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1703
1704-struct mm_struct;
1705-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1706-#define arch_randomize_brk arch_randomize_brk
1707-
1708 #ifdef CONFIG_MMU
1709 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1710 struct linux_binprm;
1711diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1712index de53547..52b9a28 100644
1713--- a/arch/arm/include/asm/fncpy.h
1714+++ b/arch/arm/include/asm/fncpy.h
1715@@ -81,7 +81,9 @@
1716 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1717 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1718 \
1719+ pax_open_kernel(); \
1720 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1721+ pax_close_kernel(); \
1722 flush_icache_range((unsigned long)(dest_buf), \
1723 (unsigned long)(dest_buf) + (size)); \
1724 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 891a56b..48f337e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -36,7 +36,7 @@ struct outer_cache_fns {
1842
1843 /* This is an ARM L2C thing */
1844 void (*write_sec)(unsigned long, unsigned);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index f027941..f36ce30 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -126,6 +126,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a31ecdad..95e98d4 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -81,6 +81,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -92,10 +93,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index d5cac54..906ea3e 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be a 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index d890e41..3921292 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -78,9 +78,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 .restart_block = { \
2125 .fn = do_no_restart_syscall, \
2126 }, \
2127@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* within 8 bits of TIF_SYSCALL_TRACE
2133+ * to meet flexible second operand requirements
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index 4767eb9..bf00668 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a,b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x,p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x,p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check(x,p); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x,p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x,p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check(x,p); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2260 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x,ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x),(ptr),__gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x,ptr,err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x),(ptr),err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x,ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x),(ptr),__pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x,ptr,err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x),(ptr),err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 2f5555d..d493c91 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -47,6 +47,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -89,11 +170,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -478,7 +575,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -512,11 +611,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here to avoid a performance hit of wrapping the code above
2663+ * that directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 664eee8..f470938 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -437,7 +437,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index bea7db9..a210d10 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 5038960..4aa71d8 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index fdfa3a7..5d208b8 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -207,6 +207,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -220,7 +221,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f73891b..cf3004e 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -28,7 +28,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index e55408e..14d9998 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3058 asm("mrc p15, 0, %0, c0, c1, 4"
3059 : "=r" (mmfr0));
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 8aa6f1b..0899e08 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index 0b0d58a..988cb45 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 /* See rational for this in __copy_to_user() above. */
3436 if (n < 64)
3437diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3438index ce25e85..3dd7850 100644
3439--- a/arch/arm/mach-at91/setup.c
3440+++ b/arch/arm/mach-at91/setup.c
3441@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3442
3443 desc->pfn = __phys_to_pfn(base);
3444 desc->length = length;
3445- desc->type = MT_MEMORY_RWX_NONCACHED;
3446+ desc->type = MT_MEMORY_RW_NONCACHED;
3447
3448 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3449 base, length, desc->virtual);
3450diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3451index f8e7dcd..17ee921 100644
3452--- a/arch/arm/mach-exynos/suspend.c
3453+++ b/arch/arm/mach-exynos/suspend.c
3454@@ -18,6 +18,7 @@
3455 #include <linux/syscore_ops.h>
3456 #include <linux/cpu_pm.h>
3457 #include <linux/io.h>
3458+#include <linux/irq.h>
3459 #include <linux/irqchip/arm-gic.h>
3460 #include <linux/err.h>
3461 #include <linux/regulator/machine.h>
3462@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
3463 tmp |= pm_data->wake_disable_mask;
3464 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3465
3466- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3467- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3468+ pax_open_kernel();
3469+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3470+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3471+ pax_close_kernel();
3472
3473 register_syscore_ops(&exynos_pm_syscore_ops);
3474 suspend_set_ops(&exynos_suspend_ops);
3475diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3476index 7f352de..6dc0929 100644
3477--- a/arch/arm/mach-keystone/keystone.c
3478+++ b/arch/arm/mach-keystone/keystone.c
3479@@ -27,7 +27,7 @@
3480
3481 #include "keystone.h"
3482
3483-static struct notifier_block platform_nb;
3484+static notifier_block_no_const platform_nb;
3485 static unsigned long keystone_dma_pfn_offset __read_mostly;
3486
3487 static int keystone_platform_notifier(struct notifier_block *nb,
3488diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3489index ccef880..5dfad80 100644
3490--- a/arch/arm/mach-mvebu/coherency.c
3491+++ b/arch/arm/mach-mvebu/coherency.c
3492@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3493
3494 /*
3495 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3496- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3497+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3498 * is needed as a workaround for a deadlock issue between the PCIe
3499 * interface and the cache controller.
3500 */
3501@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3502 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3503
3504 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3505- mtype = MT_UNCACHED;
3506+ mtype = MT_UNCACHED_RW;
3507
3508 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3509 }
3510diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3511index b6443a4..20a0b74 100644
3512--- a/arch/arm/mach-omap2/board-n8x0.c
3513+++ b/arch/arm/mach-omap2/board-n8x0.c
3514@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3515 }
3516 #endif
3517
3518-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3519+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3520 .late_init = n8x0_menelaus_late_init,
3521 };
3522
3523diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524index 79f49d9..70bf184 100644
3525--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3526+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3527@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3528 void (*resume)(void);
3529 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3530 void (*hotplug_restart)(void);
3531-};
3532+} __no_const;
3533
3534 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3535 static struct powerdomain *mpuss_pd;
3536@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3537 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3538 {}
3539
3540-struct cpu_pm_ops omap_pm_ops = {
3541+static struct cpu_pm_ops omap_pm_ops __read_only = {
3542 .finish_suspend = default_finish_suspend,
3543 .resume = dummy_cpu_resume,
3544 .scu_prepare = dummy_scu_prepare,
3545diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3546index 5305ec7..6d74045 100644
3547--- a/arch/arm/mach-omap2/omap-smp.c
3548+++ b/arch/arm/mach-omap2/omap-smp.c
3549@@ -19,6 +19,7 @@
3550 #include <linux/device.h>
3551 #include <linux/smp.h>
3552 #include <linux/io.h>
3553+#include <linux/irq.h>
3554 #include <linux/irqchip/arm-gic.h>
3555
3556 #include <asm/smp_scu.h>
3557diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3558index f961c46..4a453dc 100644
3559--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3560+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3561@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3562 return NOTIFY_OK;
3563 }
3564
3565-static struct notifier_block __refdata irq_hotplug_notifier = {
3566+static struct notifier_block irq_hotplug_notifier = {
3567 .notifier_call = irq_cpu_hotplug_notify,
3568 };
3569
3570diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3571index be9541e..821805f 100644
3572--- a/arch/arm/mach-omap2/omap_device.c
3573+++ b/arch/arm/mach-omap2/omap_device.c
3574@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3575 struct platform_device __init *omap_device_build(const char *pdev_name,
3576 int pdev_id,
3577 struct omap_hwmod *oh,
3578- void *pdata, int pdata_len)
3579+ const void *pdata, int pdata_len)
3580 {
3581 struct omap_hwmod *ohs[] = { oh };
3582
3583@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3584 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3585 int pdev_id,
3586 struct omap_hwmod **ohs,
3587- int oh_cnt, void *pdata,
3588+ int oh_cnt, const void *pdata,
3589 int pdata_len)
3590 {
3591 int ret = -ENOMEM;
3592diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3593index 78c02b3..c94109a 100644
3594--- a/arch/arm/mach-omap2/omap_device.h
3595+++ b/arch/arm/mach-omap2/omap_device.h
3596@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3597 /* Core code interface */
3598
3599 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3600- struct omap_hwmod *oh, void *pdata,
3601+ struct omap_hwmod *oh, const void *pdata,
3602 int pdata_len);
3603
3604 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3605 struct omap_hwmod **oh, int oh_cnt,
3606- void *pdata, int pdata_len);
3607+ const void *pdata, int pdata_len);
3608
3609 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3610 struct omap_hwmod **ohs, int oh_cnt);
3611diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3612index 9025fff..3555702 100644
3613--- a/arch/arm/mach-omap2/omap_hwmod.c
3614+++ b/arch/arm/mach-omap2/omap_hwmod.c
3615@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3616 int (*init_clkdm)(struct omap_hwmod *oh);
3617 void (*update_context_lost)(struct omap_hwmod *oh);
3618 int (*get_context_lost)(struct omap_hwmod *oh);
3619-};
3620+} __no_const;
3621
3622 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3623-static struct omap_hwmod_soc_ops soc_ops;
3624+static struct omap_hwmod_soc_ops soc_ops __read_only;
3625
3626 /* omap_hwmod_list contains all registered struct omap_hwmods */
3627 static LIST_HEAD(omap_hwmod_list);
3628diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3629index 95fee54..cfa9cf1 100644
3630--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3631+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3632@@ -10,6 +10,7 @@
3633
3634 #include <linux/kernel.h>
3635 #include <linux/init.h>
3636+#include <asm/pgtable.h>
3637
3638 #include "powerdomain.h"
3639
3640@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3641
3642 void __init am43xx_powerdomains_init(void)
3643 {
3644- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3645+ pax_open_kernel();
3646+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3647+ pax_close_kernel();
3648 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3649 pwrdm_register_pwrdms(powerdomains_am43xx);
3650 pwrdm_complete_init();
3651diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3652index ff0a68c..b312aa0 100644
3653--- a/arch/arm/mach-omap2/wd_timer.c
3654+++ b/arch/arm/mach-omap2/wd_timer.c
3655@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3656 struct omap_hwmod *oh;
3657 char *oh_name = "wd_timer2";
3658 char *dev_name = "omap_wdt";
3659- struct omap_wd_timer_platform_data pdata;
3660+ static struct omap_wd_timer_platform_data pdata = {
3661+ .read_reset_sources = prm_read_reset_sources
3662+ };
3663
3664 if (!cpu_class_is_omap2() || of_have_populated_dt())
3665 return 0;
3666@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3667 return -EINVAL;
3668 }
3669
3670- pdata.read_reset_sources = prm_read_reset_sources;
3671-
3672 pdev = omap_device_build(dev_name, id, oh, &pdata,
3673 sizeof(struct omap_wd_timer_platform_data));
3674 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3675diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3676index 4f25a7c..a81be85 100644
3677--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3678+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3679@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3680 bool entered_lp2 = false;
3681
3682 if (tegra_pending_sgi())
3683- ACCESS_ONCE(abort_flag) = true;
3684+ ACCESS_ONCE_RW(abort_flag) = true;
3685
3686 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3687
3688diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3689index ab95f53..4b977a7 100644
3690--- a/arch/arm/mach-tegra/irq.c
3691+++ b/arch/arm/mach-tegra/irq.c
3692@@ -20,6 +20,7 @@
3693 #include <linux/cpu_pm.h>
3694 #include <linux/interrupt.h>
3695 #include <linux/io.h>
3696+#include <linux/irq.h>
3697 #include <linux/irqchip/arm-gic.h>
3698 #include <linux/irq.h>
3699 #include <linux/kernel.h>
3700diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3701index 2cb587b..6ddfebf 100644
3702--- a/arch/arm/mach-ux500/pm.c
3703+++ b/arch/arm/mach-ux500/pm.c
3704@@ -10,6 +10,7 @@
3705 */
3706
3707 #include <linux/kernel.h>
3708+#include <linux/irq.h>
3709 #include <linux/irqchip/arm-gic.h>
3710 #include <linux/delay.h>
3711 #include <linux/io.h>
3712diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3713index 2dea8b5..6499da2 100644
3714--- a/arch/arm/mach-ux500/setup.h
3715+++ b/arch/arm/mach-ux500/setup.h
3716@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3717 .type = MT_DEVICE, \
3718 }
3719
3720-#define __MEM_DEV_DESC(x, sz) { \
3721- .virtual = IO_ADDRESS(x), \
3722- .pfn = __phys_to_pfn(x), \
3723- .length = sz, \
3724- .type = MT_MEMORY_RWX, \
3725-}
3726-
3727 extern struct smp_operations ux500_smp_ops;
3728 extern void ux500_cpu_die(unsigned int cpu);
3729
3730diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3731index 52d768f..5f93180 100644
3732--- a/arch/arm/mach-zynq/platsmp.c
3733+++ b/arch/arm/mach-zynq/platsmp.c
3734@@ -24,6 +24,7 @@
3735 #include <linux/io.h>
3736 #include <asm/cacheflush.h>
3737 #include <asm/smp_scu.h>
3738+#include <linux/irq.h>
3739 #include <linux/irqchip/arm-gic.h>
3740 #include "common.h"
3741
3742diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3743index c43c714..4f8f7b9 100644
3744--- a/arch/arm/mm/Kconfig
3745+++ b/arch/arm/mm/Kconfig
3746@@ -446,6 +446,7 @@ config CPU_32v5
3747
3748 config CPU_32v6
3749 bool
3750+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3751 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3752
3753 config CPU_32v6K
3754@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3755
3756 config CPU_USE_DOMAINS
3757 bool
3758+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3759 help
3760 This option enables or disables the use of domain switching
3761 via the set_fs() function.
3762@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3763
3764 config KUSER_HELPERS
3765 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3766- depends on MMU
3767+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3768 default y
3769 help
3770 Warning: disabling this option may break user programs.
3771@@ -812,7 +814,7 @@ config KUSER_HELPERS
3772 See Documentation/arm/kernel_user_helpers.txt for details.
3773
3774 However, the fixed address nature of these helpers can be used
3775- by ROP (return orientated programming) authors when creating
3776+ by ROP (Return Oriented Programming) authors when creating
3777 exploits.
3778
3779 If all of the binaries and libraries which run on your platform
3780diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3781index 2c0c541..4585df9 100644
3782--- a/arch/arm/mm/alignment.c
3783+++ b/arch/arm/mm/alignment.c
3784@@ -216,10 +216,12 @@ union offset_union {
3785 #define __get16_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 8 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val |= v << ((BE) ? 0 : 8); \
3793+ pax_close_userland(); \
3794 if (err) \
3795 goto fault; \
3796 } while (0)
3797@@ -233,6 +235,7 @@ union offset_union {
3798 #define __get32_unaligned_check(ins,val,addr) \
3799 do { \
3800 unsigned int err = 0, v, a = addr; \
3801+ pax_open_userland(); \
3802 __get8_unaligned_check(ins,v,a,err); \
3803 val = v << ((BE) ? 24 : 0); \
3804 __get8_unaligned_check(ins,v,a,err); \
3805@@ -241,6 +244,7 @@ union offset_union {
3806 val |= v << ((BE) ? 8 : 16); \
3807 __get8_unaligned_check(ins,v,a,err); \
3808 val |= v << ((BE) ? 0 : 24); \
3809+ pax_close_userland(); \
3810 if (err) \
3811 goto fault; \
3812 } while (0)
3813@@ -254,6 +258,7 @@ union offset_union {
3814 #define __put16_unaligned_check(ins,val,addr) \
3815 do { \
3816 unsigned int err = 0, v = val, a = addr; \
3817+ pax_open_userland(); \
3818 __asm__( FIRST_BYTE_16 \
3819 ARM( "1: "ins" %1, [%2], #1\n" ) \
3820 THUMB( "1: "ins" %1, [%2]\n" ) \
3821@@ -273,6 +278,7 @@ union offset_union {
3822 " .popsection\n" \
3823 : "=r" (err), "=&r" (v), "=&r" (a) \
3824 : "0" (err), "1" (v), "2" (a)); \
3825+ pax_close_userland(); \
3826 if (err) \
3827 goto fault; \
3828 } while (0)
3829@@ -286,6 +292,7 @@ union offset_union {
3830 #define __put32_unaligned_check(ins,val,addr) \
3831 do { \
3832 unsigned int err = 0, v = val, a = addr; \
3833+ pax_open_userland(); \
3834 __asm__( FIRST_BYTE_32 \
3835 ARM( "1: "ins" %1, [%2], #1\n" ) \
3836 THUMB( "1: "ins" %1, [%2]\n" ) \
3837@@ -315,6 +322,7 @@ union offset_union {
3838 " .popsection\n" \
3839 : "=r" (err), "=&r" (v), "=&r" (a) \
3840 : "0" (err), "1" (v), "2" (a)); \
3841+ pax_close_userland(); \
3842 if (err) \
3843 goto fault; \
3844 } while (0)
3845diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3846index 5e65ca8..879e7b3 100644
3847--- a/arch/arm/mm/cache-l2x0.c
3848+++ b/arch/arm/mm/cache-l2x0.c
3849@@ -42,7 +42,7 @@ struct l2c_init_data {
3850 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3851 void (*save)(void __iomem *);
3852 struct outer_cache_fns outer_cache;
3853-};
3854+} __do_const;
3855
3856 #define CACHE_LINE_SIZE 32
3857
3858diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3859index 845769e..4278fd7 100644
3860--- a/arch/arm/mm/context.c
3861+++ b/arch/arm/mm/context.c
3862@@ -43,7 +43,7 @@
3863 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3864
3865 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3866-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3867+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3868 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3869
3870 static DEFINE_PER_CPU(atomic64_t, active_asids);
3871@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3872 {
3873 static u32 cur_idx = 1;
3874 u64 asid = atomic64_read(&mm->context.id);
3875- u64 generation = atomic64_read(&asid_generation);
3876+ u64 generation = atomic64_read_unchecked(&asid_generation);
3877
3878 if (asid != 0) {
3879 /*
3880@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3881 */
3882 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3883 if (asid == NUM_USER_ASIDS) {
3884- generation = atomic64_add_return(ASID_FIRST_VERSION,
3885+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3886 &asid_generation);
3887 flush_context(cpu);
3888 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3889@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3890 cpu_set_reserved_ttbr0();
3891
3892 asid = atomic64_read(&mm->context.id);
3893- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3894+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3895 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3896 goto switch_mm_fastpath;
3897
3898 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3899 /* Check that our ASID belongs to the current generation. */
3900 asid = atomic64_read(&mm->context.id);
3901- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3902+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3903 asid = new_context(mm, cpu);
3904 atomic64_set(&mm->context.id, asid);
3905 }
3906diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3907index a982dc3..2d9f5f7 100644
3908--- a/arch/arm/mm/fault.c
3909+++ b/arch/arm/mm/fault.c
3910@@ -25,6 +25,7 @@
3911 #include <asm/system_misc.h>
3912 #include <asm/system_info.h>
3913 #include <asm/tlbflush.h>
3914+#include <asm/sections.h>
3915
3916 #include "fault.h"
3917
3918@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3919 if (fixup_exception(regs))
3920 return;
3921
3922+#ifdef CONFIG_PAX_MEMORY_UDEREF
3923+ if (addr < TASK_SIZE) {
3924+ if (current->signal->curr_ip)
3925+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3926+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3927+ else
3928+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3929+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3930+ }
3931+#endif
3932+
3933+#ifdef CONFIG_PAX_KERNEXEC
3934+ if ((fsr & FSR_WRITE) &&
3935+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3936+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3937+ {
3938+ if (current->signal->curr_ip)
3939+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3940+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3941+ else
3942+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3943+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3944+ }
3945+#endif
3946+
3947 /*
3948 * No handler, we'll have to terminate things with extreme prejudice.
3949 */
3950@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3951 }
3952 #endif
3953
3954+#ifdef CONFIG_PAX_PAGEEXEC
3955+ if (fsr & FSR_LNX_PF) {
3956+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3957+ do_group_exit(SIGKILL);
3958+ }
3959+#endif
3960+
3961 tsk->thread.address = addr;
3962 tsk->thread.error_code = fsr;
3963 tsk->thread.trap_no = 14;
3964@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3965 }
3966 #endif /* CONFIG_MMU */
3967
3968+#ifdef CONFIG_PAX_PAGEEXEC
3969+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3970+{
3971+ long i;
3972+
3973+ printk(KERN_ERR "PAX: bytes at PC: ");
3974+ for (i = 0; i < 20; i++) {
3975+ unsigned char c;
3976+ if (get_user(c, (__force unsigned char __user *)pc+i))
3977+ printk(KERN_CONT "?? ");
3978+ else
3979+ printk(KERN_CONT "%02x ", c);
3980+ }
3981+ printk("\n");
3982+
3983+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3984+ for (i = -1; i < 20; i++) {
3985+ unsigned long c;
3986+ if (get_user(c, (__force unsigned long __user *)sp+i))
3987+ printk(KERN_CONT "???????? ");
3988+ else
3989+ printk(KERN_CONT "%08lx ", c);
3990+ }
3991+ printk("\n");
3992+}
3993+#endif
3994+
3995 /*
3996 * First Level Translation Fault Handler
3997 *
3998@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3999 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4000 struct siginfo info;
4001
4002+#ifdef CONFIG_PAX_MEMORY_UDEREF
4003+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4004+ if (current->signal->curr_ip)
4005+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4006+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4007+ else
4008+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4009+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4010+ goto die;
4011+ }
4012+#endif
4013+
4014 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4015 return;
4016
4017+die:
4018 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4019 inf->name, fsr, addr);
4020
4021@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4022 ifsr_info[nr].name = name;
4023 }
4024
4025+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4026+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4027+
4028 asmlinkage void __exception
4029 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4030 {
4031 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4032 struct siginfo info;
4033+ unsigned long pc = instruction_pointer(regs);
4034+
4035+ if (user_mode(regs)) {
4036+ unsigned long sigpage = current->mm->context.sigpage;
4037+
4038+ if (sigpage <= pc && pc < sigpage + 7*4) {
4039+ if (pc < sigpage + 3*4)
4040+ sys_sigreturn(regs);
4041+ else
4042+ sys_rt_sigreturn(regs);
4043+ return;
4044+ }
4045+ if (pc == 0xffff0f60UL) {
4046+ /*
4047+ * PaX: __kuser_cmpxchg64 emulation
4048+ */
4049+ // TODO
4050+ //regs->ARM_pc = regs->ARM_lr;
4051+ //return;
4052+ }
4053+ if (pc == 0xffff0fa0UL) {
4054+ /*
4055+ * PaX: __kuser_memory_barrier emulation
4056+ */
4057+ // dmb(); implied by the exception
4058+ regs->ARM_pc = regs->ARM_lr;
4059+ return;
4060+ }
4061+ if (pc == 0xffff0fc0UL) {
4062+ /*
4063+ * PaX: __kuser_cmpxchg emulation
4064+ */
4065+ // TODO
4066+ //long new;
4067+ //int op;
4068+
4069+ //op = FUTEX_OP_SET << 28;
4070+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4071+ //regs->ARM_r0 = old != new;
4072+ //regs->ARM_pc = regs->ARM_lr;
4073+ //return;
4074+ }
4075+ if (pc == 0xffff0fe0UL) {
4076+ /*
4077+ * PaX: __kuser_get_tls emulation
4078+ */
4079+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4080+ regs->ARM_pc = regs->ARM_lr;
4081+ return;
4082+ }
4083+ }
4084+
4085+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4086+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4087+ if (current->signal->curr_ip)
4088+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4089+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4090+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4091+ else
4092+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4093+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4094+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4095+ goto die;
4096+ }
4097+#endif
4098+
4099+#ifdef CONFIG_PAX_REFCOUNT
4100+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4101+#ifdef CONFIG_THUMB2_KERNEL
4102+ unsigned short bkpt;
4103+
4104+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4105+#else
4106+ unsigned int bkpt;
4107+
4108+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4109+#endif
4110+ current->thread.error_code = ifsr;
4111+ current->thread.trap_no = 0;
4112+ pax_report_refcount_overflow(regs);
4113+ fixup_exception(regs);
4114+ return;
4115+ }
4116+ }
4117+#endif
4118
4119 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4120 return;
4121
4122+die:
4123 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4124 inf->name, ifsr, addr);
4125
4126diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4127index cf08bdf..772656c 100644
4128--- a/arch/arm/mm/fault.h
4129+++ b/arch/arm/mm/fault.h
4130@@ -3,6 +3,7 @@
4131
4132 /*
4133 * Fault status register encodings. We steal bit 31 for our own purposes.
4134+ * Set when the FSR value is from an instruction fault.
4135 */
4136 #define FSR_LNX_PF (1 << 31)
4137 #define FSR_WRITE (1 << 11)
4138@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4139 }
4140 #endif
4141
4142+/* valid for LPAE and !LPAE */
4143+static inline int is_xn_fault(unsigned int fsr)
4144+{
4145+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4146+}
4147+
4148+static inline int is_domain_fault(unsigned int fsr)
4149+{
4150+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4151+}
4152+
4153 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4154 unsigned long search_exception_table(unsigned long addr);
4155
4156diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4157index 2495c8c..415b7fc 100644
4158--- a/arch/arm/mm/init.c
4159+++ b/arch/arm/mm/init.c
4160@@ -758,7 +758,46 @@ void free_tcmmem(void)
4161 {
4162 #ifdef CONFIG_HAVE_TCM
4163 extern char __tcm_start, __tcm_end;
4164+#endif
4165
4166+#ifdef CONFIG_PAX_KERNEXEC
4167+ unsigned long addr;
4168+ pgd_t *pgd;
4169+ pud_t *pud;
4170+ pmd_t *pmd;
4171+ int cpu_arch = cpu_architecture();
4172+ unsigned int cr = get_cr();
4173+
4174+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4175+ /* make pages tables, etc before .text NX */
4176+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4177+ pgd = pgd_offset_k(addr);
4178+ pud = pud_offset(pgd, addr);
4179+ pmd = pmd_offset(pud, addr);
4180+ __section_update(pmd, addr, PMD_SECT_XN);
4181+ }
4182+ /* make init NX */
4183+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4184+ pgd = pgd_offset_k(addr);
4185+ pud = pud_offset(pgd, addr);
4186+ pmd = pmd_offset(pud, addr);
4187+ __section_update(pmd, addr, PMD_SECT_XN);
4188+ }
4189+ /* make kernel code/rodata RX */
4190+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4191+ pgd = pgd_offset_k(addr);
4192+ pud = pud_offset(pgd, addr);
4193+ pmd = pmd_offset(pud, addr);
4194+#ifdef CONFIG_ARM_LPAE
4195+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4196+#else
4197+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4198+#endif
4199+ }
4200+ }
4201+#endif
4202+
4203+#ifdef CONFIG_HAVE_TCM
4204 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4205 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4206 #endif
4207diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4208index d1e5ad7..84dcbf2 100644
4209--- a/arch/arm/mm/ioremap.c
4210+++ b/arch/arm/mm/ioremap.c
4211@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4212 unsigned int mtype;
4213
4214 if (cached)
4215- mtype = MT_MEMORY_RWX;
4216+ mtype = MT_MEMORY_RX;
4217 else
4218- mtype = MT_MEMORY_RWX_NONCACHED;
4219+ mtype = MT_MEMORY_RX_NONCACHED;
4220
4221 return __arm_ioremap_caller(phys_addr, size, mtype,
4222 __builtin_return_address(0));
4223diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4224index 5e85ed3..b10a7ed 100644
4225--- a/arch/arm/mm/mmap.c
4226+++ b/arch/arm/mm/mmap.c
4227@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4228 struct vm_area_struct *vma;
4229 int do_align = 0;
4230 int aliasing = cache_is_vipt_aliasing();
4231+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4232 struct vm_unmapped_area_info info;
4233
4234 /*
4235@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4236 if (len > TASK_SIZE)
4237 return -ENOMEM;
4238
4239+#ifdef CONFIG_PAX_RANDMMAP
4240+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4241+#endif
4242+
4243 if (addr) {
4244 if (do_align)
4245 addr = COLOUR_ALIGN(addr, pgoff);
4246@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4247 addr = PAGE_ALIGN(addr);
4248
4249 vma = find_vma(mm, addr);
4250- if (TASK_SIZE - len >= addr &&
4251- (!vma || addr + len <= vma->vm_start))
4252+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4253 return addr;
4254 }
4255
4256@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4257 info.high_limit = TASK_SIZE;
4258 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4259 info.align_offset = pgoff << PAGE_SHIFT;
4260+ info.threadstack_offset = offset;
4261 return vm_unmapped_area(&info);
4262 }
4263
4264@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4265 unsigned long addr = addr0;
4266 int do_align = 0;
4267 int aliasing = cache_is_vipt_aliasing();
4268+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4269 struct vm_unmapped_area_info info;
4270
4271 /*
4272@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4273 return addr;
4274 }
4275
4276+#ifdef CONFIG_PAX_RANDMMAP
4277+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4278+#endif
4279+
4280 /* requesting a specific address */
4281 if (addr) {
4282 if (do_align)
4283@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4284 else
4285 addr = PAGE_ALIGN(addr);
4286 vma = find_vma(mm, addr);
4287- if (TASK_SIZE - len >= addr &&
4288- (!vma || addr + len <= vma->vm_start))
4289+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4290 return addr;
4291 }
4292
4293@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4294 info.high_limit = mm->mmap_base;
4295 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4296 info.align_offset = pgoff << PAGE_SHIFT;
4297+ info.threadstack_offset = offset;
4298 addr = vm_unmapped_area(&info);
4299
4300 /*
4301@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4302 {
4303 unsigned long random_factor = 0UL;
4304
4305+#ifdef CONFIG_PAX_RANDMMAP
4306+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4307+#endif
4308+
4309 /* 8 bits of randomness in 20 address space bits */
4310 if ((current->flags & PF_RANDOMIZE) &&
4311 !(current->personality & ADDR_NO_RANDOMIZE))
4312@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4313
4314 if (mmap_is_legacy()) {
4315 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4316+
4317+#ifdef CONFIG_PAX_RANDMMAP
4318+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4319+ mm->mmap_base += mm->delta_mmap;
4320+#endif
4321+
4322 mm->get_unmapped_area = arch_get_unmapped_area;
4323 } else {
4324 mm->mmap_base = mmap_base(random_factor);
4325+
4326+#ifdef CONFIG_PAX_RANDMMAP
4327+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4328+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4329+#endif
4330+
4331 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4332 }
4333 }
4334diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4335index 4e6ef89..21c27f2 100644
4336--- a/arch/arm/mm/mmu.c
4337+++ b/arch/arm/mm/mmu.c
4338@@ -41,6 +41,22 @@
4339 #include "mm.h"
4340 #include "tcm.h"
4341
4342+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4343+void modify_domain(unsigned int dom, unsigned int type)
4344+{
4345+ struct thread_info *thread = current_thread_info();
4346+ unsigned int domain = thread->cpu_domain;
4347+ /*
4348+ * DOMAIN_MANAGER might be defined to some other value,
4349+ * use the arch-defined constant
4350+ */
4351+ domain &= ~domain_val(dom, 3);
4352+ thread->cpu_domain = domain | domain_val(dom, type);
4353+ set_domain(thread->cpu_domain);
4354+}
4355+EXPORT_SYMBOL(modify_domain);
4356+#endif
4357+
4358 /*
4359 * empty_zero_page is a special page that is used for
4360 * zero-initialized data and COW.
4361@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4362 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4363 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4364
4365-static struct mem_type mem_types[] = {
4366+#ifdef CONFIG_PAX_KERNEXEC
4367+#define L_PTE_KERNEXEC L_PTE_RDONLY
4368+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4369+#else
4370+#define L_PTE_KERNEXEC L_PTE_DIRTY
4371+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4372+#endif
4373+
4374+static struct mem_type mem_types[] __read_only = {
4375 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4376 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4377 L_PTE_SHARED,
4378@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4379 .prot_sect = PROT_SECT_DEVICE,
4380 .domain = DOMAIN_IO,
4381 },
4382- [MT_UNCACHED] = {
4383+ [MT_UNCACHED_RW] = {
4384 .prot_pte = PROT_PTE_DEVICE,
4385 .prot_l1 = PMD_TYPE_TABLE,
4386 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4387 .domain = DOMAIN_IO,
4388 },
4389- [MT_CACHECLEAN] = {
4390- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4391+ [MT_CACHECLEAN_RO] = {
4392+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4393 .domain = DOMAIN_KERNEL,
4394 },
4395 #ifndef CONFIG_ARM_LPAE
4396- [MT_MINICLEAN] = {
4397- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4398+ [MT_MINICLEAN_RO] = {
4399+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4400 .domain = DOMAIN_KERNEL,
4401 },
4402 #endif
4403@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4404 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4405 L_PTE_RDONLY,
4406 .prot_l1 = PMD_TYPE_TABLE,
4407- .domain = DOMAIN_USER,
4408+ .domain = DOMAIN_VECTORS,
4409 },
4410 [MT_HIGH_VECTORS] = {
4411 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4412 L_PTE_USER | L_PTE_RDONLY,
4413 .prot_l1 = PMD_TYPE_TABLE,
4414- .domain = DOMAIN_USER,
4415+ .domain = DOMAIN_VECTORS,
4416 },
4417- [MT_MEMORY_RWX] = {
4418+ [__MT_MEMORY_RWX] = {
4419 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4420 .prot_l1 = PMD_TYPE_TABLE,
4421 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4422@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4423 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4424 .domain = DOMAIN_KERNEL,
4425 },
4426- [MT_ROM] = {
4427- .prot_sect = PMD_TYPE_SECT,
4428+ [MT_MEMORY_RX] = {
4429+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4430+ .prot_l1 = PMD_TYPE_TABLE,
4431+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4432+ .domain = DOMAIN_KERNEL,
4433+ },
4434+ [MT_ROM_RX] = {
4435+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4436 .domain = DOMAIN_KERNEL,
4437 },
4438- [MT_MEMORY_RWX_NONCACHED] = {
4439+ [MT_MEMORY_RW_NONCACHED] = {
4440 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4441 L_PTE_MT_BUFFERABLE,
4442 .prot_l1 = PMD_TYPE_TABLE,
4443 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4444 .domain = DOMAIN_KERNEL,
4445 },
4446+ [MT_MEMORY_RX_NONCACHED] = {
4447+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4448+ L_PTE_MT_BUFFERABLE,
4449+ .prot_l1 = PMD_TYPE_TABLE,
4450+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4451+ .domain = DOMAIN_KERNEL,
4452+ },
4453 [MT_MEMORY_RW_DTCM] = {
4454 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4455 L_PTE_XN,
4456@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4457 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4458 .domain = DOMAIN_KERNEL,
4459 },
4460- [MT_MEMORY_RWX_ITCM] = {
4461- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4462+ [MT_MEMORY_RX_ITCM] = {
4463+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4464 .prot_l1 = PMD_TYPE_TABLE,
4465+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4466 .domain = DOMAIN_KERNEL,
4467 },
4468 [MT_MEMORY_RW_SO] = {
4469@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4470 * Mark cache clean areas and XIP ROM read only
4471 * from SVC mode and no access from userspace.
4472 */
4473- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4474- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4475- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+#ifdef CONFIG_PAX_KERNEXEC
4478+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4479+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481+#endif
4482+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4483+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4484 #endif
4485
4486 /*
4487@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4488 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4489 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4490 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4491- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4492- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4493+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4494+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4495 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4496 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4497+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4498+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4499 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4501- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4502+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4503+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4504+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4505+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4506 }
4507 }
4508
4509@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4510 if (cpu_arch >= CPU_ARCH_ARMv6) {
4511 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4512 /* Non-cacheable Normal is XCB = 001 */
4513- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4514+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4515+ PMD_SECT_BUFFERED;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4517 PMD_SECT_BUFFERED;
4518 } else {
4519 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4520- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4521+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4522+ PMD_SECT_TEX(1);
4523+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4524 PMD_SECT_TEX(1);
4525 }
4526 } else {
4527- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4528+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4529+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4530 }
4531
4532 #ifdef CONFIG_ARM_LPAE
4533@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4534 user_pgprot |= PTE_EXT_PXN;
4535 #endif
4536
4537+ user_pgprot |= __supported_pte_mask;
4538+
4539 for (i = 0; i < 16; i++) {
4540 pteval_t v = pgprot_val(protection_map[i]);
4541 protection_map[i] = __pgprot(v | user_pgprot);
4542@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4543
4544 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4545 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4546- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4547- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4548+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4549+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4550 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4551 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4552+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4553+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4554 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4555- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4556- mem_types[MT_ROM].prot_sect |= cp->pmd;
4557+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4558+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4559+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4560
4561 switch (cp->pmd) {
4562 case PMD_SECT_WT:
4563- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4564+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4565 break;
4566 case PMD_SECT_WB:
4567 case PMD_SECT_WBWA:
4568- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4569+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4570 break;
4571 }
4572 pr_info("Memory policy: %sData cache %s\n",
4573@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4574 return;
4575 }
4576
4577- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4578+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4579 md->virtual >= PAGE_OFFSET &&
4580 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4581 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4582@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4583 * called function. This means you can't use any function or debugging
4584 * method which may touch any device, otherwise the kernel _will_ crash.
4585 */
4586+
4587+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4588+
4589 static void __init devicemaps_init(const struct machine_desc *mdesc)
4590 {
4591 struct map_desc map;
4592 unsigned long addr;
4593- void *vectors;
4594
4595- /*
4596- * Allocate the vector page early.
4597- */
4598- vectors = early_alloc(PAGE_SIZE * 2);
4599-
4600- early_trap_init(vectors);
4601+ early_trap_init(&vectors);
4602
4603 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4604 pmd_clear(pmd_off_k(addr));
4605@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4606 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4607 map.virtual = MODULES_VADDR;
4608 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4609- map.type = MT_ROM;
4610+ map.type = MT_ROM_RX;
4611 create_mapping(&map);
4612 #endif
4613
4614@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4615 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4616 map.virtual = FLUSH_BASE;
4617 map.length = SZ_1M;
4618- map.type = MT_CACHECLEAN;
4619+ map.type = MT_CACHECLEAN_RO;
4620 create_mapping(&map);
4621 #endif
4622 #ifdef FLUSH_BASE_MINICACHE
4623 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4624 map.virtual = FLUSH_BASE_MINICACHE;
4625 map.length = SZ_1M;
4626- map.type = MT_MINICLEAN;
4627+ map.type = MT_MINICLEAN_RO;
4628 create_mapping(&map);
4629 #endif
4630
4631@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4632 * location (0xffff0000). If we aren't using high-vectors, also
4633 * create a mapping at the low-vectors virtual address.
4634 */
4635- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4636+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4637 map.virtual = 0xffff0000;
4638 map.length = PAGE_SIZE;
4639 #ifdef CONFIG_KUSER_HELPERS
4640@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4641 static void __init map_lowmem(void)
4642 {
4643 struct memblock_region *reg;
4644+#ifndef CONFIG_PAX_KERNEXEC
4645 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4646 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4647+#endif
4648
4649 /* Map all the lowmem memory banks. */
4650 for_each_memblock(memory, reg) {
4651@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4652 if (start >= end)
4653 break;
4654
4655+#ifdef CONFIG_PAX_KERNEXEC
4656+ map.pfn = __phys_to_pfn(start);
4657+ map.virtual = __phys_to_virt(start);
4658+ map.length = end - start;
4659+
4660+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4661+ struct map_desc kernel;
4662+ struct map_desc initmap;
4663+
4664+ /* when freeing initmem we will make this RW */
4665+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4666+ initmap.virtual = (unsigned long)__init_begin;
4667+ initmap.length = _sdata - __init_begin;
4668+ initmap.type = __MT_MEMORY_RWX;
4669+ create_mapping(&initmap);
4670+
4671+ /* when freeing initmem we will make this RX */
4672+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4673+ kernel.virtual = (unsigned long)_stext;
4674+ kernel.length = __init_begin - _stext;
4675+ kernel.type = __MT_MEMORY_RWX;
4676+ create_mapping(&kernel);
4677+
4678+ if (map.virtual < (unsigned long)_stext) {
4679+ map.length = (unsigned long)_stext - map.virtual;
4680+ map.type = __MT_MEMORY_RWX;
4681+ create_mapping(&map);
4682+ }
4683+
4684+ map.pfn = __phys_to_pfn(__pa(_sdata));
4685+ map.virtual = (unsigned long)_sdata;
4686+ map.length = end - __pa(_sdata);
4687+ }
4688+
4689+ map.type = MT_MEMORY_RW;
4690+ create_mapping(&map);
4691+#else
4692 if (end < kernel_x_start) {
4693 map.pfn = __phys_to_pfn(start);
4694 map.virtual = __phys_to_virt(start);
4695 map.length = end - start;
4696- map.type = MT_MEMORY_RWX;
4697+ map.type = __MT_MEMORY_RWX;
4698
4699 create_mapping(&map);
4700 } else if (start >= kernel_x_end) {
4701@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4702 map.pfn = __phys_to_pfn(kernel_x_start);
4703 map.virtual = __phys_to_virt(kernel_x_start);
4704 map.length = kernel_x_end - kernel_x_start;
4705- map.type = MT_MEMORY_RWX;
4706+ map.type = __MT_MEMORY_RWX;
4707
4708 create_mapping(&map);
4709
4710@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4711 create_mapping(&map);
4712 }
4713 }
4714+#endif
4715 }
4716 }
4717
4718diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4719index e1268f9..a9755a7 100644
4720--- a/arch/arm/net/bpf_jit_32.c
4721+++ b/arch/arm/net/bpf_jit_32.c
4722@@ -20,6 +20,7 @@
4723 #include <asm/cacheflush.h>
4724 #include <asm/hwcap.h>
4725 #include <asm/opcodes.h>
4726+#include <asm/pgtable.h>
4727
4728 #include "bpf_jit_32.h"
4729
4730@@ -71,7 +72,11 @@ struct jit_ctx {
4731 #endif
4732 };
4733
4734+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4735+int bpf_jit_enable __read_only;
4736+#else
4737 int bpf_jit_enable __read_mostly;
4738+#endif
4739
4740 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4741 {
4742@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4743 {
4744 u32 *ptr;
4745 /* We are guaranteed to have aligned memory. */
4746+ pax_open_kernel();
4747 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4748 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4749+ pax_close_kernel();
4750 }
4751
4752 static void build_prologue(struct jit_ctx *ctx)
4753diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4754index 5b217f4..c23f40e 100644
4755--- a/arch/arm/plat-iop/setup.c
4756+++ b/arch/arm/plat-iop/setup.c
4757@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4758 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4759 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4760 .length = IOP3XX_PERIPHERAL_SIZE,
4761- .type = MT_UNCACHED,
4762+ .type = MT_UNCACHED_RW,
4763 },
4764 };
4765
4766diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4767index a5bc92d..0bb4730 100644
4768--- a/arch/arm/plat-omap/sram.c
4769+++ b/arch/arm/plat-omap/sram.c
4770@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4771 * Looks like we need to preserve some bootloader code at the
4772 * beginning of SRAM for jumping to flash for reboot to work...
4773 */
4774+ pax_open_kernel();
4775 memset_io(omap_sram_base + omap_sram_skip, 0,
4776 omap_sram_size - omap_sram_skip);
4777+ pax_close_kernel();
4778 }
4779diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4780index ce6d763..cfea917 100644
4781--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4782+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4783@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4784 int (*started)(unsigned ch);
4785 int (*flush)(unsigned ch);
4786 int (*stop)(unsigned ch);
4787-};
4788+} __no_const;
4789
4790 extern void *samsung_dmadev_get_ops(void);
4791 extern void *s3c_dma_get_ops(void);
4792diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4793index a5abb00..9cbca9a 100644
4794--- a/arch/arm64/include/asm/barrier.h
4795+++ b/arch/arm64/include/asm/barrier.h
4796@@ -44,7 +44,7 @@
4797 do { \
4798 compiletime_assert_atomic_type(*p); \
4799 barrier(); \
4800- ACCESS_ONCE(*p) = (v); \
4801+ ACCESS_ONCE_RW(*p) = (v); \
4802 } while (0)
4803
4804 #define smp_load_acquire(p) \
4805diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4806index 09da25b..3ea0d64 100644
4807--- a/arch/arm64/include/asm/percpu.h
4808+++ b/arch/arm64/include/asm/percpu.h
4809@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4810 {
4811 switch (size) {
4812 case 1:
4813- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4814+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4815 break;
4816 case 2:
4817- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4818+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4819 break;
4820 case 4:
4821- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4822+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4823 break;
4824 case 8:
4825- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4826+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4827 break;
4828 default:
4829 BUILD_BUG();
4830diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4831index 3bf8f4e..5dd5491 100644
4832--- a/arch/arm64/include/asm/uaccess.h
4833+++ b/arch/arm64/include/asm/uaccess.h
4834@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4835 flag; \
4836 })
4837
4838+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4839 #define access_ok(type, addr, size) __range_ok(addr, size)
4840 #define user_addr_max get_fs
4841
4842diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4843index c3a58a1..78fbf54 100644
4844--- a/arch/avr32/include/asm/cache.h
4845+++ b/arch/avr32/include/asm/cache.h
4846@@ -1,8 +1,10 @@
4847 #ifndef __ASM_AVR32_CACHE_H
4848 #define __ASM_AVR32_CACHE_H
4849
4850+#include <linux/const.h>
4851+
4852 #define L1_CACHE_SHIFT 5
4853-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4854+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4855
4856 /*
4857 * Memory returned by kmalloc() may be used for DMA, so we must make
4858diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4859index d232888..87c8df1 100644
4860--- a/arch/avr32/include/asm/elf.h
4861+++ b/arch/avr32/include/asm/elf.h
4862@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4863 the loader. We need to make sure that it is out of the way of the program
4864 that it will "exec", and that there is sufficient room for the brk. */
4865
4866-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4867+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4868
4869+#ifdef CONFIG_PAX_ASLR
4870+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4871+
4872+#define PAX_DELTA_MMAP_LEN 15
4873+#define PAX_DELTA_STACK_LEN 15
4874+#endif
4875
4876 /* This yields a mask that user programs can use to figure out what
4877 instruction set this CPU supports. This could be done in user space,
4878diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4879index 479330b..53717a8 100644
4880--- a/arch/avr32/include/asm/kmap_types.h
4881+++ b/arch/avr32/include/asm/kmap_types.h
4882@@ -2,9 +2,9 @@
4883 #define __ASM_AVR32_KMAP_TYPES_H
4884
4885 #ifdef CONFIG_DEBUG_HIGHMEM
4886-# define KM_TYPE_NR 29
4887+# define KM_TYPE_NR 30
4888 #else
4889-# define KM_TYPE_NR 14
4890+# define KM_TYPE_NR 15
4891 #endif
4892
4893 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4894diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4895index d223a8b..69c5210 100644
4896--- a/arch/avr32/mm/fault.c
4897+++ b/arch/avr32/mm/fault.c
4898@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4899
4900 int exception_trace = 1;
4901
4902+#ifdef CONFIG_PAX_PAGEEXEC
4903+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4904+{
4905+ unsigned long i;
4906+
4907+ printk(KERN_ERR "PAX: bytes at PC: ");
4908+ for (i = 0; i < 20; i++) {
4909+ unsigned char c;
4910+ if (get_user(c, (unsigned char *)pc+i))
4911+ printk(KERN_CONT "???????? ");
4912+ else
4913+ printk(KERN_CONT "%02x ", c);
4914+ }
4915+ printk("\n");
4916+}
4917+#endif
4918+
4919 /*
4920 * This routine handles page faults. It determines the address and the
4921 * problem, and then passes it off to one of the appropriate routines.
4922@@ -178,6 +195,16 @@ bad_area:
4923 up_read(&mm->mmap_sem);
4924
4925 if (user_mode(regs)) {
4926+
4927+#ifdef CONFIG_PAX_PAGEEXEC
4928+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4929+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4930+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4931+ do_group_exit(SIGKILL);
4932+ }
4933+ }
4934+#endif
4935+
4936 if (exception_trace && printk_ratelimit())
4937 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4938 "sp %08lx ecr %lu\n",
4939diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4940index 568885a..f8008df 100644
4941--- a/arch/blackfin/include/asm/cache.h
4942+++ b/arch/blackfin/include/asm/cache.h
4943@@ -7,6 +7,7 @@
4944 #ifndef __ARCH_BLACKFIN_CACHE_H
4945 #define __ARCH_BLACKFIN_CACHE_H
4946
4947+#include <linux/const.h>
4948 #include <linux/linkage.h> /* for asmlinkage */
4949
4950 /*
4951@@ -14,7 +15,7 @@
4952 * Blackfin loads 32 bytes for cache
4953 */
4954 #define L1_CACHE_SHIFT 5
4955-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4956+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4957 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4958
4959 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4960diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4961index aea2718..3639a60 100644
4962--- a/arch/cris/include/arch-v10/arch/cache.h
4963+++ b/arch/cris/include/arch-v10/arch/cache.h
4964@@ -1,8 +1,9 @@
4965 #ifndef _ASM_ARCH_CACHE_H
4966 #define _ASM_ARCH_CACHE_H
4967
4968+#include <linux/const.h>
4969 /* Etrax 100LX have 32-byte cache-lines. */
4970-#define L1_CACHE_BYTES 32
4971 #define L1_CACHE_SHIFT 5
4972+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4973
4974 #endif /* _ASM_ARCH_CACHE_H */
4975diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4976index 7caf25d..ee65ac5 100644
4977--- a/arch/cris/include/arch-v32/arch/cache.h
4978+++ b/arch/cris/include/arch-v32/arch/cache.h
4979@@ -1,11 +1,12 @@
4980 #ifndef _ASM_CRIS_ARCH_CACHE_H
4981 #define _ASM_CRIS_ARCH_CACHE_H
4982
4983+#include <linux/const.h>
4984 #include <arch/hwregs/dma.h>
4985
4986 /* A cache-line is 32 bytes. */
4987-#define L1_CACHE_BYTES 32
4988 #define L1_CACHE_SHIFT 5
4989+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4990
4991 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4992
4993diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4994index 102190a..5334cea 100644
4995--- a/arch/frv/include/asm/atomic.h
4996+++ b/arch/frv/include/asm/atomic.h
4997@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
4998 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4999 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5000
5001+#define atomic64_read_unchecked(v) atomic64_read(v)
5002+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5003+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5004+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5005+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5006+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5007+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5008+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5009+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5010+
5011 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5012 {
5013 int c, old;
5014diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5015index 2797163..c2a401df9 100644
5016--- a/arch/frv/include/asm/cache.h
5017+++ b/arch/frv/include/asm/cache.h
5018@@ -12,10 +12,11 @@
5019 #ifndef __ASM_CACHE_H
5020 #define __ASM_CACHE_H
5021
5022+#include <linux/const.h>
5023
5024 /* bytes per L1 cache line */
5025 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5026-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5027+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5028
5029 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5030 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5031diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5032index 43901f2..0d8b865 100644
5033--- a/arch/frv/include/asm/kmap_types.h
5034+++ b/arch/frv/include/asm/kmap_types.h
5035@@ -2,6 +2,6 @@
5036 #ifndef _ASM_KMAP_TYPES_H
5037 #define _ASM_KMAP_TYPES_H
5038
5039-#define KM_TYPE_NR 17
5040+#define KM_TYPE_NR 18
5041
5042 #endif
5043diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5044index 836f147..4cf23f5 100644
5045--- a/arch/frv/mm/elf-fdpic.c
5046+++ b/arch/frv/mm/elf-fdpic.c
5047@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5048 {
5049 struct vm_area_struct *vma;
5050 struct vm_unmapped_area_info info;
5051+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5052
5053 if (len > TASK_SIZE)
5054 return -ENOMEM;
5055@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5056 if (addr) {
5057 addr = PAGE_ALIGN(addr);
5058 vma = find_vma(current->mm, addr);
5059- if (TASK_SIZE - len >= addr &&
5060- (!vma || addr + len <= vma->vm_start))
5061+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5062 goto success;
5063 }
5064
5065@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5066 info.high_limit = (current->mm->start_stack - 0x00200000);
5067 info.align_mask = 0;
5068 info.align_offset = 0;
5069+ info.threadstack_offset = offset;
5070 addr = vm_unmapped_area(&info);
5071 if (!(addr & ~PAGE_MASK))
5072 goto success;
5073diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5074index 69952c1..4fa2908 100644
5075--- a/arch/hexagon/include/asm/cache.h
5076+++ b/arch/hexagon/include/asm/cache.h
5077@@ -21,9 +21,11 @@
5078 #ifndef __ASM_CACHE_H
5079 #define __ASM_CACHE_H
5080
5081+#include <linux/const.h>
5082+
5083 /* Bytes per L1 cache line */
5084-#define L1_CACHE_SHIFT (5)
5085-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5086+#define L1_CACHE_SHIFT 5
5087+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5088
5089 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5090
5091diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5092index 074e52b..76afdac 100644
5093--- a/arch/ia64/Kconfig
5094+++ b/arch/ia64/Kconfig
5095@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5096 config KEXEC
5097 bool "kexec system call"
5098 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5099+ depends on !GRKERNSEC_KMEM
5100 help
5101 kexec is a system call that implements the ability to shutdown your
5102 current kernel, and to start another kernel. It is like a reboot
5103diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5104index 970d0bd..e750b9b 100644
5105--- a/arch/ia64/Makefile
5106+++ b/arch/ia64/Makefile
5107@@ -98,5 +98,6 @@ endef
5108 archprepare: make_nr_irqs_h FORCE
5109 PHONY += make_nr_irqs_h FORCE
5110
5111+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5112 make_nr_irqs_h: FORCE
5113 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5114diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5115index 0bf0350..2ad1957 100644
5116--- a/arch/ia64/include/asm/atomic.h
5117+++ b/arch/ia64/include/asm/atomic.h
5118@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5119 #define atomic64_inc(v) atomic64_add(1, (v))
5120 #define atomic64_dec(v) atomic64_sub(1, (v))
5121
5122+#define atomic64_read_unchecked(v) atomic64_read(v)
5123+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5124+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5125+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5126+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5127+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5128+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5129+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5130+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5131+
5132 #endif /* _ASM_IA64_ATOMIC_H */
5133diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5134index f6769eb..1cdb590 100644
5135--- a/arch/ia64/include/asm/barrier.h
5136+++ b/arch/ia64/include/asm/barrier.h
5137@@ -66,7 +66,7 @@
5138 do { \
5139 compiletime_assert_atomic_type(*p); \
5140 barrier(); \
5141- ACCESS_ONCE(*p) = (v); \
5142+ ACCESS_ONCE_RW(*p) = (v); \
5143 } while (0)
5144
5145 #define smp_load_acquire(p) \
5146diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5147index 988254a..e1ee885 100644
5148--- a/arch/ia64/include/asm/cache.h
5149+++ b/arch/ia64/include/asm/cache.h
5150@@ -1,6 +1,7 @@
5151 #ifndef _ASM_IA64_CACHE_H
5152 #define _ASM_IA64_CACHE_H
5153
5154+#include <linux/const.h>
5155
5156 /*
5157 * Copyright (C) 1998-2000 Hewlett-Packard Co
5158@@ -9,7 +10,7 @@
5159
5160 /* Bytes per L1 (data) cache line. */
5161 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5162-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5164
5165 #ifdef CONFIG_SMP
5166 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5167diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5168index 5a83c5c..4d7f553 100644
5169--- a/arch/ia64/include/asm/elf.h
5170+++ b/arch/ia64/include/asm/elf.h
5171@@ -42,6 +42,13 @@
5172 */
5173 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5174
5175+#ifdef CONFIG_PAX_ASLR
5176+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5177+
5178+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5179+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5180+#endif
5181+
5182 #define PT_IA_64_UNWIND 0x70000001
5183
5184 /* IA-64 relocations: */
5185diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5186index 5767cdf..7462574 100644
5187--- a/arch/ia64/include/asm/pgalloc.h
5188+++ b/arch/ia64/include/asm/pgalloc.h
5189@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5190 pgd_val(*pgd_entry) = __pa(pud);
5191 }
5192
5193+static inline void
5194+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5195+{
5196+ pgd_populate(mm, pgd_entry, pud);
5197+}
5198+
5199 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5200 {
5201 return quicklist_alloc(0, GFP_KERNEL, NULL);
5202@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5203 pud_val(*pud_entry) = __pa(pmd);
5204 }
5205
5206+static inline void
5207+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5208+{
5209+ pud_populate(mm, pud_entry, pmd);
5210+}
5211+
5212 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5213 {
5214 return quicklist_alloc(0, GFP_KERNEL, NULL);
5215diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5216index 7935115..c0eca6a 100644
5217--- a/arch/ia64/include/asm/pgtable.h
5218+++ b/arch/ia64/include/asm/pgtable.h
5219@@ -12,7 +12,7 @@
5220 * David Mosberger-Tang <davidm@hpl.hp.com>
5221 */
5222
5223-
5224+#include <linux/const.h>
5225 #include <asm/mman.h>
5226 #include <asm/page.h>
5227 #include <asm/processor.h>
5228@@ -142,6 +142,17 @@
5229 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5230 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5231 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5232+
5233+#ifdef CONFIG_PAX_PAGEEXEC
5234+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5235+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5236+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5237+#else
5238+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5239+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5240+# define PAGE_COPY_NOEXEC PAGE_COPY
5241+#endif
5242+
5243 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5244 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5245 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5246diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5247index 45698cd..e8e2dbc 100644
5248--- a/arch/ia64/include/asm/spinlock.h
5249+++ b/arch/ia64/include/asm/spinlock.h
5250@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5251 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5252
5253 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5254- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5255+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5256 }
5257
5258 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5259diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5260index 103bedc..0210597 100644
5261--- a/arch/ia64/include/asm/uaccess.h
5262+++ b/arch/ia64/include/asm/uaccess.h
5263@@ -70,6 +70,7 @@
5264 && ((segment).seg == KERNEL_DS.seg \
5265 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5266 })
5267+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5268 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5269
5270 /*
5271@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5272 static inline unsigned long
5273 __copy_to_user (void __user *to, const void *from, unsigned long count)
5274 {
5275+ if (count > INT_MAX)
5276+ return count;
5277+
5278+ if (!__builtin_constant_p(count))
5279+ check_object_size(from, count, true);
5280+
5281 return __copy_user(to, (__force void __user *) from, count);
5282 }
5283
5284 static inline unsigned long
5285 __copy_from_user (void *to, const void __user *from, unsigned long count)
5286 {
5287+ if (count > INT_MAX)
5288+ return count;
5289+
5290+ if (!__builtin_constant_p(count))
5291+ check_object_size(to, count, false);
5292+
5293 return __copy_user((__force void __user *) to, from, count);
5294 }
5295
5296@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5297 ({ \
5298 void __user *__cu_to = (to); \
5299 const void *__cu_from = (from); \
5300- long __cu_len = (n); \
5301+ unsigned long __cu_len = (n); \
5302 \
5303- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5304+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5305+ if (!__builtin_constant_p(n)) \
5306+ check_object_size(__cu_from, __cu_len, true); \
5307 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5308+ } \
5309 __cu_len; \
5310 })
5311
5312@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5313 ({ \
5314 void *__cu_to = (to); \
5315 const void __user *__cu_from = (from); \
5316- long __cu_len = (n); \
5317+ unsigned long __cu_len = (n); \
5318 \
5319 __chk_user_ptr(__cu_from); \
5320- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5321+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5322+ if (!__builtin_constant_p(n)) \
5323+ check_object_size(__cu_to, __cu_len, false); \
5324 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5325+ } \
5326 __cu_len; \
5327 })
5328
5329diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5330index 29754aa..06d2838 100644
5331--- a/arch/ia64/kernel/module.c
5332+++ b/arch/ia64/kernel/module.c
5333@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5334 }
5335
5336 static inline int
5337+in_init_rx (const struct module *mod, uint64_t addr)
5338+{
5339+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5340+}
5341+
5342+static inline int
5343+in_init_rw (const struct module *mod, uint64_t addr)
5344+{
5345+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5346+}
5347+
5348+static inline int
5349 in_init (const struct module *mod, uint64_t addr)
5350 {
5351- return addr - (uint64_t) mod->module_init < mod->init_size;
5352+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5353+}
5354+
5355+static inline int
5356+in_core_rx (const struct module *mod, uint64_t addr)
5357+{
5358+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5359+}
5360+
5361+static inline int
5362+in_core_rw (const struct module *mod, uint64_t addr)
5363+{
5364+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5365 }
5366
5367 static inline int
5368 in_core (const struct module *mod, uint64_t addr)
5369 {
5370- return addr - (uint64_t) mod->module_core < mod->core_size;
5371+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5372 }
5373
5374 static inline int
5375@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5376 break;
5377
5378 case RV_BDREL:
5379- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5380+ if (in_init_rx(mod, val))
5381+ val -= (uint64_t) mod->module_init_rx;
5382+ else if (in_init_rw(mod, val))
5383+ val -= (uint64_t) mod->module_init_rw;
5384+ else if (in_core_rx(mod, val))
5385+ val -= (uint64_t) mod->module_core_rx;
5386+ else if (in_core_rw(mod, val))
5387+ val -= (uint64_t) mod->module_core_rw;
5388 break;
5389
5390 case RV_LTV:
5391@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5392 * addresses have been selected...
5393 */
5394 uint64_t gp;
5395- if (mod->core_size > MAX_LTOFF)
5396+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5397 /*
5398 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5399 * at the end of the module.
5400 */
5401- gp = mod->core_size - MAX_LTOFF / 2;
5402+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5403 else
5404- gp = mod->core_size / 2;
5405- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5406+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5407+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5408 mod->arch.gp = gp;
5409 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5410 }
5411diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5412index c39c3cd..3c77738 100644
5413--- a/arch/ia64/kernel/palinfo.c
5414+++ b/arch/ia64/kernel/palinfo.c
5415@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5416 return NOTIFY_OK;
5417 }
5418
5419-static struct notifier_block __refdata palinfo_cpu_notifier =
5420+static struct notifier_block palinfo_cpu_notifier =
5421 {
5422 .notifier_call = palinfo_cpu_callback,
5423 .priority = 0,
5424diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5425index 41e33f8..65180b2a 100644
5426--- a/arch/ia64/kernel/sys_ia64.c
5427+++ b/arch/ia64/kernel/sys_ia64.c
5428@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5429 unsigned long align_mask = 0;
5430 struct mm_struct *mm = current->mm;
5431 struct vm_unmapped_area_info info;
5432+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5433
5434 if (len > RGN_MAP_LIMIT)
5435 return -ENOMEM;
5436@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5437 if (REGION_NUMBER(addr) == RGN_HPAGE)
5438 addr = 0;
5439 #endif
5440+
5441+#ifdef CONFIG_PAX_RANDMMAP
5442+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5443+ addr = mm->free_area_cache;
5444+ else
5445+#endif
5446+
5447 if (!addr)
5448 addr = TASK_UNMAPPED_BASE;
5449
5450@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5451 info.high_limit = TASK_SIZE;
5452 info.align_mask = align_mask;
5453 info.align_offset = 0;
5454+ info.threadstack_offset = offset;
5455 return vm_unmapped_area(&info);
5456 }
5457
5458diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5459index 84f8a52..7c76178 100644
5460--- a/arch/ia64/kernel/vmlinux.lds.S
5461+++ b/arch/ia64/kernel/vmlinux.lds.S
5462@@ -192,7 +192,7 @@ SECTIONS {
5463 /* Per-cpu data: */
5464 . = ALIGN(PERCPU_PAGE_SIZE);
5465 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5466- __phys_per_cpu_start = __per_cpu_load;
5467+ __phys_per_cpu_start = per_cpu_load;
5468 /*
5469 * ensure percpu data fits
5470 * into percpu page size
5471diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5472index ba5ba7a..36e9d3a 100644
5473--- a/arch/ia64/mm/fault.c
5474+++ b/arch/ia64/mm/fault.c
5475@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5476 return pte_present(pte);
5477 }
5478
5479+#ifdef CONFIG_PAX_PAGEEXEC
5480+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5481+{
5482+ unsigned long i;
5483+
5484+ printk(KERN_ERR "PAX: bytes at PC: ");
5485+ for (i = 0; i < 8; i++) {
5486+ unsigned int c;
5487+ if (get_user(c, (unsigned int *)pc+i))
5488+ printk(KERN_CONT "???????? ");
5489+ else
5490+ printk(KERN_CONT "%08x ", c);
5491+ }
5492+ printk("\n");
5493+}
5494+#endif
5495+
5496 # define VM_READ_BIT 0
5497 # define VM_WRITE_BIT 1
5498 # define VM_EXEC_BIT 2
5499@@ -151,8 +168,21 @@ retry:
5500 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5501 goto bad_area;
5502
5503- if ((vma->vm_flags & mask) != mask)
5504+ if ((vma->vm_flags & mask) != mask) {
5505+
5506+#ifdef CONFIG_PAX_PAGEEXEC
5507+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5508+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5509+ goto bad_area;
5510+
5511+ up_read(&mm->mmap_sem);
5512+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5513+ do_group_exit(SIGKILL);
5514+ }
5515+#endif
5516+
5517 goto bad_area;
5518+ }
5519
5520 /*
5521 * If for any reason at all we couldn't handle the fault, make
5522diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5523index 76069c1..c2aa816 100644
5524--- a/arch/ia64/mm/hugetlbpage.c
5525+++ b/arch/ia64/mm/hugetlbpage.c
5526@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5527 unsigned long pgoff, unsigned long flags)
5528 {
5529 struct vm_unmapped_area_info info;
5530+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5531
5532 if (len > RGN_MAP_LIMIT)
5533 return -ENOMEM;
5534@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5535 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5536 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5537 info.align_offset = 0;
5538+ info.threadstack_offset = offset;
5539 return vm_unmapped_area(&info);
5540 }
5541
5542diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5543index 6b33457..88b5124 100644
5544--- a/arch/ia64/mm/init.c
5545+++ b/arch/ia64/mm/init.c
5546@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5547 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5548 vma->vm_end = vma->vm_start + PAGE_SIZE;
5549 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5550+
5551+#ifdef CONFIG_PAX_PAGEEXEC
5552+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5553+ vma->vm_flags &= ~VM_EXEC;
5554+
5555+#ifdef CONFIG_PAX_MPROTECT
5556+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5557+ vma->vm_flags &= ~VM_MAYEXEC;
5558+#endif
5559+
5560+ }
5561+#endif
5562+
5563 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5564 down_write(&current->mm->mmap_sem);
5565 if (insert_vm_struct(current->mm, vma)) {
5566@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5567 gate_vma.vm_start = FIXADDR_USER_START;
5568 gate_vma.vm_end = FIXADDR_USER_END;
5569 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5570- gate_vma.vm_page_prot = __P101;
5571+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5572
5573 return 0;
5574 }
5575diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5576index 40b3ee98..8c2c112 100644
5577--- a/arch/m32r/include/asm/cache.h
5578+++ b/arch/m32r/include/asm/cache.h
5579@@ -1,8 +1,10 @@
5580 #ifndef _ASM_M32R_CACHE_H
5581 #define _ASM_M32R_CACHE_H
5582
5583+#include <linux/const.h>
5584+
5585 /* L1 cache line size */
5586 #define L1_CACHE_SHIFT 4
5587-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5588+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5589
5590 #endif /* _ASM_M32R_CACHE_H */
5591diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5592index 82abd15..d95ae5d 100644
5593--- a/arch/m32r/lib/usercopy.c
5594+++ b/arch/m32r/lib/usercopy.c
5595@@ -14,6 +14,9 @@
5596 unsigned long
5597 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5598 {
5599+ if ((long)n < 0)
5600+ return n;
5601+
5602 prefetch(from);
5603 if (access_ok(VERIFY_WRITE, to, n))
5604 __copy_user(to,from,n);
5605@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5606 unsigned long
5607 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5608 {
5609+ if ((long)n < 0)
5610+ return n;
5611+
5612 prefetchw(to);
5613 if (access_ok(VERIFY_READ, from, n))
5614 __copy_user_zeroing(to,from,n);
5615diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5616index 0395c51..5f26031 100644
5617--- a/arch/m68k/include/asm/cache.h
5618+++ b/arch/m68k/include/asm/cache.h
5619@@ -4,9 +4,11 @@
5620 #ifndef __ARCH_M68K_CACHE_H
5621 #define __ARCH_M68K_CACHE_H
5622
5623+#include <linux/const.h>
5624+
5625 /* bytes per L1 cache line */
5626 #define L1_CACHE_SHIFT 4
5627-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5628+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5629
5630 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5631
5632diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5633index d703d8e..a8e2d70 100644
5634--- a/arch/metag/include/asm/barrier.h
5635+++ b/arch/metag/include/asm/barrier.h
5636@@ -90,7 +90,7 @@ static inline void fence(void)
5637 do { \
5638 compiletime_assert_atomic_type(*p); \
5639 smp_mb(); \
5640- ACCESS_ONCE(*p) = (v); \
5641+ ACCESS_ONCE_RW(*p) = (v); \
5642 } while (0)
5643
5644 #define smp_load_acquire(p) \
5645diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5646index 3c32075..ae0ae75 100644
5647--- a/arch/metag/mm/hugetlbpage.c
5648+++ b/arch/metag/mm/hugetlbpage.c
5649@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5650 info.high_limit = TASK_SIZE;
5651 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5652 info.align_offset = 0;
5653+ info.threadstack_offset = 0;
5654 return vm_unmapped_area(&info);
5655 }
5656
5657diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5658index 4efe96a..60e8699 100644
5659--- a/arch/microblaze/include/asm/cache.h
5660+++ b/arch/microblaze/include/asm/cache.h
5661@@ -13,11 +13,12 @@
5662 #ifndef _ASM_MICROBLAZE_CACHE_H
5663 #define _ASM_MICROBLAZE_CACHE_H
5664
5665+#include <linux/const.h>
5666 #include <asm/registers.h>
5667
5668 #define L1_CACHE_SHIFT 5
5669 /* word-granular cache in microblaze */
5670-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5671+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5672
5673 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5674
5675diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5676index 843713c..b6a87b9 100644
5677--- a/arch/mips/Kconfig
5678+++ b/arch/mips/Kconfig
5679@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5680
5681 config KEXEC
5682 bool "Kexec system call"
5683+ depends on !GRKERNSEC_KMEM
5684 help
5685 kexec is a system call that implements the ability to shutdown your
5686 current kernel, and to start another kernel. It is like a reboot
5687diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5688index 3778655..1dff0a9 100644
5689--- a/arch/mips/cavium-octeon/dma-octeon.c
5690+++ b/arch/mips/cavium-octeon/dma-octeon.c
5691@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5692 if (dma_release_from_coherent(dev, order, vaddr))
5693 return;
5694
5695- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5696+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5697 }
5698
5699 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5700diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5701index 857da84..3f4458b 100644
5702--- a/arch/mips/include/asm/atomic.h
5703+++ b/arch/mips/include/asm/atomic.h
5704@@ -22,15 +22,39 @@
5705 #include <asm/cmpxchg.h>
5706 #include <asm/war.h>
5707
5708+#ifdef CONFIG_GENERIC_ATOMIC64
5709+#include <asm-generic/atomic64.h>
5710+#endif
5711+
5712 #define ATOMIC_INIT(i) { (i) }
5713
5714+#ifdef CONFIG_64BIT
5715+#define _ASM_EXTABLE(from, to) \
5716+" .section __ex_table,\"a\"\n" \
5717+" .dword " #from ", " #to"\n" \
5718+" .previous\n"
5719+#else
5720+#define _ASM_EXTABLE(from, to) \
5721+" .section __ex_table,\"a\"\n" \
5722+" .word " #from ", " #to"\n" \
5723+" .previous\n"
5724+#endif
5725+
5726 /*
5727 * atomic_read - read atomic variable
5728 * @v: pointer of type atomic_t
5729 *
5730 * Atomically reads the value of @v.
5731 */
5732-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5733+static inline int atomic_read(const atomic_t *v)
5734+{
5735+ return ACCESS_ONCE(v->counter);
5736+}
5737+
5738+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5739+{
5740+ return ACCESS_ONCE(v->counter);
5741+}
5742
5743 /*
5744 * atomic_set - set atomic variable
5745@@ -39,47 +63,77 @@
5746 *
5747 * Atomically sets the value of @v to @i.
5748 */
5749-#define atomic_set(v, i) ((v)->counter = (i))
5750+static inline void atomic_set(atomic_t *v, int i)
5751+{
5752+ v->counter = i;
5753+}
5754
5755-#define ATOMIC_OP(op, c_op, asm_op) \
5756-static __inline__ void atomic_##op(int i, atomic_t * v) \
5757+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5758+{
5759+ v->counter = i;
5760+}
5761+
5762+#ifdef CONFIG_PAX_REFCOUNT
5763+#define __OVERFLOW_POST \
5764+ " b 4f \n" \
5765+ " .set noreorder \n" \
5766+ "3: b 5f \n" \
5767+ " move %0, %1 \n" \
5768+ " .set reorder \n"
5769+#define __OVERFLOW_EXTABLE \
5770+ "3:\n" \
5771+ _ASM_EXTABLE(2b, 3b)
5772+#else
5773+#define __OVERFLOW_POST
5774+#define __OVERFLOW_EXTABLE
5775+#endif
5776+
5777+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5778+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5779 { \
5780 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5781 int temp; \
5782 \
5783 __asm__ __volatile__( \
5784- " .set arch=r4000 \n" \
5785- "1: ll %0, %1 # atomic_" #op " \n" \
5786- " " #asm_op " %0, %2 \n" \
5787+ " .set mips3 \n" \
5788+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5789+ "2: " #asm_op " %0, %2 \n" \
5790 " sc %0, %1 \n" \
5791 " beqzl %0, 1b \n" \
5792+ extable \
5793 " .set mips0 \n" \
5794 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5795 : "Ir" (i)); \
5796 } else if (kernel_uses_llsc) { \
5797 int temp; \
5798 \
5799- do { \
5800- __asm__ __volatile__( \
5801- " .set arch=r4000 \n" \
5802- " ll %0, %1 # atomic_" #op "\n" \
5803- " " #asm_op " %0, %2 \n" \
5804- " sc %0, %1 \n" \
5805- " .set mips0 \n" \
5806- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5807- : "Ir" (i)); \
5808- } while (unlikely(!temp)); \
5809+ __asm__ __volatile__( \
5810+ " .set mips3 \n" \
5811+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5812+ "2: " #asm_op " %0, %2 \n" \
5813+ " sc %0, %1 \n" \
5814+ " beqz %0, 1b \n" \
5815+ extable \
5816+ " .set mips0 \n" \
5817+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5818+ : "Ir" (i)); \
5819 } else { \
5820 unsigned long flags; \
5821 \
5822 raw_local_irq_save(flags); \
5823- v->counter c_op i; \
5824+ __asm__ __volatile__( \
5825+ "2: " #asm_op " %0, %1 \n" \
5826+ extable \
5827+ : "+r" (v->counter) : "Ir" (i)); \
5828 raw_local_irq_restore(flags); \
5829 } \
5830 }
5831
5832-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5833-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5834+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5835+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5836+
5837+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5838+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5839 { \
5840 int result; \
5841 \
5842@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5843 int temp; \
5844 \
5845 __asm__ __volatile__( \
5846- " .set arch=r4000 \n" \
5847- "1: ll %1, %2 # atomic_" #op "_return \n" \
5848- " " #asm_op " %0, %1, %3 \n" \
5849+ " .set mips3 \n" \
5850+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5851+ "2: " #asm_op " %0, %1, %3 \n" \
5852 " sc %0, %2 \n" \
5853 " beqzl %0, 1b \n" \
5854- " " #asm_op " %0, %1, %3 \n" \
5855+ post_op \
5856+ extable \
5857+ "4: " #asm_op " %0, %1, %3 \n" \
5858+ "5: \n" \
5859 " .set mips0 \n" \
5860 : "=&r" (result), "=&r" (temp), \
5861 "+" GCC_OFF12_ASM() (v->counter) \
5862@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5863 } else if (kernel_uses_llsc) { \
5864 int temp; \
5865 \
5866- do { \
5867- __asm__ __volatile__( \
5868- " .set arch=r4000 \n" \
5869- " ll %1, %2 # atomic_" #op "_return \n" \
5870- " " #asm_op " %0, %1, %3 \n" \
5871- " sc %0, %2 \n" \
5872- " .set mips0 \n" \
5873- : "=&r" (result), "=&r" (temp), \
5874- "+" GCC_OFF12_ASM() (v->counter) \
5875- : "Ir" (i)); \
5876- } while (unlikely(!result)); \
5877+ __asm__ __volatile__( \
5878+ " .set mips3 \n" \
5879+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5880+ "2: " #asm_op " %0, %1, %3 \n" \
5881+ " sc %0, %2 \n" \
5882+ post_op \
5883+ extable \
5884+ "4: " #asm_op " %0, %1, %3 \n" \
5885+ "5: \n" \
5886+ " .set mips0 \n" \
5887+ : "=&r" (result), "=&r" (temp), \
5888+ "+" GCC_OFF12_ASM() (v->counter) \
5889+ : "Ir" (i)); \
5890 \
5891 result = temp; result c_op i; \
5892 } else { \
5893 unsigned long flags; \
5894 \
5895 raw_local_irq_save(flags); \
5896- result = v->counter; \
5897- result c_op i; \
5898- v->counter = result; \
5899+ __asm__ __volatile__( \
5900+ " lw %0, %1 \n" \
5901+ "2: " #asm_op " %0, %1, %2 \n" \
5902+ " sw %0, %1 \n" \
5903+ "3: \n" \
5904+ extable \
5905+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5906+ : "Ir" (i)); \
5907 raw_local_irq_restore(flags); \
5908 } \
5909 \
5910@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5911 return result; \
5912 }
5913
5914-#define ATOMIC_OPS(op, c_op, asm_op) \
5915- ATOMIC_OP(op, c_op, asm_op) \
5916- ATOMIC_OP_RETURN(op, c_op, asm_op)
5917+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5918+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5919
5920-ATOMIC_OPS(add, +=, addu)
5921-ATOMIC_OPS(sub, -=, subu)
5922+#define ATOMIC_OPS(op, asm_op) \
5923+ ATOMIC_OP(op, asm_op) \
5924+ ATOMIC_OP_RETURN(op, asm_op)
5925+
5926+ATOMIC_OPS(add, add)
5927+ATOMIC_OPS(sub, sub)
5928
5929 #undef ATOMIC_OPS
5930 #undef ATOMIC_OP_RETURN
5931+#undef __ATOMIC_OP_RETURN
5932 #undef ATOMIC_OP
5933+#undef __ATOMIC_OP
5934
5935 /*
5936 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5937@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5938 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5939 * The function returns the old value of @v minus @i.
5940 */
5941-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5942+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5943 {
5944 int result;
5945
5946@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5947 return result;
5948 }
5949
5950-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5951-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5952+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5953+{
5954+ return cmpxchg(&v->counter, old, new);
5955+}
5956+
5957+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5958+ int new)
5959+{
5960+ return cmpxchg(&(v->counter), old, new);
5961+}
5962+
5963+static inline int atomic_xchg(atomic_t *v, int new)
5964+{
5965+ return xchg(&v->counter, new);
5966+}
5967+
5968+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5969+{
5970+ return xchg(&(v->counter), new);
5971+}
5972
5973 /**
5974 * __atomic_add_unless - add unless the number is a given value
5975@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5976
5977 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5978 #define atomic_inc_return(v) atomic_add_return(1, (v))
5979+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5980+{
5981+ return atomic_add_return_unchecked(1, v);
5982+}
5983
5984 /*
5985 * atomic_sub_and_test - subtract value from variable and test result
5986@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5987 * other cases.
5988 */
5989 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5990+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5991+{
5992+ return atomic_add_return_unchecked(1, v) == 0;
5993+}
5994
5995 /*
5996 * atomic_dec_and_test - decrement by 1 and test
5997@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5998 * Atomically increments @v by 1.
5999 */
6000 #define atomic_inc(v) atomic_add(1, (v))
6001+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6002+{
6003+ atomic_add_unchecked(1, v);
6004+}
6005
6006 /*
6007 * atomic_dec - decrement and test
6008@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6009 * Atomically decrements @v by 1.
6010 */
6011 #define atomic_dec(v) atomic_sub(1, (v))
6012+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6013+{
6014+ atomic_sub_unchecked(1, v);
6015+}
6016
6017 /*
6018 * atomic_add_negative - add and test if negative
6019@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6020 * @v: pointer of type atomic64_t
6021 *
6022 */
6023-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6024+static inline long atomic64_read(const atomic64_t *v)
6025+{
6026+ return ACCESS_ONCE(v->counter);
6027+}
6028+
6029+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6030+{
6031+ return ACCESS_ONCE(v->counter);
6032+}
6033
6034 /*
6035 * atomic64_set - set atomic variable
6036 * @v: pointer of type atomic64_t
6037 * @i: required value
6038 */
6039-#define atomic64_set(v, i) ((v)->counter = (i))
6040+static inline void atomic64_set(atomic64_t *v, long i)
6041+{
6042+ v->counter = i;
6043+}
6044
6045-#define ATOMIC64_OP(op, c_op, asm_op) \
6046-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6047+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6048+{
6049+ v->counter = i;
6050+}
6051+
6052+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6053+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6054 { \
6055 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6056 long temp; \
6057 \
6058 __asm__ __volatile__( \
6059- " .set arch=r4000 \n" \
6060- "1: lld %0, %1 # atomic64_" #op " \n" \
6061- " " #asm_op " %0, %2 \n" \
6062+ " .set mips3 \n" \
6063+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6064+ "2: " #asm_op " %0, %2 \n" \
6065 " scd %0, %1 \n" \
6066 " beqzl %0, 1b \n" \
6067+ extable \
6068 " .set mips0 \n" \
6069 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6070 : "Ir" (i)); \
6071 } else if (kernel_uses_llsc) { \
6072 long temp; \
6073 \
6074- do { \
6075- __asm__ __volatile__( \
6076- " .set arch=r4000 \n" \
6077- " lld %0, %1 # atomic64_" #op "\n" \
6078- " " #asm_op " %0, %2 \n" \
6079- " scd %0, %1 \n" \
6080- " .set mips0 \n" \
6081- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6082- : "Ir" (i)); \
6083- } while (unlikely(!temp)); \
6084+ __asm__ __volatile__( \
6085+ " .set mips3 \n" \
6086+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6087+ "2: " #asm_op " %0, %2 \n" \
6088+ " scd %0, %1 \n" \
6089+ " beqz %0, 1b \n" \
6090+ extable \
6091+ " .set mips0 \n" \
6092+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6093+ : "Ir" (i)); \
6094 } else { \
6095 unsigned long flags; \
6096 \
6097 raw_local_irq_save(flags); \
6098- v->counter c_op i; \
6099+ __asm__ __volatile__( \
6100+ "2: " #asm_op " %0, %1 \n" \
6101+ extable \
6102+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6103 raw_local_irq_restore(flags); \
6104 } \
6105 }
6106
6107-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6108-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6109+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6110+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6111+
6112+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6113+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6114 { \
6115 long result; \
6116 \
6117@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6118 long temp; \
6119 \
6120 __asm__ __volatile__( \
6121- " .set arch=r4000 \n" \
6122+ " .set mips3 \n" \
6123 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6124- " " #asm_op " %0, %1, %3 \n" \
6125+ "2: " #asm_op " %0, %1, %3 \n" \
6126 " scd %0, %2 \n" \
6127 " beqzl %0, 1b \n" \
6128- " " #asm_op " %0, %1, %3 \n" \
6129+ post_op \
6130+ extable \
6131+ "4: " #asm_op " %0, %1, %3 \n" \
6132+ "5: \n" \
6133 " .set mips0 \n" \
6134 : "=&r" (result), "=&r" (temp), \
6135 "+" GCC_OFF12_ASM() (v->counter) \
6136@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6137 } else if (kernel_uses_llsc) { \
6138 long temp; \
6139 \
6140- do { \
6141- __asm__ __volatile__( \
6142- " .set arch=r4000 \n" \
6143- " lld %1, %2 # atomic64_" #op "_return\n" \
6144- " " #asm_op " %0, %1, %3 \n" \
6145- " scd %0, %2 \n" \
6146- " .set mips0 \n" \
6147- : "=&r" (result), "=&r" (temp), \
6148- "=" GCC_OFF12_ASM() (v->counter) \
6149- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6150- : "memory"); \
6151- } while (unlikely(!result)); \
6152+ __asm__ __volatile__( \
6153+ " .set mips3 \n" \
6154+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6155+ "2: " #asm_op " %0, %1, %3 \n" \
6156+ " scd %0, %2 \n" \
6157+ " beqz %0, 1b \n" \
6158+ post_op \
6159+ extable \
6160+ "4: " #asm_op " %0, %1, %3 \n" \
6161+ "5: \n" \
6162+ " .set mips0 \n" \
6163+ : "=&r" (result), "=&r" (temp), \
6164+ "=" GCC_OFF12_ASM() (v->counter) \
6165+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6166+ : "memory"); \
6167 \
6168 result = temp; result c_op i; \
6169 } else { \
6170 unsigned long flags; \
6171 \
6172 raw_local_irq_save(flags); \
6173- result = v->counter; \
6174- result c_op i; \
6175- v->counter = result; \
6176+ __asm__ __volatile__( \
6177+ " ld %0, %1 \n" \
6178+ "2: " #asm_op " %0, %1, %2 \n" \
6179+ " sd %0, %1 \n" \
6180+ "3: \n" \
6181+ extable \
6182+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6183+ : "Ir" (i)); \
6184 raw_local_irq_restore(flags); \
6185 } \
6186 \
6187@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6188 return result; \
6189 }
6190
6191-#define ATOMIC64_OPS(op, c_op, asm_op) \
6192- ATOMIC64_OP(op, c_op, asm_op) \
6193- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6194+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6195+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6196
6197-ATOMIC64_OPS(add, +=, daddu)
6198-ATOMIC64_OPS(sub, -=, dsubu)
6199+#define ATOMIC64_OPS(op, asm_op) \
6200+ ATOMIC64_OP(op, asm_op) \
6201+ ATOMIC64_OP_RETURN(op, asm_op)
6202+
6203+ATOMIC64_OPS(add, dadd)
6204+ATOMIC64_OPS(sub, dsub)
6205
6206 #undef ATOMIC64_OPS
6207 #undef ATOMIC64_OP_RETURN
6208+#undef __ATOMIC64_OP_RETURN
6209 #undef ATOMIC64_OP
6210+#undef __ATOMIC64_OP
6211+#undef __OVERFLOW_EXTABLE
6212+#undef __OVERFLOW_POST
6213
6214 /*
6215 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6216@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6217 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6218 * The function returns the old value of @v minus @i.
6219 */
6220-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6221+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6222 {
6223 long result;
6224
6225@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6226 return result;
6227 }
6228
6229-#define atomic64_cmpxchg(v, o, n) \
6230- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6231-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6232+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6233+{
6234+ return cmpxchg(&v->counter, old, new);
6235+}
6236+
6237+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6238+ long new)
6239+{
6240+ return cmpxchg(&(v->counter), old, new);
6241+}
6242+
6243+static inline long atomic64_xchg(atomic64_t *v, long new)
6244+{
6245+ return xchg(&v->counter, new);
6246+}
6247+
6248+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6249+{
6250+ return xchg(&(v->counter), new);
6251+}
6252
6253 /**
6254 * atomic64_add_unless - add unless the number is a given value
6255@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6256
6257 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6258 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6259+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6260
6261 /*
6262 * atomic64_sub_and_test - subtract value from variable and test result
6263@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6264 * other cases.
6265 */
6266 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6267+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6268
6269 /*
6270 * atomic64_dec_and_test - decrement by 1 and test
6271@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6272 * Atomically increments @v by 1.
6273 */
6274 #define atomic64_inc(v) atomic64_add(1, (v))
6275+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6276
6277 /*
6278 * atomic64_dec - decrement and test
6279@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6280 * Atomically decrements @v by 1.
6281 */
6282 #define atomic64_dec(v) atomic64_sub(1, (v))
6283+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6284
6285 /*
6286 * atomic64_add_negative - add and test if negative
6287diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6288index 2b8bbbc..4556df6 100644
6289--- a/arch/mips/include/asm/barrier.h
6290+++ b/arch/mips/include/asm/barrier.h
6291@@ -133,7 +133,7 @@
6292 do { \
6293 compiletime_assert_atomic_type(*p); \
6294 smp_mb(); \
6295- ACCESS_ONCE(*p) = (v); \
6296+ ACCESS_ONCE_RW(*p) = (v); \
6297 } while (0)
6298
6299 #define smp_load_acquire(p) \
6300diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6301index b4db69f..8f3b093 100644
6302--- a/arch/mips/include/asm/cache.h
6303+++ b/arch/mips/include/asm/cache.h
6304@@ -9,10 +9,11 @@
6305 #ifndef _ASM_CACHE_H
6306 #define _ASM_CACHE_H
6307
6308+#include <linux/const.h>
6309 #include <kmalloc.h>
6310
6311 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6312-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6313+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6314
6315 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6316 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6317diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6318index eb4d95d..f2f7f93 100644
6319--- a/arch/mips/include/asm/elf.h
6320+++ b/arch/mips/include/asm/elf.h
6321@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6322 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6323 #endif
6324
6325+#ifdef CONFIG_PAX_ASLR
6326+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6327+
6328+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6329+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6330+#endif
6331+
6332 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6333 struct linux_binprm;
6334 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6335 int uses_interp);
6336
6337-struct mm_struct;
6338-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6339-#define arch_randomize_brk arch_randomize_brk
6340-
6341 struct arch_elf_state {
6342 int fp_abi;
6343 int interp_fp_abi;
6344diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6345index c1f6afa..38cc6e9 100644
6346--- a/arch/mips/include/asm/exec.h
6347+++ b/arch/mips/include/asm/exec.h
6348@@ -12,6 +12,6 @@
6349 #ifndef _ASM_EXEC_H
6350 #define _ASM_EXEC_H
6351
6352-extern unsigned long arch_align_stack(unsigned long sp);
6353+#define arch_align_stack(x) ((x) & ~0xfUL)
6354
6355 #endif /* _ASM_EXEC_H */
6356diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6357index 9e8ef59..1139d6b 100644
6358--- a/arch/mips/include/asm/hw_irq.h
6359+++ b/arch/mips/include/asm/hw_irq.h
6360@@ -10,7 +10,7 @@
6361
6362 #include <linux/atomic.h>
6363
6364-extern atomic_t irq_err_count;
6365+extern atomic_unchecked_t irq_err_count;
6366
6367 /*
6368 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6369diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6370index 46dfc3c..a16b13a 100644
6371--- a/arch/mips/include/asm/local.h
6372+++ b/arch/mips/include/asm/local.h
6373@@ -12,15 +12,25 @@ typedef struct
6374 atomic_long_t a;
6375 } local_t;
6376
6377+typedef struct {
6378+ atomic_long_unchecked_t a;
6379+} local_unchecked_t;
6380+
6381 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6382
6383 #define local_read(l) atomic_long_read(&(l)->a)
6384+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6385 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6386+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6387
6388 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6389+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6390 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6391+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6392 #define local_inc(l) atomic_long_inc(&(l)->a)
6393+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6394 #define local_dec(l) atomic_long_dec(&(l)->a)
6395+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6396
6397 /*
6398 * Same as above, but return the result value
6399@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6400 return result;
6401 }
6402
6403+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6404+{
6405+ unsigned long result;
6406+
6407+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6408+ unsigned long temp;
6409+
6410+ __asm__ __volatile__(
6411+ " .set mips3 \n"
6412+ "1:" __LL "%1, %2 # local_add_return \n"
6413+ " addu %0, %1, %3 \n"
6414+ __SC "%0, %2 \n"
6415+ " beqzl %0, 1b \n"
6416+ " addu %0, %1, %3 \n"
6417+ " .set mips0 \n"
6418+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6419+ : "Ir" (i), "m" (l->a.counter)
6420+ : "memory");
6421+ } else if (kernel_uses_llsc) {
6422+ unsigned long temp;
6423+
6424+ __asm__ __volatile__(
6425+ " .set mips3 \n"
6426+ "1:" __LL "%1, %2 # local_add_return \n"
6427+ " addu %0, %1, %3 \n"
6428+ __SC "%0, %2 \n"
6429+ " beqz %0, 1b \n"
6430+ " addu %0, %1, %3 \n"
6431+ " .set mips0 \n"
6432+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6433+ : "Ir" (i), "m" (l->a.counter)
6434+ : "memory");
6435+ } else {
6436+ unsigned long flags;
6437+
6438+ local_irq_save(flags);
6439+ result = l->a.counter;
6440+ result += i;
6441+ l->a.counter = result;
6442+ local_irq_restore(flags);
6443+ }
6444+
6445+ return result;
6446+}
6447+
6448 static __inline__ long local_sub_return(long i, local_t * l)
6449 {
6450 unsigned long result;
6451@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6452
6453 #define local_cmpxchg(l, o, n) \
6454 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6455+#define local_cmpxchg_unchecked(l, o, n) \
6456+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6457 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6458
6459 /**
6460diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6461index 154b70a..426ae3d 100644
6462--- a/arch/mips/include/asm/page.h
6463+++ b/arch/mips/include/asm/page.h
6464@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6465 #ifdef CONFIG_CPU_MIPS32
6466 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6467 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6468- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6469+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6470 #else
6471 typedef struct { unsigned long long pte; } pte_t;
6472 #define pte_val(x) ((x).pte)
6473diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6474index b336037..5b874cc 100644
6475--- a/arch/mips/include/asm/pgalloc.h
6476+++ b/arch/mips/include/asm/pgalloc.h
6477@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6478 {
6479 set_pud(pud, __pud((unsigned long)pmd));
6480 }
6481+
6482+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6483+{
6484+ pud_populate(mm, pud, pmd);
6485+}
6486 #endif
6487
6488 /*
6489diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6490index 845016d..3303268 100644
6491--- a/arch/mips/include/asm/pgtable.h
6492+++ b/arch/mips/include/asm/pgtable.h
6493@@ -20,6 +20,9 @@
6494 #include <asm/io.h>
6495 #include <asm/pgtable-bits.h>
6496
6497+#define ktla_ktva(addr) (addr)
6498+#define ktva_ktla(addr) (addr)
6499+
6500 struct mm_struct;
6501 struct vm_area_struct;
6502
6503diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6504index e4440f9..8fb0005 100644
6505--- a/arch/mips/include/asm/thread_info.h
6506+++ b/arch/mips/include/asm/thread_info.h
6507@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6508 #define TIF_SECCOMP 4 /* secure computing */
6509 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6510 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6511+/* li takes a 32bit immediate */
6512+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6513+
6514 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6515 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6516 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6517@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6518 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6519 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6520 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6521+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6522
6523 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6524 _TIF_SYSCALL_AUDIT | \
6525- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6526+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6527+ _TIF_GRSEC_SETXID)
6528
6529 /* work to do in syscall_trace_leave() */
6530 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6531- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6532+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6533
6534 /* work to do on interrupt/exception return */
6535 #define _TIF_WORK_MASK \
6536@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6537 /* work to do on any return to u-space */
6538 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6539 _TIF_WORK_SYSCALL_EXIT | \
6540- _TIF_SYSCALL_TRACEPOINT)
6541+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6542
6543 /*
6544 * We stash processor id into a COP0 register to retrieve it fast
6545diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6546index bf8b324..cec5705 100644
6547--- a/arch/mips/include/asm/uaccess.h
6548+++ b/arch/mips/include/asm/uaccess.h
6549@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6550 __ok == 0; \
6551 })
6552
6553+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6554 #define access_ok(type, addr, size) \
6555 likely(__access_ok((addr), (size), __access_mask))
6556
6557diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6558index 1188e00..41cf144 100644
6559--- a/arch/mips/kernel/binfmt_elfn32.c
6560+++ b/arch/mips/kernel/binfmt_elfn32.c
6561@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6562 #undef ELF_ET_DYN_BASE
6563 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6564
6565+#ifdef CONFIG_PAX_ASLR
6566+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6567+
6568+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6569+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6570+#endif
6571+
6572 #include <asm/processor.h>
6573 #include <linux/module.h>
6574 #include <linux/elfcore.h>
6575diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6576index 9287678..f870e47 100644
6577--- a/arch/mips/kernel/binfmt_elfo32.c
6578+++ b/arch/mips/kernel/binfmt_elfo32.c
6579@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6580 #undef ELF_ET_DYN_BASE
6581 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6582
6583+#ifdef CONFIG_PAX_ASLR
6584+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6585+
6586+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6587+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6588+#endif
6589+
6590 #include <asm/processor.h>
6591
6592 #include <linux/module.h>
6593diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6594index a74ec3a..4f06f18 100644
6595--- a/arch/mips/kernel/i8259.c
6596+++ b/arch/mips/kernel/i8259.c
6597@@ -202,7 +202,7 @@ spurious_8259A_irq:
6598 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6599 spurious_irq_mask |= irqmask;
6600 }
6601- atomic_inc(&irq_err_count);
6602+ atomic_inc_unchecked(&irq_err_count);
6603 /*
6604 * Theoretically we do not have to handle this IRQ,
6605 * but in Linux this does not cause problems and is
6606diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6607index 44a1f79..2bd6aa3 100644
6608--- a/arch/mips/kernel/irq-gt641xx.c
6609+++ b/arch/mips/kernel/irq-gt641xx.c
6610@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6611 }
6612 }
6613
6614- atomic_inc(&irq_err_count);
6615+ atomic_inc_unchecked(&irq_err_count);
6616 }
6617
6618 void __init gt641xx_irq_init(void)
6619diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6620index d2bfbc2..a8eacd2 100644
6621--- a/arch/mips/kernel/irq.c
6622+++ b/arch/mips/kernel/irq.c
6623@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6624 printk("unexpected IRQ # %d\n", irq);
6625 }
6626
6627-atomic_t irq_err_count;
6628+atomic_unchecked_t irq_err_count;
6629
6630 int arch_show_interrupts(struct seq_file *p, int prec)
6631 {
6632- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6633+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6634 return 0;
6635 }
6636
6637 asmlinkage void spurious_interrupt(void)
6638 {
6639- atomic_inc(&irq_err_count);
6640+ atomic_inc_unchecked(&irq_err_count);
6641 }
6642
6643 void __init init_IRQ(void)
6644@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6645 #endif
6646 }
6647
6648+
6649 #ifdef DEBUG_STACKOVERFLOW
6650+extern void gr_handle_kernel_exploit(void);
6651+
6652 static inline void check_stack_overflow(void)
6653 {
6654 unsigned long sp;
6655@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6656 printk("do_IRQ: stack overflow: %ld\n",
6657 sp - sizeof(struct thread_info));
6658 dump_stack();
6659+ gr_handle_kernel_exploit();
6660 }
6661 }
6662 #else
6663diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6664index 0614717..002fa43 100644
6665--- a/arch/mips/kernel/pm-cps.c
6666+++ b/arch/mips/kernel/pm-cps.c
6667@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6668 nc_core_ready_count = nc_addr;
6669
6670 /* Ensure ready_count is zero-initialised before the assembly runs */
6671- ACCESS_ONCE(*nc_core_ready_count) = 0;
6672+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6673 coupled_barrier(&per_cpu(pm_barrier, core), online);
6674
6675 /* Run the generated entry code */
6676diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6677index 85bff5d..39bc202 100644
6678--- a/arch/mips/kernel/process.c
6679+++ b/arch/mips/kernel/process.c
6680@@ -534,18 +534,6 @@ out:
6681 return pc;
6682 }
6683
6684-/*
6685- * Don't forget that the stack pointer must be aligned on a 8 bytes
6686- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6687- */
6688-unsigned long arch_align_stack(unsigned long sp)
6689-{
6690- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6691- sp -= get_random_int() & ~PAGE_MASK;
6692-
6693- return sp & ALMASK;
6694-}
6695-
6696 static void arch_dump_stack(void *info)
6697 {
6698 struct pt_regs *regs;
6699diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6700index 5104528..950bbdc 100644
6701--- a/arch/mips/kernel/ptrace.c
6702+++ b/arch/mips/kernel/ptrace.c
6703@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6704 return ret;
6705 }
6706
6707+#ifdef CONFIG_GRKERNSEC_SETXID
6708+extern void gr_delayed_cred_worker(void);
6709+#endif
6710+
6711 /*
6712 * Notification of system call entry/exit
6713 * - triggered by current->work.syscall_trace
6714@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6715 tracehook_report_syscall_entry(regs))
6716 ret = -1;
6717
6718+#ifdef CONFIG_GRKERNSEC_SETXID
6719+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6720+ gr_delayed_cred_worker();
6721+#endif
6722+
6723 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6724 trace_sys_enter(regs, regs->regs[2]);
6725
6726diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6727index 07fc524..b9d7f28 100644
6728--- a/arch/mips/kernel/reset.c
6729+++ b/arch/mips/kernel/reset.c
6730@@ -13,6 +13,7 @@
6731 #include <linux/reboot.h>
6732
6733 #include <asm/reboot.h>
6734+#include <asm/bug.h>
6735
6736 /*
6737 * Urgs ... Too many MIPS machines to handle this in a generic way.
6738@@ -29,16 +30,19 @@ void machine_restart(char *command)
6739 {
6740 if (_machine_restart)
6741 _machine_restart(command);
6742+ BUG();
6743 }
6744
6745 void machine_halt(void)
6746 {
6747 if (_machine_halt)
6748 _machine_halt();
6749+ BUG();
6750 }
6751
6752 void machine_power_off(void)
6753 {
6754 if (pm_power_off)
6755 pm_power_off();
6756+ BUG();
6757 }
6758diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6759index 2242bdd..b284048 100644
6760--- a/arch/mips/kernel/sync-r4k.c
6761+++ b/arch/mips/kernel/sync-r4k.c
6762@@ -18,8 +18,8 @@
6763 #include <asm/mipsregs.h>
6764
6765 static atomic_t count_start_flag = ATOMIC_INIT(0);
6766-static atomic_t count_count_start = ATOMIC_INIT(0);
6767-static atomic_t count_count_stop = ATOMIC_INIT(0);
6768+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6769+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6770 static atomic_t count_reference = ATOMIC_INIT(0);
6771
6772 #define COUNTON 100
6773@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6774
6775 for (i = 0; i < NR_LOOPS; i++) {
6776 /* slaves loop on '!= 2' */
6777- while (atomic_read(&count_count_start) != 1)
6778+ while (atomic_read_unchecked(&count_count_start) != 1)
6779 mb();
6780- atomic_set(&count_count_stop, 0);
6781+ atomic_set_unchecked(&count_count_stop, 0);
6782 smp_wmb();
6783
6784 /* this lets the slaves write their count register */
6785- atomic_inc(&count_count_start);
6786+ atomic_inc_unchecked(&count_count_start);
6787
6788 /*
6789 * Everyone initialises count in the last loop:
6790@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6791 /*
6792 * Wait for all slaves to leave the synchronization point:
6793 */
6794- while (atomic_read(&count_count_stop) != 1)
6795+ while (atomic_read_unchecked(&count_count_stop) != 1)
6796 mb();
6797- atomic_set(&count_count_start, 0);
6798+ atomic_set_unchecked(&count_count_start, 0);
6799 smp_wmb();
6800- atomic_inc(&count_count_stop);
6801+ atomic_inc_unchecked(&count_count_stop);
6802 }
6803 /* Arrange for an interrupt in a short while */
6804 write_c0_compare(read_c0_count() + COUNTON);
6805@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6806 initcount = atomic_read(&count_reference);
6807
6808 for (i = 0; i < NR_LOOPS; i++) {
6809- atomic_inc(&count_count_start);
6810- while (atomic_read(&count_count_start) != 2)
6811+ atomic_inc_unchecked(&count_count_start);
6812+ while (atomic_read_unchecked(&count_count_start) != 2)
6813 mb();
6814
6815 /*
6816@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6817 if (i == NR_LOOPS-1)
6818 write_c0_count(initcount);
6819
6820- atomic_inc(&count_count_stop);
6821- while (atomic_read(&count_count_stop) != 2)
6822+ atomic_inc_unchecked(&count_count_stop);
6823+ while (atomic_read_unchecked(&count_count_stop) != 2)
6824 mb();
6825 }
6826 /* Arrange for an interrupt in a short while */
6827diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6828index c3b41e2..46c32e9 100644
6829--- a/arch/mips/kernel/traps.c
6830+++ b/arch/mips/kernel/traps.c
6831@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6832 siginfo_t info;
6833
6834 prev_state = exception_enter();
6835- die_if_kernel("Integer overflow", regs);
6836+ if (unlikely(!user_mode(regs))) {
6837+
6838+#ifdef CONFIG_PAX_REFCOUNT
6839+ if (fixup_exception(regs)) {
6840+ pax_report_refcount_overflow(regs);
6841+ exception_exit(prev_state);
6842+ return;
6843+ }
6844+#endif
6845+
6846+ die("Integer overflow", regs);
6847+ }
6848
6849 info.si_code = FPE_INTOVF;
6850 info.si_signo = SIGFPE;
6851diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6852index 270bbd4..c01932a 100644
6853--- a/arch/mips/kvm/mips.c
6854+++ b/arch/mips/kvm/mips.c
6855@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6856 return r;
6857 }
6858
6859-int kvm_arch_init(void *opaque)
6860+int kvm_arch_init(const void *opaque)
6861 {
6862 if (kvm_mips_callbacks) {
6863 kvm_err("kvm: module already exists\n");
6864diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6865index 70ab5d6..62940fe 100644
6866--- a/arch/mips/mm/fault.c
6867+++ b/arch/mips/mm/fault.c
6868@@ -28,6 +28,23 @@
6869 #include <asm/highmem.h> /* For VMALLOC_END */
6870 #include <linux/kdebug.h>
6871
6872+#ifdef CONFIG_PAX_PAGEEXEC
6873+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6874+{
6875+ unsigned long i;
6876+
6877+ printk(KERN_ERR "PAX: bytes at PC: ");
6878+ for (i = 0; i < 5; i++) {
6879+ unsigned int c;
6880+ if (get_user(c, (unsigned int *)pc+i))
6881+ printk(KERN_CONT "???????? ");
6882+ else
6883+ printk(KERN_CONT "%08x ", c);
6884+ }
6885+ printk("\n");
6886+}
6887+#endif
6888+
6889 /*
6890 * This routine handles page faults. It determines the address,
6891 * and the problem, and then passes it off to one of the appropriate
6892@@ -201,6 +218,14 @@ bad_area:
6893 bad_area_nosemaphore:
6894 /* User mode accesses just cause a SIGSEGV */
6895 if (user_mode(regs)) {
6896+
6897+#ifdef CONFIG_PAX_PAGEEXEC
6898+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6899+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6900+ do_group_exit(SIGKILL);
6901+ }
6902+#endif
6903+
6904 tsk->thread.cp0_badvaddr = address;
6905 tsk->thread.error_code = write;
6906 #if 0
6907diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6908index f1baadd..5472dca 100644
6909--- a/arch/mips/mm/mmap.c
6910+++ b/arch/mips/mm/mmap.c
6911@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6912 struct vm_area_struct *vma;
6913 unsigned long addr = addr0;
6914 int do_color_align;
6915+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6916 struct vm_unmapped_area_info info;
6917
6918 if (unlikely(len > TASK_SIZE))
6919@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6920 do_color_align = 1;
6921
6922 /* requesting a specific address */
6923+
6924+#ifdef CONFIG_PAX_RANDMMAP
6925+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6926+#endif
6927+
6928 if (addr) {
6929 if (do_color_align)
6930 addr = COLOUR_ALIGN(addr, pgoff);
6931@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6932 addr = PAGE_ALIGN(addr);
6933
6934 vma = find_vma(mm, addr);
6935- if (TASK_SIZE - len >= addr &&
6936- (!vma || addr + len <= vma->vm_start))
6937+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6938 return addr;
6939 }
6940
6941 info.length = len;
6942 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6943 info.align_offset = pgoff << PAGE_SHIFT;
6944+ info.threadstack_offset = offset;
6945
6946 if (dir == DOWN) {
6947 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6948@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6949 {
6950 unsigned long random_factor = 0UL;
6951
6952+#ifdef CONFIG_PAX_RANDMMAP
6953+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6954+#endif
6955+
6956 if (current->flags & PF_RANDOMIZE) {
6957 random_factor = get_random_int();
6958 random_factor = random_factor << PAGE_SHIFT;
6959@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6960
6961 if (mmap_is_legacy()) {
6962 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6963+
6964+#ifdef CONFIG_PAX_RANDMMAP
6965+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6966+ mm->mmap_base += mm->delta_mmap;
6967+#endif
6968+
6969 mm->get_unmapped_area = arch_get_unmapped_area;
6970 } else {
6971 mm->mmap_base = mmap_base(random_factor);
6972+
6973+#ifdef CONFIG_PAX_RANDMMAP
6974+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6975+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6976+#endif
6977+
6978 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6979 }
6980 }
6981
6982-static inline unsigned long brk_rnd(void)
6983-{
6984- unsigned long rnd = get_random_int();
6985-
6986- rnd = rnd << PAGE_SHIFT;
6987- /* 8MB for 32bit, 256MB for 64bit */
6988- if (TASK_IS_32BIT_ADDR)
6989- rnd = rnd & 0x7ffffful;
6990- else
6991- rnd = rnd & 0xffffffful;
6992-
6993- return rnd;
6994-}
6995-
6996-unsigned long arch_randomize_brk(struct mm_struct *mm)
6997-{
6998- unsigned long base = mm->brk;
6999- unsigned long ret;
7000-
7001- ret = PAGE_ALIGN(base + brk_rnd());
7002-
7003- if (ret < mm->brk)
7004- return mm->brk;
7005-
7006- return ret;
7007-}
7008-
7009 int __virt_addr_valid(const volatile void *kaddr)
7010 {
7011 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7012diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7013index d07e041..bedb72b 100644
7014--- a/arch/mips/pci/pci-octeon.c
7015+++ b/arch/mips/pci/pci-octeon.c
7016@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7017
7018
7019 static struct pci_ops octeon_pci_ops = {
7020- octeon_read_config,
7021- octeon_write_config,
7022+ .read = octeon_read_config,
7023+ .write = octeon_write_config,
7024 };
7025
7026 static struct resource octeon_pci_mem_resource = {
7027diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7028index 5e36c33..eb4a17b 100644
7029--- a/arch/mips/pci/pcie-octeon.c
7030+++ b/arch/mips/pci/pcie-octeon.c
7031@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7032 }
7033
7034 static struct pci_ops octeon_pcie0_ops = {
7035- octeon_pcie0_read_config,
7036- octeon_pcie0_write_config,
7037+ .read = octeon_pcie0_read_config,
7038+ .write = octeon_pcie0_write_config,
7039 };
7040
7041 static struct resource octeon_pcie0_mem_resource = {
7042@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7043 };
7044
7045 static struct pci_ops octeon_pcie1_ops = {
7046- octeon_pcie1_read_config,
7047- octeon_pcie1_write_config,
7048+ .read = octeon_pcie1_read_config,
7049+ .write = octeon_pcie1_write_config,
7050 };
7051
7052 static struct resource octeon_pcie1_mem_resource = {
7053@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7054 };
7055
7056 static struct pci_ops octeon_dummy_ops = {
7057- octeon_dummy_read_config,
7058- octeon_dummy_write_config,
7059+ .read = octeon_dummy_read_config,
7060+ .write = octeon_dummy_write_config,
7061 };
7062
7063 static struct resource octeon_dummy_mem_resource = {
7064diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7065index a2358b4..7cead4f 100644
7066--- a/arch/mips/sgi-ip27/ip27-nmi.c
7067+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7068@@ -187,9 +187,9 @@ void
7069 cont_nmi_dump(void)
7070 {
7071 #ifndef REAL_NMI_SIGNAL
7072- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7073+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7074
7075- atomic_inc(&nmied_cpus);
7076+ atomic_inc_unchecked(&nmied_cpus);
7077 #endif
7078 /*
7079 * Only allow 1 cpu to proceed
7080@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7081 udelay(10000);
7082 }
7083 #else
7084- while (atomic_read(&nmied_cpus) != num_online_cpus());
7085+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7086 #endif
7087
7088 /*
7089diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7090index a046b30..6799527 100644
7091--- a/arch/mips/sni/rm200.c
7092+++ b/arch/mips/sni/rm200.c
7093@@ -270,7 +270,7 @@ spurious_8259A_irq:
7094 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7095 spurious_irq_mask |= irqmask;
7096 }
7097- atomic_inc(&irq_err_count);
7098+ atomic_inc_unchecked(&irq_err_count);
7099 /*
7100 * Theoretically we do not have to handle this IRQ,
7101 * but in Linux this does not cause problems and is
7102diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7103index 41e873b..34d33a7 100644
7104--- a/arch/mips/vr41xx/common/icu.c
7105+++ b/arch/mips/vr41xx/common/icu.c
7106@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7107
7108 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7109
7110- atomic_inc(&irq_err_count);
7111+ atomic_inc_unchecked(&irq_err_count);
7112
7113 return -1;
7114 }
7115diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7116index ae0e4ee..e8f0692 100644
7117--- a/arch/mips/vr41xx/common/irq.c
7118+++ b/arch/mips/vr41xx/common/irq.c
7119@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7120 irq_cascade_t *cascade;
7121
7122 if (irq >= NR_IRQS) {
7123- atomic_inc(&irq_err_count);
7124+ atomic_inc_unchecked(&irq_err_count);
7125 return;
7126 }
7127
7128@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7129 ret = cascade->get_irq(irq);
7130 irq = ret;
7131 if (ret < 0)
7132- atomic_inc(&irq_err_count);
7133+ atomic_inc_unchecked(&irq_err_count);
7134 else
7135 irq_dispatch(irq);
7136 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7137diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7138index 967d144..db12197 100644
7139--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7140+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7141@@ -11,12 +11,14 @@
7142 #ifndef _ASM_PROC_CACHE_H
7143 #define _ASM_PROC_CACHE_H
7144
7145+#include <linux/const.h>
7146+
7147 /* L1 cache */
7148
7149 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7150 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7151-#define L1_CACHE_BYTES 16 /* bytes per entry */
7152 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7153+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7154 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7155
7156 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7157diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7158index bcb5df2..84fabd2 100644
7159--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7160+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7161@@ -16,13 +16,15 @@
7162 #ifndef _ASM_PROC_CACHE_H
7163 #define _ASM_PROC_CACHE_H
7164
7165+#include <linux/const.h>
7166+
7167 /*
7168 * L1 cache
7169 */
7170 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7171 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7172-#define L1_CACHE_BYTES 32 /* bytes per entry */
7173 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7174+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7175 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7176
7177 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7178diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7179index 4ce7a01..449202a 100644
7180--- a/arch/openrisc/include/asm/cache.h
7181+++ b/arch/openrisc/include/asm/cache.h
7182@@ -19,11 +19,13 @@
7183 #ifndef __ASM_OPENRISC_CACHE_H
7184 #define __ASM_OPENRISC_CACHE_H
7185
7186+#include <linux/const.h>
7187+
7188 /* FIXME: How can we replace these with values from the CPU...
7189 * they shouldn't be hard-coded!
7190 */
7191
7192-#define L1_CACHE_BYTES 16
7193 #define L1_CACHE_SHIFT 4
7194+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7195
7196 #endif /* __ASM_OPENRISC_CACHE_H */
7197diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7198index 226f8ca..9d9b87d 100644
7199--- a/arch/parisc/include/asm/atomic.h
7200+++ b/arch/parisc/include/asm/atomic.h
7201@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7202 return dec;
7203 }
7204
7205+#define atomic64_read_unchecked(v) atomic64_read(v)
7206+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7207+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7208+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7209+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7210+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7211+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7212+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7213+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7214+
7215 #endif /* !CONFIG_64BIT */
7216
7217
7218diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7219index 47f11c7..3420df2 100644
7220--- a/arch/parisc/include/asm/cache.h
7221+++ b/arch/parisc/include/asm/cache.h
7222@@ -5,6 +5,7 @@
7223 #ifndef __ARCH_PARISC_CACHE_H
7224 #define __ARCH_PARISC_CACHE_H
7225
7226+#include <linux/const.h>
7227
7228 /*
7229 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7230@@ -15,13 +16,13 @@
7231 * just ruin performance.
7232 */
7233 #ifdef CONFIG_PA20
7234-#define L1_CACHE_BYTES 64
7235 #define L1_CACHE_SHIFT 6
7236 #else
7237-#define L1_CACHE_BYTES 32
7238 #define L1_CACHE_SHIFT 5
7239 #endif
7240
7241+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7242+
7243 #ifndef __ASSEMBLY__
7244
7245 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7246diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7247index 3391d06..c23a2cc 100644
7248--- a/arch/parisc/include/asm/elf.h
7249+++ b/arch/parisc/include/asm/elf.h
7250@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7251
7252 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7253
7254+#ifdef CONFIG_PAX_ASLR
7255+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7256+
7257+#define PAX_DELTA_MMAP_LEN 16
7258+#define PAX_DELTA_STACK_LEN 16
7259+#endif
7260+
7261 /* This yields a mask that user programs can use to figure out what
7262 instruction set this CPU supports. This could be done in user space,
7263 but it's not easy, and we've already done it here. */
7264diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7265index f213f5b..0af3e8e 100644
7266--- a/arch/parisc/include/asm/pgalloc.h
7267+++ b/arch/parisc/include/asm/pgalloc.h
7268@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7269 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7270 }
7271
7272+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7273+{
7274+ pgd_populate(mm, pgd, pmd);
7275+}
7276+
7277 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7278 {
7279 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7280@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7281 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7282 #define pmd_free(mm, x) do { } while (0)
7283 #define pgd_populate(mm, pmd, pte) BUG()
7284+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7285
7286 #endif
7287
7288diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7289index 22b89d1..ce34230 100644
7290--- a/arch/parisc/include/asm/pgtable.h
7291+++ b/arch/parisc/include/asm/pgtable.h
7292@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7293 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7294 #define PAGE_COPY PAGE_EXECREAD
7295 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7296+
7297+#ifdef CONFIG_PAX_PAGEEXEC
7298+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7299+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7300+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7301+#else
7302+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7303+# define PAGE_COPY_NOEXEC PAGE_COPY
7304+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7305+#endif
7306+
7307 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7308 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7309 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7310diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7311index a5cb070..8604ddc 100644
7312--- a/arch/parisc/include/asm/uaccess.h
7313+++ b/arch/parisc/include/asm/uaccess.h
7314@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7315 const void __user *from,
7316 unsigned long n)
7317 {
7318- int sz = __compiletime_object_size(to);
7319+ size_t sz = __compiletime_object_size(to);
7320 int ret = -EFAULT;
7321
7322- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7323+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7324 ret = __copy_from_user(to, from, n);
7325 else
7326 copy_from_user_overflow();
7327diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7328index 5822e8e..bc5e638 100644
7329--- a/arch/parisc/kernel/module.c
7330+++ b/arch/parisc/kernel/module.c
7331@@ -98,16 +98,38 @@
7332
7333 /* three functions to determine where in the module core
7334 * or init pieces the location is */
7335+static inline int in_init_rx(struct module *me, void *loc)
7336+{
7337+ return (loc >= me->module_init_rx &&
7338+ loc < (me->module_init_rx + me->init_size_rx));
7339+}
7340+
7341+static inline int in_init_rw(struct module *me, void *loc)
7342+{
7343+ return (loc >= me->module_init_rw &&
7344+ loc < (me->module_init_rw + me->init_size_rw));
7345+}
7346+
7347 static inline int in_init(struct module *me, void *loc)
7348 {
7349- return (loc >= me->module_init &&
7350- loc <= (me->module_init + me->init_size));
7351+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7352+}
7353+
7354+static inline int in_core_rx(struct module *me, void *loc)
7355+{
7356+ return (loc >= me->module_core_rx &&
7357+ loc < (me->module_core_rx + me->core_size_rx));
7358+}
7359+
7360+static inline int in_core_rw(struct module *me, void *loc)
7361+{
7362+ return (loc >= me->module_core_rw &&
7363+ loc < (me->module_core_rw + me->core_size_rw));
7364 }
7365
7366 static inline int in_core(struct module *me, void *loc)
7367 {
7368- return (loc >= me->module_core &&
7369- loc <= (me->module_core + me->core_size));
7370+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7371 }
7372
7373 static inline int in_local(struct module *me, void *loc)
7374@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7375 }
7376
7377 /* align things a bit */
7378- me->core_size = ALIGN(me->core_size, 16);
7379- me->arch.got_offset = me->core_size;
7380- me->core_size += gots * sizeof(struct got_entry);
7381+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7382+ me->arch.got_offset = me->core_size_rw;
7383+ me->core_size_rw += gots * sizeof(struct got_entry);
7384
7385- me->core_size = ALIGN(me->core_size, 16);
7386- me->arch.fdesc_offset = me->core_size;
7387- me->core_size += fdescs * sizeof(Elf_Fdesc);
7388+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7389+ me->arch.fdesc_offset = me->core_size_rw;
7390+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7391
7392 me->arch.got_max = gots;
7393 me->arch.fdesc_max = fdescs;
7394@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7395
7396 BUG_ON(value == 0);
7397
7398- got = me->module_core + me->arch.got_offset;
7399+ got = me->module_core_rw + me->arch.got_offset;
7400 for (i = 0; got[i].addr; i++)
7401 if (got[i].addr == value)
7402 goto out;
7403@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7404 #ifdef CONFIG_64BIT
7405 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7406 {
7407- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7408+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7409
7410 if (!value) {
7411 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7412@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7413
7414 /* Create new one */
7415 fdesc->addr = value;
7416- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7417+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7418 return (Elf_Addr)fdesc;
7419 }
7420 #endif /* CONFIG_64BIT */
7421@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7422
7423 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7424 end = table + sechdrs[me->arch.unwind_section].sh_size;
7425- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7426+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7427
7428 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7429 me->arch.unwind_section, table, end, gp);
7430diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7431index e1ffea2..46ed66e 100644
7432--- a/arch/parisc/kernel/sys_parisc.c
7433+++ b/arch/parisc/kernel/sys_parisc.c
7434@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7435 unsigned long task_size = TASK_SIZE;
7436 int do_color_align, last_mmap;
7437 struct vm_unmapped_area_info info;
7438+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7439
7440 if (len > task_size)
7441 return -ENOMEM;
7442@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7443 goto found_addr;
7444 }
7445
7446+#ifdef CONFIG_PAX_RANDMMAP
7447+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7448+#endif
7449+
7450 if (addr) {
7451 if (do_color_align && last_mmap)
7452 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7453@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7454 info.high_limit = mmap_upper_limit();
7455 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7456 info.align_offset = shared_align_offset(last_mmap, pgoff);
7457+ info.threadstack_offset = offset;
7458 addr = vm_unmapped_area(&info);
7459
7460 found_addr:
7461@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7462 unsigned long addr = addr0;
7463 int do_color_align, last_mmap;
7464 struct vm_unmapped_area_info info;
7465+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7466
7467 #ifdef CONFIG_64BIT
7468 /* This should only ever run for 32-bit processes. */
7469@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7470 }
7471
7472 /* requesting a specific address */
7473+#ifdef CONFIG_PAX_RANDMMAP
7474+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7475+#endif
7476+
7477 if (addr) {
7478 if (do_color_align && last_mmap)
7479 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7480@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7481 info.high_limit = mm->mmap_base;
7482 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7483 info.align_offset = shared_align_offset(last_mmap, pgoff);
7484+ info.threadstack_offset = offset;
7485 addr = vm_unmapped_area(&info);
7486 if (!(addr & ~PAGE_MASK))
7487 goto found_addr;
7488@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7489 mm->mmap_legacy_base = mmap_legacy_base();
7490 mm->mmap_base = mmap_upper_limit();
7491
7492+#ifdef CONFIG_PAX_RANDMMAP
7493+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7494+ mm->mmap_legacy_base += mm->delta_mmap;
7495+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7496+ }
7497+#endif
7498+
7499 if (mmap_is_legacy()) {
7500 mm->mmap_base = mm->mmap_legacy_base;
7501 mm->get_unmapped_area = arch_get_unmapped_area;
7502diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7503index 47ee620..1107387 100644
7504--- a/arch/parisc/kernel/traps.c
7505+++ b/arch/parisc/kernel/traps.c
7506@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7507
7508 down_read(&current->mm->mmap_sem);
7509 vma = find_vma(current->mm,regs->iaoq[0]);
7510- if (vma && (regs->iaoq[0] >= vma->vm_start)
7511- && (vma->vm_flags & VM_EXEC)) {
7512-
7513+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7514 fault_address = regs->iaoq[0];
7515 fault_space = regs->iasq[0];
7516
7517diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7518index e5120e6..8ddb5cc 100644
7519--- a/arch/parisc/mm/fault.c
7520+++ b/arch/parisc/mm/fault.c
7521@@ -15,6 +15,7 @@
7522 #include <linux/sched.h>
7523 #include <linux/interrupt.h>
7524 #include <linux/module.h>
7525+#include <linux/unistd.h>
7526
7527 #include <asm/uaccess.h>
7528 #include <asm/traps.h>
7529@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7530 static unsigned long
7531 parisc_acctyp(unsigned long code, unsigned int inst)
7532 {
7533- if (code == 6 || code == 16)
7534+ if (code == 6 || code == 7 || code == 16)
7535 return VM_EXEC;
7536
7537 switch (inst & 0xf0000000) {
7538@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7539 }
7540 #endif
7541
7542+#ifdef CONFIG_PAX_PAGEEXEC
7543+/*
7544+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7545+ *
7546+ * returns 1 when task should be killed
7547+ * 2 when rt_sigreturn trampoline was detected
7548+ * 3 when unpatched PLT trampoline was detected
7549+ */
7550+static int pax_handle_fetch_fault(struct pt_regs *regs)
7551+{
7552+
7553+#ifdef CONFIG_PAX_EMUPLT
7554+ int err;
7555+
7556+ do { /* PaX: unpatched PLT emulation */
7557+ unsigned int bl, depwi;
7558+
7559+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7560+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7561+
7562+ if (err)
7563+ break;
7564+
7565+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7566+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7567+
7568+ err = get_user(ldw, (unsigned int *)addr);
7569+ err |= get_user(bv, (unsigned int *)(addr+4));
7570+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7571+
7572+ if (err)
7573+ break;
7574+
7575+ if (ldw == 0x0E801096U &&
7576+ bv == 0xEAC0C000U &&
7577+ ldw2 == 0x0E881095U)
7578+ {
7579+ unsigned int resolver, map;
7580+
7581+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7582+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7583+ if (err)
7584+ break;
7585+
7586+ regs->gr[20] = instruction_pointer(regs)+8;
7587+ regs->gr[21] = map;
7588+ regs->gr[22] = resolver;
7589+ regs->iaoq[0] = resolver | 3UL;
7590+ regs->iaoq[1] = regs->iaoq[0] + 4;
7591+ return 3;
7592+ }
7593+ }
7594+ } while (0);
7595+#endif
7596+
7597+#ifdef CONFIG_PAX_EMUTRAMP
7598+
7599+#ifndef CONFIG_PAX_EMUSIGRT
7600+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7601+ return 1;
7602+#endif
7603+
7604+ do { /* PaX: rt_sigreturn emulation */
7605+ unsigned int ldi1, ldi2, bel, nop;
7606+
7607+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7608+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7609+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7610+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7611+
7612+ if (err)
7613+ break;
7614+
7615+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7616+ ldi2 == 0x3414015AU &&
7617+ bel == 0xE4008200U &&
7618+ nop == 0x08000240U)
7619+ {
7620+ regs->gr[25] = (ldi1 & 2) >> 1;
7621+ regs->gr[20] = __NR_rt_sigreturn;
7622+ regs->gr[31] = regs->iaoq[1] + 16;
7623+ regs->sr[0] = regs->iasq[1];
7624+ regs->iaoq[0] = 0x100UL;
7625+ regs->iaoq[1] = regs->iaoq[0] + 4;
7626+ regs->iasq[0] = regs->sr[2];
7627+ regs->iasq[1] = regs->sr[2];
7628+ return 2;
7629+ }
7630+ } while (0);
7631+#endif
7632+
7633+ return 1;
7634+}
7635+
7636+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7637+{
7638+ unsigned long i;
7639+
7640+ printk(KERN_ERR "PAX: bytes at PC: ");
7641+ for (i = 0; i < 5; i++) {
7642+ unsigned int c;
7643+ if (get_user(c, (unsigned int *)pc+i))
7644+ printk(KERN_CONT "???????? ");
7645+ else
7646+ printk(KERN_CONT "%08x ", c);
7647+ }
7648+ printk("\n");
7649+}
7650+#endif
7651+
7652 int fixup_exception(struct pt_regs *regs)
7653 {
7654 const struct exception_table_entry *fix;
7655@@ -234,8 +345,33 @@ retry:
7656
7657 good_area:
7658
7659- if ((vma->vm_flags & acc_type) != acc_type)
7660+ if ((vma->vm_flags & acc_type) != acc_type) {
7661+
7662+#ifdef CONFIG_PAX_PAGEEXEC
7663+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7664+ (address & ~3UL) == instruction_pointer(regs))
7665+ {
7666+ up_read(&mm->mmap_sem);
7667+ switch (pax_handle_fetch_fault(regs)) {
7668+
7669+#ifdef CONFIG_PAX_EMUPLT
7670+ case 3:
7671+ return;
7672+#endif
7673+
7674+#ifdef CONFIG_PAX_EMUTRAMP
7675+ case 2:
7676+ return;
7677+#endif
7678+
7679+ }
7680+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7681+ do_group_exit(SIGKILL);
7682+ }
7683+#endif
7684+
7685 goto bad_area;
7686+ }
7687
7688 /*
7689 * If for any reason at all we couldn't handle the fault, make
7690diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7691index a2a168e..e484682 100644
7692--- a/arch/powerpc/Kconfig
7693+++ b/arch/powerpc/Kconfig
7694@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7695 config KEXEC
7696 bool "kexec system call"
7697 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7698+ depends on !GRKERNSEC_KMEM
7699 help
7700 kexec is a system call that implements the ability to shutdown your
7701 current kernel, and to start another kernel. It is like a reboot
7702diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7703index 512d278..d31fadd 100644
7704--- a/arch/powerpc/include/asm/atomic.h
7705+++ b/arch/powerpc/include/asm/atomic.h
7706@@ -12,6 +12,11 @@
7707
7708 #define ATOMIC_INIT(i) { (i) }
7709
7710+#define _ASM_EXTABLE(from, to) \
7711+" .section __ex_table,\"a\"\n" \
7712+ PPC_LONG" " #from ", " #to"\n" \
7713+" .previous\n"
7714+
7715 static __inline__ int atomic_read(const atomic_t *v)
7716 {
7717 int t;
7718@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7719 return t;
7720 }
7721
7722+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7723+{
7724+ int t;
7725+
7726+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7727+
7728+ return t;
7729+}
7730+
7731 static __inline__ void atomic_set(atomic_t *v, int i)
7732 {
7733 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7734 }
7735
7736-#define ATOMIC_OP(op, asm_op) \
7737-static __inline__ void atomic_##op(int a, atomic_t *v) \
7738+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7739+{
7740+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7741+}
7742+
7743+#ifdef CONFIG_PAX_REFCOUNT
7744+#define __REFCOUNT_OP(op) op##o.
7745+#define __OVERFLOW_PRE \
7746+ " mcrxr cr0\n"
7747+#define __OVERFLOW_POST \
7748+ " bf 4*cr0+so, 3f\n" \
7749+ "2: .long 0x00c00b00\n" \
7750+ "3:\n"
7751+#define __OVERFLOW_EXTABLE \
7752+ "\n4:\n"
7753+ _ASM_EXTABLE(2b, 4b)
7754+#else
7755+#define __REFCOUNT_OP(op) op
7756+#define __OVERFLOW_PRE
7757+#define __OVERFLOW_POST
7758+#define __OVERFLOW_EXTABLE
7759+#endif
7760+
7761+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7762+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7763 { \
7764 int t; \
7765 \
7766 __asm__ __volatile__( \
7767-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7768+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7769+ pre_op \
7770 #asm_op " %0,%2,%0\n" \
7771+ post_op \
7772 PPC405_ERR77(0,%3) \
7773 " stwcx. %0,0,%3 \n" \
7774 " bne- 1b\n" \
7775+ extable \
7776 : "=&r" (t), "+m" (v->counter) \
7777 : "r" (a), "r" (&v->counter) \
7778 : "cc"); \
7779 } \
7780
7781-#define ATOMIC_OP_RETURN(op, asm_op) \
7782-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7783+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7784+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7785+
7786+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7787+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7788 { \
7789 int t; \
7790 \
7791 __asm__ __volatile__( \
7792 PPC_ATOMIC_ENTRY_BARRIER \
7793-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7794+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7795+ pre_op \
7796 #asm_op " %0,%1,%0\n" \
7797+ post_op \
7798 PPC405_ERR77(0,%2) \
7799 " stwcx. %0,0,%2 \n" \
7800 " bne- 1b\n" \
7801+ extable \
7802 PPC_ATOMIC_EXIT_BARRIER \
7803 : "=&r" (t) \
7804 : "r" (a), "r" (&v->counter) \
7805@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7806 return t; \
7807 }
7808
7809+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7810+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7811+
7812 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7813
7814 ATOMIC_OPS(add, add)
7815@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7816
7817 #undef ATOMIC_OPS
7818 #undef ATOMIC_OP_RETURN
7819+#undef __ATOMIC_OP_RETURN
7820 #undef ATOMIC_OP
7821+#undef __ATOMIC_OP
7822
7823 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7824
7825-static __inline__ void atomic_inc(atomic_t *v)
7826-{
7827- int t;
7828+/*
7829+ * atomic_inc - increment atomic variable
7830+ * @v: pointer of type atomic_t
7831+ *
7832+ * Automatically increments @v by 1
7833+ */
7834+#define atomic_inc(v) atomic_add(1, (v))
7835+#define atomic_inc_return(v) atomic_add_return(1, (v))
7836
7837- __asm__ __volatile__(
7838-"1: lwarx %0,0,%2 # atomic_inc\n\
7839- addic %0,%0,1\n"
7840- PPC405_ERR77(0,%2)
7841-" stwcx. %0,0,%2 \n\
7842- bne- 1b"
7843- : "=&r" (t), "+m" (v->counter)
7844- : "r" (&v->counter)
7845- : "cc", "xer");
7846+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7847+{
7848+ atomic_add_unchecked(1, v);
7849 }
7850
7851-static __inline__ int atomic_inc_return(atomic_t *v)
7852+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7853 {
7854- int t;
7855-
7856- __asm__ __volatile__(
7857- PPC_ATOMIC_ENTRY_BARRIER
7858-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7859- addic %0,%0,1\n"
7860- PPC405_ERR77(0,%1)
7861-" stwcx. %0,0,%1 \n\
7862- bne- 1b"
7863- PPC_ATOMIC_EXIT_BARRIER
7864- : "=&r" (t)
7865- : "r" (&v->counter)
7866- : "cc", "xer", "memory");
7867-
7868- return t;
7869+ return atomic_add_return_unchecked(1, v);
7870 }
7871
7872 /*
7873@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7874 */
7875 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7876
7877-static __inline__ void atomic_dec(atomic_t *v)
7878+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7879 {
7880- int t;
7881-
7882- __asm__ __volatile__(
7883-"1: lwarx %0,0,%2 # atomic_dec\n\
7884- addic %0,%0,-1\n"
7885- PPC405_ERR77(0,%2)\
7886-" stwcx. %0,0,%2\n\
7887- bne- 1b"
7888- : "=&r" (t), "+m" (v->counter)
7889- : "r" (&v->counter)
7890- : "cc", "xer");
7891+ return atomic_add_return_unchecked(1, v) == 0;
7892 }
7893
7894-static __inline__ int atomic_dec_return(atomic_t *v)
7895+/*
7896+ * atomic_dec - decrement atomic variable
7897+ * @v: pointer of type atomic_t
7898+ *
7899+ * Atomically decrements @v by 1
7900+ */
7901+#define atomic_dec(v) atomic_sub(1, (v))
7902+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7903+
7904+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7905 {
7906- int t;
7907-
7908- __asm__ __volatile__(
7909- PPC_ATOMIC_ENTRY_BARRIER
7910-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7911- addic %0,%0,-1\n"
7912- PPC405_ERR77(0,%1)
7913-" stwcx. %0,0,%1\n\
7914- bne- 1b"
7915- PPC_ATOMIC_EXIT_BARRIER
7916- : "=&r" (t)
7917- : "r" (&v->counter)
7918- : "cc", "xer", "memory");
7919-
7920- return t;
7921+ atomic_sub_unchecked(1, v);
7922 }
7923
7924 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7925 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7926
7927+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7928+{
7929+ return cmpxchg(&(v->counter), old, new);
7930+}
7931+
7932+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7933+{
7934+ return xchg(&(v->counter), new);
7935+}
7936+
7937 /**
7938 * __atomic_add_unless - add unless the number is a given value
7939 * @v: pointer of type atomic_t
7940@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7941 PPC_ATOMIC_ENTRY_BARRIER
7942 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7943 cmpw 0,%0,%3 \n\
7944- beq- 2f \n\
7945- add %0,%2,%0 \n"
7946+ beq- 2f \n"
7947+
7948+#ifdef CONFIG_PAX_REFCOUNT
7949+" mcrxr cr0\n"
7950+" addo. %0,%2,%0\n"
7951+" bf 4*cr0+so, 4f\n"
7952+"3:.long " "0x00c00b00""\n"
7953+"4:\n"
7954+#else
7955+ "add %0,%2,%0 \n"
7956+#endif
7957+
7958 PPC405_ERR77(0,%2)
7959 " stwcx. %0,0,%1 \n\
7960 bne- 1b \n"
7961+"5:"
7962+
7963+#ifdef CONFIG_PAX_REFCOUNT
7964+ _ASM_EXTABLE(3b, 5b)
7965+#endif
7966+
7967 PPC_ATOMIC_EXIT_BARRIER
7968 " subf %0,%2,%0 \n\
7969 2:"
7970@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7971 }
7972 #define atomic_dec_if_positive atomic_dec_if_positive
7973
7974+#define smp_mb__before_atomic_dec() smp_mb()
7975+#define smp_mb__after_atomic_dec() smp_mb()
7976+#define smp_mb__before_atomic_inc() smp_mb()
7977+#define smp_mb__after_atomic_inc() smp_mb()
7978+
7979 #ifdef __powerpc64__
7980
7981 #define ATOMIC64_INIT(i) { (i) }
7982@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7983 return t;
7984 }
7985
7986+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7987+{
7988+ long t;
7989+
7990+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7991+
7992+ return t;
7993+}
7994+
7995 static __inline__ void atomic64_set(atomic64_t *v, long i)
7996 {
7997 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7998 }
7999
8000-#define ATOMIC64_OP(op, asm_op) \
8001-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
8002+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8003+{
8004+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8005+}
8006+
8007+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8008+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8009 { \
8010 long t; \
8011 \
8012 __asm__ __volatile__( \
8013 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8014+ pre_op \
8015 #asm_op " %0,%2,%0\n" \
8016+ post_op \
8017 " stdcx. %0,0,%3 \n" \
8018 " bne- 1b\n" \
8019+ extable \
8020 : "=&r" (t), "+m" (v->counter) \
8021 : "r" (a), "r" (&v->counter) \
8022 : "cc"); \
8023 }
8024
8025-#define ATOMIC64_OP_RETURN(op, asm_op) \
8026-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8027+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8028+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8029+
8030+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8031+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8032 { \
8033 long t; \
8034 \
8035 __asm__ __volatile__( \
8036 PPC_ATOMIC_ENTRY_BARRIER \
8037 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8038+ pre_op \
8039 #asm_op " %0,%1,%0\n" \
8040+ post_op \
8041 " stdcx. %0,0,%2 \n" \
8042 " bne- 1b\n" \
8043+ extable \
8044 PPC_ATOMIC_EXIT_BARRIER \
8045 : "=&r" (t) \
8046 : "r" (a), "r" (&v->counter) \
8047@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8048 return t; \
8049 }
8050
8051+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8052+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8053+
8054 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8055
8056 ATOMIC64_OPS(add, add)
8057@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8058
8059 #undef ATOMIC64_OPS
8060 #undef ATOMIC64_OP_RETURN
8061+#undef __ATOMIC64_OP_RETURN
8062 #undef ATOMIC64_OP
8063+#undef __ATOMIC64_OP
8064+#undef __OVERFLOW_EXTABLE
8065+#undef __OVERFLOW_POST
8066+#undef __OVERFLOW_PRE
8067+#undef __REFCOUNT_OP
8068
8069 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8070
8071-static __inline__ void atomic64_inc(atomic64_t *v)
8072-{
8073- long t;
8074+/*
8075+ * atomic64_inc - increment atomic variable
8076+ * @v: pointer of type atomic64_t
8077+ *
8078+ * Automatically increments @v by 1
8079+ */
8080+#define atomic64_inc(v) atomic64_add(1, (v))
8081+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8082
8083- __asm__ __volatile__(
8084-"1: ldarx %0,0,%2 # atomic64_inc\n\
8085- addic %0,%0,1\n\
8086- stdcx. %0,0,%2 \n\
8087- bne- 1b"
8088- : "=&r" (t), "+m" (v->counter)
8089- : "r" (&v->counter)
8090- : "cc", "xer");
8091+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8092+{
8093+ atomic64_add_unchecked(1, v);
8094 }
8095
8096-static __inline__ long atomic64_inc_return(atomic64_t *v)
8097+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8098 {
8099- long t;
8100-
8101- __asm__ __volatile__(
8102- PPC_ATOMIC_ENTRY_BARRIER
8103-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8104- addic %0,%0,1\n\
8105- stdcx. %0,0,%1 \n\
8106- bne- 1b"
8107- PPC_ATOMIC_EXIT_BARRIER
8108- : "=&r" (t)
8109- : "r" (&v->counter)
8110- : "cc", "xer", "memory");
8111-
8112- return t;
8113+ return atomic64_add_return_unchecked(1, v);
8114 }
8115
8116 /*
8117@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8118 */
8119 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8120
8121-static __inline__ void atomic64_dec(atomic64_t *v)
8122+/*
8123+ * atomic64_dec - decrement atomic variable
8124+ * @v: pointer of type atomic64_t
8125+ *
8126+ * Atomically decrements @v by 1
8127+ */
8128+#define atomic64_dec(v) atomic64_sub(1, (v))
8129+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8130+
8131+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8132 {
8133- long t;
8134-
8135- __asm__ __volatile__(
8136-"1: ldarx %0,0,%2 # atomic64_dec\n\
8137- addic %0,%0,-1\n\
8138- stdcx. %0,0,%2\n\
8139- bne- 1b"
8140- : "=&r" (t), "+m" (v->counter)
8141- : "r" (&v->counter)
8142- : "cc", "xer");
8143-}
8144-
8145-static __inline__ long atomic64_dec_return(atomic64_t *v)
8146-{
8147- long t;
8148-
8149- __asm__ __volatile__(
8150- PPC_ATOMIC_ENTRY_BARRIER
8151-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8152- addic %0,%0,-1\n\
8153- stdcx. %0,0,%1\n\
8154- bne- 1b"
8155- PPC_ATOMIC_EXIT_BARRIER
8156- : "=&r" (t)
8157- : "r" (&v->counter)
8158- : "cc", "xer", "memory");
8159-
8160- return t;
8161+ atomic64_sub_unchecked(1, v);
8162 }
8163
8164 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8165@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8166 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8167 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8168
8169+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8170+{
8171+ return cmpxchg(&(v->counter), old, new);
8172+}
8173+
8174+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8175+{
8176+ return xchg(&(v->counter), new);
8177+}
8178+
8179 /**
8180 * atomic64_add_unless - add unless the number is a given value
8181 * @v: pointer of type atomic64_t
8182@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8183
8184 __asm__ __volatile__ (
8185 PPC_ATOMIC_ENTRY_BARRIER
8186-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8187+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8188 cmpd 0,%0,%3 \n\
8189- beq- 2f \n\
8190- add %0,%2,%0 \n"
8191+ beq- 2f \n"
8192+
8193+#ifdef CONFIG_PAX_REFCOUNT
8194+" mcrxr cr0\n"
8195+" addo. %0,%2,%0\n"
8196+" bf 4*cr0+so, 4f\n"
8197+"3:.long " "0x00c00b00""\n"
8198+"4:\n"
8199+#else
8200+ "add %0,%2,%0 \n"
8201+#endif
8202+
8203 " stdcx. %0,0,%1 \n\
8204 bne- 1b \n"
8205 PPC_ATOMIC_EXIT_BARRIER
8206+"5:"
8207+
8208+#ifdef CONFIG_PAX_REFCOUNT
8209+ _ASM_EXTABLE(3b, 5b)
8210+#endif
8211+
8212 " subf %0,%2,%0 \n\
8213 2:"
8214 : "=&r" (t)
8215diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8216index a3bf5be..e03ba81 100644
8217--- a/arch/powerpc/include/asm/barrier.h
8218+++ b/arch/powerpc/include/asm/barrier.h
8219@@ -76,7 +76,7 @@
8220 do { \
8221 compiletime_assert_atomic_type(*p); \
8222 smp_lwsync(); \
8223- ACCESS_ONCE(*p) = (v); \
8224+ ACCESS_ONCE_RW(*p) = (v); \
8225 } while (0)
8226
8227 #define smp_load_acquire(p) \
8228diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8229index 34a05a1..a1f2c67 100644
8230--- a/arch/powerpc/include/asm/cache.h
8231+++ b/arch/powerpc/include/asm/cache.h
8232@@ -4,6 +4,7 @@
8233 #ifdef __KERNEL__
8234
8235 #include <asm/reg.h>
8236+#include <linux/const.h>
8237
8238 /* bytes per L1 cache line */
8239 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8240@@ -23,7 +24,7 @@
8241 #define L1_CACHE_SHIFT 7
8242 #endif
8243
8244-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8245+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8246
8247 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8248
8249diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8250index 57d289a..b36c98c 100644
8251--- a/arch/powerpc/include/asm/elf.h
8252+++ b/arch/powerpc/include/asm/elf.h
8253@@ -30,6 +30,18 @@
8254
8255 #define ELF_ET_DYN_BASE 0x20000000
8256
8257+#ifdef CONFIG_PAX_ASLR
8258+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8259+
8260+#ifdef __powerpc64__
8261+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8262+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8263+#else
8264+#define PAX_DELTA_MMAP_LEN 15
8265+#define PAX_DELTA_STACK_LEN 15
8266+#endif
8267+#endif
8268+
8269 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8270
8271 /*
8272@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8273 (0x7ff >> (PAGE_SHIFT - 12)) : \
8274 (0x3ffff >> (PAGE_SHIFT - 12)))
8275
8276-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8277-#define arch_randomize_brk arch_randomize_brk
8278-
8279-
8280 #ifdef CONFIG_SPU_BASE
8281 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8282 #define NT_SPU 1
8283diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8284index 8196e9c..d83a9f3 100644
8285--- a/arch/powerpc/include/asm/exec.h
8286+++ b/arch/powerpc/include/asm/exec.h
8287@@ -4,6 +4,6 @@
8288 #ifndef _ASM_POWERPC_EXEC_H
8289 #define _ASM_POWERPC_EXEC_H
8290
8291-extern unsigned long arch_align_stack(unsigned long sp);
8292+#define arch_align_stack(x) ((x) & ~0xfUL)
8293
8294 #endif /* _ASM_POWERPC_EXEC_H */
8295diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8296index 5acabbd..7ea14fa 100644
8297--- a/arch/powerpc/include/asm/kmap_types.h
8298+++ b/arch/powerpc/include/asm/kmap_types.h
8299@@ -10,7 +10,7 @@
8300 * 2 of the License, or (at your option) any later version.
8301 */
8302
8303-#define KM_TYPE_NR 16
8304+#define KM_TYPE_NR 17
8305
8306 #endif /* __KERNEL__ */
8307 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8308diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8309index b8da913..c02b593 100644
8310--- a/arch/powerpc/include/asm/local.h
8311+++ b/arch/powerpc/include/asm/local.h
8312@@ -9,21 +9,65 @@ typedef struct
8313 atomic_long_t a;
8314 } local_t;
8315
8316+typedef struct
8317+{
8318+ atomic_long_unchecked_t a;
8319+} local_unchecked_t;
8320+
8321 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8322
8323 #define local_read(l) atomic_long_read(&(l)->a)
8324+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8325 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8326+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8327
8328 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8329+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8330 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8331+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8332 #define local_inc(l) atomic_long_inc(&(l)->a)
8333+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8334 #define local_dec(l) atomic_long_dec(&(l)->a)
8335+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8336
8337 static __inline__ long local_add_return(long a, local_t *l)
8338 {
8339 long t;
8340
8341 __asm__ __volatile__(
8342+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8343+
8344+#ifdef CONFIG_PAX_REFCOUNT
8345+" mcrxr cr0\n"
8346+" addo. %0,%1,%0\n"
8347+" bf 4*cr0+so, 3f\n"
8348+"2:.long " "0x00c00b00""\n"
8349+#else
8350+" add %0,%1,%0\n"
8351+#endif
8352+
8353+"3:\n"
8354+ PPC405_ERR77(0,%2)
8355+ PPC_STLCX "%0,0,%2 \n\
8356+ bne- 1b"
8357+
8358+#ifdef CONFIG_PAX_REFCOUNT
8359+"\n4:\n"
8360+ _ASM_EXTABLE(2b, 4b)
8361+#endif
8362+
8363+ : "=&r" (t)
8364+ : "r" (a), "r" (&(l->a.counter))
8365+ : "cc", "memory");
8366+
8367+ return t;
8368+}
8369+
8370+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8371+{
8372+ long t;
8373+
8374+ __asm__ __volatile__(
8375 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8376 add %0,%1,%0\n"
8377 PPC405_ERR77(0,%2)
8378@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8379
8380 #define local_cmpxchg(l, o, n) \
8381 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8382+#define local_cmpxchg_unchecked(l, o, n) \
8383+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8384 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8385
8386 /**
8387diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8388index 8565c25..2865190 100644
8389--- a/arch/powerpc/include/asm/mman.h
8390+++ b/arch/powerpc/include/asm/mman.h
8391@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8392 }
8393 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8394
8395-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8396+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8397 {
8398 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8399 }
8400diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8401index 69c0598..2c56964 100644
8402--- a/arch/powerpc/include/asm/page.h
8403+++ b/arch/powerpc/include/asm/page.h
8404@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8405 * and needs to be executable. This means the whole heap ends
8406 * up being executable.
8407 */
8408-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8409- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8410+#define VM_DATA_DEFAULT_FLAGS32 \
8411+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8412+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8413
8414 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8415 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8416@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8417 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8418 #endif
8419
8420+#define ktla_ktva(addr) (addr)
8421+#define ktva_ktla(addr) (addr)
8422+
8423 #ifndef CONFIG_PPC_BOOK3S_64
8424 /*
8425 * Use the top bit of the higher-level page table entries to indicate whether
8426diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8427index d908a46..3753f71 100644
8428--- a/arch/powerpc/include/asm/page_64.h
8429+++ b/arch/powerpc/include/asm/page_64.h
8430@@ -172,15 +172,18 @@ do { \
8431 * stack by default, so in the absence of a PT_GNU_STACK program header
8432 * we turn execute permission off.
8433 */
8434-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8435- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8436+#define VM_STACK_DEFAULT_FLAGS32 \
8437+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8438+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8439
8440 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8441 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8442
8443+#ifndef CONFIG_PAX_PAGEEXEC
8444 #define VM_STACK_DEFAULT_FLAGS \
8445 (is_32bit_task() ? \
8446 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8447+#endif
8448
8449 #include <asm-generic/getorder.h>
8450
8451diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8452index 4b0be20..c15a27d 100644
8453--- a/arch/powerpc/include/asm/pgalloc-64.h
8454+++ b/arch/powerpc/include/asm/pgalloc-64.h
8455@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8456 #ifndef CONFIG_PPC_64K_PAGES
8457
8458 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8459+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8460
8461 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8462 {
8463@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8464 pud_set(pud, (unsigned long)pmd);
8465 }
8466
8467+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8468+{
8469+ pud_populate(mm, pud, pmd);
8470+}
8471+
8472 #define pmd_populate(mm, pmd, pte_page) \
8473 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8474 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8475@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8476 #endif
8477
8478 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8479+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8480
8481 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8482 pte_t *pte)
8483diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8484index a8805fe..6d69617 100644
8485--- a/arch/powerpc/include/asm/pgtable.h
8486+++ b/arch/powerpc/include/asm/pgtable.h
8487@@ -2,6 +2,7 @@
8488 #define _ASM_POWERPC_PGTABLE_H
8489 #ifdef __KERNEL__
8490
8491+#include <linux/const.h>
8492 #ifndef __ASSEMBLY__
8493 #include <linux/mmdebug.h>
8494 #include <linux/mmzone.h>
8495diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8496index 4aad413..85d86bf 100644
8497--- a/arch/powerpc/include/asm/pte-hash32.h
8498+++ b/arch/powerpc/include/asm/pte-hash32.h
8499@@ -21,6 +21,7 @@
8500 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8501 #define _PAGE_USER 0x004 /* usermode access allowed */
8502 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8503+#define _PAGE_EXEC _PAGE_GUARDED
8504 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8505 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8506 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8507diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8508index 1c874fb..e8480a4 100644
8509--- a/arch/powerpc/include/asm/reg.h
8510+++ b/arch/powerpc/include/asm/reg.h
8511@@ -253,6 +253,7 @@
8512 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8513 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8514 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8515+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8516 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8517 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8518 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8519diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8520index 5a6614a..d89995d1 100644
8521--- a/arch/powerpc/include/asm/smp.h
8522+++ b/arch/powerpc/include/asm/smp.h
8523@@ -51,7 +51,7 @@ struct smp_ops_t {
8524 int (*cpu_disable)(void);
8525 void (*cpu_die)(unsigned int nr);
8526 int (*cpu_bootable)(unsigned int nr);
8527-};
8528+} __no_const;
8529
8530 extern void smp_send_debugger_break(void);
8531 extern void start_secondary_resume(void);
8532diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8533index 4dbe072..b803275 100644
8534--- a/arch/powerpc/include/asm/spinlock.h
8535+++ b/arch/powerpc/include/asm/spinlock.h
8536@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8537 __asm__ __volatile__(
8538 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8539 __DO_SIGN_EXTEND
8540-" addic. %0,%0,1\n\
8541- ble- 2f\n"
8542+
8543+#ifdef CONFIG_PAX_REFCOUNT
8544+" mcrxr cr0\n"
8545+" addico. %0,%0,1\n"
8546+" bf 4*cr0+so, 3f\n"
8547+"2:.long " "0x00c00b00""\n"
8548+#else
8549+" addic. %0,%0,1\n"
8550+#endif
8551+
8552+"3:\n"
8553+ "ble- 4f\n"
8554 PPC405_ERR77(0,%1)
8555 " stwcx. %0,0,%1\n\
8556 bne- 1b\n"
8557 PPC_ACQUIRE_BARRIER
8558-"2:" : "=&r" (tmp)
8559+"4:"
8560+
8561+#ifdef CONFIG_PAX_REFCOUNT
8562+ _ASM_EXTABLE(2b,4b)
8563+#endif
8564+
8565+ : "=&r" (tmp)
8566 : "r" (&rw->lock)
8567 : "cr0", "xer", "memory");
8568
8569@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8570 __asm__ __volatile__(
8571 "# read_unlock\n\t"
8572 PPC_RELEASE_BARRIER
8573-"1: lwarx %0,0,%1\n\
8574- addic %0,%0,-1\n"
8575+"1: lwarx %0,0,%1\n"
8576+
8577+#ifdef CONFIG_PAX_REFCOUNT
8578+" mcrxr cr0\n"
8579+" addico. %0,%0,-1\n"
8580+" bf 4*cr0+so, 3f\n"
8581+"2:.long " "0x00c00b00""\n"
8582+#else
8583+" addic. %0,%0,-1\n"
8584+#endif
8585+
8586+"3:\n"
8587 PPC405_ERR77(0,%1)
8588 " stwcx. %0,0,%1\n\
8589 bne- 1b"
8590+
8591+#ifdef CONFIG_PAX_REFCOUNT
8592+"\n4:\n"
8593+ _ASM_EXTABLE(2b, 4b)
8594+#endif
8595+
8596 : "=&r"(tmp)
8597 : "r"(&rw->lock)
8598 : "cr0", "xer", "memory");
8599diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8600index 0be6c68..9c3c6ee 100644
8601--- a/arch/powerpc/include/asm/thread_info.h
8602+++ b/arch/powerpc/include/asm/thread_info.h
8603@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8604 #if defined(CONFIG_PPC64)
8605 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8606 #endif
8607+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8608+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8609
8610 /* as above, but as bit values */
8611 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8612@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8613 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8614 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8615 #define _TIF_NOHZ (1<<TIF_NOHZ)
8616+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8617 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8618 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8619- _TIF_NOHZ)
8620+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8621
8622 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8623 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8624diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8625index a0c071d..49cdc7f 100644
8626--- a/arch/powerpc/include/asm/uaccess.h
8627+++ b/arch/powerpc/include/asm/uaccess.h
8628@@ -58,6 +58,7 @@
8629
8630 #endif
8631
8632+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8633 #define access_ok(type, addr, size) \
8634 (__chk_user_ptr(addr), \
8635 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8636@@ -318,52 +319,6 @@ do { \
8637 extern unsigned long __copy_tofrom_user(void __user *to,
8638 const void __user *from, unsigned long size);
8639
8640-#ifndef __powerpc64__
8641-
8642-static inline unsigned long copy_from_user(void *to,
8643- const void __user *from, unsigned long n)
8644-{
8645- unsigned long over;
8646-
8647- if (access_ok(VERIFY_READ, from, n))
8648- return __copy_tofrom_user((__force void __user *)to, from, n);
8649- if ((unsigned long)from < TASK_SIZE) {
8650- over = (unsigned long)from + n - TASK_SIZE;
8651- return __copy_tofrom_user((__force void __user *)to, from,
8652- n - over) + over;
8653- }
8654- return n;
8655-}
8656-
8657-static inline unsigned long copy_to_user(void __user *to,
8658- const void *from, unsigned long n)
8659-{
8660- unsigned long over;
8661-
8662- if (access_ok(VERIFY_WRITE, to, n))
8663- return __copy_tofrom_user(to, (__force void __user *)from, n);
8664- if ((unsigned long)to < TASK_SIZE) {
8665- over = (unsigned long)to + n - TASK_SIZE;
8666- return __copy_tofrom_user(to, (__force void __user *)from,
8667- n - over) + over;
8668- }
8669- return n;
8670-}
8671-
8672-#else /* __powerpc64__ */
8673-
8674-#define __copy_in_user(to, from, size) \
8675- __copy_tofrom_user((to), (from), (size))
8676-
8677-extern unsigned long copy_from_user(void *to, const void __user *from,
8678- unsigned long n);
8679-extern unsigned long copy_to_user(void __user *to, const void *from,
8680- unsigned long n);
8681-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8682- unsigned long n);
8683-
8684-#endif /* __powerpc64__ */
8685-
8686 static inline unsigned long __copy_from_user_inatomic(void *to,
8687 const void __user *from, unsigned long n)
8688 {
8689@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8690 if (ret == 0)
8691 return 0;
8692 }
8693+
8694+ if (!__builtin_constant_p(n))
8695+ check_object_size(to, n, false);
8696+
8697 return __copy_tofrom_user((__force void __user *)to, from, n);
8698 }
8699
8700@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8701 if (ret == 0)
8702 return 0;
8703 }
8704+
8705+ if (!__builtin_constant_p(n))
8706+ check_object_size(from, n, true);
8707+
8708 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8709 }
8710
8711@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8712 return __copy_to_user_inatomic(to, from, size);
8713 }
8714
8715+#ifndef __powerpc64__
8716+
8717+static inline unsigned long __must_check copy_from_user(void *to,
8718+ const void __user *from, unsigned long n)
8719+{
8720+ unsigned long over;
8721+
8722+ if ((long)n < 0)
8723+ return n;
8724+
8725+ if (access_ok(VERIFY_READ, from, n)) {
8726+ if (!__builtin_constant_p(n))
8727+ check_object_size(to, n, false);
8728+ return __copy_tofrom_user((__force void __user *)to, from, n);
8729+ }
8730+ if ((unsigned long)from < TASK_SIZE) {
8731+ over = (unsigned long)from + n - TASK_SIZE;
8732+ if (!__builtin_constant_p(n - over))
8733+ check_object_size(to, n - over, false);
8734+ return __copy_tofrom_user((__force void __user *)to, from,
8735+ n - over) + over;
8736+ }
8737+ return n;
8738+}
8739+
8740+static inline unsigned long __must_check copy_to_user(void __user *to,
8741+ const void *from, unsigned long n)
8742+{
8743+ unsigned long over;
8744+
8745+ if ((long)n < 0)
8746+ return n;
8747+
8748+ if (access_ok(VERIFY_WRITE, to, n)) {
8749+ if (!__builtin_constant_p(n))
8750+ check_object_size(from, n, true);
8751+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8752+ }
8753+ if ((unsigned long)to < TASK_SIZE) {
8754+ over = (unsigned long)to + n - TASK_SIZE;
8755+ if (!__builtin_constant_p(n))
8756+ check_object_size(from, n - over, true);
8757+ return __copy_tofrom_user(to, (__force void __user *)from,
8758+ n - over) + over;
8759+ }
8760+ return n;
8761+}
8762+
8763+#else /* __powerpc64__ */
8764+
8765+#define __copy_in_user(to, from, size) \
8766+ __copy_tofrom_user((to), (from), (size))
8767+
8768+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8769+{
8770+ if ((long)n < 0 || n > INT_MAX)
8771+ return n;
8772+
8773+ if (!__builtin_constant_p(n))
8774+ check_object_size(to, n, false);
8775+
8776+ if (likely(access_ok(VERIFY_READ, from, n)))
8777+ n = __copy_from_user(to, from, n);
8778+ else
8779+ memset(to, 0, n);
8780+ return n;
8781+}
8782+
8783+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8784+{
8785+ if ((long)n < 0 || n > INT_MAX)
8786+ return n;
8787+
8788+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8789+ if (!__builtin_constant_p(n))
8790+ check_object_size(from, n, true);
8791+ n = __copy_to_user(to, from, n);
8792+ }
8793+ return n;
8794+}
8795+
8796+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8797+ unsigned long n);
8798+
8799+#endif /* __powerpc64__ */
8800+
8801 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8802
8803 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8804diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8805index 502cf69..53936a1 100644
8806--- a/arch/powerpc/kernel/Makefile
8807+++ b/arch/powerpc/kernel/Makefile
8808@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8809 CFLAGS_btext.o += -fPIC
8810 endif
8811
8812+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8813+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8814+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8815+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8816+
8817 ifdef CONFIG_FUNCTION_TRACER
8818 # Do not trace early boot code
8819 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8820@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8821 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8822 endif
8823
8824+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8825+
8826 obj-y := cputable.o ptrace.o syscalls.o \
8827 irq.o align.o signal_32.o pmc.o vdso.o \
8828 process.o systbl.o idle.o \
8829diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8830index 3e68d1c..72a5ee6 100644
8831--- a/arch/powerpc/kernel/exceptions-64e.S
8832+++ b/arch/powerpc/kernel/exceptions-64e.S
8833@@ -1010,6 +1010,7 @@ storage_fault_common:
8834 std r14,_DAR(r1)
8835 std r15,_DSISR(r1)
8836 addi r3,r1,STACK_FRAME_OVERHEAD
8837+ bl save_nvgprs
8838 mr r4,r14
8839 mr r5,r15
8840 ld r14,PACA_EXGEN+EX_R14(r13)
8841@@ -1018,8 +1019,7 @@ storage_fault_common:
8842 cmpdi r3,0
8843 bne- 1f
8844 b ret_from_except_lite
8845-1: bl save_nvgprs
8846- mr r5,r3
8847+1: mr r5,r3
8848 addi r3,r1,STACK_FRAME_OVERHEAD
8849 ld r4,_DAR(r1)
8850 bl bad_page_fault
8851diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8852index c2df815..bae3d12 100644
8853--- a/arch/powerpc/kernel/exceptions-64s.S
8854+++ b/arch/powerpc/kernel/exceptions-64s.S
8855@@ -1599,10 +1599,10 @@ handle_page_fault:
8856 11: ld r4,_DAR(r1)
8857 ld r5,_DSISR(r1)
8858 addi r3,r1,STACK_FRAME_OVERHEAD
8859+ bl save_nvgprs
8860 bl do_page_fault
8861 cmpdi r3,0
8862 beq+ 12f
8863- bl save_nvgprs
8864 mr r5,r3
8865 addi r3,r1,STACK_FRAME_OVERHEAD
8866 lwz r4,_DAR(r1)
8867diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8868index 4509603..cdb491f 100644
8869--- a/arch/powerpc/kernel/irq.c
8870+++ b/arch/powerpc/kernel/irq.c
8871@@ -460,6 +460,8 @@ void migrate_irqs(void)
8872 }
8873 #endif
8874
8875+extern void gr_handle_kernel_exploit(void);
8876+
8877 static inline void check_stack_overflow(void)
8878 {
8879 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8880@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8881 pr_err("do_IRQ: stack overflow: %ld\n",
8882 sp - sizeof(struct thread_info));
8883 dump_stack();
8884+ gr_handle_kernel_exploit();
8885 }
8886 #endif
8887 }
8888diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8889index c94d2e0..992a9ce 100644
8890--- a/arch/powerpc/kernel/module_32.c
8891+++ b/arch/powerpc/kernel/module_32.c
8892@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8893 me->arch.core_plt_section = i;
8894 }
8895 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8896- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8897+ pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8898 return -ENOEXEC;
8899 }
8900
8901@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8902
8903 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8904 /* Init, or core PLT? */
8905- if (location >= mod->module_core
8906- && location < mod->module_core + mod->core_size)
8907+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8908+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8909 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8910- else
8911+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8912+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8913 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8914+ else {
8915+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8916+ return ~0UL;
8917+ }
8918
8919 /* Find this entry, or if that fails, the next avail. entry */
8920 while (entry->jump[0]) {
8921@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8922 }
8923 #ifdef CONFIG_DYNAMIC_FTRACE
8924 module->arch.tramp =
8925- do_plt_call(module->module_core,
8926+ do_plt_call(module->module_core_rx,
8927 (unsigned long)ftrace_caller,
8928 sechdrs, module);
8929 #endif
8930diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8931index b4cc7be..1fe8bb3 100644
8932--- a/arch/powerpc/kernel/process.c
8933+++ b/arch/powerpc/kernel/process.c
8934@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8935 * Lookup NIP late so we have the best change of getting the
8936 * above info out without failing
8937 */
8938- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8939- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8940+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8941+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8942 #endif
8943 show_stack(current, (unsigned long *) regs->gpr[1]);
8944 if (!user_mode(regs))
8945@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8946 newsp = stack[0];
8947 ip = stack[STACK_FRAME_LR_SAVE];
8948 if (!firstframe || ip != lr) {
8949- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8950+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8951 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8952 if ((ip == rth) && curr_frame >= 0) {
8953- printk(" (%pS)",
8954+ printk(" (%pA)",
8955 (void *)current->ret_stack[curr_frame].ret);
8956 curr_frame--;
8957 }
8958@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8959 struct pt_regs *regs = (struct pt_regs *)
8960 (sp + STACK_FRAME_OVERHEAD);
8961 lr = regs->link;
8962- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8963+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8964 regs->trap, (void *)regs->nip, (void *)lr);
8965 firstframe = 1;
8966 }
8967@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8968 mtspr(SPRN_CTRLT, ctrl);
8969 }
8970 #endif /* CONFIG_PPC64 */
8971-
8972-unsigned long arch_align_stack(unsigned long sp)
8973-{
8974- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8975- sp -= get_random_int() & ~PAGE_MASK;
8976- return sp & ~0xf;
8977-}
8978-
8979-static inline unsigned long brk_rnd(void)
8980-{
8981- unsigned long rnd = 0;
8982-
8983- /* 8MB for 32bit, 1GB for 64bit */
8984- if (is_32bit_task())
8985- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8986- else
8987- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8988-
8989- return rnd << PAGE_SHIFT;
8990-}
8991-
8992-unsigned long arch_randomize_brk(struct mm_struct *mm)
8993-{
8994- unsigned long base = mm->brk;
8995- unsigned long ret;
8996-
8997-#ifdef CONFIG_PPC_STD_MMU_64
8998- /*
8999- * If we are using 1TB segments and we are allowed to randomise
9000- * the heap, we can put it above 1TB so it is backed by a 1TB
9001- * segment. Otherwise the heap will be in the bottom 1TB
9002- * which always uses 256MB segments and this may result in a
9003- * performance penalty.
9004- */
9005- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9006- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9007-#endif
9008-
9009- ret = PAGE_ALIGN(base + brk_rnd());
9010-
9011- if (ret < mm->brk)
9012- return mm->brk;
9013-
9014- return ret;
9015-}
9016-
9017diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9018index f21897b..28c0428 100644
9019--- a/arch/powerpc/kernel/ptrace.c
9020+++ b/arch/powerpc/kernel/ptrace.c
9021@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9022 return ret;
9023 }
9024
9025+#ifdef CONFIG_GRKERNSEC_SETXID
9026+extern void gr_delayed_cred_worker(void);
9027+#endif
9028+
9029 /*
9030 * We must return the syscall number to actually look up in the table.
9031 * This can be -1L to skip running any syscall at all.
9032@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9033
9034 secure_computing_strict(regs->gpr[0]);
9035
9036+#ifdef CONFIG_GRKERNSEC_SETXID
9037+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9038+ gr_delayed_cred_worker();
9039+#endif
9040+
9041 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9042 tracehook_report_syscall_entry(regs))
9043 /*
9044@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9045 {
9046 int step;
9047
9048+#ifdef CONFIG_GRKERNSEC_SETXID
9049+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9050+ gr_delayed_cred_worker();
9051+#endif
9052+
9053 audit_syscall_exit(regs);
9054
9055 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9056diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9057index b171001..4ac7ac5 100644
9058--- a/arch/powerpc/kernel/signal_32.c
9059+++ b/arch/powerpc/kernel/signal_32.c
9060@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9061 /* Save user registers on the stack */
9062 frame = &rt_sf->uc.uc_mcontext;
9063 addr = frame;
9064- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9065+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9066 sigret = 0;
9067 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9068 } else {
9069diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9070index 2cb0c94..c0c0bc9 100644
9071--- a/arch/powerpc/kernel/signal_64.c
9072+++ b/arch/powerpc/kernel/signal_64.c
9073@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9074 current->thread.fp_state.fpscr = 0;
9075
9076 /* Set up to return from userspace. */
9077- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9078+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9079 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9080 } else {
9081 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9082diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9083index e6595b7..24bde6e 100644
9084--- a/arch/powerpc/kernel/traps.c
9085+++ b/arch/powerpc/kernel/traps.c
9086@@ -36,6 +36,7 @@
9087 #include <linux/debugfs.h>
9088 #include <linux/ratelimit.h>
9089 #include <linux/context_tracking.h>
9090+#include <linux/uaccess.h>
9091
9092 #include <asm/emulated_ops.h>
9093 #include <asm/pgtable.h>
9094@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9095 return flags;
9096 }
9097
9098+extern void gr_handle_kernel_exploit(void);
9099+
9100 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9101 int signr)
9102 {
9103@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9104 panic("Fatal exception in interrupt");
9105 if (panic_on_oops)
9106 panic("Fatal exception");
9107+
9108+ gr_handle_kernel_exploit();
9109+
9110 do_exit(signr);
9111 }
9112
9113@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9114 enum ctx_state prev_state = exception_enter();
9115 unsigned int reason = get_reason(regs);
9116
9117+#ifdef CONFIG_PAX_REFCOUNT
9118+ unsigned int bkpt;
9119+ const struct exception_table_entry *entry;
9120+
9121+ if (reason & REASON_ILLEGAL) {
9122+ /* Check if PaX bad instruction */
9123+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9124+ current->thread.trap_nr = 0;
9125+ pax_report_refcount_overflow(regs);
9126+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9127+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9128+ regs->nip = entry->fixup;
9129+ return;
9130+ }
9131+ /* fixup_exception() could not handle */
9132+ goto bail;
9133+ }
9134+ }
9135+#endif
9136+
9137 /* We can now get here via a FP Unavailable exception if the core
9138 * has no FPU, in that case the reason flags will be 0 */
9139
9140diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9141index 305eb0d..accc5b40 100644
9142--- a/arch/powerpc/kernel/vdso.c
9143+++ b/arch/powerpc/kernel/vdso.c
9144@@ -34,6 +34,7 @@
9145 #include <asm/vdso.h>
9146 #include <asm/vdso_datapage.h>
9147 #include <asm/setup.h>
9148+#include <asm/mman.h>
9149
9150 #undef DEBUG
9151
9152@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9153 vdso_base = VDSO32_MBASE;
9154 #endif
9155
9156- current->mm->context.vdso_base = 0;
9157+ current->mm->context.vdso_base = ~0UL;
9158
9159 /* vDSO has a problem and was disabled, just don't "enable" it for the
9160 * process
9161@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9162 vdso_base = get_unmapped_area(NULL, vdso_base,
9163 (vdso_pages << PAGE_SHIFT) +
9164 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9165- 0, 0);
9166+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9167 if (IS_ERR_VALUE(vdso_base)) {
9168 rc = vdso_base;
9169 goto fail_mmapsem;
9170diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9171index c45eaab..5f41b57 100644
9172--- a/arch/powerpc/kvm/powerpc.c
9173+++ b/arch/powerpc/kvm/powerpc.c
9174@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9175 }
9176 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9177
9178-int kvm_arch_init(void *opaque)
9179+int kvm_arch_init(const void *opaque)
9180 {
9181 return 0;
9182 }
9183diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9184index 5eea6f3..5d10396 100644
9185--- a/arch/powerpc/lib/usercopy_64.c
9186+++ b/arch/powerpc/lib/usercopy_64.c
9187@@ -9,22 +9,6 @@
9188 #include <linux/module.h>
9189 #include <asm/uaccess.h>
9190
9191-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9192-{
9193- if (likely(access_ok(VERIFY_READ, from, n)))
9194- n = __copy_from_user(to, from, n);
9195- else
9196- memset(to, 0, n);
9197- return n;
9198-}
9199-
9200-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9201-{
9202- if (likely(access_ok(VERIFY_WRITE, to, n)))
9203- n = __copy_to_user(to, from, n);
9204- return n;
9205-}
9206-
9207 unsigned long copy_in_user(void __user *to, const void __user *from,
9208 unsigned long n)
9209 {
9210@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9211 return n;
9212 }
9213
9214-EXPORT_SYMBOL(copy_from_user);
9215-EXPORT_SYMBOL(copy_to_user);
9216 EXPORT_SYMBOL(copy_in_user);
9217
9218diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9219index 6154b0a..4de2b19 100644
9220--- a/arch/powerpc/mm/fault.c
9221+++ b/arch/powerpc/mm/fault.c
9222@@ -33,6 +33,10 @@
9223 #include <linux/ratelimit.h>
9224 #include <linux/context_tracking.h>
9225 #include <linux/hugetlb.h>
9226+#include <linux/slab.h>
9227+#include <linux/pagemap.h>
9228+#include <linux/compiler.h>
9229+#include <linux/unistd.h>
9230
9231 #include <asm/firmware.h>
9232 #include <asm/page.h>
9233@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9234 }
9235 #endif
9236
9237+#ifdef CONFIG_PAX_PAGEEXEC
9238+/*
9239+ * PaX: decide what to do with offenders (regs->nip = fault address)
9240+ *
9241+ * returns 1 when task should be killed
9242+ */
9243+static int pax_handle_fetch_fault(struct pt_regs *regs)
9244+{
9245+ return 1;
9246+}
9247+
9248+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9249+{
9250+ unsigned long i;
9251+
9252+ printk(KERN_ERR "PAX: bytes at PC: ");
9253+ for (i = 0; i < 5; i++) {
9254+ unsigned int c;
9255+ if (get_user(c, (unsigned int __user *)pc+i))
9256+ printk(KERN_CONT "???????? ");
9257+ else
9258+ printk(KERN_CONT "%08x ", c);
9259+ }
9260+ printk("\n");
9261+}
9262+#endif
9263+
9264 /*
9265 * Check whether the instruction at regs->nip is a store using
9266 * an update addressing form which will update r1.
9267@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9268 * indicate errors in DSISR but can validly be set in SRR1.
9269 */
9270 if (trap == 0x400)
9271- error_code &= 0x48200000;
9272+ error_code &= 0x58200000;
9273 else
9274 is_write = error_code & DSISR_ISSTORE;
9275 #else
9276@@ -383,7 +414,7 @@ good_area:
9277 * "undefined". Of those that can be set, this is the only
9278 * one which seems bad.
9279 */
9280- if (error_code & 0x10000000)
9281+ if (error_code & DSISR_GUARDED)
9282 /* Guarded storage error. */
9283 goto bad_area;
9284 #endif /* CONFIG_8xx */
9285@@ -398,7 +429,7 @@ good_area:
9286 * processors use the same I/D cache coherency mechanism
9287 * as embedded.
9288 */
9289- if (error_code & DSISR_PROTFAULT)
9290+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9291 goto bad_area;
9292 #endif /* CONFIG_PPC_STD_MMU */
9293
9294@@ -490,6 +521,23 @@ bad_area:
9295 bad_area_nosemaphore:
9296 /* User mode accesses cause a SIGSEGV */
9297 if (user_mode(regs)) {
9298+
9299+#ifdef CONFIG_PAX_PAGEEXEC
9300+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9301+#ifdef CONFIG_PPC_STD_MMU
9302+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9303+#else
9304+ if (is_exec && regs->nip == address) {
9305+#endif
9306+ switch (pax_handle_fetch_fault(regs)) {
9307+ }
9308+
9309+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9310+ do_group_exit(SIGKILL);
9311+ }
9312+ }
9313+#endif
9314+
9315 _exception(SIGSEGV, regs, code, address);
9316 goto bail;
9317 }
9318diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9319index cb8bdbe..cde4bc7 100644
9320--- a/arch/powerpc/mm/mmap.c
9321+++ b/arch/powerpc/mm/mmap.c
9322@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9323 return sysctl_legacy_va_layout;
9324 }
9325
9326-static unsigned long mmap_rnd(void)
9327+static unsigned long mmap_rnd(struct mm_struct *mm)
9328 {
9329 unsigned long rnd = 0;
9330
9331+#ifdef CONFIG_PAX_RANDMMAP
9332+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9333+#endif
9334+
9335 if (current->flags & PF_RANDOMIZE) {
9336 /* 8MB for 32bit, 1GB for 64bit */
9337 if (is_32bit_task())
9338@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9339 return rnd << PAGE_SHIFT;
9340 }
9341
9342-static inline unsigned long mmap_base(void)
9343+static inline unsigned long mmap_base(struct mm_struct *mm)
9344 {
9345 unsigned long gap = rlimit(RLIMIT_STACK);
9346
9347@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9348 else if (gap > MAX_GAP)
9349 gap = MAX_GAP;
9350
9351- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9352+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9353 }
9354
9355 /*
9356@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9357 */
9358 if (mmap_is_legacy()) {
9359 mm->mmap_base = TASK_UNMAPPED_BASE;
9360+
9361+#ifdef CONFIG_PAX_RANDMMAP
9362+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9363+ mm->mmap_base += mm->delta_mmap;
9364+#endif
9365+
9366 mm->get_unmapped_area = arch_get_unmapped_area;
9367 } else {
9368- mm->mmap_base = mmap_base();
9369+ mm->mmap_base = mmap_base(mm);
9370+
9371+#ifdef CONFIG_PAX_RANDMMAP
9372+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9373+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9374+#endif
9375+
9376 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9377 }
9378 }
9379diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9380index ded0ea1..f213a9b 100644
9381--- a/arch/powerpc/mm/slice.c
9382+++ b/arch/powerpc/mm/slice.c
9383@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9384 if ((mm->task_size - len) < addr)
9385 return 0;
9386 vma = find_vma(mm, addr);
9387- return (!vma || (addr + len) <= vma->vm_start);
9388+ return check_heap_stack_gap(vma, addr, len, 0);
9389 }
9390
9391 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9392@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9393 info.align_offset = 0;
9394
9395 addr = TASK_UNMAPPED_BASE;
9396+
9397+#ifdef CONFIG_PAX_RANDMMAP
9398+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9399+ addr += mm->delta_mmap;
9400+#endif
9401+
9402 while (addr < TASK_SIZE) {
9403 info.low_limit = addr;
9404 if (!slice_scan_available(addr, available, 1, &addr))
9405@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9406 if (fixed && addr > (mm->task_size - len))
9407 return -ENOMEM;
9408
9409+#ifdef CONFIG_PAX_RANDMMAP
9410+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9411+ addr = 0;
9412+#endif
9413+
9414 /* If hint, make sure it matches our alignment restrictions */
9415 if (!fixed && addr) {
9416 addr = _ALIGN_UP(addr, 1ul << pshift);
9417diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9418index f223875..94170e4 100644
9419--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9420+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9421@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9422 }
9423
9424 static struct pci_ops scc_pciex_pci_ops = {
9425- scc_pciex_read_config,
9426- scc_pciex_write_config,
9427+ .read = scc_pciex_read_config,
9428+ .write = scc_pciex_write_config,
9429 };
9430
9431 static void pciex_clear_intr_all(unsigned int __iomem *base)
9432diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9433index d966bbe..372124a 100644
9434--- a/arch/powerpc/platforms/cell/spufs/file.c
9435+++ b/arch/powerpc/platforms/cell/spufs/file.c
9436@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9437 return VM_FAULT_NOPAGE;
9438 }
9439
9440-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9441+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9442 unsigned long address,
9443- void *buf, int len, int write)
9444+ void *buf, size_t len, int write)
9445 {
9446 struct spu_context *ctx = vma->vm_file->private_data;
9447 unsigned long offset = address - vma->vm_start;
9448diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9449index fa934fe..c296056 100644
9450--- a/arch/s390/include/asm/atomic.h
9451+++ b/arch/s390/include/asm/atomic.h
9452@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9453 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9454 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9455
9456+#define atomic64_read_unchecked(v) atomic64_read(v)
9457+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9458+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9459+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9460+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9461+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9462+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9463+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9464+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9465+
9466 #endif /* __ARCH_S390_ATOMIC__ */
9467diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9468index 8d72471..5322500 100644
9469--- a/arch/s390/include/asm/barrier.h
9470+++ b/arch/s390/include/asm/barrier.h
9471@@ -42,7 +42,7 @@
9472 do { \
9473 compiletime_assert_atomic_type(*p); \
9474 barrier(); \
9475- ACCESS_ONCE(*p) = (v); \
9476+ ACCESS_ONCE_RW(*p) = (v); \
9477 } while (0)
9478
9479 #define smp_load_acquire(p) \
9480diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9481index 4d7ccac..d03d0ad 100644
9482--- a/arch/s390/include/asm/cache.h
9483+++ b/arch/s390/include/asm/cache.h
9484@@ -9,8 +9,10 @@
9485 #ifndef __ARCH_S390_CACHE_H
9486 #define __ARCH_S390_CACHE_H
9487
9488-#define L1_CACHE_BYTES 256
9489+#include <linux/const.h>
9490+
9491 #define L1_CACHE_SHIFT 8
9492+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9493 #define NET_SKB_PAD 32
9494
9495 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9496diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9497index f6e43d3..5f57681 100644
9498--- a/arch/s390/include/asm/elf.h
9499+++ b/arch/s390/include/asm/elf.h
9500@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9501 the loader. We need to make sure that it is out of the way of the program
9502 that it will "exec", and that there is sufficient room for the brk. */
9503
9504-extern unsigned long randomize_et_dyn(unsigned long base);
9505-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9506+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9507+
9508+#ifdef CONFIG_PAX_ASLR
9509+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9510+
9511+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9512+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9513+#endif
9514
9515 /* This yields a mask that user programs can use to figure out what
9516 instruction set this CPU supports. */
9517@@ -223,9 +229,6 @@ struct linux_binprm;
9518 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9519 int arch_setup_additional_pages(struct linux_binprm *, int);
9520
9521-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9522-#define arch_randomize_brk arch_randomize_brk
9523-
9524 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9525
9526 #endif
9527diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9528index c4a93d6..4d2a9b4 100644
9529--- a/arch/s390/include/asm/exec.h
9530+++ b/arch/s390/include/asm/exec.h
9531@@ -7,6 +7,6 @@
9532 #ifndef __ASM_EXEC_H
9533 #define __ASM_EXEC_H
9534
9535-extern unsigned long arch_align_stack(unsigned long sp);
9536+#define arch_align_stack(x) ((x) & ~0xfUL)
9537
9538 #endif /* __ASM_EXEC_H */
9539diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9540index cd4c68e..6764641 100644
9541--- a/arch/s390/include/asm/uaccess.h
9542+++ b/arch/s390/include/asm/uaccess.h
9543@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9544 __range_ok((unsigned long)(addr), (size)); \
9545 })
9546
9547+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9548 #define access_ok(type, addr, size) __access_ok(addr, size)
9549
9550 /*
9551@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9552 copy_to_user(void __user *to, const void *from, unsigned long n)
9553 {
9554 might_fault();
9555+
9556+ if ((long)n < 0)
9557+ return n;
9558+
9559 return __copy_to_user(to, from, n);
9560 }
9561
9562@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9563 static inline unsigned long __must_check
9564 copy_from_user(void *to, const void __user *from, unsigned long n)
9565 {
9566- unsigned int sz = __compiletime_object_size(to);
9567+ size_t sz = __compiletime_object_size(to);
9568
9569 might_fault();
9570- if (unlikely(sz != -1 && sz < n)) {
9571+
9572+ if ((long)n < 0)
9573+ return n;
9574+
9575+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9576 copy_from_user_overflow();
9577 return n;
9578 }
9579diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9580index 409d152..d90d368 100644
9581--- a/arch/s390/kernel/module.c
9582+++ b/arch/s390/kernel/module.c
9583@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9584
9585 /* Increase core size by size of got & plt and set start
9586 offsets for got and plt. */
9587- me->core_size = ALIGN(me->core_size, 4);
9588- me->arch.got_offset = me->core_size;
9589- me->core_size += me->arch.got_size;
9590- me->arch.plt_offset = me->core_size;
9591- me->core_size += me->arch.plt_size;
9592+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9593+ me->arch.got_offset = me->core_size_rw;
9594+ me->core_size_rw += me->arch.got_size;
9595+ me->arch.plt_offset = me->core_size_rx;
9596+ me->core_size_rx += me->arch.plt_size;
9597 return 0;
9598 }
9599
9600@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9601 if (info->got_initialized == 0) {
9602 Elf_Addr *gotent;
9603
9604- gotent = me->module_core + me->arch.got_offset +
9605+ gotent = me->module_core_rw + me->arch.got_offset +
9606 info->got_offset;
9607 *gotent = val;
9608 info->got_initialized = 1;
9609@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9610 rc = apply_rela_bits(loc, val, 0, 64, 0);
9611 else if (r_type == R_390_GOTENT ||
9612 r_type == R_390_GOTPLTENT) {
9613- val += (Elf_Addr) me->module_core - loc;
9614+ val += (Elf_Addr) me->module_core_rw - loc;
9615 rc = apply_rela_bits(loc, val, 1, 32, 1);
9616 }
9617 break;
9618@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9619 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9620 if (info->plt_initialized == 0) {
9621 unsigned int *ip;
9622- ip = me->module_core + me->arch.plt_offset +
9623+ ip = me->module_core_rx + me->arch.plt_offset +
9624 info->plt_offset;
9625 #ifndef CONFIG_64BIT
9626 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9627@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9628 val - loc + 0xffffUL < 0x1ffffeUL) ||
9629 (r_type == R_390_PLT32DBL &&
9630 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9631- val = (Elf_Addr) me->module_core +
9632+ val = (Elf_Addr) me->module_core_rx +
9633 me->arch.plt_offset +
9634 info->plt_offset;
9635 val += rela->r_addend - loc;
9636@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9637 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9638 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9639 val = val + rela->r_addend -
9640- ((Elf_Addr) me->module_core + me->arch.got_offset);
9641+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9642 if (r_type == R_390_GOTOFF16)
9643 rc = apply_rela_bits(loc, val, 0, 16, 0);
9644 else if (r_type == R_390_GOTOFF32)
9645@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9646 break;
9647 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9648 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9649- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9650+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9651 rela->r_addend - loc;
9652 if (r_type == R_390_GOTPC)
9653 rc = apply_rela_bits(loc, val, 1, 32, 0);
9654diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9655index aa7a839..6c2a916 100644
9656--- a/arch/s390/kernel/process.c
9657+++ b/arch/s390/kernel/process.c
9658@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9659 }
9660 return 0;
9661 }
9662-
9663-unsigned long arch_align_stack(unsigned long sp)
9664-{
9665- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9666- sp -= get_random_int() & ~PAGE_MASK;
9667- return sp & ~0xf;
9668-}
9669-
9670-static inline unsigned long brk_rnd(void)
9671-{
9672- /* 8MB for 32bit, 1GB for 64bit */
9673- if (is_32bit_task())
9674- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9675- else
9676- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9677-}
9678-
9679-unsigned long arch_randomize_brk(struct mm_struct *mm)
9680-{
9681- unsigned long ret;
9682-
9683- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9684- return (ret > mm->brk) ? ret : mm->brk;
9685-}
9686-
9687-unsigned long randomize_et_dyn(unsigned long base)
9688-{
9689- unsigned long ret;
9690-
9691- if (!(current->flags & PF_RANDOMIZE))
9692- return base;
9693- ret = PAGE_ALIGN(base + brk_rnd());
9694- return (ret > base) ? ret : base;
9695-}
9696diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9697index 9b436c2..54fbf0a 100644
9698--- a/arch/s390/mm/mmap.c
9699+++ b/arch/s390/mm/mmap.c
9700@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9701 */
9702 if (mmap_is_legacy()) {
9703 mm->mmap_base = mmap_base_legacy();
9704+
9705+#ifdef CONFIG_PAX_RANDMMAP
9706+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9707+ mm->mmap_base += mm->delta_mmap;
9708+#endif
9709+
9710 mm->get_unmapped_area = arch_get_unmapped_area;
9711 } else {
9712 mm->mmap_base = mmap_base();
9713+
9714+#ifdef CONFIG_PAX_RANDMMAP
9715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9716+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9717+#endif
9718+
9719 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9720 }
9721 }
9722@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9723 */
9724 if (mmap_is_legacy()) {
9725 mm->mmap_base = mmap_base_legacy();
9726+
9727+#ifdef CONFIG_PAX_RANDMMAP
9728+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9729+ mm->mmap_base += mm->delta_mmap;
9730+#endif
9731+
9732 mm->get_unmapped_area = s390_get_unmapped_area;
9733 } else {
9734 mm->mmap_base = mmap_base();
9735+
9736+#ifdef CONFIG_PAX_RANDMMAP
9737+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9738+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9739+#endif
9740+
9741 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9742 }
9743 }
9744diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9745index ae3d59f..f65f075 100644
9746--- a/arch/score/include/asm/cache.h
9747+++ b/arch/score/include/asm/cache.h
9748@@ -1,7 +1,9 @@
9749 #ifndef _ASM_SCORE_CACHE_H
9750 #define _ASM_SCORE_CACHE_H
9751
9752+#include <linux/const.h>
9753+
9754 #define L1_CACHE_SHIFT 4
9755-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9756+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9757
9758 #endif /* _ASM_SCORE_CACHE_H */
9759diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9760index f9f3cd5..58ff438 100644
9761--- a/arch/score/include/asm/exec.h
9762+++ b/arch/score/include/asm/exec.h
9763@@ -1,6 +1,6 @@
9764 #ifndef _ASM_SCORE_EXEC_H
9765 #define _ASM_SCORE_EXEC_H
9766
9767-extern unsigned long arch_align_stack(unsigned long sp);
9768+#define arch_align_stack(x) (x)
9769
9770 #endif /* _ASM_SCORE_EXEC_H */
9771diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9772index a1519ad3..e8ac1ff 100644
9773--- a/arch/score/kernel/process.c
9774+++ b/arch/score/kernel/process.c
9775@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9776
9777 return task_pt_regs(task)->cp0_epc;
9778 }
9779-
9780-unsigned long arch_align_stack(unsigned long sp)
9781-{
9782- return sp;
9783-}
9784diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9785index ef9e555..331bd29 100644
9786--- a/arch/sh/include/asm/cache.h
9787+++ b/arch/sh/include/asm/cache.h
9788@@ -9,10 +9,11 @@
9789 #define __ASM_SH_CACHE_H
9790 #ifdef __KERNEL__
9791
9792+#include <linux/const.h>
9793 #include <linux/init.h>
9794 #include <cpu/cache.h>
9795
9796-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9797+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9798
9799 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9800
9801diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9802index 6777177..cb5e44f 100644
9803--- a/arch/sh/mm/mmap.c
9804+++ b/arch/sh/mm/mmap.c
9805@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9806 struct mm_struct *mm = current->mm;
9807 struct vm_area_struct *vma;
9808 int do_colour_align;
9809+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9810 struct vm_unmapped_area_info info;
9811
9812 if (flags & MAP_FIXED) {
9813@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9814 if (filp || (flags & MAP_SHARED))
9815 do_colour_align = 1;
9816
9817+#ifdef CONFIG_PAX_RANDMMAP
9818+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9819+#endif
9820+
9821 if (addr) {
9822 if (do_colour_align)
9823 addr = COLOUR_ALIGN(addr, pgoff);
9824@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9825 addr = PAGE_ALIGN(addr);
9826
9827 vma = find_vma(mm, addr);
9828- if (TASK_SIZE - len >= addr &&
9829- (!vma || addr + len <= vma->vm_start))
9830+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9831 return addr;
9832 }
9833
9834 info.flags = 0;
9835 info.length = len;
9836- info.low_limit = TASK_UNMAPPED_BASE;
9837+ info.low_limit = mm->mmap_base;
9838 info.high_limit = TASK_SIZE;
9839 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9840 info.align_offset = pgoff << PAGE_SHIFT;
9841@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9842 struct mm_struct *mm = current->mm;
9843 unsigned long addr = addr0;
9844 int do_colour_align;
9845+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9846 struct vm_unmapped_area_info info;
9847
9848 if (flags & MAP_FIXED) {
9849@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9850 if (filp || (flags & MAP_SHARED))
9851 do_colour_align = 1;
9852
9853+#ifdef CONFIG_PAX_RANDMMAP
9854+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9855+#endif
9856+
9857 /* requesting a specific address */
9858 if (addr) {
9859 if (do_colour_align)
9860@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9861 addr = PAGE_ALIGN(addr);
9862
9863 vma = find_vma(mm, addr);
9864- if (TASK_SIZE - len >= addr &&
9865- (!vma || addr + len <= vma->vm_start))
9866+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9867 return addr;
9868 }
9869
9870@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9871 VM_BUG_ON(addr != -ENOMEM);
9872 info.flags = 0;
9873 info.low_limit = TASK_UNMAPPED_BASE;
9874+
9875+#ifdef CONFIG_PAX_RANDMMAP
9876+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9877+ info.low_limit += mm->delta_mmap;
9878+#endif
9879+
9880 info.high_limit = TASK_SIZE;
9881 addr = vm_unmapped_area(&info);
9882 }
9883diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9884index 4082749..fd97781 100644
9885--- a/arch/sparc/include/asm/atomic_64.h
9886+++ b/arch/sparc/include/asm/atomic_64.h
9887@@ -15,18 +15,38 @@
9888 #define ATOMIC64_INIT(i) { (i) }
9889
9890 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9891+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9892+{
9893+ return ACCESS_ONCE(v->counter);
9894+}
9895 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9896+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9897+{
9898+ return ACCESS_ONCE(v->counter);
9899+}
9900
9901 #define atomic_set(v, i) (((v)->counter) = i)
9902+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9903+{
9904+ v->counter = i;
9905+}
9906 #define atomic64_set(v, i) (((v)->counter) = i)
9907+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9908+{
9909+ v->counter = i;
9910+}
9911
9912-#define ATOMIC_OP(op) \
9913-void atomic_##op(int, atomic_t *); \
9914-void atomic64_##op(long, atomic64_t *);
9915+#define __ATOMIC_OP(op, suffix) \
9916+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9917+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9918
9919-#define ATOMIC_OP_RETURN(op) \
9920-int atomic_##op##_return(int, atomic_t *); \
9921-long atomic64_##op##_return(long, atomic64_t *);
9922+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9923+
9924+#define __ATOMIC_OP_RETURN(op, suffix) \
9925+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9926+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9927+
9928+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9929
9930 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9931
9932@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9933
9934 #undef ATOMIC_OPS
9935 #undef ATOMIC_OP_RETURN
9936+#undef __ATOMIC_OP_RETURN
9937 #undef ATOMIC_OP
9938+#undef __ATOMIC_OP
9939
9940 #define atomic_dec_return(v) atomic_sub_return(1, v)
9941 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9942
9943 #define atomic_inc_return(v) atomic_add_return(1, v)
9944+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9945+{
9946+ return atomic_add_return_unchecked(1, v);
9947+}
9948 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9949+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9950+{
9951+ return atomic64_add_return_unchecked(1, v);
9952+}
9953
9954 /*
9955 * atomic_inc_and_test - increment and test
9956@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9957 * other cases.
9958 */
9959 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9960+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9961+{
9962+ return atomic_inc_return_unchecked(v) == 0;
9963+}
9964 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9965
9966 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9967@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9968 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9969
9970 #define atomic_inc(v) atomic_add(1, v)
9971+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9972+{
9973+ atomic_add_unchecked(1, v);
9974+}
9975 #define atomic64_inc(v) atomic64_add(1, v)
9976+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9977+{
9978+ atomic64_add_unchecked(1, v);
9979+}
9980
9981 #define atomic_dec(v) atomic_sub(1, v)
9982+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9983+{
9984+ atomic_sub_unchecked(1, v);
9985+}
9986 #define atomic64_dec(v) atomic64_sub(1, v)
9987+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9988+{
9989+ atomic64_sub_unchecked(1, v);
9990+}
9991
9992 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9993 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9994
9995 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9996+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9997+{
9998+ return cmpxchg(&v->counter, old, new);
9999+}
10000 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10001+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10002+{
10003+ return xchg(&v->counter, new);
10004+}
10005
10006 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10007 {
10008- int c, old;
10009+ int c, old, new;
10010 c = atomic_read(v);
10011 for (;;) {
10012- if (unlikely(c == (u)))
10013+ if (unlikely(c == u))
10014 break;
10015- old = atomic_cmpxchg((v), c, c + (a));
10016+
10017+ asm volatile("addcc %2, %0, %0\n"
10018+
10019+#ifdef CONFIG_PAX_REFCOUNT
10020+ "tvs %%icc, 6\n"
10021+#endif
10022+
10023+ : "=r" (new)
10024+ : "0" (c), "ir" (a)
10025+ : "cc");
10026+
10027+ old = atomic_cmpxchg(v, c, new);
10028 if (likely(old == c))
10029 break;
10030 c = old;
10031@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10032 #define atomic64_cmpxchg(v, o, n) \
10033 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10034 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10035+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10036+{
10037+ return xchg(&v->counter, new);
10038+}
10039
10040 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10041 {
10042- long c, old;
10043+ long c, old, new;
10044 c = atomic64_read(v);
10045 for (;;) {
10046- if (unlikely(c == (u)))
10047+ if (unlikely(c == u))
10048 break;
10049- old = atomic64_cmpxchg((v), c, c + (a));
10050+
10051+ asm volatile("addcc %2, %0, %0\n"
10052+
10053+#ifdef CONFIG_PAX_REFCOUNT
10054+ "tvs %%xcc, 6\n"
10055+#endif
10056+
10057+ : "=r" (new)
10058+ : "0" (c), "ir" (a)
10059+ : "cc");
10060+
10061+ old = atomic64_cmpxchg(v, c, new);
10062 if (likely(old == c))
10063 break;
10064 c = old;
10065 }
10066- return c != (u);
10067+ return c != u;
10068 }
10069
10070 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10071diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10072index 7664894..45a974b 100644
10073--- a/arch/sparc/include/asm/barrier_64.h
10074+++ b/arch/sparc/include/asm/barrier_64.h
10075@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10076 do { \
10077 compiletime_assert_atomic_type(*p); \
10078 barrier(); \
10079- ACCESS_ONCE(*p) = (v); \
10080+ ACCESS_ONCE_RW(*p) = (v); \
10081 } while (0)
10082
10083 #define smp_load_acquire(p) \
10084diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10085index 5bb6991..5c2132e 100644
10086--- a/arch/sparc/include/asm/cache.h
10087+++ b/arch/sparc/include/asm/cache.h
10088@@ -7,10 +7,12 @@
10089 #ifndef _SPARC_CACHE_H
10090 #define _SPARC_CACHE_H
10091
10092+#include <linux/const.h>
10093+
10094 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10095
10096 #define L1_CACHE_SHIFT 5
10097-#define L1_CACHE_BYTES 32
10098+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10099
10100 #ifdef CONFIG_SPARC32
10101 #define SMP_CACHE_BYTES_SHIFT 5
10102diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10103index a24e41f..47677ff 100644
10104--- a/arch/sparc/include/asm/elf_32.h
10105+++ b/arch/sparc/include/asm/elf_32.h
10106@@ -114,6 +114,13 @@ typedef struct {
10107
10108 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10109
10110+#ifdef CONFIG_PAX_ASLR
10111+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10112+
10113+#define PAX_DELTA_MMAP_LEN 16
10114+#define PAX_DELTA_STACK_LEN 16
10115+#endif
10116+
10117 /* This yields a mask that user programs can use to figure out what
10118 instruction set this cpu supports. This can NOT be done in userspace
10119 on Sparc. */
10120diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10121index 370ca1e..d4f4a98 100644
10122--- a/arch/sparc/include/asm/elf_64.h
10123+++ b/arch/sparc/include/asm/elf_64.h
10124@@ -189,6 +189,13 @@ typedef struct {
10125 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10126 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10127
10128+#ifdef CONFIG_PAX_ASLR
10129+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10130+
10131+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10132+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10133+#endif
10134+
10135 extern unsigned long sparc64_elf_hwcap;
10136 #define ELF_HWCAP sparc64_elf_hwcap
10137
10138diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10139index a3890da..f6a408e 100644
10140--- a/arch/sparc/include/asm/pgalloc_32.h
10141+++ b/arch/sparc/include/asm/pgalloc_32.h
10142@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10143 }
10144
10145 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10146+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10147
10148 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10149 unsigned long address)
10150diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10151index 5e31871..13469c6 100644
10152--- a/arch/sparc/include/asm/pgalloc_64.h
10153+++ b/arch/sparc/include/asm/pgalloc_64.h
10154@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10155 }
10156
10157 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10158+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10159
10160 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10161 {
10162@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10163 }
10164
10165 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10166+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10167
10168 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10169 {
10170diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10171index 59ba6f6..4518128 100644
10172--- a/arch/sparc/include/asm/pgtable.h
10173+++ b/arch/sparc/include/asm/pgtable.h
10174@@ -5,4 +5,8 @@
10175 #else
10176 #include <asm/pgtable_32.h>
10177 #endif
10178+
10179+#define ktla_ktva(addr) (addr)
10180+#define ktva_ktla(addr) (addr)
10181+
10182 #endif
10183diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10184index b9b91ae..950b91e 100644
10185--- a/arch/sparc/include/asm/pgtable_32.h
10186+++ b/arch/sparc/include/asm/pgtable_32.h
10187@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10188 #define PAGE_SHARED SRMMU_PAGE_SHARED
10189 #define PAGE_COPY SRMMU_PAGE_COPY
10190 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10191+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10192+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10193+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10194 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10195
10196 /* Top-level page directory - dummy used by init-mm.
10197@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10198
10199 /* xwr */
10200 #define __P000 PAGE_NONE
10201-#define __P001 PAGE_READONLY
10202-#define __P010 PAGE_COPY
10203-#define __P011 PAGE_COPY
10204+#define __P001 PAGE_READONLY_NOEXEC
10205+#define __P010 PAGE_COPY_NOEXEC
10206+#define __P011 PAGE_COPY_NOEXEC
10207 #define __P100 PAGE_READONLY
10208 #define __P101 PAGE_READONLY
10209 #define __P110 PAGE_COPY
10210 #define __P111 PAGE_COPY
10211
10212 #define __S000 PAGE_NONE
10213-#define __S001 PAGE_READONLY
10214-#define __S010 PAGE_SHARED
10215-#define __S011 PAGE_SHARED
10216+#define __S001 PAGE_READONLY_NOEXEC
10217+#define __S010 PAGE_SHARED_NOEXEC
10218+#define __S011 PAGE_SHARED_NOEXEC
10219 #define __S100 PAGE_READONLY
10220 #define __S101 PAGE_READONLY
10221 #define __S110 PAGE_SHARED
10222diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10223index 79da178..c2eede8 100644
10224--- a/arch/sparc/include/asm/pgtsrmmu.h
10225+++ b/arch/sparc/include/asm/pgtsrmmu.h
10226@@ -115,6 +115,11 @@
10227 SRMMU_EXEC | SRMMU_REF)
10228 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10229 SRMMU_EXEC | SRMMU_REF)
10230+
10231+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10232+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10233+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10234+
10235 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10236 SRMMU_DIRTY | SRMMU_REF)
10237
10238diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10239index 29d64b1..4272fe8 100644
10240--- a/arch/sparc/include/asm/setup.h
10241+++ b/arch/sparc/include/asm/setup.h
10242@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10243 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10244
10245 /* init_64.c */
10246-extern atomic_t dcpage_flushes;
10247-extern atomic_t dcpage_flushes_xcall;
10248+extern atomic_unchecked_t dcpage_flushes;
10249+extern atomic_unchecked_t dcpage_flushes_xcall;
10250
10251 extern int sysctl_tsb_ratio;
10252 #endif
10253diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10254index 9689176..63c18ea 100644
10255--- a/arch/sparc/include/asm/spinlock_64.h
10256+++ b/arch/sparc/include/asm/spinlock_64.h
10257@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10258
10259 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10260
10261-static void inline arch_read_lock(arch_rwlock_t *lock)
10262+static inline void arch_read_lock(arch_rwlock_t *lock)
10263 {
10264 unsigned long tmp1, tmp2;
10265
10266 __asm__ __volatile__ (
10267 "1: ldsw [%2], %0\n"
10268 " brlz,pn %0, 2f\n"
10269-"4: add %0, 1, %1\n"
10270+"4: addcc %0, 1, %1\n"
10271+
10272+#ifdef CONFIG_PAX_REFCOUNT
10273+" tvs %%icc, 6\n"
10274+#endif
10275+
10276 " cas [%2], %0, %1\n"
10277 " cmp %0, %1\n"
10278 " bne,pn %%icc, 1b\n"
10279@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10280 " .previous"
10281 : "=&r" (tmp1), "=&r" (tmp2)
10282 : "r" (lock)
10283- : "memory");
10284+ : "memory", "cc");
10285 }
10286
10287-static int inline arch_read_trylock(arch_rwlock_t *lock)
10288+static inline int arch_read_trylock(arch_rwlock_t *lock)
10289 {
10290 int tmp1, tmp2;
10291
10292@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10293 "1: ldsw [%2], %0\n"
10294 " brlz,a,pn %0, 2f\n"
10295 " mov 0, %0\n"
10296-" add %0, 1, %1\n"
10297+" addcc %0, 1, %1\n"
10298+
10299+#ifdef CONFIG_PAX_REFCOUNT
10300+" tvs %%icc, 6\n"
10301+#endif
10302+
10303 " cas [%2], %0, %1\n"
10304 " cmp %0, %1\n"
10305 " bne,pn %%icc, 1b\n"
10306@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10307 return tmp1;
10308 }
10309
10310-static void inline arch_read_unlock(arch_rwlock_t *lock)
10311+static inline void arch_read_unlock(arch_rwlock_t *lock)
10312 {
10313 unsigned long tmp1, tmp2;
10314
10315 __asm__ __volatile__(
10316 "1: lduw [%2], %0\n"
10317-" sub %0, 1, %1\n"
10318+" subcc %0, 1, %1\n"
10319+
10320+#ifdef CONFIG_PAX_REFCOUNT
10321+" tvs %%icc, 6\n"
10322+#endif
10323+
10324 " cas [%2], %0, %1\n"
10325 " cmp %0, %1\n"
10326 " bne,pn %%xcc, 1b\n"
10327@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10328 : "memory");
10329 }
10330
10331-static void inline arch_write_lock(arch_rwlock_t *lock)
10332+static inline void arch_write_lock(arch_rwlock_t *lock)
10333 {
10334 unsigned long mask, tmp1, tmp2;
10335
10336@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10337 : "memory");
10338 }
10339
10340-static void inline arch_write_unlock(arch_rwlock_t *lock)
10341+static inline void arch_write_unlock(arch_rwlock_t *lock)
10342 {
10343 __asm__ __volatile__(
10344 " stw %%g0, [%0]"
10345@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10346 : "memory");
10347 }
10348
10349-static int inline arch_write_trylock(arch_rwlock_t *lock)
10350+static inline int arch_write_trylock(arch_rwlock_t *lock)
10351 {
10352 unsigned long mask, tmp1, tmp2, result;
10353
10354diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10355index 025c984..a216504 100644
10356--- a/arch/sparc/include/asm/thread_info_32.h
10357+++ b/arch/sparc/include/asm/thread_info_32.h
10358@@ -49,6 +49,8 @@ struct thread_info {
10359 unsigned long w_saved;
10360
10361 struct restart_block restart_block;
10362+
10363+ unsigned long lowest_stack;
10364 };
10365
10366 /*
10367diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10368index 798f027..b009941 100644
10369--- a/arch/sparc/include/asm/thread_info_64.h
10370+++ b/arch/sparc/include/asm/thread_info_64.h
10371@@ -63,6 +63,8 @@ struct thread_info {
10372 struct pt_regs *kern_una_regs;
10373 unsigned int kern_una_insn;
10374
10375+ unsigned long lowest_stack;
10376+
10377 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10378 __attribute__ ((aligned(64)));
10379 };
10380@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10381 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10382 /* flag bit 4 is available */
10383 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10384-/* flag bit 6 is available */
10385+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10386 #define TIF_32BIT 7 /* 32-bit binary */
10387 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10388 #define TIF_SECCOMP 9 /* secure computing */
10389 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10390 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10391+
10392 /* NOTE: Thread flags >= 12 should be ones we have no interest
10393 * in using in assembly, else we can't use the mask as
10394 * an immediate value in instructions such as andcc.
10395@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10396 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10397 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10398 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10399+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10400
10401 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10402 _TIF_DO_NOTIFY_RESUME_MASK | \
10403 _TIF_NEED_RESCHED)
10404 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10405
10406+#define _TIF_WORK_SYSCALL \
10407+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10408+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10409+
10410 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10411
10412 /*
10413diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10414index bd56c28..4b63d83 100644
10415--- a/arch/sparc/include/asm/uaccess.h
10416+++ b/arch/sparc/include/asm/uaccess.h
10417@@ -1,5 +1,6 @@
10418 #ifndef ___ASM_SPARC_UACCESS_H
10419 #define ___ASM_SPARC_UACCESS_H
10420+
10421 #if defined(__sparc__) && defined(__arch64__)
10422 #include <asm/uaccess_64.h>
10423 #else
10424diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10425index 9634d08..f55fe4f 100644
10426--- a/arch/sparc/include/asm/uaccess_32.h
10427+++ b/arch/sparc/include/asm/uaccess_32.h
10428@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10429
10430 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10431 {
10432- if (n && __access_ok((unsigned long) to, n))
10433+ if ((long)n < 0)
10434+ return n;
10435+
10436+ if (n && __access_ok((unsigned long) to, n)) {
10437+ if (!__builtin_constant_p(n))
10438+ check_object_size(from, n, true);
10439 return __copy_user(to, (__force void __user *) from, n);
10440- else
10441+ } else
10442 return n;
10443 }
10444
10445 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10446 {
10447+ if ((long)n < 0)
10448+ return n;
10449+
10450+ if (!__builtin_constant_p(n))
10451+ check_object_size(from, n, true);
10452+
10453 return __copy_user(to, (__force void __user *) from, n);
10454 }
10455
10456 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10457 {
10458- if (n && __access_ok((unsigned long) from, n))
10459+ if ((long)n < 0)
10460+ return n;
10461+
10462+ if (n && __access_ok((unsigned long) from, n)) {
10463+ if (!__builtin_constant_p(n))
10464+ check_object_size(to, n, false);
10465 return __copy_user((__force void __user *) to, from, n);
10466- else
10467+ } else
10468 return n;
10469 }
10470
10471 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10472 {
10473+ if ((long)n < 0)
10474+ return n;
10475+
10476 return __copy_user((__force void __user *) to, from, n);
10477 }
10478
10479diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10480index c990a5e..f17b9c1 100644
10481--- a/arch/sparc/include/asm/uaccess_64.h
10482+++ b/arch/sparc/include/asm/uaccess_64.h
10483@@ -10,6 +10,7 @@
10484 #include <linux/compiler.h>
10485 #include <linux/string.h>
10486 #include <linux/thread_info.h>
10487+#include <linux/kernel.h>
10488 #include <asm/asi.h>
10489 #include <asm/spitfire.h>
10490 #include <asm-generic/uaccess-unaligned.h>
10491@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10492 static inline unsigned long __must_check
10493 copy_from_user(void *to, const void __user *from, unsigned long size)
10494 {
10495- unsigned long ret = ___copy_from_user(to, from, size);
10496+ unsigned long ret;
10497
10498+ if ((long)size < 0 || size > INT_MAX)
10499+ return size;
10500+
10501+ if (!__builtin_constant_p(size))
10502+ check_object_size(to, size, false);
10503+
10504+ ret = ___copy_from_user(to, from, size);
10505 if (unlikely(ret))
10506 ret = copy_from_user_fixup(to, from, size);
10507
10508@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10509 static inline unsigned long __must_check
10510 copy_to_user(void __user *to, const void *from, unsigned long size)
10511 {
10512- unsigned long ret = ___copy_to_user(to, from, size);
10513+ unsigned long ret;
10514
10515+ if ((long)size < 0 || size > INT_MAX)
10516+ return size;
10517+
10518+ if (!__builtin_constant_p(size))
10519+ check_object_size(from, size, true);
10520+
10521+ ret = ___copy_to_user(to, from, size);
10522 if (unlikely(ret))
10523 ret = copy_to_user_fixup(to, from, size);
10524 return ret;
10525diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10526index 7cf9c6e..6206648 100644
10527--- a/arch/sparc/kernel/Makefile
10528+++ b/arch/sparc/kernel/Makefile
10529@@ -4,7 +4,7 @@
10530 #
10531
10532 asflags-y := -ansi
10533-ccflags-y := -Werror
10534+#ccflags-y := -Werror
10535
10536 extra-y := head_$(BITS).o
10537
10538diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10539index 50e7b62..79fae35 100644
10540--- a/arch/sparc/kernel/process_32.c
10541+++ b/arch/sparc/kernel/process_32.c
10542@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10543
10544 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10545 r->psr, r->pc, r->npc, r->y, print_tainted());
10546- printk("PC: <%pS>\n", (void *) r->pc);
10547+ printk("PC: <%pA>\n", (void *) r->pc);
10548 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10549 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10550 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10551 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10552 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10553 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10554- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10555+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10556
10557 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10558 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10559@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10560 rw = (struct reg_window32 *) fp;
10561 pc = rw->ins[7];
10562 printk("[%08lx : ", pc);
10563- printk("%pS ] ", (void *) pc);
10564+ printk("%pA ] ", (void *) pc);
10565 fp = rw->ins[6];
10566 } while (++count < 16);
10567 printk("\n");
10568diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10569index 46a5964..a35c62c 100644
10570--- a/arch/sparc/kernel/process_64.c
10571+++ b/arch/sparc/kernel/process_64.c
10572@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10573 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10574 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10575 if (regs->tstate & TSTATE_PRIV)
10576- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10577+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10578 }
10579
10580 void show_regs(struct pt_regs *regs)
10581@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10582
10583 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10584 regs->tpc, regs->tnpc, regs->y, print_tainted());
10585- printk("TPC: <%pS>\n", (void *) regs->tpc);
10586+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10587 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10588 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10589 regs->u_regs[3]);
10590@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10591 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10592 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10593 regs->u_regs[15]);
10594- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10595+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10596 show_regwindow(regs);
10597 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10598 }
10599@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10600 ((tp && tp->task) ? tp->task->pid : -1));
10601
10602 if (gp->tstate & TSTATE_PRIV) {
10603- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10604+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10605 (void *) gp->tpc,
10606 (void *) gp->o7,
10607 (void *) gp->i7,
10608diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10609index 79cc0d1..ec62734 100644
10610--- a/arch/sparc/kernel/prom_common.c
10611+++ b/arch/sparc/kernel/prom_common.c
10612@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10613
10614 unsigned int prom_early_allocated __initdata;
10615
10616-static struct of_pdt_ops prom_sparc_ops __initdata = {
10617+static struct of_pdt_ops prom_sparc_ops __initconst = {
10618 .nextprop = prom_common_nextprop,
10619 .getproplen = prom_getproplen,
10620 .getproperty = prom_getproperty,
10621diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10622index 9ddc492..27a5619 100644
10623--- a/arch/sparc/kernel/ptrace_64.c
10624+++ b/arch/sparc/kernel/ptrace_64.c
10625@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10626 return ret;
10627 }
10628
10629+#ifdef CONFIG_GRKERNSEC_SETXID
10630+extern void gr_delayed_cred_worker(void);
10631+#endif
10632+
10633 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10634 {
10635 int ret = 0;
10636@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10637 if (test_thread_flag(TIF_NOHZ))
10638 user_exit();
10639
10640+#ifdef CONFIG_GRKERNSEC_SETXID
10641+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10642+ gr_delayed_cred_worker();
10643+#endif
10644+
10645 if (test_thread_flag(TIF_SYSCALL_TRACE))
10646 ret = tracehook_report_syscall_entry(regs);
10647
10648@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10649 if (test_thread_flag(TIF_NOHZ))
10650 user_exit();
10651
10652+#ifdef CONFIG_GRKERNSEC_SETXID
10653+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10654+ gr_delayed_cred_worker();
10655+#endif
10656+
10657 audit_syscall_exit(regs);
10658
10659 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10660diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10661index da6f1a7..e5dea8f 100644
10662--- a/arch/sparc/kernel/smp_64.c
10663+++ b/arch/sparc/kernel/smp_64.c
10664@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10665 return;
10666
10667 #ifdef CONFIG_DEBUG_DCFLUSH
10668- atomic_inc(&dcpage_flushes);
10669+ atomic_inc_unchecked(&dcpage_flushes);
10670 #endif
10671
10672 this_cpu = get_cpu();
10673@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10674 xcall_deliver(data0, __pa(pg_addr),
10675 (u64) pg_addr, cpumask_of(cpu));
10676 #ifdef CONFIG_DEBUG_DCFLUSH
10677- atomic_inc(&dcpage_flushes_xcall);
10678+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10679 #endif
10680 }
10681 }
10682@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10683 preempt_disable();
10684
10685 #ifdef CONFIG_DEBUG_DCFLUSH
10686- atomic_inc(&dcpage_flushes);
10687+ atomic_inc_unchecked(&dcpage_flushes);
10688 #endif
10689 data0 = 0;
10690 pg_addr = page_address(page);
10691@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10692 xcall_deliver(data0, __pa(pg_addr),
10693 (u64) pg_addr, cpu_online_mask);
10694 #ifdef CONFIG_DEBUG_DCFLUSH
10695- atomic_inc(&dcpage_flushes_xcall);
10696+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10697 #endif
10698 }
10699 __local_flush_dcache_page(page);
10700diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10701index 646988d..b88905f 100644
10702--- a/arch/sparc/kernel/sys_sparc_32.c
10703+++ b/arch/sparc/kernel/sys_sparc_32.c
10704@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10705 if (len > TASK_SIZE - PAGE_SIZE)
10706 return -ENOMEM;
10707 if (!addr)
10708- addr = TASK_UNMAPPED_BASE;
10709+ addr = current->mm->mmap_base;
10710
10711 info.flags = 0;
10712 info.length = len;
10713diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10714index 30e7ddb..266a3b0 100644
10715--- a/arch/sparc/kernel/sys_sparc_64.c
10716+++ b/arch/sparc/kernel/sys_sparc_64.c
10717@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10718 struct vm_area_struct * vma;
10719 unsigned long task_size = TASK_SIZE;
10720 int do_color_align;
10721+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10722 struct vm_unmapped_area_info info;
10723
10724 if (flags & MAP_FIXED) {
10725 /* We do not accept a shared mapping if it would violate
10726 * cache aliasing constraints.
10727 */
10728- if ((flags & MAP_SHARED) &&
10729+ if ((filp || (flags & MAP_SHARED)) &&
10730 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10731 return -EINVAL;
10732 return addr;
10733@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10734 if (filp || (flags & MAP_SHARED))
10735 do_color_align = 1;
10736
10737+#ifdef CONFIG_PAX_RANDMMAP
10738+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10739+#endif
10740+
10741 if (addr) {
10742 if (do_color_align)
10743 addr = COLOR_ALIGN(addr, pgoff);
10744@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10745 addr = PAGE_ALIGN(addr);
10746
10747 vma = find_vma(mm, addr);
10748- if (task_size - len >= addr &&
10749- (!vma || addr + len <= vma->vm_start))
10750+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10751 return addr;
10752 }
10753
10754 info.flags = 0;
10755 info.length = len;
10756- info.low_limit = TASK_UNMAPPED_BASE;
10757+ info.low_limit = mm->mmap_base;
10758 info.high_limit = min(task_size, VA_EXCLUDE_START);
10759 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10760 info.align_offset = pgoff << PAGE_SHIFT;
10761+ info.threadstack_offset = offset;
10762 addr = vm_unmapped_area(&info);
10763
10764 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10765 VM_BUG_ON(addr != -ENOMEM);
10766 info.low_limit = VA_EXCLUDE_END;
10767+
10768+#ifdef CONFIG_PAX_RANDMMAP
10769+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10770+ info.low_limit += mm->delta_mmap;
10771+#endif
10772+
10773 info.high_limit = task_size;
10774 addr = vm_unmapped_area(&info);
10775 }
10776@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10777 unsigned long task_size = STACK_TOP32;
10778 unsigned long addr = addr0;
10779 int do_color_align;
10780+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10781 struct vm_unmapped_area_info info;
10782
10783 /* This should only ever run for 32-bit processes. */
10784@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10785 /* We do not accept a shared mapping if it would violate
10786 * cache aliasing constraints.
10787 */
10788- if ((flags & MAP_SHARED) &&
10789+ if ((filp || (flags & MAP_SHARED)) &&
10790 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10791 return -EINVAL;
10792 return addr;
10793@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10794 if (filp || (flags & MAP_SHARED))
10795 do_color_align = 1;
10796
10797+#ifdef CONFIG_PAX_RANDMMAP
10798+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10799+#endif
10800+
10801 /* requesting a specific address */
10802 if (addr) {
10803 if (do_color_align)
10804@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10805 addr = PAGE_ALIGN(addr);
10806
10807 vma = find_vma(mm, addr);
10808- if (task_size - len >= addr &&
10809- (!vma || addr + len <= vma->vm_start))
10810+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10811 return addr;
10812 }
10813
10814@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10815 info.high_limit = mm->mmap_base;
10816 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10817 info.align_offset = pgoff << PAGE_SHIFT;
10818+ info.threadstack_offset = offset;
10819 addr = vm_unmapped_area(&info);
10820
10821 /*
10822@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10823 VM_BUG_ON(addr != -ENOMEM);
10824 info.flags = 0;
10825 info.low_limit = TASK_UNMAPPED_BASE;
10826+
10827+#ifdef CONFIG_PAX_RANDMMAP
10828+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10829+ info.low_limit += mm->delta_mmap;
10830+#endif
10831+
10832 info.high_limit = STACK_TOP32;
10833 addr = vm_unmapped_area(&info);
10834 }
10835@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10836 EXPORT_SYMBOL(get_fb_unmapped_area);
10837
10838 /* Essentially the same as PowerPC. */
10839-static unsigned long mmap_rnd(void)
10840+static unsigned long mmap_rnd(struct mm_struct *mm)
10841 {
10842 unsigned long rnd = 0UL;
10843
10844+#ifdef CONFIG_PAX_RANDMMAP
10845+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10846+#endif
10847+
10848 if (current->flags & PF_RANDOMIZE) {
10849 unsigned long val = get_random_int();
10850 if (test_thread_flag(TIF_32BIT))
10851@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10852
10853 void arch_pick_mmap_layout(struct mm_struct *mm)
10854 {
10855- unsigned long random_factor = mmap_rnd();
10856+ unsigned long random_factor = mmap_rnd(mm);
10857 unsigned long gap;
10858
10859 /*
10860@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10861 gap == RLIM_INFINITY ||
10862 sysctl_legacy_va_layout) {
10863 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10864+
10865+#ifdef CONFIG_PAX_RANDMMAP
10866+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10867+ mm->mmap_base += mm->delta_mmap;
10868+#endif
10869+
10870 mm->get_unmapped_area = arch_get_unmapped_area;
10871 } else {
10872 /* We know it's 32-bit */
10873@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10874 gap = (task_size / 6 * 5);
10875
10876 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10877+
10878+#ifdef CONFIG_PAX_RANDMMAP
10879+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10880+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10881+#endif
10882+
10883 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10884 }
10885 }
10886diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10887index bb00089..e0ea580 100644
10888--- a/arch/sparc/kernel/syscalls.S
10889+++ b/arch/sparc/kernel/syscalls.S
10890@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10891 #endif
10892 .align 32
10893 1: ldx [%g6 + TI_FLAGS], %l5
10894- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10895+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10896 be,pt %icc, rtrap
10897 nop
10898 call syscall_trace_leave
10899@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10900
10901 srl %i3, 0, %o3 ! IEU0
10902 srl %i2, 0, %o2 ! IEU0 Group
10903- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10904+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10905 bne,pn %icc, linux_syscall_trace32 ! CTI
10906 mov %i0, %l5 ! IEU1
10907 5: call %l7 ! CTI Group brk forced
10908@@ -218,7 +218,7 @@ linux_sparc_syscall:
10909
10910 mov %i3, %o3 ! IEU1
10911 mov %i4, %o4 ! IEU0 Group
10912- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10913+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10914 bne,pn %icc, linux_syscall_trace ! CTI Group
10915 mov %i0, %l5 ! IEU0
10916 2: call %l7 ! CTI Group brk forced
10917@@ -233,7 +233,7 @@ ret_sys_call:
10918
10919 cmp %o0, -ERESTART_RESTARTBLOCK
10920 bgeu,pn %xcc, 1f
10921- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10922+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10923 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10924
10925 2:
10926diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10927index 6fd386c5..6907d81 100644
10928--- a/arch/sparc/kernel/traps_32.c
10929+++ b/arch/sparc/kernel/traps_32.c
10930@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10931 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10932 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10933
10934+extern void gr_handle_kernel_exploit(void);
10935+
10936 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10937 {
10938 static int die_counter;
10939@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10940 count++ < 30 &&
10941 (((unsigned long) rw) >= PAGE_OFFSET) &&
10942 !(((unsigned long) rw) & 0x7)) {
10943- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10944+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10945 (void *) rw->ins[7]);
10946 rw = (struct reg_window32 *)rw->ins[6];
10947 }
10948 }
10949 printk("Instruction DUMP:");
10950 instruction_dump ((unsigned long *) regs->pc);
10951- if(regs->psr & PSR_PS)
10952+ if(regs->psr & PSR_PS) {
10953+ gr_handle_kernel_exploit();
10954 do_exit(SIGKILL);
10955+ }
10956 do_exit(SIGSEGV);
10957 }
10958
10959diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10960index 981a769..d906eda 100644
10961--- a/arch/sparc/kernel/traps_64.c
10962+++ b/arch/sparc/kernel/traps_64.c
10963@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10964 i + 1,
10965 p->trapstack[i].tstate, p->trapstack[i].tpc,
10966 p->trapstack[i].tnpc, p->trapstack[i].tt);
10967- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10968+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10969 }
10970 }
10971
10972@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10973
10974 lvl -= 0x100;
10975 if (regs->tstate & TSTATE_PRIV) {
10976+
10977+#ifdef CONFIG_PAX_REFCOUNT
10978+ if (lvl == 6)
10979+ pax_report_refcount_overflow(regs);
10980+#endif
10981+
10982 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10983 die_if_kernel(buffer, regs);
10984 }
10985@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10986 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10987 {
10988 char buffer[32];
10989-
10990+
10991 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10992 0, lvl, SIGTRAP) == NOTIFY_STOP)
10993 return;
10994
10995+#ifdef CONFIG_PAX_REFCOUNT
10996+ if (lvl == 6)
10997+ pax_report_refcount_overflow(regs);
10998+#endif
10999+
11000 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11001
11002 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11003@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11004 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11005 printk("%s" "ERROR(%d): ",
11006 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11007- printk("TPC<%pS>\n", (void *) regs->tpc);
11008+ printk("TPC<%pA>\n", (void *) regs->tpc);
11009 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11010 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11011 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11012@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11013 smp_processor_id(),
11014 (type & 0x1) ? 'I' : 'D',
11015 regs->tpc);
11016- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11017+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11018 panic("Irrecoverable Cheetah+ parity error.");
11019 }
11020
11021@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11022 smp_processor_id(),
11023 (type & 0x1) ? 'I' : 'D',
11024 regs->tpc);
11025- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11026+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11027 }
11028
11029 struct sun4v_error_entry {
11030@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11031 /*0x38*/u64 reserved_5;
11032 };
11033
11034-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11035-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11036+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11037+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11038
11039 static const char *sun4v_err_type_to_str(u8 type)
11040 {
11041@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11042 }
11043
11044 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11045- int cpu, const char *pfx, atomic_t *ocnt)
11046+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11047 {
11048 u64 *raw_ptr = (u64 *) ent;
11049 u32 attrs;
11050@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11051
11052 show_regs(regs);
11053
11054- if ((cnt = atomic_read(ocnt)) != 0) {
11055- atomic_set(ocnt, 0);
11056+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11057+ atomic_set_unchecked(ocnt, 0);
11058 wmb();
11059 printk("%s: Queue overflowed %d times.\n",
11060 pfx, cnt);
11061@@ -2048,7 +2059,7 @@ out:
11062 */
11063 void sun4v_resum_overflow(struct pt_regs *regs)
11064 {
11065- atomic_inc(&sun4v_resum_oflow_cnt);
11066+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11067 }
11068
11069 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11070@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11071 /* XXX Actually even this can make not that much sense. Perhaps
11072 * XXX we should just pull the plug and panic directly from here?
11073 */
11074- atomic_inc(&sun4v_nonresum_oflow_cnt);
11075+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11076 }
11077
11078 static void sun4v_tlb_error(struct pt_regs *regs)
11079@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11080
11081 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11082 regs->tpc, tl);
11083- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11084+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11085 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11086- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11087+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11088 (void *) regs->u_regs[UREG_I7]);
11089 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11090 "pte[%lx] error[%lx]\n",
11091@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11092
11093 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11094 regs->tpc, tl);
11095- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11096+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11097 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11098- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11099+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11100 (void *) regs->u_regs[UREG_I7]);
11101 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11102 "pte[%lx] error[%lx]\n",
11103@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11104 fp = (unsigned long)sf->fp + STACK_BIAS;
11105 }
11106
11107- printk(" [%016lx] %pS\n", pc, (void *) pc);
11108+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11109 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11110 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11111 int index = tsk->curr_ret_stack;
11112 if (tsk->ret_stack && index >= graph) {
11113 pc = tsk->ret_stack[index - graph].ret;
11114- printk(" [%016lx] %pS\n", pc, (void *) pc);
11115+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11116 graph++;
11117 }
11118 }
11119@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11120 return (struct reg_window *) (fp + STACK_BIAS);
11121 }
11122
11123+extern void gr_handle_kernel_exploit(void);
11124+
11125 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11126 {
11127 static int die_counter;
11128@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11129 while (rw &&
11130 count++ < 30 &&
11131 kstack_valid(tp, (unsigned long) rw)) {
11132- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11133+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11134 (void *) rw->ins[7]);
11135
11136 rw = kernel_stack_up(rw);
11137@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11138 }
11139 user_instruction_dump ((unsigned int __user *) regs->tpc);
11140 }
11141- if (regs->tstate & TSTATE_PRIV)
11142+ if (regs->tstate & TSTATE_PRIV) {
11143+ gr_handle_kernel_exploit();
11144 do_exit(SIGKILL);
11145+ }
11146 do_exit(SIGSEGV);
11147 }
11148 EXPORT_SYMBOL(die_if_kernel);
11149diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11150index 62098a8..547ab2c 100644
11151--- a/arch/sparc/kernel/unaligned_64.c
11152+++ b/arch/sparc/kernel/unaligned_64.c
11153@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11154 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11155
11156 if (__ratelimit(&ratelimit)) {
11157- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11158+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11159 regs->tpc, (void *) regs->tpc);
11160 }
11161 }
11162diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11163index 3269b02..64f5231 100644
11164--- a/arch/sparc/lib/Makefile
11165+++ b/arch/sparc/lib/Makefile
11166@@ -2,7 +2,7 @@
11167 #
11168
11169 asflags-y := -ansi -DST_DIV0=0x02
11170-ccflags-y := -Werror
11171+#ccflags-y := -Werror
11172
11173 lib-$(CONFIG_SPARC32) += ashrdi3.o
11174 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11175diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11176index 05dac43..76f8ed4 100644
11177--- a/arch/sparc/lib/atomic_64.S
11178+++ b/arch/sparc/lib/atomic_64.S
11179@@ -15,11 +15,22 @@
11180 * a value and does the barriers.
11181 */
11182
11183-#define ATOMIC_OP(op) \
11184-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11185+#ifdef CONFIG_PAX_REFCOUNT
11186+#define __REFCOUNT_OP(op) op##cc
11187+#define __OVERFLOW_IOP tvs %icc, 6;
11188+#define __OVERFLOW_XOP tvs %xcc, 6;
11189+#else
11190+#define __REFCOUNT_OP(op) op
11191+#define __OVERFLOW_IOP
11192+#define __OVERFLOW_XOP
11193+#endif
11194+
11195+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11196+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11197 BACKOFF_SETUP(%o2); \
11198 1: lduw [%o1], %g1; \
11199- op %g1, %o0, %g7; \
11200+ asm_op %g1, %o0, %g7; \
11201+ post_op \
11202 cas [%o1], %g1, %g7; \
11203 cmp %g1, %g7; \
11204 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11205@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11206 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11207 ENDPROC(atomic_##op); \
11208
11209-#define ATOMIC_OP_RETURN(op) \
11210-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11211+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11212+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11213+
11214+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11215+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11216 BACKOFF_SETUP(%o2); \
11217 1: lduw [%o1], %g1; \
11218- op %g1, %o0, %g7; \
11219+ asm_op %g1, %o0, %g7; \
11220+ post_op \
11221 cas [%o1], %g1, %g7; \
11222 cmp %g1, %g7; \
11223 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11224@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11225 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11226 ENDPROC(atomic_##op##_return);
11227
11228+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11229+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11230+
11231 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11232
11233 ATOMIC_OPS(add)
11234@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11235
11236 #undef ATOMIC_OPS
11237 #undef ATOMIC_OP_RETURN
11238+#undef __ATOMIC_OP_RETURN
11239 #undef ATOMIC_OP
11240+#undef __ATOMIC_OP
11241
11242-#define ATOMIC64_OP(op) \
11243-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11244+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11245+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11246 BACKOFF_SETUP(%o2); \
11247 1: ldx [%o1], %g1; \
11248- op %g1, %o0, %g7; \
11249+ asm_op %g1, %o0, %g7; \
11250+ post_op \
11251 casx [%o1], %g1, %g7; \
11252 cmp %g1, %g7; \
11253 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11254@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11255 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11256 ENDPROC(atomic64_##op); \
11257
11258-#define ATOMIC64_OP_RETURN(op) \
11259-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11260+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11261+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11262+
11263+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11264+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11265 BACKOFF_SETUP(%o2); \
11266 1: ldx [%o1], %g1; \
11267- op %g1, %o0, %g7; \
11268+ asm_op %g1, %o0, %g7; \
11269+ post_op \
11270 casx [%o1], %g1, %g7; \
11271 cmp %g1, %g7; \
11272 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11273@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11274 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11275 ENDPROC(atomic64_##op##_return);
11276
11277+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11278+i __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11279+
11280 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11281
11282 ATOMIC64_OPS(add)
11283@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11284
11285 #undef ATOMIC64_OPS
11286 #undef ATOMIC64_OP_RETURN
11287+#undef __ATOMIC64_OP_RETURN
11288 #undef ATOMIC64_OP
11289+#undef __ATOMIC64_OP
11290+#undef __OVERFLOW_XOP
11291+#undef __OVERFLOW_IOP
11292+#undef __REFCOUNT_OP
11293
11294 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11295 BACKOFF_SETUP(%o2)
11296diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11297index 1d649a9..fbc5bfc 100644
11298--- a/arch/sparc/lib/ksyms.c
11299+++ b/arch/sparc/lib/ksyms.c
11300@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11301 /* Atomic counter implementation. */
11302 #define ATOMIC_OP(op) \
11303 EXPORT_SYMBOL(atomic_##op); \
11304-EXPORT_SYMBOL(atomic64_##op);
11305+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11306+EXPORT_SYMBOL(atomic64_##op); \
11307+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11308
11309 #define ATOMIC_OP_RETURN(op) \
11310 EXPORT_SYMBOL(atomic_##op##_return); \
11311@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11312 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11313
11314 ATOMIC_OPS(add)
11315+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11316+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11317 ATOMIC_OPS(sub)
11318
11319 #undef ATOMIC_OPS
11320diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11321index 30c3ecc..736f015 100644
11322--- a/arch/sparc/mm/Makefile
11323+++ b/arch/sparc/mm/Makefile
11324@@ -2,7 +2,7 @@
11325 #
11326
11327 asflags-y := -ansi
11328-ccflags-y := -Werror
11329+#ccflags-y := -Werror
11330
11331 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11332 obj-y += fault_$(BITS).o
11333diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11334index 70d8171..274c6c0 100644
11335--- a/arch/sparc/mm/fault_32.c
11336+++ b/arch/sparc/mm/fault_32.c
11337@@ -21,6 +21,9 @@
11338 #include <linux/perf_event.h>
11339 #include <linux/interrupt.h>
11340 #include <linux/kdebug.h>
11341+#include <linux/slab.h>
11342+#include <linux/pagemap.h>
11343+#include <linux/compiler.h>
11344
11345 #include <asm/page.h>
11346 #include <asm/pgtable.h>
11347@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11348 return safe_compute_effective_address(regs, insn);
11349 }
11350
11351+#ifdef CONFIG_PAX_PAGEEXEC
11352+#ifdef CONFIG_PAX_DLRESOLVE
11353+static void pax_emuplt_close(struct vm_area_struct *vma)
11354+{
11355+ vma->vm_mm->call_dl_resolve = 0UL;
11356+}
11357+
11358+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11359+{
11360+ unsigned int *kaddr;
11361+
11362+ vmf->page = alloc_page(GFP_HIGHUSER);
11363+ if (!vmf->page)
11364+ return VM_FAULT_OOM;
11365+
11366+ kaddr = kmap(vmf->page);
11367+ memset(kaddr, 0, PAGE_SIZE);
11368+ kaddr[0] = 0x9DE3BFA8U; /* save */
11369+ flush_dcache_page(vmf->page);
11370+ kunmap(vmf->page);
11371+ return VM_FAULT_MAJOR;
11372+}
11373+
11374+static const struct vm_operations_struct pax_vm_ops = {
11375+ .close = pax_emuplt_close,
11376+ .fault = pax_emuplt_fault
11377+};
11378+
11379+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11380+{
11381+ int ret;
11382+
11383+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11384+ vma->vm_mm = current->mm;
11385+ vma->vm_start = addr;
11386+ vma->vm_end = addr + PAGE_SIZE;
11387+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11388+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11389+ vma->vm_ops = &pax_vm_ops;
11390+
11391+ ret = insert_vm_struct(current->mm, vma);
11392+ if (ret)
11393+ return ret;
11394+
11395+ ++current->mm->total_vm;
11396+ return 0;
11397+}
11398+#endif
11399+
11400+/*
11401+ * PaX: decide what to do with offenders (regs->pc = fault address)
11402+ *
11403+ * returns 1 when task should be killed
11404+ * 2 when patched PLT trampoline was detected
11405+ * 3 when unpatched PLT trampoline was detected
11406+ */
11407+static int pax_handle_fetch_fault(struct pt_regs *regs)
11408+{
11409+
11410+#ifdef CONFIG_PAX_EMUPLT
11411+ int err;
11412+
11413+ do { /* PaX: patched PLT emulation #1 */
11414+ unsigned int sethi1, sethi2, jmpl;
11415+
11416+ err = get_user(sethi1, (unsigned int *)regs->pc);
11417+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11418+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11419+
11420+ if (err)
11421+ break;
11422+
11423+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11424+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11425+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11426+ {
11427+ unsigned int addr;
11428+
11429+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11430+ addr = regs->u_regs[UREG_G1];
11431+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11432+ regs->pc = addr;
11433+ regs->npc = addr+4;
11434+ return 2;
11435+ }
11436+ } while (0);
11437+
11438+ do { /* PaX: patched PLT emulation #2 */
11439+ unsigned int ba;
11440+
11441+ err = get_user(ba, (unsigned int *)regs->pc);
11442+
11443+ if (err)
11444+ break;
11445+
11446+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11447+ unsigned int addr;
11448+
11449+ if ((ba & 0xFFC00000U) == 0x30800000U)
11450+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11451+ else
11452+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11453+ regs->pc = addr;
11454+ regs->npc = addr+4;
11455+ return 2;
11456+ }
11457+ } while (0);
11458+
11459+ do { /* PaX: patched PLT emulation #3 */
11460+ unsigned int sethi, bajmpl, nop;
11461+
11462+ err = get_user(sethi, (unsigned int *)regs->pc);
11463+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11464+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11465+
11466+ if (err)
11467+ break;
11468+
11469+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11470+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11471+ nop == 0x01000000U)
11472+ {
11473+ unsigned int addr;
11474+
11475+ addr = (sethi & 0x003FFFFFU) << 10;
11476+ regs->u_regs[UREG_G1] = addr;
11477+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11478+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11479+ else
11480+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11481+ regs->pc = addr;
11482+ regs->npc = addr+4;
11483+ return 2;
11484+ }
11485+ } while (0);
11486+
11487+ do { /* PaX: unpatched PLT emulation step 1 */
11488+ unsigned int sethi, ba, nop;
11489+
11490+ err = get_user(sethi, (unsigned int *)regs->pc);
11491+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11492+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11493+
11494+ if (err)
11495+ break;
11496+
11497+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11498+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11499+ nop == 0x01000000U)
11500+ {
11501+ unsigned int addr, save, call;
11502+
11503+ if ((ba & 0xFFC00000U) == 0x30800000U)
11504+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11505+ else
11506+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11507+
11508+ err = get_user(save, (unsigned int *)addr);
11509+ err |= get_user(call, (unsigned int *)(addr+4));
11510+ err |= get_user(nop, (unsigned int *)(addr+8));
11511+ if (err)
11512+ break;
11513+
11514+#ifdef CONFIG_PAX_DLRESOLVE
11515+ if (save == 0x9DE3BFA8U &&
11516+ (call & 0xC0000000U) == 0x40000000U &&
11517+ nop == 0x01000000U)
11518+ {
11519+ struct vm_area_struct *vma;
11520+ unsigned long call_dl_resolve;
11521+
11522+ down_read(&current->mm->mmap_sem);
11523+ call_dl_resolve = current->mm->call_dl_resolve;
11524+ up_read(&current->mm->mmap_sem);
11525+ if (likely(call_dl_resolve))
11526+ goto emulate;
11527+
11528+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11529+
11530+ down_write(&current->mm->mmap_sem);
11531+ if (current->mm->call_dl_resolve) {
11532+ call_dl_resolve = current->mm->call_dl_resolve;
11533+ up_write(&current->mm->mmap_sem);
11534+ if (vma)
11535+ kmem_cache_free(vm_area_cachep, vma);
11536+ goto emulate;
11537+ }
11538+
11539+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11540+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11541+ up_write(&current->mm->mmap_sem);
11542+ if (vma)
11543+ kmem_cache_free(vm_area_cachep, vma);
11544+ return 1;
11545+ }
11546+
11547+ if (pax_insert_vma(vma, call_dl_resolve)) {
11548+ up_write(&current->mm->mmap_sem);
11549+ kmem_cache_free(vm_area_cachep, vma);
11550+ return 1;
11551+ }
11552+
11553+ current->mm->call_dl_resolve = call_dl_resolve;
11554+ up_write(&current->mm->mmap_sem);
11555+
11556+emulate:
11557+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11558+ regs->pc = call_dl_resolve;
11559+ regs->npc = addr+4;
11560+ return 3;
11561+ }
11562+#endif
11563+
11564+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11565+ if ((save & 0xFFC00000U) == 0x05000000U &&
11566+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11567+ nop == 0x01000000U)
11568+ {
11569+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11570+ regs->u_regs[UREG_G2] = addr + 4;
11571+ addr = (save & 0x003FFFFFU) << 10;
11572+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11573+ regs->pc = addr;
11574+ regs->npc = addr+4;
11575+ return 3;
11576+ }
11577+ }
11578+ } while (0);
11579+
11580+ do { /* PaX: unpatched PLT emulation step 2 */
11581+ unsigned int save, call, nop;
11582+
11583+ err = get_user(save, (unsigned int *)(regs->pc-4));
11584+ err |= get_user(call, (unsigned int *)regs->pc);
11585+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11586+ if (err)
11587+ break;
11588+
11589+ if (save == 0x9DE3BFA8U &&
11590+ (call & 0xC0000000U) == 0x40000000U &&
11591+ nop == 0x01000000U)
11592+ {
11593+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11594+
11595+ regs->u_regs[UREG_RETPC] = regs->pc;
11596+ regs->pc = dl_resolve;
11597+ regs->npc = dl_resolve+4;
11598+ return 3;
11599+ }
11600+ } while (0);
11601+#endif
11602+
11603+ return 1;
11604+}
11605+
11606+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11607+{
11608+ unsigned long i;
11609+
11610+ printk(KERN_ERR "PAX: bytes at PC: ");
11611+ for (i = 0; i < 8; i++) {
11612+ unsigned int c;
11613+ if (get_user(c, (unsigned int *)pc+i))
11614+ printk(KERN_CONT "???????? ");
11615+ else
11616+ printk(KERN_CONT "%08x ", c);
11617+ }
11618+ printk("\n");
11619+}
11620+#endif
11621+
11622 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11623 int text_fault)
11624 {
11625@@ -226,6 +500,24 @@ good_area:
11626 if (!(vma->vm_flags & VM_WRITE))
11627 goto bad_area;
11628 } else {
11629+
11630+#ifdef CONFIG_PAX_PAGEEXEC
11631+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11632+ up_read(&mm->mmap_sem);
11633+ switch (pax_handle_fetch_fault(regs)) {
11634+
11635+#ifdef CONFIG_PAX_EMUPLT
11636+ case 2:
11637+ case 3:
11638+ return;
11639+#endif
11640+
11641+ }
11642+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11643+ do_group_exit(SIGKILL);
11644+ }
11645+#endif
11646+
11647 /* Allow reads even for write-only mappings */
11648 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11649 goto bad_area;
11650diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11651index 4798232..f76e3aa 100644
11652--- a/arch/sparc/mm/fault_64.c
11653+++ b/arch/sparc/mm/fault_64.c
11654@@ -22,6 +22,9 @@
11655 #include <linux/kdebug.h>
11656 #include <linux/percpu.h>
11657 #include <linux/context_tracking.h>
11658+#include <linux/slab.h>
11659+#include <linux/pagemap.h>
11660+#include <linux/compiler.h>
11661
11662 #include <asm/page.h>
11663 #include <asm/pgtable.h>
11664@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11665 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11666 regs->tpc);
11667 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11668- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11669+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11670 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11671 dump_stack();
11672 unhandled_fault(regs->tpc, current, regs);
11673@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11674 show_regs(regs);
11675 }
11676
11677+#ifdef CONFIG_PAX_PAGEEXEC
11678+#ifdef CONFIG_PAX_DLRESOLVE
11679+static void pax_emuplt_close(struct vm_area_struct *vma)
11680+{
11681+ vma->vm_mm->call_dl_resolve = 0UL;
11682+}
11683+
11684+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11685+{
11686+ unsigned int *kaddr;
11687+
11688+ vmf->page = alloc_page(GFP_HIGHUSER);
11689+ if (!vmf->page)
11690+ return VM_FAULT_OOM;
11691+
11692+ kaddr = kmap(vmf->page);
11693+ memset(kaddr, 0, PAGE_SIZE);
11694+ kaddr[0] = 0x9DE3BFA8U; /* save */
11695+ flush_dcache_page(vmf->page);
11696+ kunmap(vmf->page);
11697+ return VM_FAULT_MAJOR;
11698+}
11699+
11700+static const struct vm_operations_struct pax_vm_ops = {
11701+ .close = pax_emuplt_close,
11702+ .fault = pax_emuplt_fault
11703+};
11704+
11705+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11706+{
11707+ int ret;
11708+
11709+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11710+ vma->vm_mm = current->mm;
11711+ vma->vm_start = addr;
11712+ vma->vm_end = addr + PAGE_SIZE;
11713+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11714+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11715+ vma->vm_ops = &pax_vm_ops;
11716+
11717+ ret = insert_vm_struct(current->mm, vma);
11718+ if (ret)
11719+ return ret;
11720+
11721+ ++current->mm->total_vm;
11722+ return 0;
11723+}
11724+#endif
11725+
11726+/*
11727+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11728+ *
11729+ * returns 1 when task should be killed
11730+ * 2 when patched PLT trampoline was detected
11731+ * 3 when unpatched PLT trampoline was detected
11732+ */
11733+static int pax_handle_fetch_fault(struct pt_regs *regs)
11734+{
11735+
11736+#ifdef CONFIG_PAX_EMUPLT
11737+ int err;
11738+
11739+ do { /* PaX: patched PLT emulation #1 */
11740+ unsigned int sethi1, sethi2, jmpl;
11741+
11742+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11743+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11744+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11745+
11746+ if (err)
11747+ break;
11748+
11749+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11750+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11751+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11752+ {
11753+ unsigned long addr;
11754+
11755+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11756+ addr = regs->u_regs[UREG_G1];
11757+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11758+
11759+ if (test_thread_flag(TIF_32BIT))
11760+ addr &= 0xFFFFFFFFUL;
11761+
11762+ regs->tpc = addr;
11763+ regs->tnpc = addr+4;
11764+ return 2;
11765+ }
11766+ } while (0);
11767+
11768+ do { /* PaX: patched PLT emulation #2 */
11769+ unsigned int ba;
11770+
11771+ err = get_user(ba, (unsigned int *)regs->tpc);
11772+
11773+ if (err)
11774+ break;
11775+
11776+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11777+ unsigned long addr;
11778+
11779+ if ((ba & 0xFFC00000U) == 0x30800000U)
11780+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11781+ else
11782+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11783+
11784+ if (test_thread_flag(TIF_32BIT))
11785+ addr &= 0xFFFFFFFFUL;
11786+
11787+ regs->tpc = addr;
11788+ regs->tnpc = addr+4;
11789+ return 2;
11790+ }
11791+ } while (0);
11792+
11793+ do { /* PaX: patched PLT emulation #3 */
11794+ unsigned int sethi, bajmpl, nop;
11795+
11796+ err = get_user(sethi, (unsigned int *)regs->tpc);
11797+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11798+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11799+
11800+ if (err)
11801+ break;
11802+
11803+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11804+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11805+ nop == 0x01000000U)
11806+ {
11807+ unsigned long addr;
11808+
11809+ addr = (sethi & 0x003FFFFFU) << 10;
11810+ regs->u_regs[UREG_G1] = addr;
11811+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11812+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11813+ else
11814+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11815+
11816+ if (test_thread_flag(TIF_32BIT))
11817+ addr &= 0xFFFFFFFFUL;
11818+
11819+ regs->tpc = addr;
11820+ regs->tnpc = addr+4;
11821+ return 2;
11822+ }
11823+ } while (0);
11824+
11825+ do { /* PaX: patched PLT emulation #4 */
11826+ unsigned int sethi, mov1, call, mov2;
11827+
11828+ err = get_user(sethi, (unsigned int *)regs->tpc);
11829+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11830+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11831+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11832+
11833+ if (err)
11834+ break;
11835+
11836+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11837+ mov1 == 0x8210000FU &&
11838+ (call & 0xC0000000U) == 0x40000000U &&
11839+ mov2 == 0x9E100001U)
11840+ {
11841+ unsigned long addr;
11842+
11843+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11844+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11845+
11846+ if (test_thread_flag(TIF_32BIT))
11847+ addr &= 0xFFFFFFFFUL;
11848+
11849+ regs->tpc = addr;
11850+ regs->tnpc = addr+4;
11851+ return 2;
11852+ }
11853+ } while (0);
11854+
11855+ do { /* PaX: patched PLT emulation #5 */
11856+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11857+
11858+ err = get_user(sethi, (unsigned int *)regs->tpc);
11859+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11860+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11861+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11862+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11863+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11864+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11865+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11866+
11867+ if (err)
11868+ break;
11869+
11870+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11871+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11872+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11873+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11874+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11875+ sllx == 0x83287020U &&
11876+ jmpl == 0x81C04005U &&
11877+ nop == 0x01000000U)
11878+ {
11879+ unsigned long addr;
11880+
11881+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11882+ regs->u_regs[UREG_G1] <<= 32;
11883+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11884+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11885+ regs->tpc = addr;
11886+ regs->tnpc = addr+4;
11887+ return 2;
11888+ }
11889+ } while (0);
11890+
11891+ do { /* PaX: patched PLT emulation #6 */
11892+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11893+
11894+ err = get_user(sethi, (unsigned int *)regs->tpc);
11895+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11896+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11897+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11898+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11899+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11900+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11901+
11902+ if (err)
11903+ break;
11904+
11905+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11906+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11907+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11908+ sllx == 0x83287020U &&
11909+ (or & 0xFFFFE000U) == 0x8A116000U &&
11910+ jmpl == 0x81C04005U &&
11911+ nop == 0x01000000U)
11912+ {
11913+ unsigned long addr;
11914+
11915+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11916+ regs->u_regs[UREG_G1] <<= 32;
11917+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11918+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11919+ regs->tpc = addr;
11920+ regs->tnpc = addr+4;
11921+ return 2;
11922+ }
11923+ } while (0);
11924+
11925+ do { /* PaX: unpatched PLT emulation step 1 */
11926+ unsigned int sethi, ba, nop;
11927+
11928+ err = get_user(sethi, (unsigned int *)regs->tpc);
11929+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11930+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11931+
11932+ if (err)
11933+ break;
11934+
11935+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11936+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11937+ nop == 0x01000000U)
11938+ {
11939+ unsigned long addr;
11940+ unsigned int save, call;
11941+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11942+
11943+ if ((ba & 0xFFC00000U) == 0x30800000U)
11944+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11945+ else
11946+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11947+
11948+ if (test_thread_flag(TIF_32BIT))
11949+ addr &= 0xFFFFFFFFUL;
11950+
11951+ err = get_user(save, (unsigned int *)addr);
11952+ err |= get_user(call, (unsigned int *)(addr+4));
11953+ err |= get_user(nop, (unsigned int *)(addr+8));
11954+ if (err)
11955+ break;
11956+
11957+#ifdef CONFIG_PAX_DLRESOLVE
11958+ if (save == 0x9DE3BFA8U &&
11959+ (call & 0xC0000000U) == 0x40000000U &&
11960+ nop == 0x01000000U)
11961+ {
11962+ struct vm_area_struct *vma;
11963+ unsigned long call_dl_resolve;
11964+
11965+ down_read(&current->mm->mmap_sem);
11966+ call_dl_resolve = current->mm->call_dl_resolve;
11967+ up_read(&current->mm->mmap_sem);
11968+ if (likely(call_dl_resolve))
11969+ goto emulate;
11970+
11971+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11972+
11973+ down_write(&current->mm->mmap_sem);
11974+ if (current->mm->call_dl_resolve) {
11975+ call_dl_resolve = current->mm->call_dl_resolve;
11976+ up_write(&current->mm->mmap_sem);
11977+ if (vma)
11978+ kmem_cache_free(vm_area_cachep, vma);
11979+ goto emulate;
11980+ }
11981+
11982+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11983+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11984+ up_write(&current->mm->mmap_sem);
11985+ if (vma)
11986+ kmem_cache_free(vm_area_cachep, vma);
11987+ return 1;
11988+ }
11989+
11990+ if (pax_insert_vma(vma, call_dl_resolve)) {
11991+ up_write(&current->mm->mmap_sem);
11992+ kmem_cache_free(vm_area_cachep, vma);
11993+ return 1;
11994+ }
11995+
11996+ current->mm->call_dl_resolve = call_dl_resolve;
11997+ up_write(&current->mm->mmap_sem);
11998+
11999+emulate:
12000+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12001+ regs->tpc = call_dl_resolve;
12002+ regs->tnpc = addr+4;
12003+ return 3;
12004+ }
12005+#endif
12006+
12007+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12008+ if ((save & 0xFFC00000U) == 0x05000000U &&
12009+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12010+ nop == 0x01000000U)
12011+ {
12012+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12013+ regs->u_regs[UREG_G2] = addr + 4;
12014+ addr = (save & 0x003FFFFFU) << 10;
12015+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12016+
12017+ if (test_thread_flag(TIF_32BIT))
12018+ addr &= 0xFFFFFFFFUL;
12019+
12020+ regs->tpc = addr;
12021+ regs->tnpc = addr+4;
12022+ return 3;
12023+ }
12024+
12025+ /* PaX: 64-bit PLT stub */
12026+ err = get_user(sethi1, (unsigned int *)addr);
12027+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12028+ err |= get_user(or1, (unsigned int *)(addr+8));
12029+ err |= get_user(or2, (unsigned int *)(addr+12));
12030+ err |= get_user(sllx, (unsigned int *)(addr+16));
12031+ err |= get_user(add, (unsigned int *)(addr+20));
12032+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12033+ err |= get_user(nop, (unsigned int *)(addr+28));
12034+ if (err)
12035+ break;
12036+
12037+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12038+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12039+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12040+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12041+ sllx == 0x89293020U &&
12042+ add == 0x8A010005U &&
12043+ jmpl == 0x89C14000U &&
12044+ nop == 0x01000000U)
12045+ {
12046+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12047+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12048+ regs->u_regs[UREG_G4] <<= 32;
12049+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12050+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12051+ regs->u_regs[UREG_G4] = addr + 24;
12052+ addr = regs->u_regs[UREG_G5];
12053+ regs->tpc = addr;
12054+ regs->tnpc = addr+4;
12055+ return 3;
12056+ }
12057+ }
12058+ } while (0);
12059+
12060+#ifdef CONFIG_PAX_DLRESOLVE
12061+ do { /* PaX: unpatched PLT emulation step 2 */
12062+ unsigned int save, call, nop;
12063+
12064+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12065+ err |= get_user(call, (unsigned int *)regs->tpc);
12066+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12067+ if (err)
12068+ break;
12069+
12070+ if (save == 0x9DE3BFA8U &&
12071+ (call & 0xC0000000U) == 0x40000000U &&
12072+ nop == 0x01000000U)
12073+ {
12074+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12075+
12076+ if (test_thread_flag(TIF_32BIT))
12077+ dl_resolve &= 0xFFFFFFFFUL;
12078+
12079+ regs->u_regs[UREG_RETPC] = regs->tpc;
12080+ regs->tpc = dl_resolve;
12081+ regs->tnpc = dl_resolve+4;
12082+ return 3;
12083+ }
12084+ } while (0);
12085+#endif
12086+
12087+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12088+ unsigned int sethi, ba, nop;
12089+
12090+ err = get_user(sethi, (unsigned int *)regs->tpc);
12091+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12092+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12093+
12094+ if (err)
12095+ break;
12096+
12097+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12098+ (ba & 0xFFF00000U) == 0x30600000U &&
12099+ nop == 0x01000000U)
12100+ {
12101+ unsigned long addr;
12102+
12103+ addr = (sethi & 0x003FFFFFU) << 10;
12104+ regs->u_regs[UREG_G1] = addr;
12105+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12106+
12107+ if (test_thread_flag(TIF_32BIT))
12108+ addr &= 0xFFFFFFFFUL;
12109+
12110+ regs->tpc = addr;
12111+ regs->tnpc = addr+4;
12112+ return 2;
12113+ }
12114+ } while (0);
12115+
12116+#endif
12117+
12118+ return 1;
12119+}
12120+
12121+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12122+{
12123+ unsigned long i;
12124+
12125+ printk(KERN_ERR "PAX: bytes at PC: ");
12126+ for (i = 0; i < 8; i++) {
12127+ unsigned int c;
12128+ if (get_user(c, (unsigned int *)pc+i))
12129+ printk(KERN_CONT "???????? ");
12130+ else
12131+ printk(KERN_CONT "%08x ", c);
12132+ }
12133+ printk("\n");
12134+}
12135+#endif
12136+
12137 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12138 {
12139 enum ctx_state prev_state = exception_enter();
12140@@ -353,6 +816,29 @@ retry:
12141 if (!vma)
12142 goto bad_area;
12143
12144+#ifdef CONFIG_PAX_PAGEEXEC
12145+ /* PaX: detect ITLB misses on non-exec pages */
12146+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12147+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12148+ {
12149+ if (address != regs->tpc)
12150+ goto good_area;
12151+
12152+ up_read(&mm->mmap_sem);
12153+ switch (pax_handle_fetch_fault(regs)) {
12154+
12155+#ifdef CONFIG_PAX_EMUPLT
12156+ case 2:
12157+ case 3:
12158+ return;
12159+#endif
12160+
12161+ }
12162+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12163+ do_group_exit(SIGKILL);
12164+ }
12165+#endif
12166+
12167 /* Pure DTLB misses do not tell us whether the fault causing
12168 * load/store/atomic was a write or not, it only says that there
12169 * was no match. So in such a case we (carefully) read the
12170diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12171index d329537..2c3746a 100644
12172--- a/arch/sparc/mm/hugetlbpage.c
12173+++ b/arch/sparc/mm/hugetlbpage.c
12174@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12175 unsigned long addr,
12176 unsigned long len,
12177 unsigned long pgoff,
12178- unsigned long flags)
12179+ unsigned long flags,
12180+ unsigned long offset)
12181 {
12182+ struct mm_struct *mm = current->mm;
12183 unsigned long task_size = TASK_SIZE;
12184 struct vm_unmapped_area_info info;
12185
12186@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12187
12188 info.flags = 0;
12189 info.length = len;
12190- info.low_limit = TASK_UNMAPPED_BASE;
12191+ info.low_limit = mm->mmap_base;
12192 info.high_limit = min(task_size, VA_EXCLUDE_START);
12193 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12194 info.align_offset = 0;
12195+ info.threadstack_offset = offset;
12196 addr = vm_unmapped_area(&info);
12197
12198 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12199 VM_BUG_ON(addr != -ENOMEM);
12200 info.low_limit = VA_EXCLUDE_END;
12201+
12202+#ifdef CONFIG_PAX_RANDMMAP
12203+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12204+ info.low_limit += mm->delta_mmap;
12205+#endif
12206+
12207 info.high_limit = task_size;
12208 addr = vm_unmapped_area(&info);
12209 }
12210@@ -55,7 +64,8 @@ static unsigned long
12211 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12212 const unsigned long len,
12213 const unsigned long pgoff,
12214- const unsigned long flags)
12215+ const unsigned long flags,
12216+ const unsigned long offset)
12217 {
12218 struct mm_struct *mm = current->mm;
12219 unsigned long addr = addr0;
12220@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12221 info.high_limit = mm->mmap_base;
12222 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12223 info.align_offset = 0;
12224+ info.threadstack_offset = offset;
12225 addr = vm_unmapped_area(&info);
12226
12227 /*
12228@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12229 VM_BUG_ON(addr != -ENOMEM);
12230 info.flags = 0;
12231 info.low_limit = TASK_UNMAPPED_BASE;
12232+
12233+#ifdef CONFIG_PAX_RANDMMAP
12234+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12235+ info.low_limit += mm->delta_mmap;
12236+#endif
12237+
12238 info.high_limit = STACK_TOP32;
12239 addr = vm_unmapped_area(&info);
12240 }
12241@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12242 struct mm_struct *mm = current->mm;
12243 struct vm_area_struct *vma;
12244 unsigned long task_size = TASK_SIZE;
12245+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12246
12247 if (test_thread_flag(TIF_32BIT))
12248 task_size = STACK_TOP32;
12249@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12250 return addr;
12251 }
12252
12253+#ifdef CONFIG_PAX_RANDMMAP
12254+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12255+#endif
12256+
12257 if (addr) {
12258 addr = ALIGN(addr, HPAGE_SIZE);
12259 vma = find_vma(mm, addr);
12260- if (task_size - len >= addr &&
12261- (!vma || addr + len <= vma->vm_start))
12262+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12263 return addr;
12264 }
12265 if (mm->get_unmapped_area == arch_get_unmapped_area)
12266 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12267- pgoff, flags);
12268+ pgoff, flags, offset);
12269 else
12270 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12271- pgoff, flags);
12272+ pgoff, flags, offset);
12273 }
12274
12275 pte_t *huge_pte_alloc(struct mm_struct *mm,
12276diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12277index 3ea267c..93f0659 100644
12278--- a/arch/sparc/mm/init_64.c
12279+++ b/arch/sparc/mm/init_64.c
12280@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12281 int num_kernel_image_mappings;
12282
12283 #ifdef CONFIG_DEBUG_DCFLUSH
12284-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12285+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12286 #ifdef CONFIG_SMP
12287-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12288+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12289 #endif
12290 #endif
12291
12292@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12293 {
12294 BUG_ON(tlb_type == hypervisor);
12295 #ifdef CONFIG_DEBUG_DCFLUSH
12296- atomic_inc(&dcpage_flushes);
12297+ atomic_inc_unchecked(&dcpage_flushes);
12298 #endif
12299
12300 #ifdef DCACHE_ALIASING_POSSIBLE
12301@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12302
12303 #ifdef CONFIG_DEBUG_DCFLUSH
12304 seq_printf(m, "DCPageFlushes\t: %d\n",
12305- atomic_read(&dcpage_flushes));
12306+ atomic_read_unchecked(&dcpage_flushes));
12307 #ifdef CONFIG_SMP
12308 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12309- atomic_read(&dcpage_flushes_xcall));
12310+ atomic_read_unchecked(&dcpage_flushes_xcall));
12311 #endif /* CONFIG_SMP */
12312 #endif /* CONFIG_DEBUG_DCFLUSH */
12313 }
12314diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12315index 7cca418..53fc030 100644
12316--- a/arch/tile/Kconfig
12317+++ b/arch/tile/Kconfig
12318@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12319
12320 config KEXEC
12321 bool "kexec system call"
12322+ depends on !GRKERNSEC_KMEM
12323 ---help---
12324 kexec is a system call that implements the ability to shutdown your
12325 current kernel, and to start another kernel. It is like a reboot
12326diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12327index 7b11c5f..755a026 100644
12328--- a/arch/tile/include/asm/atomic_64.h
12329+++ b/arch/tile/include/asm/atomic_64.h
12330@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12331
12332 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12333
12334+#define atomic64_read_unchecked(v) atomic64_read(v)
12335+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12336+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12337+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12338+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12339+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12340+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12341+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12342+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12343+
12344 /* Define this to indicate that cmpxchg is an efficient operation. */
12345 #define __HAVE_ARCH_CMPXCHG
12346
12347diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12348index 6160761..00cac88 100644
12349--- a/arch/tile/include/asm/cache.h
12350+++ b/arch/tile/include/asm/cache.h
12351@@ -15,11 +15,12 @@
12352 #ifndef _ASM_TILE_CACHE_H
12353 #define _ASM_TILE_CACHE_H
12354
12355+#include <linux/const.h>
12356 #include <arch/chip.h>
12357
12358 /* bytes per L1 data cache line */
12359 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12360-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12361+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12362
12363 /* bytes per L2 cache line */
12364 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12365diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12366index b6cde32..c0cb736 100644
12367--- a/arch/tile/include/asm/uaccess.h
12368+++ b/arch/tile/include/asm/uaccess.h
12369@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12370 const void __user *from,
12371 unsigned long n)
12372 {
12373- int sz = __compiletime_object_size(to);
12374+ size_t sz = __compiletime_object_size(to);
12375
12376- if (likely(sz == -1 || sz >= n))
12377+ if (likely(sz == (size_t)-1 || sz >= n))
12378 n = _copy_from_user(to, from, n);
12379 else
12380 copy_from_user_overflow();
12381diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12382index 3270e00..a77236e 100644
12383--- a/arch/tile/mm/hugetlbpage.c
12384+++ b/arch/tile/mm/hugetlbpage.c
12385@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12386 info.high_limit = TASK_SIZE;
12387 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12388 info.align_offset = 0;
12389+ info.threadstack_offset = 0;
12390 return vm_unmapped_area(&info);
12391 }
12392
12393@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12394 info.high_limit = current->mm->mmap_base;
12395 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12396 info.align_offset = 0;
12397+ info.threadstack_offset = 0;
12398 addr = vm_unmapped_area(&info);
12399
12400 /*
12401diff --git a/arch/um/Makefile b/arch/um/Makefile
12402index e4b1a96..16162f8 100644
12403--- a/arch/um/Makefile
12404+++ b/arch/um/Makefile
12405@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12406 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12407 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12408
12409+ifdef CONSTIFY_PLUGIN
12410+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12411+endif
12412+
12413 #This will adjust *FLAGS accordingly to the platform.
12414 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12415
12416diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12417index 19e1bdd..3665b77 100644
12418--- a/arch/um/include/asm/cache.h
12419+++ b/arch/um/include/asm/cache.h
12420@@ -1,6 +1,7 @@
12421 #ifndef __UM_CACHE_H
12422 #define __UM_CACHE_H
12423
12424+#include <linux/const.h>
12425
12426 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12427 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12428@@ -12,6 +13,6 @@
12429 # define L1_CACHE_SHIFT 5
12430 #endif
12431
12432-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12433+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12434
12435 #endif
12436diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12437index 2e0a6b1..a64d0f5 100644
12438--- a/arch/um/include/asm/kmap_types.h
12439+++ b/arch/um/include/asm/kmap_types.h
12440@@ -8,6 +8,6 @@
12441
12442 /* No more #include "asm/arch/kmap_types.h" ! */
12443
12444-#define KM_TYPE_NR 14
12445+#define KM_TYPE_NR 15
12446
12447 #endif
12448diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12449index 71c5d13..4c7b9f1 100644
12450--- a/arch/um/include/asm/page.h
12451+++ b/arch/um/include/asm/page.h
12452@@ -14,6 +14,9 @@
12453 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12454 #define PAGE_MASK (~(PAGE_SIZE-1))
12455
12456+#define ktla_ktva(addr) (addr)
12457+#define ktva_ktla(addr) (addr)
12458+
12459 #ifndef __ASSEMBLY__
12460
12461 struct page;
12462diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12463index 0032f92..cd151e0 100644
12464--- a/arch/um/include/asm/pgtable-3level.h
12465+++ b/arch/um/include/asm/pgtable-3level.h
12466@@ -58,6 +58,7 @@
12467 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12468 #define pud_populate(mm, pud, pmd) \
12469 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12470+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12471
12472 #ifdef CONFIG_64BIT
12473 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12474diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12475index f17bca8..48adb87 100644
12476--- a/arch/um/kernel/process.c
12477+++ b/arch/um/kernel/process.c
12478@@ -356,22 +356,6 @@ int singlestepping(void * t)
12479 return 2;
12480 }
12481
12482-/*
12483- * Only x86 and x86_64 have an arch_align_stack().
12484- * All other arches have "#define arch_align_stack(x) (x)"
12485- * in their asm/exec.h
12486- * As this is included in UML from asm-um/system-generic.h,
12487- * we can use it to behave as the subarch does.
12488- */
12489-#ifndef arch_align_stack
12490-unsigned long arch_align_stack(unsigned long sp)
12491-{
12492- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12493- sp -= get_random_int() % 8192;
12494- return sp & ~0xf;
12495-}
12496-#endif
12497-
12498 unsigned long get_wchan(struct task_struct *p)
12499 {
12500 unsigned long stack_page, sp, ip;
12501diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12502index ad8f795..2c7eec6 100644
12503--- a/arch/unicore32/include/asm/cache.h
12504+++ b/arch/unicore32/include/asm/cache.h
12505@@ -12,8 +12,10 @@
12506 #ifndef __UNICORE_CACHE_H__
12507 #define __UNICORE_CACHE_H__
12508
12509-#define L1_CACHE_SHIFT (5)
12510-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12511+#include <linux/const.h>
12512+
12513+#define L1_CACHE_SHIFT 5
12514+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12515
12516 /*
12517 * Memory returned by kmalloc() may be used for DMA, so we must make
12518diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12519index 0dc9d01..98df103 100644
12520--- a/arch/x86/Kconfig
12521+++ b/arch/x86/Kconfig
12522@@ -130,7 +130,7 @@ config X86
12523 select RTC_LIB
12524 select HAVE_DEBUG_STACKOVERFLOW
12525 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12526- select HAVE_CC_STACKPROTECTOR
12527+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12528 select GENERIC_CPU_AUTOPROBE
12529 select HAVE_ARCH_AUDITSYSCALL
12530 select ARCH_SUPPORTS_ATOMIC_RMW
12531@@ -263,7 +263,7 @@ config X86_HT
12532
12533 config X86_32_LAZY_GS
12534 def_bool y
12535- depends on X86_32 && !CC_STACKPROTECTOR
12536+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12537
12538 config ARCH_HWEIGHT_CFLAGS
12539 string
12540@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12541
12542 menuconfig HYPERVISOR_GUEST
12543 bool "Linux guest support"
12544+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12545 ---help---
12546 Say Y here to enable options for running Linux under various hyper-
12547 visors. This option enables basic hypervisor detection and platform
12548@@ -978,6 +979,7 @@ config VM86
12549
12550 config X86_16BIT
12551 bool "Enable support for 16-bit segments" if EXPERT
12552+ depends on !GRKERNSEC
12553 default y
12554 ---help---
12555 This option is required by programs like Wine to run 16-bit
12556@@ -1151,6 +1153,7 @@ choice
12557
12558 config NOHIGHMEM
12559 bool "off"
12560+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12561 ---help---
12562 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12563 However, the address space of 32-bit x86 processors is only 4
12564@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12565
12566 config HIGHMEM4G
12567 bool "4GB"
12568+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12569 ---help---
12570 Select this if you have a 32-bit processor and between 1 and 4
12571 gigabytes of physical RAM.
12572@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12573 hex
12574 default 0xB0000000 if VMSPLIT_3G_OPT
12575 default 0x80000000 if VMSPLIT_2G
12576- default 0x78000000 if VMSPLIT_2G_OPT
12577+ default 0x70000000 if VMSPLIT_2G_OPT
12578 default 0x40000000 if VMSPLIT_1G
12579 default 0xC0000000
12580 depends on X86_32
12581@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12582
12583 config KEXEC
12584 bool "kexec system call"
12585+ depends on !GRKERNSEC_KMEM
12586 ---help---
12587 kexec is a system call that implements the ability to shutdown your
12588 current kernel, and to start another kernel. It is like a reboot
12589@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12590
12591 config PHYSICAL_ALIGN
12592 hex "Alignment value to which kernel should be aligned"
12593- default "0x200000"
12594+ default "0x1000000"
12595+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12596+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12597 range 0x2000 0x1000000 if X86_32
12598 range 0x200000 0x1000000 if X86_64
12599 ---help---
12600@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12601 def_bool n
12602 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12603 depends on X86_32 || IA32_EMULATION
12604+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12605 ---help---
12606 Certain buggy versions of glibc will crash if they are
12607 presented with a 32-bit vDSO that is not mapped at the address
12608diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12609index 6983314..54ad7e8 100644
12610--- a/arch/x86/Kconfig.cpu
12611+++ b/arch/x86/Kconfig.cpu
12612@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12613
12614 config X86_F00F_BUG
12615 def_bool y
12616- depends on M586MMX || M586TSC || M586 || M486
12617+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12618
12619 config X86_INVD_BUG
12620 def_bool y
12621@@ -327,7 +327,7 @@ config X86_INVD_BUG
12622
12623 config X86_ALIGNMENT_16
12624 def_bool y
12625- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12626+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12627
12628 config X86_INTEL_USERCOPY
12629 def_bool y
12630@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12631 # generates cmov.
12632 config X86_CMOV
12633 def_bool y
12634- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12635+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12636
12637 config X86_MINIMUM_CPU_FAMILY
12638 int
12639diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12640index 61bd2ad..50b625d 100644
12641--- a/arch/x86/Kconfig.debug
12642+++ b/arch/x86/Kconfig.debug
12643@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12644 config DEBUG_RODATA
12645 bool "Write protect kernel read-only data structures"
12646 default y
12647- depends on DEBUG_KERNEL
12648+ depends on DEBUG_KERNEL && BROKEN
12649 ---help---
12650 Mark the kernel read-only data as write-protected in the pagetables,
12651 in order to catch accidental (and incorrect) writes to such const
12652@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12653
12654 config DEBUG_SET_MODULE_RONX
12655 bool "Set loadable kernel module data as NX and text as RO"
12656- depends on MODULES
12657+ depends on MODULES && BROKEN
12658 ---help---
12659 This option helps catch unintended modifications to loadable
12660 kernel module's text and read-only data. It also prevents execution
12661diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12662index 920e616..ac3d4df 100644
12663--- a/arch/x86/Makefile
12664+++ b/arch/x86/Makefile
12665@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12666 # CPU-specific tuning. Anything which can be shared with UML should go here.
12667 include $(srctree)/arch/x86/Makefile_32.cpu
12668 KBUILD_CFLAGS += $(cflags-y)
12669-
12670- # temporary until string.h is fixed
12671- KBUILD_CFLAGS += -ffreestanding
12672 else
12673 BITS := 64
12674 UTS_MACHINE := x86_64
12675@@ -107,6 +104,9 @@ else
12676 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12677 endif
12678
12679+# temporary until string.h is fixed
12680+KBUILD_CFLAGS += -ffreestanding
12681+
12682 # Make sure compiler does not have buggy stack-protector support.
12683 ifdef CONFIG_CC_STACKPROTECTOR
12684 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12685@@ -180,6 +180,7 @@ archheaders:
12686 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12687
12688 archprepare:
12689+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12690 ifeq ($(CONFIG_KEXEC_FILE),y)
12691 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12692 endif
12693@@ -263,3 +264,9 @@ define archhelp
12694 echo ' FDARGS="..." arguments for the booted kernel'
12695 echo ' FDINITRD=file initrd for the booted kernel'
12696 endef
12697+
12698+define OLD_LD
12699+
12700+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12701+*** Please upgrade your binutils to 2.18 or newer
12702+endef
12703diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12704index 3db07f3..9d81d0f 100644
12705--- a/arch/x86/boot/Makefile
12706+++ b/arch/x86/boot/Makefile
12707@@ -56,6 +56,9 @@ clean-files += cpustr.h
12708 # ---------------------------------------------------------------------------
12709
12710 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12711+ifdef CONSTIFY_PLUGIN
12712+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12713+endif
12714 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12715 GCOV_PROFILE := n
12716
12717diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12718index 878e4b9..20537ab 100644
12719--- a/arch/x86/boot/bitops.h
12720+++ b/arch/x86/boot/bitops.h
12721@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12722 u8 v;
12723 const u32 *p = (const u32 *)addr;
12724
12725- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12726+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12727 return v;
12728 }
12729
12730@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12731
12732 static inline void set_bit(int nr, void *addr)
12733 {
12734- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12735+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12736 }
12737
12738 #endif /* BOOT_BITOPS_H */
12739diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12740index bd49ec6..94c7f58 100644
12741--- a/arch/x86/boot/boot.h
12742+++ b/arch/x86/boot/boot.h
12743@@ -84,7 +84,7 @@ static inline void io_delay(void)
12744 static inline u16 ds(void)
12745 {
12746 u16 seg;
12747- asm("movw %%ds,%0" : "=rm" (seg));
12748+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12749 return seg;
12750 }
12751
12752diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12753index 8bd44e8..6b111e9 100644
12754--- a/arch/x86/boot/compressed/Makefile
12755+++ b/arch/x86/boot/compressed/Makefile
12756@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12757 KBUILD_CFLAGS += -mno-mmx -mno-sse
12758 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12759 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12760+ifdef CONSTIFY_PLUGIN
12761+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12762+endif
12763
12764 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12765 GCOV_PROFILE := n
12766diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12767index a53440e..c3dbf1e 100644
12768--- a/arch/x86/boot/compressed/efi_stub_32.S
12769+++ b/arch/x86/boot/compressed/efi_stub_32.S
12770@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12771 * parameter 2, ..., param n. To make things easy, we save the return
12772 * address of efi_call_phys in a global variable.
12773 */
12774- popl %ecx
12775- movl %ecx, saved_return_addr(%edx)
12776- /* get the function pointer into ECX*/
12777- popl %ecx
12778- movl %ecx, efi_rt_function_ptr(%edx)
12779+ popl saved_return_addr(%edx)
12780+ popl efi_rt_function_ptr(%edx)
12781
12782 /*
12783 * 3. Call the physical function.
12784 */
12785- call *%ecx
12786+ call *efi_rt_function_ptr(%edx)
12787
12788 /*
12789 * 4. Balance the stack. And because EAX contain the return value,
12790@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12791 1: popl %edx
12792 subl $1b, %edx
12793
12794- movl efi_rt_function_ptr(%edx), %ecx
12795- pushl %ecx
12796+ pushl efi_rt_function_ptr(%edx)
12797
12798 /*
12799 * 10. Push the saved return address onto the stack and return.
12800 */
12801- movl saved_return_addr(%edx), %ecx
12802- pushl %ecx
12803- ret
12804+ jmpl *saved_return_addr(%edx)
12805 ENDPROC(efi_call_phys)
12806 .previous
12807
12808diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12809index 630384a..278e788 100644
12810--- a/arch/x86/boot/compressed/efi_thunk_64.S
12811+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12812@@ -189,8 +189,8 @@ efi_gdt64:
12813 .long 0 /* Filled out by user */
12814 .word 0
12815 .quad 0x0000000000000000 /* NULL descriptor */
12816- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12817- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12818+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12819+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12820 .quad 0x0080890000000000 /* TS descriptor */
12821 .quad 0x0000000000000000 /* TS continued */
12822 efi_gdt64_end:
12823diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12824index 1d7fbbc..36ecd58 100644
12825--- a/arch/x86/boot/compressed/head_32.S
12826+++ b/arch/x86/boot/compressed/head_32.S
12827@@ -140,10 +140,10 @@ preferred_addr:
12828 addl %eax, %ebx
12829 notl %eax
12830 andl %eax, %ebx
12831- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12832+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12833 jge 1f
12834 #endif
12835- movl $LOAD_PHYSICAL_ADDR, %ebx
12836+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12837 1:
12838
12839 /* Target address to relocate to for decompression */
12840diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12841index 6b1766c..ad465c9 100644
12842--- a/arch/x86/boot/compressed/head_64.S
12843+++ b/arch/x86/boot/compressed/head_64.S
12844@@ -94,10 +94,10 @@ ENTRY(startup_32)
12845 addl %eax, %ebx
12846 notl %eax
12847 andl %eax, %ebx
12848- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12849+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12850 jge 1f
12851 #endif
12852- movl $LOAD_PHYSICAL_ADDR, %ebx
12853+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12854 1:
12855
12856 /* Target address to relocate to for decompression */
12857@@ -322,10 +322,10 @@ preferred_addr:
12858 addq %rax, %rbp
12859 notq %rax
12860 andq %rax, %rbp
12861- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12862+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12863 jge 1f
12864 #endif
12865- movq $LOAD_PHYSICAL_ADDR, %rbp
12866+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12867 1:
12868
12869 /* Target address to relocate to for decompression */
12870@@ -434,8 +434,8 @@ gdt:
12871 .long gdt
12872 .word 0
12873 .quad 0x0000000000000000 /* NULL descriptor */
12874- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12875- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12876+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12877+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12878 .quad 0x0080890000000000 /* TS descriptor */
12879 .quad 0x0000000000000000 /* TS continued */
12880 gdt_end:
12881diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12882index a950864..c710239 100644
12883--- a/arch/x86/boot/compressed/misc.c
12884+++ b/arch/x86/boot/compressed/misc.c
12885@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12886 * Calculate the delta between where vmlinux was linked to load
12887 * and where it was actually loaded.
12888 */
12889- delta = min_addr - LOAD_PHYSICAL_ADDR;
12890+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12891 if (!delta) {
12892 debug_putstr("No relocation needed... ");
12893 return;
12894@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12895 Elf32_Ehdr ehdr;
12896 Elf32_Phdr *phdrs, *phdr;
12897 #endif
12898- void *dest;
12899+ void *dest, *prev;
12900 int i;
12901
12902 memcpy(&ehdr, output, sizeof(ehdr));
12903@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12904 case PT_LOAD:
12905 #ifdef CONFIG_RELOCATABLE
12906 dest = output;
12907- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12908+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12909 #else
12910 dest = (void *)(phdr->p_paddr);
12911 #endif
12912 memcpy(dest,
12913 output + phdr->p_offset,
12914 phdr->p_filesz);
12915+ if (i)
12916+ memset(prev, 0xff, dest - prev);
12917+ prev = dest + phdr->p_filesz;
12918 break;
12919 default: /* Ignore other PT_* */ break;
12920 }
12921@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12922 error("Destination address too large");
12923 #endif
12924 #ifndef CONFIG_RELOCATABLE
12925- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12926+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12927 error("Wrong destination address");
12928 #endif
12929
12930diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12931index 1fd7d57..0f7d096 100644
12932--- a/arch/x86/boot/cpucheck.c
12933+++ b/arch/x86/boot/cpucheck.c
12934@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12935 u32 ecx = MSR_K7_HWCR;
12936 u32 eax, edx;
12937
12938- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12939+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12940 eax &= ~(1 << 15);
12941- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12942+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12943
12944 get_cpuflags(); /* Make sure it really did something */
12945 err = check_cpuflags();
12946@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12947 u32 ecx = MSR_VIA_FCR;
12948 u32 eax, edx;
12949
12950- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12951+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12952 eax |= (1<<1)|(1<<7);
12953- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12954+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12955
12956 set_bit(X86_FEATURE_CX8, cpu.flags);
12957 err = check_cpuflags();
12958@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12959 u32 eax, edx;
12960 u32 level = 1;
12961
12962- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12963- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12964- asm("cpuid"
12965+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12966+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12967+ asm volatile("cpuid"
12968 : "+a" (level), "=d" (cpu.flags[0])
12969 : : "ecx", "ebx");
12970- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12971+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12972
12973 err = check_cpuflags();
12974 } else if (err == 0x01 &&
12975diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12976index 16ef025..91e033b 100644
12977--- a/arch/x86/boot/header.S
12978+++ b/arch/x86/boot/header.S
12979@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12980 # single linked list of
12981 # struct setup_data
12982
12983-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12984+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12985
12986 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12987+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12988+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12989+#else
12990 #define VO_INIT_SIZE (VO__end - VO__text)
12991+#endif
12992 #if ZO_INIT_SIZE > VO_INIT_SIZE
12993 #define INIT_SIZE ZO_INIT_SIZE
12994 #else
12995diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12996index db75d07..8e6d0af 100644
12997--- a/arch/x86/boot/memory.c
12998+++ b/arch/x86/boot/memory.c
12999@@ -19,7 +19,7 @@
13000
13001 static int detect_memory_e820(void)
13002 {
13003- int count = 0;
13004+ unsigned int count = 0;
13005 struct biosregs ireg, oreg;
13006 struct e820entry *desc = boot_params.e820_map;
13007 static struct e820entry buf; /* static so it is zeroed */
13008diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13009index ba3e100..6501b8f 100644
13010--- a/arch/x86/boot/video-vesa.c
13011+++ b/arch/x86/boot/video-vesa.c
13012@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13013
13014 boot_params.screen_info.vesapm_seg = oreg.es;
13015 boot_params.screen_info.vesapm_off = oreg.di;
13016+ boot_params.screen_info.vesapm_size = oreg.cx;
13017 }
13018
13019 /*
13020diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13021index 43eda28..5ab5fdb 100644
13022--- a/arch/x86/boot/video.c
13023+++ b/arch/x86/boot/video.c
13024@@ -96,7 +96,7 @@ static void store_mode_params(void)
13025 static unsigned int get_entry(void)
13026 {
13027 char entry_buf[4];
13028- int i, len = 0;
13029+ unsigned int i, len = 0;
13030 int key;
13031 unsigned int v;
13032
13033diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13034index 9105655..41779c1 100644
13035--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13036+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13037@@ -8,6 +8,8 @@
13038 * including this sentence is retained in full.
13039 */
13040
13041+#include <asm/alternative-asm.h>
13042+
13043 .extern crypto_ft_tab
13044 .extern crypto_it_tab
13045 .extern crypto_fl_tab
13046@@ -70,6 +72,8 @@
13047 je B192; \
13048 leaq 32(r9),r9;
13049
13050+#define ret pax_force_retaddr; ret
13051+
13052 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13053 movq r1,r2; \
13054 movq r3,r4; \
13055diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13056index 477e9d7..c92c7d8 100644
13057--- a/arch/x86/crypto/aesni-intel_asm.S
13058+++ b/arch/x86/crypto/aesni-intel_asm.S
13059@@ -31,6 +31,7 @@
13060
13061 #include <linux/linkage.h>
13062 #include <asm/inst.h>
13063+#include <asm/alternative-asm.h>
13064
13065 #ifdef __x86_64__
13066 .data
13067@@ -205,7 +206,7 @@ enc: .octa 0x2
13068 * num_initial_blocks = b mod 4
13069 * encrypt the initial num_initial_blocks blocks and apply ghash on
13070 * the ciphertext
13071-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13072+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13073 * are clobbered
13074 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13075 */
13076@@ -214,8 +215,8 @@ enc: .octa 0x2
13077 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13078 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13079 mov arg7, %r10 # %r10 = AAD
13080- mov arg8, %r12 # %r12 = aadLen
13081- mov %r12, %r11
13082+ mov arg8, %r15 # %r15 = aadLen
13083+ mov %r15, %r11
13084 pxor %xmm\i, %xmm\i
13085 _get_AAD_loop\num_initial_blocks\operation:
13086 movd (%r10), \TMP1
13087@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13088 psrldq $4, %xmm\i
13089 pxor \TMP1, %xmm\i
13090 add $4, %r10
13091- sub $4, %r12
13092+ sub $4, %r15
13093 jne _get_AAD_loop\num_initial_blocks\operation
13094 cmp $16, %r11
13095 je _get_AAD_loop2_done\num_initial_blocks\operation
13096- mov $16, %r12
13097+ mov $16, %r15
13098 _get_AAD_loop2\num_initial_blocks\operation:
13099 psrldq $4, %xmm\i
13100- sub $4, %r12
13101- cmp %r11, %r12
13102+ sub $4, %r15
13103+ cmp %r11, %r15
13104 jne _get_AAD_loop2\num_initial_blocks\operation
13105 _get_AAD_loop2_done\num_initial_blocks\operation:
13106 movdqa SHUF_MASK(%rip), %xmm14
13107@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13108 * num_initial_blocks = b mod 4
13109 * encrypt the initial num_initial_blocks blocks and apply ghash on
13110 * the ciphertext
13111-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13112+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13113 * are clobbered
13114 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13115 */
13116@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13117 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13118 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13119 mov arg7, %r10 # %r10 = AAD
13120- mov arg8, %r12 # %r12 = aadLen
13121- mov %r12, %r11
13122+ mov arg8, %r15 # %r15 = aadLen
13123+ mov %r15, %r11
13124 pxor %xmm\i, %xmm\i
13125 _get_AAD_loop\num_initial_blocks\operation:
13126 movd (%r10), \TMP1
13127@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13128 psrldq $4, %xmm\i
13129 pxor \TMP1, %xmm\i
13130 add $4, %r10
13131- sub $4, %r12
13132+ sub $4, %r15
13133 jne _get_AAD_loop\num_initial_blocks\operation
13134 cmp $16, %r11
13135 je _get_AAD_loop2_done\num_initial_blocks\operation
13136- mov $16, %r12
13137+ mov $16, %r15
13138 _get_AAD_loop2\num_initial_blocks\operation:
13139 psrldq $4, %xmm\i
13140- sub $4, %r12
13141- cmp %r11, %r12
13142+ sub $4, %r15
13143+ cmp %r11, %r15
13144 jne _get_AAD_loop2\num_initial_blocks\operation
13145 _get_AAD_loop2_done\num_initial_blocks\operation:
13146 movdqa SHUF_MASK(%rip), %xmm14
13147@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13148 *
13149 *****************************************************************************/
13150 ENTRY(aesni_gcm_dec)
13151- push %r12
13152+ push %r15
13153 push %r13
13154 push %r14
13155 mov %rsp, %r14
13156@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13157 */
13158 sub $VARIABLE_OFFSET, %rsp
13159 and $~63, %rsp # align rsp to 64 bytes
13160- mov %arg6, %r12
13161- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13162+ mov %arg6, %r15
13163+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13164 movdqa SHUF_MASK(%rip), %xmm2
13165 PSHUFB_XMM %xmm2, %xmm13
13166
13167@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13168 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13169 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13170 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13171- mov %r13, %r12
13172- and $(3<<4), %r12
13173+ mov %r13, %r15
13174+ and $(3<<4), %r15
13175 jz _initial_num_blocks_is_0_decrypt
13176- cmp $(2<<4), %r12
13177+ cmp $(2<<4), %r15
13178 jb _initial_num_blocks_is_1_decrypt
13179 je _initial_num_blocks_is_2_decrypt
13180 _initial_num_blocks_is_3_decrypt:
13181@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13182 sub $16, %r11
13183 add %r13, %r11
13184 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13185- lea SHIFT_MASK+16(%rip), %r12
13186- sub %r13, %r12
13187+ lea SHIFT_MASK+16(%rip), %r15
13188+ sub %r13, %r15
13189 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13190 # (%r13 is the number of bytes in plaintext mod 16)
13191- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13192+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13193 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13194
13195 movdqa %xmm1, %xmm2
13196 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13197- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13198+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13199 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13200 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13201 pand %xmm1, %xmm2
13202@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13203 sub $1, %r13
13204 jne _less_than_8_bytes_left_decrypt
13205 _multiple_of_16_bytes_decrypt:
13206- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13207- shl $3, %r12 # convert into number of bits
13208- movd %r12d, %xmm15 # len(A) in %xmm15
13209+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13210+ shl $3, %r15 # convert into number of bits
13211+ movd %r15d, %xmm15 # len(A) in %xmm15
13212 shl $3, %arg4 # len(C) in bits (*128)
13213 MOVQ_R64_XMM %arg4, %xmm1
13214 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13215@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13216 mov %r14, %rsp
13217 pop %r14
13218 pop %r13
13219- pop %r12
13220+ pop %r15
13221+ pax_force_retaddr
13222 ret
13223 ENDPROC(aesni_gcm_dec)
13224
13225@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13226 * poly = x^128 + x^127 + x^126 + x^121 + 1
13227 ***************************************************************************/
13228 ENTRY(aesni_gcm_enc)
13229- push %r12
13230+ push %r15
13231 push %r13
13232 push %r14
13233 mov %rsp, %r14
13234@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13235 #
13236 sub $VARIABLE_OFFSET, %rsp
13237 and $~63, %rsp
13238- mov %arg6, %r12
13239- movdqu (%r12), %xmm13
13240+ mov %arg6, %r15
13241+ movdqu (%r15), %xmm13
13242 movdqa SHUF_MASK(%rip), %xmm2
13243 PSHUFB_XMM %xmm2, %xmm13
13244
13245@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13246 movdqa %xmm13, HashKey(%rsp)
13247 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13248 and $-16, %r13
13249- mov %r13, %r12
13250+ mov %r13, %r15
13251
13252 # Encrypt first few blocks
13253
13254- and $(3<<4), %r12
13255+ and $(3<<4), %r15
13256 jz _initial_num_blocks_is_0_encrypt
13257- cmp $(2<<4), %r12
13258+ cmp $(2<<4), %r15
13259 jb _initial_num_blocks_is_1_encrypt
13260 je _initial_num_blocks_is_2_encrypt
13261 _initial_num_blocks_is_3_encrypt:
13262@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13263 sub $16, %r11
13264 add %r13, %r11
13265 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13266- lea SHIFT_MASK+16(%rip), %r12
13267- sub %r13, %r12
13268+ lea SHIFT_MASK+16(%rip), %r15
13269+ sub %r13, %r15
13270 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13271 # (%r13 is the number of bytes in plaintext mod 16)
13272- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13273+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13274 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13275 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13276- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13277+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13278 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13279 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13280 movdqa SHUF_MASK(%rip), %xmm10
13281@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13282 sub $1, %r13
13283 jne _less_than_8_bytes_left_encrypt
13284 _multiple_of_16_bytes_encrypt:
13285- mov arg8, %r12 # %r12 = addLen (number of bytes)
13286- shl $3, %r12
13287- movd %r12d, %xmm15 # len(A) in %xmm15
13288+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13289+ shl $3, %r15
13290+ movd %r15d, %xmm15 # len(A) in %xmm15
13291 shl $3, %arg4 # len(C) in bits (*128)
13292 MOVQ_R64_XMM %arg4, %xmm1
13293 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13294@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13295 mov %r14, %rsp
13296 pop %r14
13297 pop %r13
13298- pop %r12
13299+ pop %r15
13300+ pax_force_retaddr
13301 ret
13302 ENDPROC(aesni_gcm_enc)
13303
13304@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13305 pxor %xmm1, %xmm0
13306 movaps %xmm0, (TKEYP)
13307 add $0x10, TKEYP
13308+ pax_force_retaddr
13309 ret
13310 ENDPROC(_key_expansion_128)
13311 ENDPROC(_key_expansion_256a)
13312@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13313 shufps $0b01001110, %xmm2, %xmm1
13314 movaps %xmm1, 0x10(TKEYP)
13315 add $0x20, TKEYP
13316+ pax_force_retaddr
13317 ret
13318 ENDPROC(_key_expansion_192a)
13319
13320@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13321
13322 movaps %xmm0, (TKEYP)
13323 add $0x10, TKEYP
13324+ pax_force_retaddr
13325 ret
13326 ENDPROC(_key_expansion_192b)
13327
13328@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13329 pxor %xmm1, %xmm2
13330 movaps %xmm2, (TKEYP)
13331 add $0x10, TKEYP
13332+ pax_force_retaddr
13333 ret
13334 ENDPROC(_key_expansion_256b)
13335
13336@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13337 #ifndef __x86_64__
13338 popl KEYP
13339 #endif
13340+ pax_force_retaddr
13341 ret
13342 ENDPROC(aesni_set_key)
13343
13344@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13345 popl KLEN
13346 popl KEYP
13347 #endif
13348+ pax_force_retaddr
13349 ret
13350 ENDPROC(aesni_enc)
13351
13352@@ -1974,6 +1983,7 @@ _aesni_enc1:
13353 AESENC KEY STATE
13354 movaps 0x70(TKEYP), KEY
13355 AESENCLAST KEY STATE
13356+ pax_force_retaddr
13357 ret
13358 ENDPROC(_aesni_enc1)
13359
13360@@ -2083,6 +2093,7 @@ _aesni_enc4:
13361 AESENCLAST KEY STATE2
13362 AESENCLAST KEY STATE3
13363 AESENCLAST KEY STATE4
13364+ pax_force_retaddr
13365 ret
13366 ENDPROC(_aesni_enc4)
13367
13368@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13369 popl KLEN
13370 popl KEYP
13371 #endif
13372+ pax_force_retaddr
13373 ret
13374 ENDPROC(aesni_dec)
13375
13376@@ -2164,6 +2176,7 @@ _aesni_dec1:
13377 AESDEC KEY STATE
13378 movaps 0x70(TKEYP), KEY
13379 AESDECLAST KEY STATE
13380+ pax_force_retaddr
13381 ret
13382 ENDPROC(_aesni_dec1)
13383
13384@@ -2273,6 +2286,7 @@ _aesni_dec4:
13385 AESDECLAST KEY STATE2
13386 AESDECLAST KEY STATE3
13387 AESDECLAST KEY STATE4
13388+ pax_force_retaddr
13389 ret
13390 ENDPROC(_aesni_dec4)
13391
13392@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13393 popl KEYP
13394 popl LEN
13395 #endif
13396+ pax_force_retaddr
13397 ret
13398 ENDPROC(aesni_ecb_enc)
13399
13400@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13401 popl KEYP
13402 popl LEN
13403 #endif
13404+ pax_force_retaddr
13405 ret
13406 ENDPROC(aesni_ecb_dec)
13407
13408@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13409 popl LEN
13410 popl IVP
13411 #endif
13412+ pax_force_retaddr
13413 ret
13414 ENDPROC(aesni_cbc_enc)
13415
13416@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13417 popl LEN
13418 popl IVP
13419 #endif
13420+ pax_force_retaddr
13421 ret
13422 ENDPROC(aesni_cbc_dec)
13423
13424@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13425 mov $1, TCTR_LOW
13426 MOVQ_R64_XMM TCTR_LOW INC
13427 MOVQ_R64_XMM CTR TCTR_LOW
13428+ pax_force_retaddr
13429 ret
13430 ENDPROC(_aesni_inc_init)
13431
13432@@ -2579,6 +2598,7 @@ _aesni_inc:
13433 .Linc_low:
13434 movaps CTR, IV
13435 PSHUFB_XMM BSWAP_MASK IV
13436+ pax_force_retaddr
13437 ret
13438 ENDPROC(_aesni_inc)
13439
13440@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13441 .Lctr_enc_ret:
13442 movups IV, (IVP)
13443 .Lctr_enc_just_ret:
13444+ pax_force_retaddr
13445 ret
13446 ENDPROC(aesni_ctr_enc)
13447
13448@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13449 pxor INC, STATE4
13450 movdqu STATE4, 0x70(OUTP)
13451
13452+ pax_force_retaddr
13453 ret
13454 ENDPROC(aesni_xts_crypt8)
13455
13456diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13457index 246c670..466e2d6 100644
13458--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13459+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13460@@ -21,6 +21,7 @@
13461 */
13462
13463 #include <linux/linkage.h>
13464+#include <asm/alternative-asm.h>
13465
13466 .file "blowfish-x86_64-asm.S"
13467 .text
13468@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13469 jnz .L__enc_xor;
13470
13471 write_block();
13472+ pax_force_retaddr
13473 ret;
13474 .L__enc_xor:
13475 xor_block();
13476+ pax_force_retaddr
13477 ret;
13478 ENDPROC(__blowfish_enc_blk)
13479
13480@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13481
13482 movq %r11, %rbp;
13483
13484+ pax_force_retaddr
13485 ret;
13486 ENDPROC(blowfish_dec_blk)
13487
13488@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13489
13490 popq %rbx;
13491 popq %rbp;
13492+ pax_force_retaddr
13493 ret;
13494
13495 .L__enc_xor4:
13496@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13497
13498 popq %rbx;
13499 popq %rbp;
13500+ pax_force_retaddr
13501 ret;
13502 ENDPROC(__blowfish_enc_blk_4way)
13503
13504@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13505 popq %rbx;
13506 popq %rbp;
13507
13508+ pax_force_retaddr
13509 ret;
13510 ENDPROC(blowfish_dec_blk_4way)
13511diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13512index ce71f92..1dce7ec 100644
13513--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13514+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13515@@ -16,6 +16,7 @@
13516 */
13517
13518 #include <linux/linkage.h>
13519+#include <asm/alternative-asm.h>
13520
13521 #define CAMELLIA_TABLE_BYTE_LEN 272
13522
13523@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13524 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13525 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13526 %rcx, (%r9));
13527+ pax_force_retaddr
13528 ret;
13529 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13530
13531@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13532 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13533 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13534 %rax, (%r9));
13535+ pax_force_retaddr
13536 ret;
13537 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13538
13539@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13540 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13541 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13542
13543+ pax_force_retaddr
13544 ret;
13545
13546 .align 8
13547@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13548 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13549 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13550
13551+ pax_force_retaddr
13552 ret;
13553
13554 .align 8
13555@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13556 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13557 %xmm8, %rsi);
13558
13559+ pax_force_retaddr
13560 ret;
13561 ENDPROC(camellia_ecb_enc_16way)
13562
13563@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13564 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13565 %xmm8, %rsi);
13566
13567+ pax_force_retaddr
13568 ret;
13569 ENDPROC(camellia_ecb_dec_16way)
13570
13571@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13572 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13573 %xmm8, %rsi);
13574
13575+ pax_force_retaddr
13576 ret;
13577 ENDPROC(camellia_cbc_dec_16way)
13578
13579@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13580 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13581 %xmm8, %rsi);
13582
13583+ pax_force_retaddr
13584 ret;
13585 ENDPROC(camellia_ctr_16way)
13586
13587@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13588 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13589 %xmm8, %rsi);
13590
13591+ pax_force_retaddr
13592 ret;
13593 ENDPROC(camellia_xts_crypt_16way)
13594
13595diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13596index 0e0b886..5a3123c 100644
13597--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13598+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13599@@ -11,6 +11,7 @@
13600 */
13601
13602 #include <linux/linkage.h>
13603+#include <asm/alternative-asm.h>
13604
13605 #define CAMELLIA_TABLE_BYTE_LEN 272
13606
13607@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13608 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13609 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13610 %rcx, (%r9));
13611+ pax_force_retaddr
13612 ret;
13613 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13614
13615@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13616 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13617 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13618 %rax, (%r9));
13619+ pax_force_retaddr
13620 ret;
13621 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13622
13623@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13624 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13625 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13626
13627+ pax_force_retaddr
13628 ret;
13629
13630 .align 8
13631@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13632 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13633 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13634
13635+ pax_force_retaddr
13636 ret;
13637
13638 .align 8
13639@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13640
13641 vzeroupper;
13642
13643+ pax_force_retaddr
13644 ret;
13645 ENDPROC(camellia_ecb_enc_32way)
13646
13647@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13648
13649 vzeroupper;
13650
13651+ pax_force_retaddr
13652 ret;
13653 ENDPROC(camellia_ecb_dec_32way)
13654
13655@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13656
13657 vzeroupper;
13658
13659+ pax_force_retaddr
13660 ret;
13661 ENDPROC(camellia_cbc_dec_32way)
13662
13663@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13664
13665 vzeroupper;
13666
13667+ pax_force_retaddr
13668 ret;
13669 ENDPROC(camellia_ctr_32way)
13670
13671@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13672
13673 vzeroupper;
13674
13675+ pax_force_retaddr
13676 ret;
13677 ENDPROC(camellia_xts_crypt_32way)
13678
13679diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13680index 310319c..db3d7b5 100644
13681--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13682+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13683@@ -21,6 +21,7 @@
13684 */
13685
13686 #include <linux/linkage.h>
13687+#include <asm/alternative-asm.h>
13688
13689 .file "camellia-x86_64-asm_64.S"
13690 .text
13691@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13692 enc_outunpack(mov, RT1);
13693
13694 movq RRBP, %rbp;
13695+ pax_force_retaddr
13696 ret;
13697
13698 .L__enc_xor:
13699 enc_outunpack(xor, RT1);
13700
13701 movq RRBP, %rbp;
13702+ pax_force_retaddr
13703 ret;
13704 ENDPROC(__camellia_enc_blk)
13705
13706@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13707 dec_outunpack();
13708
13709 movq RRBP, %rbp;
13710+ pax_force_retaddr
13711 ret;
13712 ENDPROC(camellia_dec_blk)
13713
13714@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13715
13716 movq RRBP, %rbp;
13717 popq %rbx;
13718+ pax_force_retaddr
13719 ret;
13720
13721 .L__enc2_xor:
13722@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13723
13724 movq RRBP, %rbp;
13725 popq %rbx;
13726+ pax_force_retaddr
13727 ret;
13728 ENDPROC(__camellia_enc_blk_2way)
13729
13730@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13731
13732 movq RRBP, %rbp;
13733 movq RXOR, %rbx;
13734+ pax_force_retaddr
13735 ret;
13736 ENDPROC(camellia_dec_blk_2way)
13737diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13738index c35fd5d..2d8c7db 100644
13739--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13740+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13741@@ -24,6 +24,7 @@
13742 */
13743
13744 #include <linux/linkage.h>
13745+#include <asm/alternative-asm.h>
13746
13747 .file "cast5-avx-x86_64-asm_64.S"
13748
13749@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13750 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13751 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13752
13753+ pax_force_retaddr
13754 ret;
13755 ENDPROC(__cast5_enc_blk16)
13756
13757@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13758 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13759 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13760
13761+ pax_force_retaddr
13762 ret;
13763
13764 .L__skip_dec:
13765@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13766 vmovdqu RR4, (6*4*4)(%r11);
13767 vmovdqu RL4, (7*4*4)(%r11);
13768
13769+ pax_force_retaddr
13770 ret;
13771 ENDPROC(cast5_ecb_enc_16way)
13772
13773@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13774 vmovdqu RR4, (6*4*4)(%r11);
13775 vmovdqu RL4, (7*4*4)(%r11);
13776
13777+ pax_force_retaddr
13778 ret;
13779 ENDPROC(cast5_ecb_dec_16way)
13780
13781@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13782 * %rdx: src
13783 */
13784
13785- pushq %r12;
13786+ pushq %r14;
13787
13788 movq %rsi, %r11;
13789- movq %rdx, %r12;
13790+ movq %rdx, %r14;
13791
13792 vmovdqu (0*16)(%rdx), RL1;
13793 vmovdqu (1*16)(%rdx), RR1;
13794@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13795 call __cast5_dec_blk16;
13796
13797 /* xor with src */
13798- vmovq (%r12), RX;
13799+ vmovq (%r14), RX;
13800 vpshufd $0x4f, RX, RX;
13801 vpxor RX, RR1, RR1;
13802- vpxor 0*16+8(%r12), RL1, RL1;
13803- vpxor 1*16+8(%r12), RR2, RR2;
13804- vpxor 2*16+8(%r12), RL2, RL2;
13805- vpxor 3*16+8(%r12), RR3, RR3;
13806- vpxor 4*16+8(%r12), RL3, RL3;
13807- vpxor 5*16+8(%r12), RR4, RR4;
13808- vpxor 6*16+8(%r12), RL4, RL4;
13809+ vpxor 0*16+8(%r14), RL1, RL1;
13810+ vpxor 1*16+8(%r14), RR2, RR2;
13811+ vpxor 2*16+8(%r14), RL2, RL2;
13812+ vpxor 3*16+8(%r14), RR3, RR3;
13813+ vpxor 4*16+8(%r14), RL3, RL3;
13814+ vpxor 5*16+8(%r14), RR4, RR4;
13815+ vpxor 6*16+8(%r14), RL4, RL4;
13816
13817 vmovdqu RR1, (0*16)(%r11);
13818 vmovdqu RL1, (1*16)(%r11);
13819@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13820 vmovdqu RR4, (6*16)(%r11);
13821 vmovdqu RL4, (7*16)(%r11);
13822
13823- popq %r12;
13824+ popq %r14;
13825
13826+ pax_force_retaddr
13827 ret;
13828 ENDPROC(cast5_cbc_dec_16way)
13829
13830@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13831 * %rcx: iv (big endian, 64bit)
13832 */
13833
13834- pushq %r12;
13835+ pushq %r14;
13836
13837 movq %rsi, %r11;
13838- movq %rdx, %r12;
13839+ movq %rdx, %r14;
13840
13841 vpcmpeqd RTMP, RTMP, RTMP;
13842 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13843@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13844 call __cast5_enc_blk16;
13845
13846 /* dst = src ^ iv */
13847- vpxor (0*16)(%r12), RR1, RR1;
13848- vpxor (1*16)(%r12), RL1, RL1;
13849- vpxor (2*16)(%r12), RR2, RR2;
13850- vpxor (3*16)(%r12), RL2, RL2;
13851- vpxor (4*16)(%r12), RR3, RR3;
13852- vpxor (5*16)(%r12), RL3, RL3;
13853- vpxor (6*16)(%r12), RR4, RR4;
13854- vpxor (7*16)(%r12), RL4, RL4;
13855+ vpxor (0*16)(%r14), RR1, RR1;
13856+ vpxor (1*16)(%r14), RL1, RL1;
13857+ vpxor (2*16)(%r14), RR2, RR2;
13858+ vpxor (3*16)(%r14), RL2, RL2;
13859+ vpxor (4*16)(%r14), RR3, RR3;
13860+ vpxor (5*16)(%r14), RL3, RL3;
13861+ vpxor (6*16)(%r14), RR4, RR4;
13862+ vpxor (7*16)(%r14), RL4, RL4;
13863 vmovdqu RR1, (0*16)(%r11);
13864 vmovdqu RL1, (1*16)(%r11);
13865 vmovdqu RR2, (2*16)(%r11);
13866@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13867 vmovdqu RR4, (6*16)(%r11);
13868 vmovdqu RL4, (7*16)(%r11);
13869
13870- popq %r12;
13871+ popq %r14;
13872
13873+ pax_force_retaddr
13874 ret;
13875 ENDPROC(cast5_ctr_16way)
13876diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13877index e3531f8..e123f35 100644
13878--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13879+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13880@@ -24,6 +24,7 @@
13881 */
13882
13883 #include <linux/linkage.h>
13884+#include <asm/alternative-asm.h>
13885 #include "glue_helper-asm-avx.S"
13886
13887 .file "cast6-avx-x86_64-asm_64.S"
13888@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13889 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13890 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13891
13892+ pax_force_retaddr
13893 ret;
13894 ENDPROC(__cast6_enc_blk8)
13895
13896@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13897 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13898 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13899
13900+ pax_force_retaddr
13901 ret;
13902 ENDPROC(__cast6_dec_blk8)
13903
13904@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13905
13906 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13907
13908+ pax_force_retaddr
13909 ret;
13910 ENDPROC(cast6_ecb_enc_8way)
13911
13912@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13913
13914 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13915
13916+ pax_force_retaddr
13917 ret;
13918 ENDPROC(cast6_ecb_dec_8way)
13919
13920@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13921 * %rdx: src
13922 */
13923
13924- pushq %r12;
13925+ pushq %r14;
13926
13927 movq %rsi, %r11;
13928- movq %rdx, %r12;
13929+ movq %rdx, %r14;
13930
13931 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13932
13933 call __cast6_dec_blk8;
13934
13935- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13936+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13937
13938- popq %r12;
13939+ popq %r14;
13940
13941+ pax_force_retaddr
13942 ret;
13943 ENDPROC(cast6_cbc_dec_8way)
13944
13945@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13946 * %rcx: iv (little endian, 128bit)
13947 */
13948
13949- pushq %r12;
13950+ pushq %r14;
13951
13952 movq %rsi, %r11;
13953- movq %rdx, %r12;
13954+ movq %rdx, %r14;
13955
13956 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13957 RD2, RX, RKR, RKM);
13958
13959 call __cast6_enc_blk8;
13960
13961- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13962+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13963
13964- popq %r12;
13965+ popq %r14;
13966
13967+ pax_force_retaddr
13968 ret;
13969 ENDPROC(cast6_ctr_8way)
13970
13971@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13972 /* dst <= regs xor IVs(in dst) */
13973 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13974
13975+ pax_force_retaddr
13976 ret;
13977 ENDPROC(cast6_xts_enc_8way)
13978
13979@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13980 /* dst <= regs xor IVs(in dst) */
13981 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13982
13983+ pax_force_retaddr
13984 ret;
13985 ENDPROC(cast6_xts_dec_8way)
13986diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13987index 26d49eb..c0a8c84 100644
13988--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13989+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13990@@ -45,6 +45,7 @@
13991
13992 #include <asm/inst.h>
13993 #include <linux/linkage.h>
13994+#include <asm/alternative-asm.h>
13995
13996 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13997
13998@@ -309,6 +310,7 @@ do_return:
13999 popq %rsi
14000 popq %rdi
14001 popq %rbx
14002+ pax_force_retaddr
14003 ret
14004
14005 ################################################################
14006diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14007index 5d1e007..098cb4f 100644
14008--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14009+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14010@@ -18,6 +18,7 @@
14011
14012 #include <linux/linkage.h>
14013 #include <asm/inst.h>
14014+#include <asm/alternative-asm.h>
14015
14016 .data
14017
14018@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14019 psrlq $1, T2
14020 pxor T2, T1
14021 pxor T1, DATA
14022+ pax_force_retaddr
14023 ret
14024 ENDPROC(__clmul_gf128mul_ble)
14025
14026@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14027 call __clmul_gf128mul_ble
14028 PSHUFB_XMM BSWAP DATA
14029 movups DATA, (%rdi)
14030+ pax_force_retaddr
14031 ret
14032 ENDPROC(clmul_ghash_mul)
14033
14034@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14035 PSHUFB_XMM BSWAP DATA
14036 movups DATA, (%rdi)
14037 .Lupdate_just_ret:
14038+ pax_force_retaddr
14039 ret
14040 ENDPROC(clmul_ghash_update)
14041diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14042index 9279e0b..c4b3d2c 100644
14043--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14044+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14045@@ -1,4 +1,5 @@
14046 #include <linux/linkage.h>
14047+#include <asm/alternative-asm.h>
14048
14049 # enter salsa20_encrypt_bytes
14050 ENTRY(salsa20_encrypt_bytes)
14051@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14052 add %r11,%rsp
14053 mov %rdi,%rax
14054 mov %rsi,%rdx
14055+ pax_force_retaddr
14056 ret
14057 # bytesatleast65:
14058 ._bytesatleast65:
14059@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14060 add %r11,%rsp
14061 mov %rdi,%rax
14062 mov %rsi,%rdx
14063+ pax_force_retaddr
14064 ret
14065 ENDPROC(salsa20_keysetup)
14066
14067@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14068 add %r11,%rsp
14069 mov %rdi,%rax
14070 mov %rsi,%rdx
14071+ pax_force_retaddr
14072 ret
14073 ENDPROC(salsa20_ivsetup)
14074diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14075index 2f202f4..d9164d6 100644
14076--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14077+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14078@@ -24,6 +24,7 @@
14079 */
14080
14081 #include <linux/linkage.h>
14082+#include <asm/alternative-asm.h>
14083 #include "glue_helper-asm-avx.S"
14084
14085 .file "serpent-avx-x86_64-asm_64.S"
14086@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14087 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14088 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14089
14090+ pax_force_retaddr
14091 ret;
14092 ENDPROC(__serpent_enc_blk8_avx)
14093
14094@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14095 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14096 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14097
14098+ pax_force_retaddr
14099 ret;
14100 ENDPROC(__serpent_dec_blk8_avx)
14101
14102@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14103
14104 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14105
14106+ pax_force_retaddr
14107 ret;
14108 ENDPROC(serpent_ecb_enc_8way_avx)
14109
14110@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14111
14112 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14113
14114+ pax_force_retaddr
14115 ret;
14116 ENDPROC(serpent_ecb_dec_8way_avx)
14117
14118@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14119
14120 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14121
14122+ pax_force_retaddr
14123 ret;
14124 ENDPROC(serpent_cbc_dec_8way_avx)
14125
14126@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14127
14128 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14129
14130+ pax_force_retaddr
14131 ret;
14132 ENDPROC(serpent_ctr_8way_avx)
14133
14134@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14135 /* dst <= regs xor IVs(in dst) */
14136 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14137
14138+ pax_force_retaddr
14139 ret;
14140 ENDPROC(serpent_xts_enc_8way_avx)
14141
14142@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14143 /* dst <= regs xor IVs(in dst) */
14144 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14145
14146+ pax_force_retaddr
14147 ret;
14148 ENDPROC(serpent_xts_dec_8way_avx)
14149diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14150index b222085..abd483c 100644
14151--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14152+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14153@@ -15,6 +15,7 @@
14154 */
14155
14156 #include <linux/linkage.h>
14157+#include <asm/alternative-asm.h>
14158 #include "glue_helper-asm-avx2.S"
14159
14160 .file "serpent-avx2-asm_64.S"
14161@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14162 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14163 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14164
14165+ pax_force_retaddr
14166 ret;
14167 ENDPROC(__serpent_enc_blk16)
14168
14169@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14170 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14171 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14172
14173+ pax_force_retaddr
14174 ret;
14175 ENDPROC(__serpent_dec_blk16)
14176
14177@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14178
14179 vzeroupper;
14180
14181+ pax_force_retaddr
14182 ret;
14183 ENDPROC(serpent_ecb_enc_16way)
14184
14185@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14186
14187 vzeroupper;
14188
14189+ pax_force_retaddr
14190 ret;
14191 ENDPROC(serpent_ecb_dec_16way)
14192
14193@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14194
14195 vzeroupper;
14196
14197+ pax_force_retaddr
14198 ret;
14199 ENDPROC(serpent_cbc_dec_16way)
14200
14201@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14202
14203 vzeroupper;
14204
14205+ pax_force_retaddr
14206 ret;
14207 ENDPROC(serpent_ctr_16way)
14208
14209@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14210
14211 vzeroupper;
14212
14213+ pax_force_retaddr
14214 ret;
14215 ENDPROC(serpent_xts_enc_16way)
14216
14217@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14218
14219 vzeroupper;
14220
14221+ pax_force_retaddr
14222 ret;
14223 ENDPROC(serpent_xts_dec_16way)
14224diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14225index acc066c..1559cc4 100644
14226--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14227+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14228@@ -25,6 +25,7 @@
14229 */
14230
14231 #include <linux/linkage.h>
14232+#include <asm/alternative-asm.h>
14233
14234 .file "serpent-sse2-x86_64-asm_64.S"
14235 .text
14236@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14237 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14238 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14239
14240+ pax_force_retaddr
14241 ret;
14242
14243 .L__enc_xor8:
14244 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14245 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14246
14247+ pax_force_retaddr
14248 ret;
14249 ENDPROC(__serpent_enc_blk_8way)
14250
14251@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14252 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14253 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14254
14255+ pax_force_retaddr
14256 ret;
14257 ENDPROC(serpent_dec_blk_8way)
14258diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14259index a410950..9dfe7ad 100644
14260--- a/arch/x86/crypto/sha1_ssse3_asm.S
14261+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14262@@ -29,6 +29,7 @@
14263 */
14264
14265 #include <linux/linkage.h>
14266+#include <asm/alternative-asm.h>
14267
14268 #define CTX %rdi // arg1
14269 #define BUF %rsi // arg2
14270@@ -75,9 +76,9 @@
14271
14272 push %rbx
14273 push %rbp
14274- push %r12
14275+ push %r14
14276
14277- mov %rsp, %r12
14278+ mov %rsp, %r14
14279 sub $64, %rsp # allocate workspace
14280 and $~15, %rsp # align stack
14281
14282@@ -99,11 +100,12 @@
14283 xor %rax, %rax
14284 rep stosq
14285
14286- mov %r12, %rsp # deallocate workspace
14287+ mov %r14, %rsp # deallocate workspace
14288
14289- pop %r12
14290+ pop %r14
14291 pop %rbp
14292 pop %rbx
14293+ pax_force_retaddr
14294 ret
14295
14296 ENDPROC(\name)
14297diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14298index 642f156..51a513c 100644
14299--- a/arch/x86/crypto/sha256-avx-asm.S
14300+++ b/arch/x86/crypto/sha256-avx-asm.S
14301@@ -49,6 +49,7 @@
14302
14303 #ifdef CONFIG_AS_AVX
14304 #include <linux/linkage.h>
14305+#include <asm/alternative-asm.h>
14306
14307 ## assume buffers not aligned
14308 #define VMOVDQ vmovdqu
14309@@ -460,6 +461,7 @@ done_hash:
14310 popq %r13
14311 popq %rbp
14312 popq %rbx
14313+ pax_force_retaddr
14314 ret
14315 ENDPROC(sha256_transform_avx)
14316
14317diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14318index 9e86944..3795e6a 100644
14319--- a/arch/x86/crypto/sha256-avx2-asm.S
14320+++ b/arch/x86/crypto/sha256-avx2-asm.S
14321@@ -50,6 +50,7 @@
14322
14323 #ifdef CONFIG_AS_AVX2
14324 #include <linux/linkage.h>
14325+#include <asm/alternative-asm.h>
14326
14327 ## assume buffers not aligned
14328 #define VMOVDQ vmovdqu
14329@@ -720,6 +721,7 @@ done_hash:
14330 popq %r12
14331 popq %rbp
14332 popq %rbx
14333+ pax_force_retaddr
14334 ret
14335 ENDPROC(sha256_transform_rorx)
14336
14337diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14338index f833b74..8c62a9e 100644
14339--- a/arch/x86/crypto/sha256-ssse3-asm.S
14340+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14341@@ -47,6 +47,7 @@
14342 ########################################################################
14343
14344 #include <linux/linkage.h>
14345+#include <asm/alternative-asm.h>
14346
14347 ## assume buffers not aligned
14348 #define MOVDQ movdqu
14349@@ -471,6 +472,7 @@ done_hash:
14350 popq %rbp
14351 popq %rbx
14352
14353+ pax_force_retaddr
14354 ret
14355 ENDPROC(sha256_transform_ssse3)
14356
14357diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14358index 974dde9..a823ff9 100644
14359--- a/arch/x86/crypto/sha512-avx-asm.S
14360+++ b/arch/x86/crypto/sha512-avx-asm.S
14361@@ -49,6 +49,7 @@
14362
14363 #ifdef CONFIG_AS_AVX
14364 #include <linux/linkage.h>
14365+#include <asm/alternative-asm.h>
14366
14367 .text
14368
14369@@ -364,6 +365,7 @@ updateblock:
14370 mov frame_RSPSAVE(%rsp), %rsp
14371
14372 nowork:
14373+ pax_force_retaddr
14374 ret
14375 ENDPROC(sha512_transform_avx)
14376
14377diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14378index 568b961..ed20c37 100644
14379--- a/arch/x86/crypto/sha512-avx2-asm.S
14380+++ b/arch/x86/crypto/sha512-avx2-asm.S
14381@@ -51,6 +51,7 @@
14382
14383 #ifdef CONFIG_AS_AVX2
14384 #include <linux/linkage.h>
14385+#include <asm/alternative-asm.h>
14386
14387 .text
14388
14389@@ -678,6 +679,7 @@ done_hash:
14390
14391 # Restore Stack Pointer
14392 mov frame_RSPSAVE(%rsp), %rsp
14393+ pax_force_retaddr
14394 ret
14395 ENDPROC(sha512_transform_rorx)
14396
14397diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14398index fb56855..6edd768 100644
14399--- a/arch/x86/crypto/sha512-ssse3-asm.S
14400+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14401@@ -48,6 +48,7 @@
14402 ########################################################################
14403
14404 #include <linux/linkage.h>
14405+#include <asm/alternative-asm.h>
14406
14407 .text
14408
14409@@ -363,6 +364,7 @@ updateblock:
14410 mov frame_RSPSAVE(%rsp), %rsp
14411
14412 nowork:
14413+ pax_force_retaddr
14414 ret
14415 ENDPROC(sha512_transform_ssse3)
14416
14417diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14418index 0505813..b067311 100644
14419--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14420+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14421@@ -24,6 +24,7 @@
14422 */
14423
14424 #include <linux/linkage.h>
14425+#include <asm/alternative-asm.h>
14426 #include "glue_helper-asm-avx.S"
14427
14428 .file "twofish-avx-x86_64-asm_64.S"
14429@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14430 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14431 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14432
14433+ pax_force_retaddr
14434 ret;
14435 ENDPROC(__twofish_enc_blk8)
14436
14437@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14438 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14439 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14440
14441+ pax_force_retaddr
14442 ret;
14443 ENDPROC(__twofish_dec_blk8)
14444
14445@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14446
14447 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14448
14449+ pax_force_retaddr
14450 ret;
14451 ENDPROC(twofish_ecb_enc_8way)
14452
14453@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14454
14455 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14456
14457+ pax_force_retaddr
14458 ret;
14459 ENDPROC(twofish_ecb_dec_8way)
14460
14461@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14462 * %rdx: src
14463 */
14464
14465- pushq %r12;
14466+ pushq %r14;
14467
14468 movq %rsi, %r11;
14469- movq %rdx, %r12;
14470+ movq %rdx, %r14;
14471
14472 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14473
14474 call __twofish_dec_blk8;
14475
14476- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14477+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14478
14479- popq %r12;
14480+ popq %r14;
14481
14482+ pax_force_retaddr
14483 ret;
14484 ENDPROC(twofish_cbc_dec_8way)
14485
14486@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14487 * %rcx: iv (little endian, 128bit)
14488 */
14489
14490- pushq %r12;
14491+ pushq %r14;
14492
14493 movq %rsi, %r11;
14494- movq %rdx, %r12;
14495+ movq %rdx, %r14;
14496
14497 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14498 RD2, RX0, RX1, RY0);
14499
14500 call __twofish_enc_blk8;
14501
14502- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14503+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14504
14505- popq %r12;
14506+ popq %r14;
14507
14508+ pax_force_retaddr
14509 ret;
14510 ENDPROC(twofish_ctr_8way)
14511
14512@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14513 /* dst <= regs xor IVs(in dst) */
14514 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14515
14516+ pax_force_retaddr
14517 ret;
14518 ENDPROC(twofish_xts_enc_8way)
14519
14520@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14521 /* dst <= regs xor IVs(in dst) */
14522 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14523
14524+ pax_force_retaddr
14525 ret;
14526 ENDPROC(twofish_xts_dec_8way)
14527diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14528index 1c3b7ce..02f578d 100644
14529--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14530+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14531@@ -21,6 +21,7 @@
14532 */
14533
14534 #include <linux/linkage.h>
14535+#include <asm/alternative-asm.h>
14536
14537 .file "twofish-x86_64-asm-3way.S"
14538 .text
14539@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14540 popq %r13;
14541 popq %r14;
14542 popq %r15;
14543+ pax_force_retaddr
14544 ret;
14545
14546 .L__enc_xor3:
14547@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14548 popq %r13;
14549 popq %r14;
14550 popq %r15;
14551+ pax_force_retaddr
14552 ret;
14553 ENDPROC(__twofish_enc_blk_3way)
14554
14555@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14556 popq %r13;
14557 popq %r14;
14558 popq %r15;
14559+ pax_force_retaddr
14560 ret;
14561 ENDPROC(twofish_dec_blk_3way)
14562diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14563index a039d21..524b8b2 100644
14564--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14565+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14566@@ -22,6 +22,7 @@
14567
14568 #include <linux/linkage.h>
14569 #include <asm/asm-offsets.h>
14570+#include <asm/alternative-asm.h>
14571
14572 #define a_offset 0
14573 #define b_offset 4
14574@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14575
14576 popq R1
14577 movq $1,%rax
14578+ pax_force_retaddr
14579 ret
14580 ENDPROC(twofish_enc_blk)
14581
14582@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14583
14584 popq R1
14585 movq $1,%rax
14586+ pax_force_retaddr
14587 ret
14588 ENDPROC(twofish_dec_blk)
14589diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14590index ae6aad1..719d6d9 100644
14591--- a/arch/x86/ia32/ia32_aout.c
14592+++ b/arch/x86/ia32/ia32_aout.c
14593@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14594 unsigned long dump_start, dump_size;
14595 struct user32 dump;
14596
14597+ memset(&dump, 0, sizeof(dump));
14598+
14599 fs = get_fs();
14600 set_fs(KERNEL_DS);
14601 has_dumped = 1;
14602diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14603index f9e181a..300544c 100644
14604--- a/arch/x86/ia32/ia32_signal.c
14605+++ b/arch/x86/ia32/ia32_signal.c
14606@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14607 if (__get_user(set.sig[0], &frame->sc.oldmask)
14608 || (_COMPAT_NSIG_WORDS > 1
14609 && __copy_from_user((((char *) &set.sig) + 4),
14610- &frame->extramask,
14611+ frame->extramask,
14612 sizeof(frame->extramask))))
14613 goto badframe;
14614
14615@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14616 sp -= frame_size;
14617 /* Align the stack pointer according to the i386 ABI,
14618 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14619- sp = ((sp + 4) & -16ul) - 4;
14620+ sp = ((sp - 12) & -16ul) - 4;
14621 return (void __user *) sp;
14622 }
14623
14624@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14625 } else {
14626 /* Return stub is in 32bit vsyscall page */
14627 if (current->mm->context.vdso)
14628- restorer = current->mm->context.vdso +
14629- selected_vdso32->sym___kernel_sigreturn;
14630+ restorer = (void __force_user *)(current->mm->context.vdso +
14631+ selected_vdso32->sym___kernel_sigreturn);
14632 else
14633- restorer = &frame->retcode;
14634+ restorer = frame->retcode;
14635 }
14636
14637 put_user_try {
14638@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14639 * These are actually not used anymore, but left because some
14640 * gdb versions depend on them as a marker.
14641 */
14642- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14643+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14644 } put_user_catch(err);
14645
14646 if (err)
14647@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14648 0xb8,
14649 __NR_ia32_rt_sigreturn,
14650 0x80cd,
14651- 0,
14652+ 0
14653 };
14654
14655 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14656@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14657
14658 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14659 restorer = ksig->ka.sa.sa_restorer;
14660+ else if (current->mm->context.vdso)
14661+ /* Return stub is in 32bit vsyscall page */
14662+ restorer = (void __force_user *)(current->mm->context.vdso +
14663+ selected_vdso32->sym___kernel_rt_sigreturn);
14664 else
14665- restorer = current->mm->context.vdso +
14666- selected_vdso32->sym___kernel_rt_sigreturn;
14667+ restorer = frame->retcode;
14668 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14669
14670 /*
14671 * Not actually used anymore, but left because some gdb
14672 * versions need it.
14673 */
14674- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14675+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14676 } put_user_catch(err);
14677
14678 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14679diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14680index 82e8a1d..4e998d5 100644
14681--- a/arch/x86/ia32/ia32entry.S
14682+++ b/arch/x86/ia32/ia32entry.S
14683@@ -15,8 +15,10 @@
14684 #include <asm/irqflags.h>
14685 #include <asm/asm.h>
14686 #include <asm/smap.h>
14687+#include <asm/pgtable.h>
14688 #include <linux/linkage.h>
14689 #include <linux/err.h>
14690+#include <asm/alternative-asm.h>
14691
14692 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14693 #include <linux/elf-em.h>
14694@@ -62,12 +64,12 @@
14695 */
14696 .macro LOAD_ARGS32 offset, _r9=0
14697 .if \_r9
14698- movl \offset+16(%rsp),%r9d
14699+ movl \offset+R9(%rsp),%r9d
14700 .endif
14701- movl \offset+40(%rsp),%ecx
14702- movl \offset+48(%rsp),%edx
14703- movl \offset+56(%rsp),%esi
14704- movl \offset+64(%rsp),%edi
14705+ movl \offset+RCX(%rsp),%ecx
14706+ movl \offset+RDX(%rsp),%edx
14707+ movl \offset+RSI(%rsp),%esi
14708+ movl \offset+RDI(%rsp),%edi
14709 movl %eax,%eax /* zero extension */
14710 .endm
14711
14712@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14713 ENDPROC(native_irq_enable_sysexit)
14714 #endif
14715
14716+ .macro pax_enter_kernel_user
14717+ pax_set_fptr_mask
14718+#ifdef CONFIG_PAX_MEMORY_UDEREF
14719+ call pax_enter_kernel_user
14720+#endif
14721+ .endm
14722+
14723+ .macro pax_exit_kernel_user
14724+#ifdef CONFIG_PAX_MEMORY_UDEREF
14725+ call pax_exit_kernel_user
14726+#endif
14727+#ifdef CONFIG_PAX_RANDKSTACK
14728+ pushq %rax
14729+ pushq %r11
14730+ call pax_randomize_kstack
14731+ popq %r11
14732+ popq %rax
14733+#endif
14734+ .endm
14735+
14736+ .macro pax_erase_kstack
14737+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14738+ call pax_erase_kstack
14739+#endif
14740+ .endm
14741+
14742 /*
14743 * 32bit SYSENTER instruction entry.
14744 *
14745@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14746 CFI_REGISTER rsp,rbp
14747 SWAPGS_UNSAFE_STACK
14748 movq PER_CPU_VAR(kernel_stack), %rsp
14749- addq $(KERNEL_STACK_OFFSET),%rsp
14750- /*
14751- * No need to follow this irqs on/off section: the syscall
14752- * disabled irqs, here we enable it straight after entry:
14753- */
14754- ENABLE_INTERRUPTS(CLBR_NONE)
14755 movl %ebp,%ebp /* zero extension */
14756 pushq_cfi $__USER32_DS
14757 /*CFI_REL_OFFSET ss,0*/
14758@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14759 CFI_REL_OFFSET rsp,0
14760 pushfq_cfi
14761 /*CFI_REL_OFFSET rflags,0*/
14762- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14763- CFI_REGISTER rip,r10
14764+ orl $X86_EFLAGS_IF,(%rsp)
14765+ GET_THREAD_INFO(%r11)
14766+ movl TI_sysenter_return(%r11), %r11d
14767+ CFI_REGISTER rip,r11
14768 pushq_cfi $__USER32_CS
14769 /*CFI_REL_OFFSET cs,0*/
14770 movl %eax, %eax
14771- pushq_cfi %r10
14772+ pushq_cfi %r11
14773 CFI_REL_OFFSET rip,0
14774 pushq_cfi %rax
14775 cld
14776 SAVE_ARGS 0,1,0
14777+ pax_enter_kernel_user
14778+
14779+#ifdef CONFIG_PAX_RANDKSTACK
14780+ pax_erase_kstack
14781+#endif
14782+
14783+ /*
14784+ * No need to follow this irqs on/off section: the syscall
14785+ * disabled irqs, here we enable it straight after entry:
14786+ */
14787+ ENABLE_INTERRUPTS(CLBR_NONE)
14788 /* no need to do an access_ok check here because rbp has been
14789 32bit zero extended */
14790+
14791+#ifdef CONFIG_PAX_MEMORY_UDEREF
14792+ addq pax_user_shadow_base,%rbp
14793+ ASM_PAX_OPEN_USERLAND
14794+#endif
14795+
14796 ASM_STAC
14797 1: movl (%rbp),%ebp
14798 _ASM_EXTABLE(1b,ia32_badarg)
14799 ASM_CLAC
14800
14801+#ifdef CONFIG_PAX_MEMORY_UDEREF
14802+ ASM_PAX_CLOSE_USERLAND
14803+#endif
14804+
14805 /*
14806 * Sysenter doesn't filter flags, so we need to clear NT
14807 * ourselves. To save a few cycles, we can check whether
14808@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14809 jnz sysenter_fix_flags
14810 sysenter_flags_fixed:
14811
14812- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14813- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14814+ GET_THREAD_INFO(%r11)
14815+ orl $TS_COMPAT,TI_status(%r11)
14816+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14817 CFI_REMEMBER_STATE
14818 jnz sysenter_tracesys
14819 cmpq $(IA32_NR_syscalls-1),%rax
14820@@ -172,15 +218,18 @@ sysenter_do_call:
14821 sysenter_dispatch:
14822 call *ia32_sys_call_table(,%rax,8)
14823 movq %rax,RAX-ARGOFFSET(%rsp)
14824+ GET_THREAD_INFO(%r11)
14825 DISABLE_INTERRUPTS(CLBR_NONE)
14826 TRACE_IRQS_OFF
14827- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14828+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14829 jnz sysexit_audit
14830 sysexit_from_sys_call:
14831- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14832+ pax_exit_kernel_user
14833+ pax_erase_kstack
14834+ andl $~TS_COMPAT,TI_status(%r11)
14835 /* clear IF, that popfq doesn't enable interrupts early */
14836- andl $~0x200,EFLAGS-R11(%rsp)
14837- movl RIP-R11(%rsp),%edx /* User %eip */
14838+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14839+ movl RIP(%rsp),%edx /* User %eip */
14840 CFI_REGISTER rip,rdx
14841 RESTORE_ARGS 0,24,0,0,0,0
14842 xorq %r8,%r8
14843@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14844 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14845 movl %eax,%edi /* 1st arg: syscall number */
14846 call __audit_syscall_entry
14847+
14848+ pax_erase_kstack
14849+
14850 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14851 cmpq $(IA32_NR_syscalls-1),%rax
14852 ja ia32_badsys
14853@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14854 .endm
14855
14856 .macro auditsys_exit exit
14857- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14858+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14859 jnz ia32_ret_from_sys_call
14860 TRACE_IRQS_ON
14861 ENABLE_INTERRUPTS(CLBR_NONE)
14862@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14863 1: setbe %al /* 1 if error, 0 if not */
14864 movzbl %al,%edi /* zero-extend that into %edi */
14865 call __audit_syscall_exit
14866+ GET_THREAD_INFO(%r11)
14867 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14868 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14869 DISABLE_INTERRUPTS(CLBR_NONE)
14870 TRACE_IRQS_OFF
14871- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14872+ testl %edi,TI_flags(%r11)
14873 jz \exit
14874 CLEAR_RREGS -ARGOFFSET
14875 jmp int_with_check
14876@@ -253,7 +306,7 @@ sysenter_fix_flags:
14877
14878 sysenter_tracesys:
14879 #ifdef CONFIG_AUDITSYSCALL
14880- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14881+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14882 jz sysenter_auditsys
14883 #endif
14884 SAVE_REST
14885@@ -265,6 +318,9 @@ sysenter_tracesys:
14886 RESTORE_REST
14887 cmpq $(IA32_NR_syscalls-1),%rax
14888 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14889+
14890+ pax_erase_kstack
14891+
14892 jmp sysenter_do_call
14893 CFI_ENDPROC
14894 ENDPROC(ia32_sysenter_target)
14895@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14896 ENTRY(ia32_cstar_target)
14897 CFI_STARTPROC32 simple
14898 CFI_SIGNAL_FRAME
14899- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14900+ CFI_DEF_CFA rsp,0
14901 CFI_REGISTER rip,rcx
14902 /*CFI_REGISTER rflags,r11*/
14903 SWAPGS_UNSAFE_STACK
14904 movl %esp,%r8d
14905 CFI_REGISTER rsp,r8
14906 movq PER_CPU_VAR(kernel_stack),%rsp
14907+ SAVE_ARGS 8*6,0,0
14908+ pax_enter_kernel_user
14909+
14910+#ifdef CONFIG_PAX_RANDKSTACK
14911+ pax_erase_kstack
14912+#endif
14913+
14914 /*
14915 * No need to follow this irqs on/off section: the syscall
14916 * disabled irqs and here we enable it straight after entry:
14917 */
14918 ENABLE_INTERRUPTS(CLBR_NONE)
14919- SAVE_ARGS 8,0,0
14920 movl %eax,%eax /* zero extension */
14921 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14922 movq %rcx,RIP-ARGOFFSET(%rsp)
14923@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14924 /* no need to do an access_ok check here because r8 has been
14925 32bit zero extended */
14926 /* hardware stack frame is complete now */
14927+
14928+#ifdef CONFIG_PAX_MEMORY_UDEREF
14929+ ASM_PAX_OPEN_USERLAND
14930+ movq pax_user_shadow_base,%r8
14931+ addq RSP-ARGOFFSET(%rsp),%r8
14932+#endif
14933+
14934 ASM_STAC
14935 1: movl (%r8),%r9d
14936 _ASM_EXTABLE(1b,ia32_badarg)
14937 ASM_CLAC
14938- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14939- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14940+
14941+#ifdef CONFIG_PAX_MEMORY_UDEREF
14942+ ASM_PAX_CLOSE_USERLAND
14943+#endif
14944+
14945+ GET_THREAD_INFO(%r11)
14946+ orl $TS_COMPAT,TI_status(%r11)
14947+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14948 CFI_REMEMBER_STATE
14949 jnz cstar_tracesys
14950 cmpq $IA32_NR_syscalls-1,%rax
14951@@ -335,13 +410,16 @@ cstar_do_call:
14952 cstar_dispatch:
14953 call *ia32_sys_call_table(,%rax,8)
14954 movq %rax,RAX-ARGOFFSET(%rsp)
14955+ GET_THREAD_INFO(%r11)
14956 DISABLE_INTERRUPTS(CLBR_NONE)
14957 TRACE_IRQS_OFF
14958- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14959+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14960 jnz sysretl_audit
14961 sysretl_from_sys_call:
14962- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14963- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14964+ pax_exit_kernel_user
14965+ pax_erase_kstack
14966+ andl $~TS_COMPAT,TI_status(%r11)
14967+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14968 movl RIP-ARGOFFSET(%rsp),%ecx
14969 CFI_REGISTER rip,rcx
14970 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14971@@ -368,7 +446,7 @@ sysretl_audit:
14972
14973 cstar_tracesys:
14974 #ifdef CONFIG_AUDITSYSCALL
14975- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14976+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14977 jz cstar_auditsys
14978 #endif
14979 xchgl %r9d,%ebp
14980@@ -382,11 +460,19 @@ cstar_tracesys:
14981 xchgl %ebp,%r9d
14982 cmpq $(IA32_NR_syscalls-1),%rax
14983 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14984+
14985+ pax_erase_kstack
14986+
14987 jmp cstar_do_call
14988 END(ia32_cstar_target)
14989
14990 ia32_badarg:
14991 ASM_CLAC
14992+
14993+#ifdef CONFIG_PAX_MEMORY_UDEREF
14994+ ASM_PAX_CLOSE_USERLAND
14995+#endif
14996+
14997 movq $-EFAULT,%rax
14998 jmp ia32_sysret
14999 CFI_ENDPROC
15000@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
15001 CFI_REL_OFFSET rip,RIP-RIP
15002 PARAVIRT_ADJUST_EXCEPTION_FRAME
15003 SWAPGS
15004- /*
15005- * No need to follow this irqs on/off section: the syscall
15006- * disabled irqs and here we enable it straight after entry:
15007- */
15008- ENABLE_INTERRUPTS(CLBR_NONE)
15009 movl %eax,%eax
15010 pushq_cfi %rax
15011 cld
15012 /* note the registers are not zero extended to the sf.
15013 this could be a problem. */
15014 SAVE_ARGS 0,1,0
15015- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15016- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15017+ pax_enter_kernel_user
15018+
15019+#ifdef CONFIG_PAX_RANDKSTACK
15020+ pax_erase_kstack
15021+#endif
15022+
15023+ /*
15024+ * No need to follow this irqs on/off section: the syscall
15025+ * disabled irqs and here we enable it straight after entry:
15026+ */
15027+ ENABLE_INTERRUPTS(CLBR_NONE)
15028+ GET_THREAD_INFO(%r11)
15029+ orl $TS_COMPAT,TI_status(%r11)
15030+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15031 jnz ia32_tracesys
15032 cmpq $(IA32_NR_syscalls-1),%rax
15033 ja ia32_badsys
15034@@ -458,6 +551,9 @@ ia32_tracesys:
15035 RESTORE_REST
15036 cmpq $(IA32_NR_syscalls-1),%rax
15037 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15038+
15039+ pax_erase_kstack
15040+
15041 jmp ia32_do_call
15042 END(ia32_syscall)
15043
15044diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15045index 8e0ceec..af13504 100644
15046--- a/arch/x86/ia32/sys_ia32.c
15047+++ b/arch/x86/ia32/sys_ia32.c
15048@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15049 */
15050 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15051 {
15052- typeof(ubuf->st_uid) uid = 0;
15053- typeof(ubuf->st_gid) gid = 0;
15054+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15055+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15056 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15057 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15058 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15059diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15060index 372231c..51b537d 100644
15061--- a/arch/x86/include/asm/alternative-asm.h
15062+++ b/arch/x86/include/asm/alternative-asm.h
15063@@ -18,6 +18,45 @@
15064 .endm
15065 #endif
15066
15067+#ifdef KERNEXEC_PLUGIN
15068+ .macro pax_force_retaddr_bts rip=0
15069+ btsq $63,\rip(%rsp)
15070+ .endm
15071+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15072+ .macro pax_force_retaddr rip=0, reload=0
15073+ btsq $63,\rip(%rsp)
15074+ .endm
15075+ .macro pax_force_fptr ptr
15076+ btsq $63,\ptr
15077+ .endm
15078+ .macro pax_set_fptr_mask
15079+ .endm
15080+#endif
15081+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15082+ .macro pax_force_retaddr rip=0, reload=0
15083+ .if \reload
15084+ pax_set_fptr_mask
15085+ .endif
15086+ orq %r12,\rip(%rsp)
15087+ .endm
15088+ .macro pax_force_fptr ptr
15089+ orq %r12,\ptr
15090+ .endm
15091+ .macro pax_set_fptr_mask
15092+ movabs $0x8000000000000000,%r12
15093+ .endm
15094+#endif
15095+#else
15096+ .macro pax_force_retaddr rip=0, reload=0
15097+ .endm
15098+ .macro pax_force_fptr ptr
15099+ .endm
15100+ .macro pax_force_retaddr_bts rip=0
15101+ .endm
15102+ .macro pax_set_fptr_mask
15103+ .endm
15104+#endif
15105+
15106 .macro altinstruction_entry orig alt feature orig_len alt_len
15107 .long \orig - .
15108 .long \alt - .
15109diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15110index 473bdbe..b1e3377 100644
15111--- a/arch/x86/include/asm/alternative.h
15112+++ b/arch/x86/include/asm/alternative.h
15113@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15114 ".pushsection .discard,\"aw\",@progbits\n" \
15115 DISCARD_ENTRY(1) \
15116 ".popsection\n" \
15117- ".pushsection .altinstr_replacement, \"ax\"\n" \
15118+ ".pushsection .altinstr_replacement, \"a\"\n" \
15119 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15120 ".popsection"
15121
15122@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15123 DISCARD_ENTRY(1) \
15124 DISCARD_ENTRY(2) \
15125 ".popsection\n" \
15126- ".pushsection .altinstr_replacement, \"ax\"\n" \
15127+ ".pushsection .altinstr_replacement, \"a\"\n" \
15128 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15129 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15130 ".popsection"
15131diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15132index 465b309..ab7e51f 100644
15133--- a/arch/x86/include/asm/apic.h
15134+++ b/arch/x86/include/asm/apic.h
15135@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15136
15137 #ifdef CONFIG_X86_LOCAL_APIC
15138
15139-extern unsigned int apic_verbosity;
15140+extern int apic_verbosity;
15141 extern int local_apic_timer_c2_ok;
15142
15143 extern int disable_apic;
15144diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15145index 20370c6..a2eb9b0 100644
15146--- a/arch/x86/include/asm/apm.h
15147+++ b/arch/x86/include/asm/apm.h
15148@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15149 __asm__ __volatile__(APM_DO_ZERO_SEGS
15150 "pushl %%edi\n\t"
15151 "pushl %%ebp\n\t"
15152- "lcall *%%cs:apm_bios_entry\n\t"
15153+ "lcall *%%ss:apm_bios_entry\n\t"
15154 "setc %%al\n\t"
15155 "popl %%ebp\n\t"
15156 "popl %%edi\n\t"
15157@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15158 __asm__ __volatile__(APM_DO_ZERO_SEGS
15159 "pushl %%edi\n\t"
15160 "pushl %%ebp\n\t"
15161- "lcall *%%cs:apm_bios_entry\n\t"
15162+ "lcall *%%ss:apm_bios_entry\n\t"
15163 "setc %%bl\n\t"
15164 "popl %%ebp\n\t"
15165 "popl %%edi\n\t"
15166diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15167index 5e5cd12..51cdc93 100644
15168--- a/arch/x86/include/asm/atomic.h
15169+++ b/arch/x86/include/asm/atomic.h
15170@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15171 }
15172
15173 /**
15174+ * atomic_read_unchecked - read atomic variable
15175+ * @v: pointer of type atomic_unchecked_t
15176+ *
15177+ * Atomically reads the value of @v.
15178+ */
15179+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15180+{
15181+ return ACCESS_ONCE((v)->counter);
15182+}
15183+
15184+/**
15185 * atomic_set - set atomic variable
15186 * @v: pointer of type atomic_t
15187 * @i: required value
15188@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15189 }
15190
15191 /**
15192+ * atomic_set_unchecked - set atomic variable
15193+ * @v: pointer of type atomic_unchecked_t
15194+ * @i: required value
15195+ *
15196+ * Atomically sets the value of @v to @i.
15197+ */
15198+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15199+{
15200+ v->counter = i;
15201+}
15202+
15203+/**
15204 * atomic_add - add integer to atomic variable
15205 * @i: integer value to add
15206 * @v: pointer of type atomic_t
15207@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15208 */
15209 static inline void atomic_add(int i, atomic_t *v)
15210 {
15211- asm volatile(LOCK_PREFIX "addl %1,%0"
15212+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15213+
15214+#ifdef CONFIG_PAX_REFCOUNT
15215+ "jno 0f\n"
15216+ LOCK_PREFIX "subl %1,%0\n"
15217+ "int $4\n0:\n"
15218+ _ASM_EXTABLE(0b, 0b)
15219+#endif
15220+
15221+ : "+m" (v->counter)
15222+ : "ir" (i));
15223+}
15224+
15225+/**
15226+ * atomic_add_unchecked - add integer to atomic variable
15227+ * @i: integer value to add
15228+ * @v: pointer of type atomic_unchecked_t
15229+ *
15230+ * Atomically adds @i to @v.
15231+ */
15232+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15233+{
15234+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15235 : "+m" (v->counter)
15236 : "ir" (i));
15237 }
15238@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15239 */
15240 static inline void atomic_sub(int i, atomic_t *v)
15241 {
15242- asm volatile(LOCK_PREFIX "subl %1,%0"
15243+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15244+
15245+#ifdef CONFIG_PAX_REFCOUNT
15246+ "jno 0f\n"
15247+ LOCK_PREFIX "addl %1,%0\n"
15248+ "int $4\n0:\n"
15249+ _ASM_EXTABLE(0b, 0b)
15250+#endif
15251+
15252+ : "+m" (v->counter)
15253+ : "ir" (i));
15254+}
15255+
15256+/**
15257+ * atomic_sub_unchecked - subtract integer from atomic variable
15258+ * @i: integer value to subtract
15259+ * @v: pointer of type atomic_unchecked_t
15260+ *
15261+ * Atomically subtracts @i from @v.
15262+ */
15263+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15264+{
15265+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15266 : "+m" (v->counter)
15267 : "ir" (i));
15268 }
15269@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15270 */
15271 static inline int atomic_sub_and_test(int i, atomic_t *v)
15272 {
15273- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15274+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15275 }
15276
15277 /**
15278@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15279 */
15280 static inline void atomic_inc(atomic_t *v)
15281 {
15282- asm volatile(LOCK_PREFIX "incl %0"
15283+ asm volatile(LOCK_PREFIX "incl %0\n"
15284+
15285+#ifdef CONFIG_PAX_REFCOUNT
15286+ "jno 0f\n"
15287+ LOCK_PREFIX "decl %0\n"
15288+ "int $4\n0:\n"
15289+ _ASM_EXTABLE(0b, 0b)
15290+#endif
15291+
15292+ : "+m" (v->counter));
15293+}
15294+
15295+/**
15296+ * atomic_inc_unchecked - increment atomic variable
15297+ * @v: pointer of type atomic_unchecked_t
15298+ *
15299+ * Atomically increments @v by 1.
15300+ */
15301+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15302+{
15303+ asm volatile(LOCK_PREFIX "incl %0\n"
15304 : "+m" (v->counter));
15305 }
15306
15307@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15308 */
15309 static inline void atomic_dec(atomic_t *v)
15310 {
15311- asm volatile(LOCK_PREFIX "decl %0"
15312+ asm volatile(LOCK_PREFIX "decl %0\n"
15313+
15314+#ifdef CONFIG_PAX_REFCOUNT
15315+ "jno 0f\n"
15316+ LOCK_PREFIX "incl %0\n"
15317+ "int $4\n0:\n"
15318+ _ASM_EXTABLE(0b, 0b)
15319+#endif
15320+
15321+ : "+m" (v->counter));
15322+}
15323+
15324+/**
15325+ * atomic_dec_unchecked - decrement atomic variable
15326+ * @v: pointer of type atomic_unchecked_t
15327+ *
15328+ * Atomically decrements @v by 1.
15329+ */
15330+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15331+{
15332+ asm volatile(LOCK_PREFIX "decl %0\n"
15333 : "+m" (v->counter));
15334 }
15335
15336@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15337 */
15338 static inline int atomic_dec_and_test(atomic_t *v)
15339 {
15340- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15341+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15342 }
15343
15344 /**
15345@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15346 */
15347 static inline int atomic_inc_and_test(atomic_t *v)
15348 {
15349- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15350+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15351+}
15352+
15353+/**
15354+ * atomic_inc_and_test_unchecked - increment and test
15355+ * @v: pointer of type atomic_unchecked_t
15356+ *
15357+ * Atomically increments @v by 1
15358+ * and returns true if the result is zero, or false for all
15359+ * other cases.
15360+ */
15361+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15362+{
15363+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15364 }
15365
15366 /**
15367@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15368 */
15369 static inline int atomic_add_negative(int i, atomic_t *v)
15370 {
15371- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15372+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15373 }
15374
15375 /**
15376@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15377 *
15378 * Atomically adds @i to @v and returns @i + @v
15379 */
15380-static inline int atomic_add_return(int i, atomic_t *v)
15381+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15382+{
15383+ return i + xadd_check_overflow(&v->counter, i);
15384+}
15385+
15386+/**
15387+ * atomic_add_return_unchecked - add integer and return
15388+ * @i: integer value to add
15389+ * @v: pointer of type atomic_unchecked_t
15390+ *
15391+ * Atomically adds @i to @v and returns @i + @v
15392+ */
15393+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15394 {
15395 return i + xadd(&v->counter, i);
15396 }
15397@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15398 *
15399 * Atomically subtracts @i from @v and returns @v - @i
15400 */
15401-static inline int atomic_sub_return(int i, atomic_t *v)
15402+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15403 {
15404 return atomic_add_return(-i, v);
15405 }
15406
15407 #define atomic_inc_return(v) (atomic_add_return(1, v))
15408+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15409+{
15410+ return atomic_add_return_unchecked(1, v);
15411+}
15412 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15413
15414-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15415+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15416+{
15417+ return cmpxchg(&v->counter, old, new);
15418+}
15419+
15420+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15421 {
15422 return cmpxchg(&v->counter, old, new);
15423 }
15424@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15425 return xchg(&v->counter, new);
15426 }
15427
15428+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15429+{
15430+ return xchg(&v->counter, new);
15431+}
15432+
15433 /**
15434 * __atomic_add_unless - add unless the number is already a given value
15435 * @v: pointer of type atomic_t
15436@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15437 */
15438 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15439 {
15440- int c, old;
15441+ int c, old, new;
15442 c = atomic_read(v);
15443 for (;;) {
15444- if (unlikely(c == (u)))
15445+ if (unlikely(c == u))
15446 break;
15447- old = atomic_cmpxchg((v), c, c + (a));
15448+
15449+ asm volatile("addl %2,%0\n"
15450+
15451+#ifdef CONFIG_PAX_REFCOUNT
15452+ "jno 0f\n"
15453+ "subl %2,%0\n"
15454+ "int $4\n0:\n"
15455+ _ASM_EXTABLE(0b, 0b)
15456+#endif
15457+
15458+ : "=r" (new)
15459+ : "0" (c), "ir" (a));
15460+
15461+ old = atomic_cmpxchg(v, c, new);
15462 if (likely(old == c))
15463 break;
15464 c = old;
15465@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15466 }
15467
15468 /**
15469+ * atomic_inc_not_zero_hint - increment if not null
15470+ * @v: pointer of type atomic_t
15471+ * @hint: probable value of the atomic before the increment
15472+ *
15473+ * This version of atomic_inc_not_zero() gives a hint of probable
15474+ * value of the atomic. This helps processor to not read the memory
15475+ * before doing the atomic read/modify/write cycle, lowering
15476+ * number of bus transactions on some arches.
15477+ *
15478+ * Returns: 0 if increment was not done, 1 otherwise.
15479+ */
15480+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15481+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15482+{
15483+ int val, c = hint, new;
15484+
15485+ /* sanity test, should be removed by compiler if hint is a constant */
15486+ if (!hint)
15487+ return __atomic_add_unless(v, 1, 0);
15488+
15489+ do {
15490+ asm volatile("incl %0\n"
15491+
15492+#ifdef CONFIG_PAX_REFCOUNT
15493+ "jno 0f\n"
15494+ "decl %0\n"
15495+ "int $4\n0:\n"
15496+ _ASM_EXTABLE(0b, 0b)
15497+#endif
15498+
15499+ : "=r" (new)
15500+ : "0" (c));
15501+
15502+ val = atomic_cmpxchg(v, c, new);
15503+ if (val == c)
15504+ return 1;
15505+ c = val;
15506+ } while (c);
15507+
15508+ return 0;
15509+}
15510+
15511+/**
15512 * atomic_inc_short - increment of a short integer
15513 * @v: pointer to type int
15514 *
15515@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15516 }
15517
15518 /* These are x86-specific, used by some header files */
15519-#define atomic_clear_mask(mask, addr) \
15520- asm volatile(LOCK_PREFIX "andl %0,%1" \
15521- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15522+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15523+{
15524+ asm volatile(LOCK_PREFIX "andl %1,%0"
15525+ : "+m" (v->counter)
15526+ : "r" (~(mask))
15527+ : "memory");
15528+}
15529
15530-#define atomic_set_mask(mask, addr) \
15531- asm volatile(LOCK_PREFIX "orl %0,%1" \
15532- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15533- : "memory")
15534+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15535+{
15536+ asm volatile(LOCK_PREFIX "andl %1,%0"
15537+ : "+m" (v->counter)
15538+ : "r" (~(mask))
15539+ : "memory");
15540+}
15541+
15542+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15543+{
15544+ asm volatile(LOCK_PREFIX "orl %1,%0"
15545+ : "+m" (v->counter)
15546+ : "r" (mask)
15547+ : "memory");
15548+}
15549+
15550+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15551+{
15552+ asm volatile(LOCK_PREFIX "orl %1,%0"
15553+ : "+m" (v->counter)
15554+ : "r" (mask)
15555+ : "memory");
15556+}
15557
15558 #ifdef CONFIG_X86_32
15559 # include <asm/atomic64_32.h>
15560diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15561index b154de7..bf18a5a 100644
15562--- a/arch/x86/include/asm/atomic64_32.h
15563+++ b/arch/x86/include/asm/atomic64_32.h
15564@@ -12,6 +12,14 @@ typedef struct {
15565 u64 __aligned(8) counter;
15566 } atomic64_t;
15567
15568+#ifdef CONFIG_PAX_REFCOUNT
15569+typedef struct {
15570+ u64 __aligned(8) counter;
15571+} atomic64_unchecked_t;
15572+#else
15573+typedef atomic64_t atomic64_unchecked_t;
15574+#endif
15575+
15576 #define ATOMIC64_INIT(val) { (val) }
15577
15578 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15579@@ -37,21 +45,31 @@ typedef struct {
15580 ATOMIC64_DECL_ONE(sym##_386)
15581
15582 ATOMIC64_DECL_ONE(add_386);
15583+ATOMIC64_DECL_ONE(add_unchecked_386);
15584 ATOMIC64_DECL_ONE(sub_386);
15585+ATOMIC64_DECL_ONE(sub_unchecked_386);
15586 ATOMIC64_DECL_ONE(inc_386);
15587+ATOMIC64_DECL_ONE(inc_unchecked_386);
15588 ATOMIC64_DECL_ONE(dec_386);
15589+ATOMIC64_DECL_ONE(dec_unchecked_386);
15590 #endif
15591
15592 #define alternative_atomic64(f, out, in...) \
15593 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15594
15595 ATOMIC64_DECL(read);
15596+ATOMIC64_DECL(read_unchecked);
15597 ATOMIC64_DECL(set);
15598+ATOMIC64_DECL(set_unchecked);
15599 ATOMIC64_DECL(xchg);
15600 ATOMIC64_DECL(add_return);
15601+ATOMIC64_DECL(add_return_unchecked);
15602 ATOMIC64_DECL(sub_return);
15603+ATOMIC64_DECL(sub_return_unchecked);
15604 ATOMIC64_DECL(inc_return);
15605+ATOMIC64_DECL(inc_return_unchecked);
15606 ATOMIC64_DECL(dec_return);
15607+ATOMIC64_DECL(dec_return_unchecked);
15608 ATOMIC64_DECL(dec_if_positive);
15609 ATOMIC64_DECL(inc_not_zero);
15610 ATOMIC64_DECL(add_unless);
15611@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15612 }
15613
15614 /**
15615+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15616+ * @p: pointer to type atomic64_unchecked_t
15617+ * @o: expected value
15618+ * @n: new value
15619+ *
15620+ * Atomically sets @v to @n if it was equal to @o and returns
15621+ * the old value.
15622+ */
15623+
15624+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15625+{
15626+ return cmpxchg64(&v->counter, o, n);
15627+}
15628+
15629+/**
15630 * atomic64_xchg - xchg atomic64 variable
15631 * @v: pointer to type atomic64_t
15632 * @n: value to assign
15633@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15634 }
15635
15636 /**
15637+ * atomic64_set_unchecked - set atomic64 variable
15638+ * @v: pointer to type atomic64_unchecked_t
15639+ * @n: value to assign
15640+ *
15641+ * Atomically sets the value of @v to @n.
15642+ */
15643+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15644+{
15645+ unsigned high = (unsigned)(i >> 32);
15646+ unsigned low = (unsigned)i;
15647+ alternative_atomic64(set, /* no output */,
15648+ "S" (v), "b" (low), "c" (high)
15649+ : "eax", "edx", "memory");
15650+}
15651+
15652+/**
15653 * atomic64_read - read atomic64 variable
15654 * @v: pointer to type atomic64_t
15655 *
15656@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15657 }
15658
15659 /**
15660+ * atomic64_read_unchecked - read atomic64 variable
15661+ * @v: pointer to type atomic64_unchecked_t
15662+ *
15663+ * Atomically reads the value of @v and returns it.
15664+ */
15665+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15666+{
15667+ long long r;
15668+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15669+ return r;
15670+ }
15671+
15672+/**
15673 * atomic64_add_return - add and return
15674 * @i: integer value to add
15675 * @v: pointer to type atomic64_t
15676@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15677 return i;
15678 }
15679
15680+/**
15681+ * atomic64_add_return_unchecked - add and return
15682+ * @i: integer value to add
15683+ * @v: pointer to type atomic64_unchecked_t
15684+ *
15685+ * Atomically adds @i to @v and returns @i + *@v
15686+ */
15687+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15688+{
15689+ alternative_atomic64(add_return_unchecked,
15690+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15691+ ASM_NO_INPUT_CLOBBER("memory"));
15692+ return i;
15693+}
15694+
15695 /*
15696 * Other variants with different arithmetic operators:
15697 */
15698@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15699 return a;
15700 }
15701
15702+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15703+{
15704+ long long a;
15705+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15706+ "S" (v) : "memory", "ecx");
15707+ return a;
15708+}
15709+
15710 static inline long long atomic64_dec_return(atomic64_t *v)
15711 {
15712 long long a;
15713@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15714 }
15715
15716 /**
15717+ * atomic64_add_unchecked - add integer to atomic64 variable
15718+ * @i: integer value to add
15719+ * @v: pointer to type atomic64_unchecked_t
15720+ *
15721+ * Atomically adds @i to @v.
15722+ */
15723+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15724+{
15725+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15726+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15727+ ASM_NO_INPUT_CLOBBER("memory"));
15728+ return i;
15729+}
15730+
15731+/**
15732 * atomic64_sub - subtract the atomic64 variable
15733 * @i: integer value to subtract
15734 * @v: pointer to type atomic64_t
15735diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15736index f8d273e..02f39f3 100644
15737--- a/arch/x86/include/asm/atomic64_64.h
15738+++ b/arch/x86/include/asm/atomic64_64.h
15739@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15740 }
15741
15742 /**
15743+ * atomic64_read_unchecked - read atomic64 variable
15744+ * @v: pointer of type atomic64_unchecked_t
15745+ *
15746+ * Atomically reads the value of @v.
15747+ * Doesn't imply a read memory barrier.
15748+ */
15749+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15750+{
15751+ return ACCESS_ONCE((v)->counter);
15752+}
15753+
15754+/**
15755 * atomic64_set - set atomic64 variable
15756 * @v: pointer to type atomic64_t
15757 * @i: required value
15758@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15759 }
15760
15761 /**
15762+ * atomic64_set_unchecked - set atomic64 variable
15763+ * @v: pointer to type atomic64_unchecked_t
15764+ * @i: required value
15765+ *
15766+ * Atomically sets the value of @v to @i.
15767+ */
15768+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15769+{
15770+ v->counter = i;
15771+}
15772+
15773+/**
15774 * atomic64_add - add integer to atomic64 variable
15775 * @i: integer value to add
15776 * @v: pointer to type atomic64_t
15777@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15778 */
15779 static inline void atomic64_add(long i, atomic64_t *v)
15780 {
15781+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15782+
15783+#ifdef CONFIG_PAX_REFCOUNT
15784+ "jno 0f\n"
15785+ LOCK_PREFIX "subq %1,%0\n"
15786+ "int $4\n0:\n"
15787+ _ASM_EXTABLE(0b, 0b)
15788+#endif
15789+
15790+ : "=m" (v->counter)
15791+ : "er" (i), "m" (v->counter));
15792+}
15793+
15794+/**
15795+ * atomic64_add_unchecked - add integer to atomic64 variable
15796+ * @i: integer value to add
15797+ * @v: pointer to type atomic64_unchecked_t
15798+ *
15799+ * Atomically adds @i to @v.
15800+ */
15801+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15802+{
15803 asm volatile(LOCK_PREFIX "addq %1,%0"
15804 : "=m" (v->counter)
15805 : "er" (i), "m" (v->counter));
15806@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15807 */
15808 static inline void atomic64_sub(long i, atomic64_t *v)
15809 {
15810- asm volatile(LOCK_PREFIX "subq %1,%0"
15811+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15812+
15813+#ifdef CONFIG_PAX_REFCOUNT
15814+ "jno 0f\n"
15815+ LOCK_PREFIX "addq %1,%0\n"
15816+ "int $4\n0:\n"
15817+ _ASM_EXTABLE(0b, 0b)
15818+#endif
15819+
15820+ : "=m" (v->counter)
15821+ : "er" (i), "m" (v->counter));
15822+}
15823+
15824+/**
15825+ * atomic64_sub_unchecked - subtract the atomic64 variable
15826+ * @i: integer value to subtract
15827+ * @v: pointer to type atomic64_unchecked_t
15828+ *
15829+ * Atomically subtracts @i from @v.
15830+ */
15831+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15832+{
15833+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15834 : "=m" (v->counter)
15835 : "er" (i), "m" (v->counter));
15836 }
15837@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15838 */
15839 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15840 {
15841- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15842+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15843 }
15844
15845 /**
15846@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15847 */
15848 static inline void atomic64_inc(atomic64_t *v)
15849 {
15850+ asm volatile(LOCK_PREFIX "incq %0\n"
15851+
15852+#ifdef CONFIG_PAX_REFCOUNT
15853+ "jno 0f\n"
15854+ LOCK_PREFIX "decq %0\n"
15855+ "int $4\n0:\n"
15856+ _ASM_EXTABLE(0b, 0b)
15857+#endif
15858+
15859+ : "=m" (v->counter)
15860+ : "m" (v->counter));
15861+}
15862+
15863+/**
15864+ * atomic64_inc_unchecked - increment atomic64 variable
15865+ * @v: pointer to type atomic64_unchecked_t
15866+ *
15867+ * Atomically increments @v by 1.
15868+ */
15869+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15870+{
15871 asm volatile(LOCK_PREFIX "incq %0"
15872 : "=m" (v->counter)
15873 : "m" (v->counter));
15874@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15875 */
15876 static inline void atomic64_dec(atomic64_t *v)
15877 {
15878- asm volatile(LOCK_PREFIX "decq %0"
15879+ asm volatile(LOCK_PREFIX "decq %0\n"
15880+
15881+#ifdef CONFIG_PAX_REFCOUNT
15882+ "jno 0f\n"
15883+ LOCK_PREFIX "incq %0\n"
15884+ "int $4\n0:\n"
15885+ _ASM_EXTABLE(0b, 0b)
15886+#endif
15887+
15888+ : "=m" (v->counter)
15889+ : "m" (v->counter));
15890+}
15891+
15892+/**
15893+ * atomic64_dec_unchecked - decrement atomic64 variable
15894+ * @v: pointer to type atomic64_t
15895+ *
15896+ * Atomically decrements @v by 1.
15897+ */
15898+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15899+{
15900+ asm volatile(LOCK_PREFIX "decq %0\n"
15901 : "=m" (v->counter)
15902 : "m" (v->counter));
15903 }
15904@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15905 */
15906 static inline int atomic64_dec_and_test(atomic64_t *v)
15907 {
15908- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15909+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15910 }
15911
15912 /**
15913@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15914 */
15915 static inline int atomic64_inc_and_test(atomic64_t *v)
15916 {
15917- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15918+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15919 }
15920
15921 /**
15922@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15923 */
15924 static inline int atomic64_add_negative(long i, atomic64_t *v)
15925 {
15926- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15927+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15928 }
15929
15930 /**
15931@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15932 */
15933 static inline long atomic64_add_return(long i, atomic64_t *v)
15934 {
15935+ return i + xadd_check_overflow(&v->counter, i);
15936+}
15937+
15938+/**
15939+ * atomic64_add_return_unchecked - add and return
15940+ * @i: integer value to add
15941+ * @v: pointer to type atomic64_unchecked_t
15942+ *
15943+ * Atomically adds @i to @v and returns @i + @v
15944+ */
15945+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15946+{
15947 return i + xadd(&v->counter, i);
15948 }
15949
15950@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15951 }
15952
15953 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15954+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15955+{
15956+ return atomic64_add_return_unchecked(1, v);
15957+}
15958 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15959
15960 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15961@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15962 return cmpxchg(&v->counter, old, new);
15963 }
15964
15965+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15966+{
15967+ return cmpxchg(&v->counter, old, new);
15968+}
15969+
15970 static inline long atomic64_xchg(atomic64_t *v, long new)
15971 {
15972 return xchg(&v->counter, new);
15973@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15974 */
15975 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15976 {
15977- long c, old;
15978+ long c, old, new;
15979 c = atomic64_read(v);
15980 for (;;) {
15981- if (unlikely(c == (u)))
15982+ if (unlikely(c == u))
15983 break;
15984- old = atomic64_cmpxchg((v), c, c + (a));
15985+
15986+ asm volatile("add %2,%0\n"
15987+
15988+#ifdef CONFIG_PAX_REFCOUNT
15989+ "jno 0f\n"
15990+ "sub %2,%0\n"
15991+ "int $4\n0:\n"
15992+ _ASM_EXTABLE(0b, 0b)
15993+#endif
15994+
15995+ : "=r" (new)
15996+ : "0" (c), "ir" (a));
15997+
15998+ old = atomic64_cmpxchg(v, c, new);
15999 if (likely(old == c))
16000 break;
16001 c = old;
16002 }
16003- return c != (u);
16004+ return c != u;
16005 }
16006
16007 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16008diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16009index 2ab1eb3..1e8cc5d 100644
16010--- a/arch/x86/include/asm/barrier.h
16011+++ b/arch/x86/include/asm/barrier.h
16012@@ -57,7 +57,7 @@
16013 do { \
16014 compiletime_assert_atomic_type(*p); \
16015 smp_mb(); \
16016- ACCESS_ONCE(*p) = (v); \
16017+ ACCESS_ONCE_RW(*p) = (v); \
16018 } while (0)
16019
16020 #define smp_load_acquire(p) \
16021@@ -74,7 +74,7 @@ do { \
16022 do { \
16023 compiletime_assert_atomic_type(*p); \
16024 barrier(); \
16025- ACCESS_ONCE(*p) = (v); \
16026+ ACCESS_ONCE_RW(*p) = (v); \
16027 } while (0)
16028
16029 #define smp_load_acquire(p) \
16030diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16031index cfe3b95..d01b118 100644
16032--- a/arch/x86/include/asm/bitops.h
16033+++ b/arch/x86/include/asm/bitops.h
16034@@ -50,7 +50,7 @@
16035 * a mask operation on a byte.
16036 */
16037 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16038-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16039+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16040 #define CONST_MASK(nr) (1 << ((nr) & 7))
16041
16042 /**
16043@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16044 */
16045 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16046 {
16047- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16048+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16049 }
16050
16051 /**
16052@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16053 */
16054 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16055 {
16056- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16057+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16058 }
16059
16060 /**
16061@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16062 */
16063 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16064 {
16065- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16066+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16067 }
16068
16069 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16070@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16071 *
16072 * Undefined if no bit exists, so code should check against 0 first.
16073 */
16074-static inline unsigned long __ffs(unsigned long word)
16075+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16076 {
16077 asm("rep; bsf %1,%0"
16078 : "=r" (word)
16079@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16080 *
16081 * Undefined if no zero exists, so code should check against ~0UL first.
16082 */
16083-static inline unsigned long ffz(unsigned long word)
16084+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16085 {
16086 asm("rep; bsf %1,%0"
16087 : "=r" (word)
16088@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16089 *
16090 * Undefined if no set bit exists, so code should check against 0 first.
16091 */
16092-static inline unsigned long __fls(unsigned long word)
16093+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16094 {
16095 asm("bsr %1,%0"
16096 : "=r" (word)
16097@@ -434,7 +434,7 @@ static inline int ffs(int x)
16098 * set bit if value is nonzero. The last (most significant) bit is
16099 * at position 32.
16100 */
16101-static inline int fls(int x)
16102+static inline int __intentional_overflow(-1) fls(int x)
16103 {
16104 int r;
16105
16106@@ -476,7 +476,7 @@ static inline int fls(int x)
16107 * at position 64.
16108 */
16109 #ifdef CONFIG_X86_64
16110-static __always_inline int fls64(__u64 x)
16111+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16112 {
16113 int bitpos = -1;
16114 /*
16115diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16116index 4fa687a..60f2d39 100644
16117--- a/arch/x86/include/asm/boot.h
16118+++ b/arch/x86/include/asm/boot.h
16119@@ -6,10 +6,15 @@
16120 #include <uapi/asm/boot.h>
16121
16122 /* Physical address where kernel should be loaded. */
16123-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16124+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16125 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16126 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16127
16128+#ifndef __ASSEMBLY__
16129+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16130+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16131+#endif
16132+
16133 /* Minimum kernel alignment, as a power of two */
16134 #ifdef CONFIG_X86_64
16135 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16136diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16137index 48f99f1..d78ebf9 100644
16138--- a/arch/x86/include/asm/cache.h
16139+++ b/arch/x86/include/asm/cache.h
16140@@ -5,12 +5,13 @@
16141
16142 /* L1 cache line size */
16143 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16144-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16145+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16146
16147 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16148+#define __read_only __attribute__((__section__(".data..read_only")))
16149
16150 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16151-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16152+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16153
16154 #ifdef CONFIG_X86_VSMP
16155 #ifdef CONFIG_SMP
16156diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16157index 76659b6..72b8439 100644
16158--- a/arch/x86/include/asm/calling.h
16159+++ b/arch/x86/include/asm/calling.h
16160@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16161 #define RSP 152
16162 #define SS 160
16163
16164-#define ARGOFFSET R11
16165-#define SWFRAME ORIG_RAX
16166+#define ARGOFFSET R15
16167
16168 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16169- subq $9*8+\addskip, %rsp
16170- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16171- movq_cfi rdi, 8*8
16172- movq_cfi rsi, 7*8
16173- movq_cfi rdx, 6*8
16174+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16175+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16176+ movq_cfi rdi, RDI
16177+ movq_cfi rsi, RSI
16178+ movq_cfi rdx, RDX
16179
16180 .if \save_rcx
16181- movq_cfi rcx, 5*8
16182+ movq_cfi rcx, RCX
16183 .endif
16184
16185 .if \rax_enosys
16186- movq $-ENOSYS, 4*8(%rsp)
16187+ movq $-ENOSYS, RAX(%rsp)
16188 .else
16189- movq_cfi rax, 4*8
16190+ movq_cfi rax, RAX
16191 .endif
16192
16193 .if \save_r891011
16194- movq_cfi r8, 3*8
16195- movq_cfi r9, 2*8
16196- movq_cfi r10, 1*8
16197- movq_cfi r11, 0*8
16198+ movq_cfi r8, R8
16199+ movq_cfi r9, R9
16200+ movq_cfi r10, R10
16201+ movq_cfi r11, R11
16202 .endif
16203
16204+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16205+ movq_cfi r12, R12
16206+#endif
16207+
16208 .endm
16209
16210-#define ARG_SKIP (9*8)
16211+#define ARG_SKIP ORIG_RAX
16212
16213 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16214 rstor_r8910=1, rstor_rdx=1
16215+
16216+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16217+ movq_cfi_restore R12, r12
16218+#endif
16219+
16220 .if \rstor_r11
16221- movq_cfi_restore 0*8, r11
16222+ movq_cfi_restore R11, r11
16223 .endif
16224
16225 .if \rstor_r8910
16226- movq_cfi_restore 1*8, r10
16227- movq_cfi_restore 2*8, r9
16228- movq_cfi_restore 3*8, r8
16229+ movq_cfi_restore R10, r10
16230+ movq_cfi_restore R9, r9
16231+ movq_cfi_restore R8, r8
16232 .endif
16233
16234 .if \rstor_rax
16235- movq_cfi_restore 4*8, rax
16236+ movq_cfi_restore RAX, rax
16237 .endif
16238
16239 .if \rstor_rcx
16240- movq_cfi_restore 5*8, rcx
16241+ movq_cfi_restore RCX, rcx
16242 .endif
16243
16244 .if \rstor_rdx
16245- movq_cfi_restore 6*8, rdx
16246+ movq_cfi_restore RDX, rdx
16247 .endif
16248
16249- movq_cfi_restore 7*8, rsi
16250- movq_cfi_restore 8*8, rdi
16251+ movq_cfi_restore RSI, rsi
16252+ movq_cfi_restore RDI, rdi
16253
16254- .if ARG_SKIP+\addskip > 0
16255- addq $ARG_SKIP+\addskip, %rsp
16256- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16257+ .if ORIG_RAX+\addskip > 0
16258+ addq $ORIG_RAX+\addskip, %rsp
16259+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16260 .endif
16261 .endm
16262
16263- .macro LOAD_ARGS offset, skiprax=0
16264- movq \offset(%rsp), %r11
16265- movq \offset+8(%rsp), %r10
16266- movq \offset+16(%rsp), %r9
16267- movq \offset+24(%rsp), %r8
16268- movq \offset+40(%rsp), %rcx
16269- movq \offset+48(%rsp), %rdx
16270- movq \offset+56(%rsp), %rsi
16271- movq \offset+64(%rsp), %rdi
16272+ .macro LOAD_ARGS skiprax=0
16273+ movq R11(%rsp), %r11
16274+ movq R10(%rsp), %r10
16275+ movq R9(%rsp), %r9
16276+ movq R8(%rsp), %r8
16277+ movq RCX(%rsp), %rcx
16278+ movq RDX(%rsp), %rdx
16279+ movq RSI(%rsp), %rsi
16280+ movq RDI(%rsp), %rdi
16281 .if \skiprax
16282 .else
16283- movq \offset+72(%rsp), %rax
16284+ movq ORIG_RAX(%rsp), %rax
16285 .endif
16286 .endm
16287
16288-#define REST_SKIP (6*8)
16289-
16290 .macro SAVE_REST
16291- subq $REST_SKIP, %rsp
16292- CFI_ADJUST_CFA_OFFSET REST_SKIP
16293- movq_cfi rbx, 5*8
16294- movq_cfi rbp, 4*8
16295- movq_cfi r12, 3*8
16296- movq_cfi r13, 2*8
16297- movq_cfi r14, 1*8
16298- movq_cfi r15, 0*8
16299+ movq_cfi rbx, RBX
16300+ movq_cfi rbp, RBP
16301+
16302+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16303+ movq_cfi r12, R12
16304+#endif
16305+
16306+ movq_cfi r13, R13
16307+ movq_cfi r14, R14
16308+ movq_cfi r15, R15
16309 .endm
16310
16311 .macro RESTORE_REST
16312- movq_cfi_restore 0*8, r15
16313- movq_cfi_restore 1*8, r14
16314- movq_cfi_restore 2*8, r13
16315- movq_cfi_restore 3*8, r12
16316- movq_cfi_restore 4*8, rbp
16317- movq_cfi_restore 5*8, rbx
16318- addq $REST_SKIP, %rsp
16319- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16320+ movq_cfi_restore R15, r15
16321+ movq_cfi_restore R14, r14
16322+ movq_cfi_restore R13, r13
16323+
16324+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16325+ movq_cfi_restore R12, r12
16326+#endif
16327+
16328+ movq_cfi_restore RBP, rbp
16329+ movq_cfi_restore RBX, rbx
16330 .endm
16331
16332 .macro SAVE_ALL
16333diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16334index f50de69..2b0a458 100644
16335--- a/arch/x86/include/asm/checksum_32.h
16336+++ b/arch/x86/include/asm/checksum_32.h
16337@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16338 int len, __wsum sum,
16339 int *src_err_ptr, int *dst_err_ptr);
16340
16341+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16342+ int len, __wsum sum,
16343+ int *src_err_ptr, int *dst_err_ptr);
16344+
16345+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16346+ int len, __wsum sum,
16347+ int *src_err_ptr, int *dst_err_ptr);
16348+
16349 /*
16350 * Note: when you get a NULL pointer exception here this means someone
16351 * passed in an incorrect kernel address to one of these functions.
16352@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16353
16354 might_sleep();
16355 stac();
16356- ret = csum_partial_copy_generic((__force void *)src, dst,
16357+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16358 len, sum, err_ptr, NULL);
16359 clac();
16360
16361@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16362 might_sleep();
16363 if (access_ok(VERIFY_WRITE, dst, len)) {
16364 stac();
16365- ret = csum_partial_copy_generic(src, (__force void *)dst,
16366+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16367 len, sum, NULL, err_ptr);
16368 clac();
16369 return ret;
16370diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16371index 99c105d7..2f667ac 100644
16372--- a/arch/x86/include/asm/cmpxchg.h
16373+++ b/arch/x86/include/asm/cmpxchg.h
16374@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16375 __compiletime_error("Bad argument size for cmpxchg");
16376 extern void __xadd_wrong_size(void)
16377 __compiletime_error("Bad argument size for xadd");
16378+extern void __xadd_check_overflow_wrong_size(void)
16379+ __compiletime_error("Bad argument size for xadd_check_overflow");
16380 extern void __add_wrong_size(void)
16381 __compiletime_error("Bad argument size for add");
16382+extern void __add_check_overflow_wrong_size(void)
16383+ __compiletime_error("Bad argument size for add_check_overflow");
16384
16385 /*
16386 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16387@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16388 __ret; \
16389 })
16390
16391+#ifdef CONFIG_PAX_REFCOUNT
16392+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16393+ ({ \
16394+ __typeof__ (*(ptr)) __ret = (arg); \
16395+ switch (sizeof(*(ptr))) { \
16396+ case __X86_CASE_L: \
16397+ asm volatile (lock #op "l %0, %1\n" \
16398+ "jno 0f\n" \
16399+ "mov %0,%1\n" \
16400+ "int $4\n0:\n" \
16401+ _ASM_EXTABLE(0b, 0b) \
16402+ : "+r" (__ret), "+m" (*(ptr)) \
16403+ : : "memory", "cc"); \
16404+ break; \
16405+ case __X86_CASE_Q: \
16406+ asm volatile (lock #op "q %q0, %1\n" \
16407+ "jno 0f\n" \
16408+ "mov %0,%1\n" \
16409+ "int $4\n0:\n" \
16410+ _ASM_EXTABLE(0b, 0b) \
16411+ : "+r" (__ret), "+m" (*(ptr)) \
16412+ : : "memory", "cc"); \
16413+ break; \
16414+ default: \
16415+ __ ## op ## _check_overflow_wrong_size(); \
16416+ } \
16417+ __ret; \
16418+ })
16419+#else
16420+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16421+#endif
16422+
16423 /*
16424 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16425 * Since this is generally used to protect other memory information, we
16426@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16427 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16428 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16429
16430+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16431+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16432+
16433 #define __add(ptr, inc, lock) \
16434 ({ \
16435 __typeof__ (*(ptr)) __ret = (inc); \
16436diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16437index 59c6c40..5e0b22c 100644
16438--- a/arch/x86/include/asm/compat.h
16439+++ b/arch/x86/include/asm/compat.h
16440@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16441 typedef u32 compat_uint_t;
16442 typedef u32 compat_ulong_t;
16443 typedef u64 __attribute__((aligned(4))) compat_u64;
16444-typedef u32 compat_uptr_t;
16445+typedef u32 __user compat_uptr_t;
16446
16447 struct compat_timespec {
16448 compat_time_t tv_sec;
16449diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16450index aede2c3..40d7a8f 100644
16451--- a/arch/x86/include/asm/cpufeature.h
16452+++ b/arch/x86/include/asm/cpufeature.h
16453@@ -212,7 +212,7 @@
16454 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16455 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16456 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16457-
16458+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16459
16460 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16461 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16462@@ -220,7 +220,7 @@
16463 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16464 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16465 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16466-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16467+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16468 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16469 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16470 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16471@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16472 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16473 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16474 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16475+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16476
16477 #if __GNUC__ >= 4
16478 extern void warn_pre_alternatives(void);
16479@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16480
16481 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16482 t_warn:
16483- warn_pre_alternatives();
16484+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16485+ warn_pre_alternatives();
16486 return false;
16487 #endif
16488
16489@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16490 ".section .discard,\"aw\",@progbits\n"
16491 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16492 ".previous\n"
16493- ".section .altinstr_replacement,\"ax\"\n"
16494+ ".section .altinstr_replacement,\"a\"\n"
16495 "3: movb $1,%0\n"
16496 "4:\n"
16497 ".previous\n"
16498@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16499 " .byte 2b - 1b\n" /* src len */
16500 " .byte 4f - 3f\n" /* repl len */
16501 ".previous\n"
16502- ".section .altinstr_replacement,\"ax\"\n"
16503+ ".section .altinstr_replacement,\"a\"\n"
16504 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16505 "4:\n"
16506 ".previous\n"
16507@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16508 ".section .discard,\"aw\",@progbits\n"
16509 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16510 ".previous\n"
16511- ".section .altinstr_replacement,\"ax\"\n"
16512+ ".section .altinstr_replacement,\"a\"\n"
16513 "3: movb $0,%0\n"
16514 "4:\n"
16515 ".previous\n"
16516@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16517 ".section .discard,\"aw\",@progbits\n"
16518 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16519 ".previous\n"
16520- ".section .altinstr_replacement,\"ax\"\n"
16521+ ".section .altinstr_replacement,\"a\"\n"
16522 "5: movb $1,%0\n"
16523 "6:\n"
16524 ".previous\n"
16525diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16526index a94b82e..59ecefa 100644
16527--- a/arch/x86/include/asm/desc.h
16528+++ b/arch/x86/include/asm/desc.h
16529@@ -4,6 +4,7 @@
16530 #include <asm/desc_defs.h>
16531 #include <asm/ldt.h>
16532 #include <asm/mmu.h>
16533+#include <asm/pgtable.h>
16534
16535 #include <linux/smp.h>
16536 #include <linux/percpu.h>
16537@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16538
16539 desc->type = (info->read_exec_only ^ 1) << 1;
16540 desc->type |= info->contents << 2;
16541+ desc->type |= info->seg_not_present ^ 1;
16542
16543 desc->s = 1;
16544 desc->dpl = 0x3;
16545@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16546 }
16547
16548 extern struct desc_ptr idt_descr;
16549-extern gate_desc idt_table[];
16550-extern struct desc_ptr debug_idt_descr;
16551-extern gate_desc debug_idt_table[];
16552-
16553-struct gdt_page {
16554- struct desc_struct gdt[GDT_ENTRIES];
16555-} __attribute__((aligned(PAGE_SIZE)));
16556-
16557-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16558+extern gate_desc idt_table[IDT_ENTRIES];
16559+extern const struct desc_ptr debug_idt_descr;
16560+extern gate_desc debug_idt_table[IDT_ENTRIES];
16561
16562+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16563 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16564 {
16565- return per_cpu(gdt_page, cpu).gdt;
16566+ return cpu_gdt_table[cpu];
16567 }
16568
16569 #ifdef CONFIG_X86_64
16570@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16571 unsigned long base, unsigned dpl, unsigned flags,
16572 unsigned short seg)
16573 {
16574- gate->a = (seg << 16) | (base & 0xffff);
16575- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16576+ gate->gate.offset_low = base;
16577+ gate->gate.seg = seg;
16578+ gate->gate.reserved = 0;
16579+ gate->gate.type = type;
16580+ gate->gate.s = 0;
16581+ gate->gate.dpl = dpl;
16582+ gate->gate.p = 1;
16583+ gate->gate.offset_high = base >> 16;
16584 }
16585
16586 #endif
16587@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16588
16589 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16590 {
16591+ pax_open_kernel();
16592 memcpy(&idt[entry], gate, sizeof(*gate));
16593+ pax_close_kernel();
16594 }
16595
16596 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16597 {
16598+ pax_open_kernel();
16599 memcpy(&ldt[entry], desc, 8);
16600+ pax_close_kernel();
16601 }
16602
16603 static inline void
16604@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16605 default: size = sizeof(*gdt); break;
16606 }
16607
16608+ pax_open_kernel();
16609 memcpy(&gdt[entry], desc, size);
16610+ pax_close_kernel();
16611 }
16612
16613 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16614@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16615
16616 static inline void native_load_tr_desc(void)
16617 {
16618+ pax_open_kernel();
16619 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16620+ pax_close_kernel();
16621 }
16622
16623 static inline void native_load_gdt(const struct desc_ptr *dtr)
16624@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16625 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16626 unsigned int i;
16627
16628+ pax_open_kernel();
16629 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16630 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16631+ pax_close_kernel();
16632 }
16633
16634 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16635@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16636 preempt_enable();
16637 }
16638
16639-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16640+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16641 {
16642 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16643 }
16644@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16645 }
16646
16647 #ifdef CONFIG_X86_64
16648-static inline void set_nmi_gate(int gate, void *addr)
16649+static inline void set_nmi_gate(int gate, const void *addr)
16650 {
16651 gate_desc s;
16652
16653@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16654 #endif
16655
16656 #ifdef CONFIG_TRACING
16657-extern struct desc_ptr trace_idt_descr;
16658-extern gate_desc trace_idt_table[];
16659+extern const struct desc_ptr trace_idt_descr;
16660+extern gate_desc trace_idt_table[IDT_ENTRIES];
16661 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16662 {
16663 write_idt_entry(trace_idt_table, entry, gate);
16664 }
16665
16666-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16667+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16668 unsigned dpl, unsigned ist, unsigned seg)
16669 {
16670 gate_desc s;
16671@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16672 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16673 #endif
16674
16675-static inline void _set_gate(int gate, unsigned type, void *addr,
16676+static inline void _set_gate(int gate, unsigned type, const void *addr,
16677 unsigned dpl, unsigned ist, unsigned seg)
16678 {
16679 gate_desc s;
16680@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16681 #define set_intr_gate(n, addr) \
16682 do { \
16683 BUG_ON((unsigned)n > 0xFF); \
16684- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16685+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16686 __KERNEL_CS); \
16687- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16688+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16689 0, 0, __KERNEL_CS); \
16690 } while (0)
16691
16692@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16693 /*
16694 * This routine sets up an interrupt gate at directory privilege level 3.
16695 */
16696-static inline void set_system_intr_gate(unsigned int n, void *addr)
16697+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16698 {
16699 BUG_ON((unsigned)n > 0xFF);
16700 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16701 }
16702
16703-static inline void set_system_trap_gate(unsigned int n, void *addr)
16704+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16705 {
16706 BUG_ON((unsigned)n > 0xFF);
16707 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16708 }
16709
16710-static inline void set_trap_gate(unsigned int n, void *addr)
16711+static inline void set_trap_gate(unsigned int n, const void *addr)
16712 {
16713 BUG_ON((unsigned)n > 0xFF);
16714 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16715@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16716 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16717 {
16718 BUG_ON((unsigned)n > 0xFF);
16719- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16720+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16721 }
16722
16723-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16724+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16725 {
16726 BUG_ON((unsigned)n > 0xFF);
16727 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16728 }
16729
16730-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16731+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16732 {
16733 BUG_ON((unsigned)n > 0xFF);
16734 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16735@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16736 else
16737 load_idt((const struct desc_ptr *)&idt_descr);
16738 }
16739+
16740+#ifdef CONFIG_X86_32
16741+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16742+{
16743+ struct desc_struct d;
16744+
16745+ if (likely(limit))
16746+ limit = (limit - 1UL) >> PAGE_SHIFT;
16747+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16748+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16749+}
16750+#endif
16751+
16752 #endif /* _ASM_X86_DESC_H */
16753diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16754index 278441f..b95a174 100644
16755--- a/arch/x86/include/asm/desc_defs.h
16756+++ b/arch/x86/include/asm/desc_defs.h
16757@@ -31,6 +31,12 @@ struct desc_struct {
16758 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16759 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16760 };
16761+ struct {
16762+ u16 offset_low;
16763+ u16 seg;
16764+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16765+ unsigned offset_high: 16;
16766+ } gate;
16767 };
16768 } __attribute__((packed));
16769
16770diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16771index ced283a..ffe04cc 100644
16772--- a/arch/x86/include/asm/div64.h
16773+++ b/arch/x86/include/asm/div64.h
16774@@ -39,7 +39,7 @@
16775 __mod; \
16776 })
16777
16778-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16779+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16780 {
16781 union {
16782 u64 v64;
16783diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16784index ca3347a..1a5082a 100644
16785--- a/arch/x86/include/asm/elf.h
16786+++ b/arch/x86/include/asm/elf.h
16787@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16788
16789 #include <asm/vdso.h>
16790
16791-#ifdef CONFIG_X86_64
16792-extern unsigned int vdso64_enabled;
16793-#endif
16794 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16795 extern unsigned int vdso32_enabled;
16796 #endif
16797@@ -249,7 +246,25 @@ extern int force_personality32;
16798 the loader. We need to make sure that it is out of the way of the program
16799 that it will "exec", and that there is sufficient room for the brk. */
16800
16801+#ifdef CONFIG_PAX_SEGMEXEC
16802+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16803+#else
16804 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16805+#endif
16806+
16807+#ifdef CONFIG_PAX_ASLR
16808+#ifdef CONFIG_X86_32
16809+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16810+
16811+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16812+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16813+#else
16814+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16815+
16816+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16817+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16818+#endif
16819+#endif
16820
16821 /* This yields a mask that user programs can use to figure out what
16822 instruction set this CPU supports. This could be done in user space,
16823@@ -298,17 +313,13 @@ do { \
16824
16825 #define ARCH_DLINFO \
16826 do { \
16827- if (vdso64_enabled) \
16828- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16829- (unsigned long __force)current->mm->context.vdso); \
16830+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16831 } while (0)
16832
16833 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16834 #define ARCH_DLINFO_X32 \
16835 do { \
16836- if (vdso64_enabled) \
16837- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16838- (unsigned long __force)current->mm->context.vdso); \
16839+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16840 } while (0)
16841
16842 #define AT_SYSINFO 32
16843@@ -323,10 +334,10 @@ else \
16844
16845 #endif /* !CONFIG_X86_32 */
16846
16847-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16848+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16849
16850 #define VDSO_ENTRY \
16851- ((unsigned long)current->mm->context.vdso + \
16852+ (current->mm->context.vdso + \
16853 selected_vdso32->sym___kernel_vsyscall)
16854
16855 struct linux_binprm;
16856@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16857 int uses_interp);
16858 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16859
16860-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16861-#define arch_randomize_brk arch_randomize_brk
16862-
16863 /*
16864 * True on X86_32 or when emulating IA32 on X86_64
16865 */
16866diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16867index 77a99ac..39ff7f5 100644
16868--- a/arch/x86/include/asm/emergency-restart.h
16869+++ b/arch/x86/include/asm/emergency-restart.h
16870@@ -1,6 +1,6 @@
16871 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16872 #define _ASM_X86_EMERGENCY_RESTART_H
16873
16874-extern void machine_emergency_restart(void);
16875+extern void machine_emergency_restart(void) __noreturn;
16876
16877 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16878diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16879index 1c7eefe..d0e4702 100644
16880--- a/arch/x86/include/asm/floppy.h
16881+++ b/arch/x86/include/asm/floppy.h
16882@@ -229,18 +229,18 @@ static struct fd_routine_l {
16883 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16884 } fd_routine[] = {
16885 {
16886- request_dma,
16887- free_dma,
16888- get_dma_residue,
16889- dma_mem_alloc,
16890- hard_dma_setup
16891+ ._request_dma = request_dma,
16892+ ._free_dma = free_dma,
16893+ ._get_dma_residue = get_dma_residue,
16894+ ._dma_mem_alloc = dma_mem_alloc,
16895+ ._dma_setup = hard_dma_setup
16896 },
16897 {
16898- vdma_request_dma,
16899- vdma_nop,
16900- vdma_get_dma_residue,
16901- vdma_mem_alloc,
16902- vdma_dma_setup
16903+ ._request_dma = vdma_request_dma,
16904+ ._free_dma = vdma_nop,
16905+ ._get_dma_residue = vdma_get_dma_residue,
16906+ ._dma_mem_alloc = vdma_mem_alloc,
16907+ ._dma_setup = vdma_dma_setup
16908 }
16909 };
16910
16911diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16912index f895358..800c60d 100644
16913--- a/arch/x86/include/asm/fpu-internal.h
16914+++ b/arch/x86/include/asm/fpu-internal.h
16915@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16916 #define user_insn(insn, output, input...) \
16917 ({ \
16918 int err; \
16919+ pax_open_userland(); \
16920 asm volatile(ASM_STAC "\n" \
16921- "1:" #insn "\n\t" \
16922+ "1:" \
16923+ __copyuser_seg \
16924+ #insn "\n\t" \
16925 "2: " ASM_CLAC "\n" \
16926 ".section .fixup,\"ax\"\n" \
16927 "3: movl $-1,%[err]\n" \
16928@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16929 _ASM_EXTABLE(1b, 3b) \
16930 : [err] "=r" (err), output \
16931 : "0"(0), input); \
16932+ pax_close_userland(); \
16933 err; \
16934 })
16935
16936@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16937 "fnclex\n\t"
16938 "emms\n\t"
16939 "fildl %P[addr]" /* set F?P to defined value */
16940- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16941+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16942 }
16943
16944 return fpu_restore_checking(&tsk->thread.fpu);
16945diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16946index b4c1f54..e290c08 100644
16947--- a/arch/x86/include/asm/futex.h
16948+++ b/arch/x86/include/asm/futex.h
16949@@ -12,6 +12,7 @@
16950 #include <asm/smap.h>
16951
16952 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16953+ typecheck(u32 __user *, uaddr); \
16954 asm volatile("\t" ASM_STAC "\n" \
16955 "1:\t" insn "\n" \
16956 "2:\t" ASM_CLAC "\n" \
16957@@ -20,15 +21,16 @@
16958 "\tjmp\t2b\n" \
16959 "\t.previous\n" \
16960 _ASM_EXTABLE(1b, 3b) \
16961- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16962+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16963 : "i" (-EFAULT), "0" (oparg), "1" (0))
16964
16965 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16966+ typecheck(u32 __user *, uaddr); \
16967 asm volatile("\t" ASM_STAC "\n" \
16968 "1:\tmovl %2, %0\n" \
16969 "\tmovl\t%0, %3\n" \
16970 "\t" insn "\n" \
16971- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16972+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16973 "\tjnz\t1b\n" \
16974 "3:\t" ASM_CLAC "\n" \
16975 "\t.section .fixup,\"ax\"\n" \
16976@@ -38,7 +40,7 @@
16977 _ASM_EXTABLE(1b, 4b) \
16978 _ASM_EXTABLE(2b, 4b) \
16979 : "=&a" (oldval), "=&r" (ret), \
16980- "+m" (*uaddr), "=&r" (tem) \
16981+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16982 : "r" (oparg), "i" (-EFAULT), "1" (0))
16983
16984 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16985@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16986
16987 pagefault_disable();
16988
16989+ pax_open_userland();
16990 switch (op) {
16991 case FUTEX_OP_SET:
16992- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16993+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16994 break;
16995 case FUTEX_OP_ADD:
16996- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16997+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16998 uaddr, oparg);
16999 break;
17000 case FUTEX_OP_OR:
17001@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17002 default:
17003 ret = -ENOSYS;
17004 }
17005+ pax_close_userland();
17006
17007 pagefault_enable();
17008
17009diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17010index 9662290..49ca5e5 100644
17011--- a/arch/x86/include/asm/hw_irq.h
17012+++ b/arch/x86/include/asm/hw_irq.h
17013@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17014 #endif /* CONFIG_X86_LOCAL_APIC */
17015
17016 /* Statistics */
17017-extern atomic_t irq_err_count;
17018-extern atomic_t irq_mis_count;
17019+extern atomic_unchecked_t irq_err_count;
17020+extern atomic_unchecked_t irq_mis_count;
17021
17022 /* EISA */
17023 extern void eisa_set_level_irq(unsigned int irq);
17024diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17025index ccffa53..3c90c87 100644
17026--- a/arch/x86/include/asm/i8259.h
17027+++ b/arch/x86/include/asm/i8259.h
17028@@ -62,7 +62,7 @@ struct legacy_pic {
17029 void (*init)(int auto_eoi);
17030 int (*irq_pending)(unsigned int irq);
17031 void (*make_irq)(unsigned int irq);
17032-};
17033+} __do_const;
17034
17035 extern struct legacy_pic *legacy_pic;
17036 extern struct legacy_pic null_legacy_pic;
17037diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17038index 34a5b93..27e40a6 100644
17039--- a/arch/x86/include/asm/io.h
17040+++ b/arch/x86/include/asm/io.h
17041@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17042 "m" (*(volatile type __force *)addr) barrier); }
17043
17044 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17045-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17046-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17047+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17048+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17049
17050 build_mmio_read(__readb, "b", unsigned char, "=q", )
17051-build_mmio_read(__readw, "w", unsigned short, "=r", )
17052-build_mmio_read(__readl, "l", unsigned int, "=r", )
17053+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17054+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17055
17056 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17057 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17058@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17059 * this function
17060 */
17061
17062-static inline phys_addr_t virt_to_phys(volatile void *address)
17063+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17064 {
17065 return __pa(address);
17066 }
17067@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17068 return ioremap_nocache(offset, size);
17069 }
17070
17071-extern void iounmap(volatile void __iomem *addr);
17072+extern void iounmap(const volatile void __iomem *addr);
17073
17074 extern void set_iounmap_nonlazy(void);
17075
17076@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17077
17078 #include <linux/vmalloc.h>
17079
17080+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17081+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17082+{
17083+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17084+}
17085+
17086+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17087+{
17088+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17089+}
17090+
17091 /*
17092 * Convert a virtual cached pointer to an uncached pointer
17093 */
17094diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17095index 0a8b519..80e7d5b 100644
17096--- a/arch/x86/include/asm/irqflags.h
17097+++ b/arch/x86/include/asm/irqflags.h
17098@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17099 sti; \
17100 sysexit
17101
17102+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17103+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17104+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17105+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17106+
17107 #else
17108 #define INTERRUPT_RETURN iret
17109 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17110diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17111index 4421b5d..8543006 100644
17112--- a/arch/x86/include/asm/kprobes.h
17113+++ b/arch/x86/include/asm/kprobes.h
17114@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17115 #define RELATIVEJUMP_SIZE 5
17116 #define RELATIVECALL_OPCODE 0xe8
17117 #define RELATIVE_ADDR_SIZE 4
17118-#define MAX_STACK_SIZE 64
17119-#define MIN_STACK_SIZE(ADDR) \
17120- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17121- THREAD_SIZE - (unsigned long)(ADDR))) \
17122- ? (MAX_STACK_SIZE) \
17123- : (((unsigned long)current_thread_info()) + \
17124- THREAD_SIZE - (unsigned long)(ADDR)))
17125+#define MAX_STACK_SIZE 64UL
17126+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17127
17128 #define flush_insn_slot(p) do { } while (0)
17129
17130diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17131index d89c6b8..e711c69 100644
17132--- a/arch/x86/include/asm/kvm_host.h
17133+++ b/arch/x86/include/asm/kvm_host.h
17134@@ -51,7 +51,7 @@
17135 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17136
17137 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17138-#define CR3_PCID_INVD (1UL << 63)
17139+#define CR3_PCID_INVD (1ULL << 63)
17140 #define CR4_RESERVED_BITS \
17141 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17142 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
17143diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17144index 4ad6560..75c7bdd 100644
17145--- a/arch/x86/include/asm/local.h
17146+++ b/arch/x86/include/asm/local.h
17147@@ -10,33 +10,97 @@ typedef struct {
17148 atomic_long_t a;
17149 } local_t;
17150
17151+typedef struct {
17152+ atomic_long_unchecked_t a;
17153+} local_unchecked_t;
17154+
17155 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17156
17157 #define local_read(l) atomic_long_read(&(l)->a)
17158+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17159 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17160+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17161
17162 static inline void local_inc(local_t *l)
17163 {
17164- asm volatile(_ASM_INC "%0"
17165+ asm volatile(_ASM_INC "%0\n"
17166+
17167+#ifdef CONFIG_PAX_REFCOUNT
17168+ "jno 0f\n"
17169+ _ASM_DEC "%0\n"
17170+ "int $4\n0:\n"
17171+ _ASM_EXTABLE(0b, 0b)
17172+#endif
17173+
17174+ : "+m" (l->a.counter));
17175+}
17176+
17177+static inline void local_inc_unchecked(local_unchecked_t *l)
17178+{
17179+ asm volatile(_ASM_INC "%0\n"
17180 : "+m" (l->a.counter));
17181 }
17182
17183 static inline void local_dec(local_t *l)
17184 {
17185- asm volatile(_ASM_DEC "%0"
17186+ asm volatile(_ASM_DEC "%0\n"
17187+
17188+#ifdef CONFIG_PAX_REFCOUNT
17189+ "jno 0f\n"
17190+ _ASM_INC "%0\n"
17191+ "int $4\n0:\n"
17192+ _ASM_EXTABLE(0b, 0b)
17193+#endif
17194+
17195+ : "+m" (l->a.counter));
17196+}
17197+
17198+static inline void local_dec_unchecked(local_unchecked_t *l)
17199+{
17200+ asm volatile(_ASM_DEC "%0\n"
17201 : "+m" (l->a.counter));
17202 }
17203
17204 static inline void local_add(long i, local_t *l)
17205 {
17206- asm volatile(_ASM_ADD "%1,%0"
17207+ asm volatile(_ASM_ADD "%1,%0\n"
17208+
17209+#ifdef CONFIG_PAX_REFCOUNT
17210+ "jno 0f\n"
17211+ _ASM_SUB "%1,%0\n"
17212+ "int $4\n0:\n"
17213+ _ASM_EXTABLE(0b, 0b)
17214+#endif
17215+
17216+ : "+m" (l->a.counter)
17217+ : "ir" (i));
17218+}
17219+
17220+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17221+{
17222+ asm volatile(_ASM_ADD "%1,%0\n"
17223 : "+m" (l->a.counter)
17224 : "ir" (i));
17225 }
17226
17227 static inline void local_sub(long i, local_t *l)
17228 {
17229- asm volatile(_ASM_SUB "%1,%0"
17230+ asm volatile(_ASM_SUB "%1,%0\n"
17231+
17232+#ifdef CONFIG_PAX_REFCOUNT
17233+ "jno 0f\n"
17234+ _ASM_ADD "%1,%0\n"
17235+ "int $4\n0:\n"
17236+ _ASM_EXTABLE(0b, 0b)
17237+#endif
17238+
17239+ : "+m" (l->a.counter)
17240+ : "ir" (i));
17241+}
17242+
17243+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17244+{
17245+ asm volatile(_ASM_SUB "%1,%0\n"
17246 : "+m" (l->a.counter)
17247 : "ir" (i));
17248 }
17249@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17250 */
17251 static inline int local_sub_and_test(long i, local_t *l)
17252 {
17253- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17254+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17255 }
17256
17257 /**
17258@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17259 */
17260 static inline int local_dec_and_test(local_t *l)
17261 {
17262- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17263+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17264 }
17265
17266 /**
17267@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17268 */
17269 static inline int local_inc_and_test(local_t *l)
17270 {
17271- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17272+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17273 }
17274
17275 /**
17276@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17277 */
17278 static inline int local_add_negative(long i, local_t *l)
17279 {
17280- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17281+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17282 }
17283
17284 /**
17285@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17286 static inline long local_add_return(long i, local_t *l)
17287 {
17288 long __i = i;
17289+ asm volatile(_ASM_XADD "%0, %1\n"
17290+
17291+#ifdef CONFIG_PAX_REFCOUNT
17292+ "jno 0f\n"
17293+ _ASM_MOV "%0,%1\n"
17294+ "int $4\n0:\n"
17295+ _ASM_EXTABLE(0b, 0b)
17296+#endif
17297+
17298+ : "+r" (i), "+m" (l->a.counter)
17299+ : : "memory");
17300+ return i + __i;
17301+}
17302+
17303+/**
17304+ * local_add_return_unchecked - add and return
17305+ * @i: integer value to add
17306+ * @l: pointer to type local_unchecked_t
17307+ *
17308+ * Atomically adds @i to @l and returns @i + @l
17309+ */
17310+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17311+{
17312+ long __i = i;
17313 asm volatile(_ASM_XADD "%0, %1;"
17314 : "+r" (i), "+m" (l->a.counter)
17315 : : "memory");
17316@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17317
17318 #define local_cmpxchg(l, o, n) \
17319 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17320+#define local_cmpxchg_unchecked(l, o, n) \
17321+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17322 /* Always has a lock prefix */
17323 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17324
17325diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17326new file mode 100644
17327index 0000000..2bfd3ba
17328--- /dev/null
17329+++ b/arch/x86/include/asm/mman.h
17330@@ -0,0 +1,15 @@
17331+#ifndef _X86_MMAN_H
17332+#define _X86_MMAN_H
17333+
17334+#include <uapi/asm/mman.h>
17335+
17336+#ifdef __KERNEL__
17337+#ifndef __ASSEMBLY__
17338+#ifdef CONFIG_X86_32
17339+#define arch_mmap_check i386_mmap_check
17340+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17341+#endif
17342+#endif
17343+#endif
17344+
17345+#endif /* X86_MMAN_H */
17346diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17347index 876e74e..e20bfb1 100644
17348--- a/arch/x86/include/asm/mmu.h
17349+++ b/arch/x86/include/asm/mmu.h
17350@@ -9,7 +9,7 @@
17351 * we put the segment information here.
17352 */
17353 typedef struct {
17354- void *ldt;
17355+ struct desc_struct *ldt;
17356 int size;
17357
17358 #ifdef CONFIG_X86_64
17359@@ -18,7 +18,19 @@ typedef struct {
17360 #endif
17361
17362 struct mutex lock;
17363- void __user *vdso;
17364+ unsigned long vdso;
17365+
17366+#ifdef CONFIG_X86_32
17367+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17368+ unsigned long user_cs_base;
17369+ unsigned long user_cs_limit;
17370+
17371+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17372+ cpumask_t cpu_user_cs_mask;
17373+#endif
17374+
17375+#endif
17376+#endif
17377 } mm_context_t;
17378
17379 #ifdef CONFIG_SMP
17380diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17381index 4b75d59..8ffacb6 100644
17382--- a/arch/x86/include/asm/mmu_context.h
17383+++ b/arch/x86/include/asm/mmu_context.h
17384@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17385
17386 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17387 {
17388+
17389+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17390+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17391+ unsigned int i;
17392+ pgd_t *pgd;
17393+
17394+ pax_open_kernel();
17395+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17396+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17397+ set_pgd_batched(pgd+i, native_make_pgd(0));
17398+ pax_close_kernel();
17399+ }
17400+#endif
17401+
17402 #ifdef CONFIG_SMP
17403 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17404 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17405@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17406 struct task_struct *tsk)
17407 {
17408 unsigned cpu = smp_processor_id();
17409+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17410+ int tlbstate = TLBSTATE_OK;
17411+#endif
17412
17413 if (likely(prev != next)) {
17414 #ifdef CONFIG_SMP
17415+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17416+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17417+#endif
17418 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17419 this_cpu_write(cpu_tlbstate.active_mm, next);
17420 #endif
17421 cpumask_set_cpu(cpu, mm_cpumask(next));
17422
17423 /* Re-load page tables */
17424+#ifdef CONFIG_PAX_PER_CPU_PGD
17425+ pax_open_kernel();
17426+
17427+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17428+ if (static_cpu_has(X86_FEATURE_PCID))
17429+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17430+ else
17431+#endif
17432+
17433+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17434+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17435+ pax_close_kernel();
17436+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17437+
17438+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17439+ if (static_cpu_has(X86_FEATURE_PCID)) {
17440+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17441+ u64 descriptor[2];
17442+ descriptor[0] = PCID_USER;
17443+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17444+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17445+ descriptor[0] = PCID_KERNEL;
17446+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17447+ }
17448+ } else {
17449+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17450+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17451+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17452+ else
17453+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17454+ }
17455+ } else
17456+#endif
17457+
17458+ load_cr3(get_cpu_pgd(cpu, kernel));
17459+#else
17460 load_cr3(next->pgd);
17461+#endif
17462 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17463
17464 /* Stop flush ipis for the previous mm */
17465@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17466 */
17467 if (unlikely(prev->context.ldt != next->context.ldt))
17468 load_LDT_nolock(&next->context);
17469+
17470+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17471+ if (!(__supported_pte_mask & _PAGE_NX)) {
17472+ smp_mb__before_atomic();
17473+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17474+ smp_mb__after_atomic();
17475+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17476+ }
17477+#endif
17478+
17479+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17480+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17481+ prev->context.user_cs_limit != next->context.user_cs_limit))
17482+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17483+#ifdef CONFIG_SMP
17484+ else if (unlikely(tlbstate != TLBSTATE_OK))
17485+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17486+#endif
17487+#endif
17488+
17489 }
17490+ else {
17491+
17492+#ifdef CONFIG_PAX_PER_CPU_PGD
17493+ pax_open_kernel();
17494+
17495+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17496+ if (static_cpu_has(X86_FEATURE_PCID))
17497+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17498+ else
17499+#endif
17500+
17501+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17502+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17503+ pax_close_kernel();
17504+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17505+
17506+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17507+ if (static_cpu_has(X86_FEATURE_PCID)) {
17508+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17509+ u64 descriptor[2];
17510+ descriptor[0] = PCID_USER;
17511+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17512+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17513+ descriptor[0] = PCID_KERNEL;
17514+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17515+ }
17516+ } else {
17517+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17518+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17519+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17520+ else
17521+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17522+ }
17523+ } else
17524+#endif
17525+
17526+ load_cr3(get_cpu_pgd(cpu, kernel));
17527+#endif
17528+
17529 #ifdef CONFIG_SMP
17530- else {
17531 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17532 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17533
17534@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17535 * tlb flush IPI delivery. We must reload CR3
17536 * to make sure to use no freed page tables.
17537 */
17538+
17539+#ifndef CONFIG_PAX_PER_CPU_PGD
17540 load_cr3(next->pgd);
17541 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17542+#endif
17543+
17544 load_LDT_nolock(&next->context);
17545+
17546+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17547+ if (!(__supported_pte_mask & _PAGE_NX))
17548+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17549+#endif
17550+
17551+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17552+#ifdef CONFIG_PAX_PAGEEXEC
17553+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17554+#endif
17555+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17556+#endif
17557+
17558 }
17559+#endif
17560 }
17561-#endif
17562 }
17563
17564 #define activate_mm(prev, next) \
17565diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17566index e3b7819..b257c64 100644
17567--- a/arch/x86/include/asm/module.h
17568+++ b/arch/x86/include/asm/module.h
17569@@ -5,6 +5,7 @@
17570
17571 #ifdef CONFIG_X86_64
17572 /* X86_64 does not define MODULE_PROC_FAMILY */
17573+#define MODULE_PROC_FAMILY ""
17574 #elif defined CONFIG_M486
17575 #define MODULE_PROC_FAMILY "486 "
17576 #elif defined CONFIG_M586
17577@@ -57,8 +58,20 @@
17578 #error unknown processor family
17579 #endif
17580
17581-#ifdef CONFIG_X86_32
17582-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17583+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17584+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17585+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17586+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17587+#else
17588+#define MODULE_PAX_KERNEXEC ""
17589 #endif
17590
17591+#ifdef CONFIG_PAX_MEMORY_UDEREF
17592+#define MODULE_PAX_UDEREF "UDEREF "
17593+#else
17594+#define MODULE_PAX_UDEREF ""
17595+#endif
17596+
17597+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17598+
17599 #endif /* _ASM_X86_MODULE_H */
17600diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17601index 5f2fc44..106caa6 100644
17602--- a/arch/x86/include/asm/nmi.h
17603+++ b/arch/x86/include/asm/nmi.h
17604@@ -36,26 +36,35 @@ enum {
17605
17606 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17607
17608+struct nmiaction;
17609+
17610+struct nmiwork {
17611+ const struct nmiaction *action;
17612+ u64 max_duration;
17613+ struct irq_work irq_work;
17614+};
17615+
17616 struct nmiaction {
17617 struct list_head list;
17618 nmi_handler_t handler;
17619- u64 max_duration;
17620- struct irq_work irq_work;
17621 unsigned long flags;
17622 const char *name;
17623-};
17624+ struct nmiwork *work;
17625+} __do_const;
17626
17627 #define register_nmi_handler(t, fn, fg, n, init...) \
17628 ({ \
17629- static struct nmiaction init fn##_na = { \
17630+ static struct nmiwork fn##_nw; \
17631+ static const struct nmiaction init fn##_na = { \
17632 .handler = (fn), \
17633 .name = (n), \
17634 .flags = (fg), \
17635+ .work = &fn##_nw, \
17636 }; \
17637 __register_nmi_handler((t), &fn##_na); \
17638 })
17639
17640-int __register_nmi_handler(unsigned int, struct nmiaction *);
17641+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17642
17643 void unregister_nmi_handler(unsigned int, const char *);
17644
17645diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17646index 802dde3..9183e68 100644
17647--- a/arch/x86/include/asm/page.h
17648+++ b/arch/x86/include/asm/page.h
17649@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17650 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17651
17652 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17653+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17654
17655 #define __boot_va(x) __va(x)
17656 #define __boot_pa(x) __pa(x)
17657@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17658 * virt_to_page(kaddr) returns a valid pointer if and only if
17659 * virt_addr_valid(kaddr) returns true.
17660 */
17661-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17662 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17663 extern bool __virt_addr_valid(unsigned long kaddr);
17664 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17665
17666+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17667+#define virt_to_page(kaddr) \
17668+ ({ \
17669+ const void *__kaddr = (const void *)(kaddr); \
17670+ BUG_ON(!virt_addr_valid(__kaddr)); \
17671+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17672+ })
17673+#else
17674+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17675+#endif
17676+
17677 #endif /* __ASSEMBLY__ */
17678
17679 #include <asm-generic/memory_model.h>
17680diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17681index b3bebf9..e1f5d95 100644
17682--- a/arch/x86/include/asm/page_64.h
17683+++ b/arch/x86/include/asm/page_64.h
17684@@ -7,9 +7,9 @@
17685
17686 /* duplicated to the one in bootmem.h */
17687 extern unsigned long max_pfn;
17688-extern unsigned long phys_base;
17689+extern const unsigned long phys_base;
17690
17691-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17692+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17693 {
17694 unsigned long y = x - __START_KERNEL_map;
17695
17696diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17697index 32444ae..1a1624b 100644
17698--- a/arch/x86/include/asm/paravirt.h
17699+++ b/arch/x86/include/asm/paravirt.h
17700@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17701 return (pmd_t) { ret };
17702 }
17703
17704-static inline pmdval_t pmd_val(pmd_t pmd)
17705+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17706 {
17707 pmdval_t ret;
17708
17709@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17710 val);
17711 }
17712
17713+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17714+{
17715+ pgdval_t val = native_pgd_val(pgd);
17716+
17717+ if (sizeof(pgdval_t) > sizeof(long))
17718+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17719+ val, (u64)val >> 32);
17720+ else
17721+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17722+ val);
17723+}
17724+
17725 static inline void pgd_clear(pgd_t *pgdp)
17726 {
17727 set_pgd(pgdp, __pgd(0));
17728@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17729 pv_mmu_ops.set_fixmap(idx, phys, flags);
17730 }
17731
17732+#ifdef CONFIG_PAX_KERNEXEC
17733+static inline unsigned long pax_open_kernel(void)
17734+{
17735+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17736+}
17737+
17738+static inline unsigned long pax_close_kernel(void)
17739+{
17740+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17741+}
17742+#else
17743+static inline unsigned long pax_open_kernel(void) { return 0; }
17744+static inline unsigned long pax_close_kernel(void) { return 0; }
17745+#endif
17746+
17747 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17748
17749 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17750@@ -906,7 +933,7 @@ extern void default_banner(void);
17751
17752 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17753 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17754-#define PARA_INDIRECT(addr) *%cs:addr
17755+#define PARA_INDIRECT(addr) *%ss:addr
17756 #endif
17757
17758 #define INTERRUPT_RETURN \
17759@@ -981,6 +1008,21 @@ extern void default_banner(void);
17760 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17761 CLBR_NONE, \
17762 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17763+
17764+#define GET_CR0_INTO_RDI \
17765+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17766+ mov %rax,%rdi
17767+
17768+#define SET_RDI_INTO_CR0 \
17769+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17770+
17771+#define GET_CR3_INTO_RDI \
17772+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17773+ mov %rax,%rdi
17774+
17775+#define SET_RDI_INTO_CR3 \
17776+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17777+
17778 #endif /* CONFIG_X86_32 */
17779
17780 #endif /* __ASSEMBLY__ */
17781diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17782index 7549b8b..f0edfda 100644
17783--- a/arch/x86/include/asm/paravirt_types.h
17784+++ b/arch/x86/include/asm/paravirt_types.h
17785@@ -84,7 +84,7 @@ struct pv_init_ops {
17786 */
17787 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17788 unsigned long addr, unsigned len);
17789-};
17790+} __no_const __no_randomize_layout;
17791
17792
17793 struct pv_lazy_ops {
17794@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17795 void (*enter)(void);
17796 void (*leave)(void);
17797 void (*flush)(void);
17798-};
17799+} __no_randomize_layout;
17800
17801 struct pv_time_ops {
17802 unsigned long long (*sched_clock)(void);
17803 unsigned long long (*steal_clock)(int cpu);
17804 unsigned long (*get_tsc_khz)(void);
17805-};
17806+} __no_const __no_randomize_layout;
17807
17808 struct pv_cpu_ops {
17809 /* hooks for various privileged instructions */
17810@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17811
17812 void (*start_context_switch)(struct task_struct *prev);
17813 void (*end_context_switch)(struct task_struct *next);
17814-};
17815+} __no_const __no_randomize_layout;
17816
17817 struct pv_irq_ops {
17818 /*
17819@@ -215,7 +215,7 @@ struct pv_irq_ops {
17820 #ifdef CONFIG_X86_64
17821 void (*adjust_exception_frame)(void);
17822 #endif
17823-};
17824+} __no_randomize_layout;
17825
17826 struct pv_apic_ops {
17827 #ifdef CONFIG_X86_LOCAL_APIC
17828@@ -223,7 +223,7 @@ struct pv_apic_ops {
17829 unsigned long start_eip,
17830 unsigned long start_esp);
17831 #endif
17832-};
17833+} __no_const __no_randomize_layout;
17834
17835 struct pv_mmu_ops {
17836 unsigned long (*read_cr2)(void);
17837@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17838 struct paravirt_callee_save make_pud;
17839
17840 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17841+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17842 #endif /* PAGETABLE_LEVELS == 4 */
17843 #endif /* PAGETABLE_LEVELS >= 3 */
17844
17845@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17846 an mfn. We can tell which is which from the index. */
17847 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17848 phys_addr_t phys, pgprot_t flags);
17849-};
17850+
17851+#ifdef CONFIG_PAX_KERNEXEC
17852+ unsigned long (*pax_open_kernel)(void);
17853+ unsigned long (*pax_close_kernel)(void);
17854+#endif
17855+
17856+} __no_randomize_layout;
17857
17858 struct arch_spinlock;
17859 #ifdef CONFIG_SMP
17860@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17861 struct pv_lock_ops {
17862 struct paravirt_callee_save lock_spinning;
17863 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17864-};
17865+} __no_randomize_layout;
17866
17867 /* This contains all the paravirt structures: we get a convenient
17868 * number for each function using the offset which we use to indicate
17869- * what to patch. */
17870+ * what to patch.
17871+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17872+ */
17873+
17874 struct paravirt_patch_template {
17875 struct pv_init_ops pv_init_ops;
17876 struct pv_time_ops pv_time_ops;
17877@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17878 struct pv_apic_ops pv_apic_ops;
17879 struct pv_mmu_ops pv_mmu_ops;
17880 struct pv_lock_ops pv_lock_ops;
17881-};
17882+} __no_randomize_layout;
17883
17884 extern struct pv_info pv_info;
17885 extern struct pv_init_ops pv_init_ops;
17886diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17887index c4412e9..90e88c5 100644
17888--- a/arch/x86/include/asm/pgalloc.h
17889+++ b/arch/x86/include/asm/pgalloc.h
17890@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17891 pmd_t *pmd, pte_t *pte)
17892 {
17893 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17894+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17895+}
17896+
17897+static inline void pmd_populate_user(struct mm_struct *mm,
17898+ pmd_t *pmd, pte_t *pte)
17899+{
17900+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17901 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17902 }
17903
17904@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17905
17906 #ifdef CONFIG_X86_PAE
17907 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17908+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17909+{
17910+ pud_populate(mm, pudp, pmd);
17911+}
17912 #else /* !CONFIG_X86_PAE */
17913 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17914 {
17915 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17916 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17917 }
17918+
17919+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17920+{
17921+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17922+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17923+}
17924 #endif /* CONFIG_X86_PAE */
17925
17926 #if PAGETABLE_LEVELS > 3
17927@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17928 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17929 }
17930
17931+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17932+{
17933+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17934+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17935+}
17936+
17937 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17938 {
17939 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17940diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17941index 206a87f..1623b06 100644
17942--- a/arch/x86/include/asm/pgtable-2level.h
17943+++ b/arch/x86/include/asm/pgtable-2level.h
17944@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17945
17946 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17947 {
17948+ pax_open_kernel();
17949 *pmdp = pmd;
17950+ pax_close_kernel();
17951 }
17952
17953 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17954diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17955index 81bb91b..9392125 100644
17956--- a/arch/x86/include/asm/pgtable-3level.h
17957+++ b/arch/x86/include/asm/pgtable-3level.h
17958@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17959
17960 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17961 {
17962+ pax_open_kernel();
17963 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17964+ pax_close_kernel();
17965 }
17966
17967 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17968 {
17969+ pax_open_kernel();
17970 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17971+ pax_close_kernel();
17972 }
17973
17974 /*
17975diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17976index e8a5454..1539359 100644
17977--- a/arch/x86/include/asm/pgtable.h
17978+++ b/arch/x86/include/asm/pgtable.h
17979@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17980
17981 #ifndef __PAGETABLE_PUD_FOLDED
17982 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17983+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17984 #define pgd_clear(pgd) native_pgd_clear(pgd)
17985 #endif
17986
17987@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17988
17989 #define arch_end_context_switch(prev) do {} while(0)
17990
17991+#define pax_open_kernel() native_pax_open_kernel()
17992+#define pax_close_kernel() native_pax_close_kernel()
17993 #endif /* CONFIG_PARAVIRT */
17994
17995+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17996+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17997+
17998+#ifdef CONFIG_PAX_KERNEXEC
17999+static inline unsigned long native_pax_open_kernel(void)
18000+{
18001+ unsigned long cr0;
18002+
18003+ preempt_disable();
18004+ barrier();
18005+ cr0 = read_cr0() ^ X86_CR0_WP;
18006+ BUG_ON(cr0 & X86_CR0_WP);
18007+ write_cr0(cr0);
18008+ barrier();
18009+ return cr0 ^ X86_CR0_WP;
18010+}
18011+
18012+static inline unsigned long native_pax_close_kernel(void)
18013+{
18014+ unsigned long cr0;
18015+
18016+ barrier();
18017+ cr0 = read_cr0() ^ X86_CR0_WP;
18018+ BUG_ON(!(cr0 & X86_CR0_WP));
18019+ write_cr0(cr0);
18020+ barrier();
18021+ preempt_enable_no_resched();
18022+ return cr0 ^ X86_CR0_WP;
18023+}
18024+#else
18025+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18026+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18027+#endif
18028+
18029 /*
18030 * The following only work if pte_present() is true.
18031 * Undefined behaviour if not..
18032 */
18033+static inline int pte_user(pte_t pte)
18034+{
18035+ return pte_val(pte) & _PAGE_USER;
18036+}
18037+
18038 static inline int pte_dirty(pte_t pte)
18039 {
18040 return pte_flags(pte) & _PAGE_DIRTY;
18041@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18042 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18043 }
18044
18045+static inline unsigned long pgd_pfn(pgd_t pgd)
18046+{
18047+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18048+}
18049+
18050 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18051
18052 static inline int pmd_large(pmd_t pte)
18053@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18054 return pte_clear_flags(pte, _PAGE_RW);
18055 }
18056
18057+static inline pte_t pte_mkread(pte_t pte)
18058+{
18059+ return __pte(pte_val(pte) | _PAGE_USER);
18060+}
18061+
18062 static inline pte_t pte_mkexec(pte_t pte)
18063 {
18064- return pte_clear_flags(pte, _PAGE_NX);
18065+#ifdef CONFIG_X86_PAE
18066+ if (__supported_pte_mask & _PAGE_NX)
18067+ return pte_clear_flags(pte, _PAGE_NX);
18068+ else
18069+#endif
18070+ return pte_set_flags(pte, _PAGE_USER);
18071+}
18072+
18073+static inline pte_t pte_exprotect(pte_t pte)
18074+{
18075+#ifdef CONFIG_X86_PAE
18076+ if (__supported_pte_mask & _PAGE_NX)
18077+ return pte_set_flags(pte, _PAGE_NX);
18078+ else
18079+#endif
18080+ return pte_clear_flags(pte, _PAGE_USER);
18081 }
18082
18083 static inline pte_t pte_mkdirty(pte_t pte)
18084@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18085 #endif
18086
18087 #ifndef __ASSEMBLY__
18088+
18089+#ifdef CONFIG_PAX_PER_CPU_PGD
18090+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18091+enum cpu_pgd_type {kernel = 0, user = 1};
18092+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18093+{
18094+ return cpu_pgd[cpu][type];
18095+}
18096+#endif
18097+
18098 #include <linux/mm_types.h>
18099 #include <linux/mmdebug.h>
18100 #include <linux/log2.h>
18101@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18102 * Currently stuck as a macro due to indirect forward reference to
18103 * linux/mmzone.h's __section_mem_map_addr() definition:
18104 */
18105-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18106+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18107
18108 /* Find an entry in the second-level page table.. */
18109 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18110@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18111 * Currently stuck as a macro due to indirect forward reference to
18112 * linux/mmzone.h's __section_mem_map_addr() definition:
18113 */
18114-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18115+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18116
18117 /* to find an entry in a page-table-directory. */
18118 static inline unsigned long pud_index(unsigned long address)
18119@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18120
18121 static inline int pgd_bad(pgd_t pgd)
18122 {
18123- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18124+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18125 }
18126
18127 static inline int pgd_none(pgd_t pgd)
18128@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18129 * pgd_offset() returns a (pgd_t *)
18130 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18131 */
18132-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18133+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18134+
18135+#ifdef CONFIG_PAX_PER_CPU_PGD
18136+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18137+#endif
18138+
18139 /*
18140 * a shortcut which implies the use of the kernel's pgd, instead
18141 * of a process's
18142@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18143 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18144 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18145
18146+#ifdef CONFIG_X86_32
18147+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18148+#else
18149+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18150+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18151+
18152+#ifdef CONFIG_PAX_MEMORY_UDEREF
18153+#ifdef __ASSEMBLY__
18154+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18155+#else
18156+extern unsigned long pax_user_shadow_base;
18157+extern pgdval_t clone_pgd_mask;
18158+#endif
18159+#endif
18160+
18161+#endif
18162+
18163 #ifndef __ASSEMBLY__
18164
18165 extern int direct_gbpages;
18166@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18167 * dst and src can be on the same page, but the range must not overlap,
18168 * and must not cross a page boundary.
18169 */
18170-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18171+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18172 {
18173- memcpy(dst, src, count * sizeof(pgd_t));
18174+ pax_open_kernel();
18175+ while (count--)
18176+ *dst++ = *src++;
18177+ pax_close_kernel();
18178 }
18179
18180+#ifdef CONFIG_PAX_PER_CPU_PGD
18181+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18182+#endif
18183+
18184+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18185+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18186+#else
18187+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18188+#endif
18189+
18190 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18191 static inline int page_level_shift(enum pg_level level)
18192 {
18193diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18194index b6c0b40..3535d47 100644
18195--- a/arch/x86/include/asm/pgtable_32.h
18196+++ b/arch/x86/include/asm/pgtable_32.h
18197@@ -25,9 +25,6 @@
18198 struct mm_struct;
18199 struct vm_area_struct;
18200
18201-extern pgd_t swapper_pg_dir[1024];
18202-extern pgd_t initial_page_table[1024];
18203-
18204 static inline void pgtable_cache_init(void) { }
18205 static inline void check_pgt_cache(void) { }
18206 void paging_init(void);
18207@@ -45,6 +42,12 @@ void paging_init(void);
18208 # include <asm/pgtable-2level.h>
18209 #endif
18210
18211+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18212+extern pgd_t initial_page_table[PTRS_PER_PGD];
18213+#ifdef CONFIG_X86_PAE
18214+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18215+#endif
18216+
18217 #if defined(CONFIG_HIGHPTE)
18218 #define pte_offset_map(dir, address) \
18219 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18220@@ -59,12 +62,17 @@ void paging_init(void);
18221 /* Clear a kernel PTE and flush it from the TLB */
18222 #define kpte_clear_flush(ptep, vaddr) \
18223 do { \
18224+ pax_open_kernel(); \
18225 pte_clear(&init_mm, (vaddr), (ptep)); \
18226+ pax_close_kernel(); \
18227 __flush_tlb_one((vaddr)); \
18228 } while (0)
18229
18230 #endif /* !__ASSEMBLY__ */
18231
18232+#define HAVE_ARCH_UNMAPPED_AREA
18233+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18234+
18235 /*
18236 * kern_addr_valid() is (1) for FLATMEM and (0) for
18237 * SPARSEMEM and DISCONTIGMEM
18238diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18239index 9fb2f2b..b04b4bf 100644
18240--- a/arch/x86/include/asm/pgtable_32_types.h
18241+++ b/arch/x86/include/asm/pgtable_32_types.h
18242@@ -8,7 +8,7 @@
18243 */
18244 #ifdef CONFIG_X86_PAE
18245 # include <asm/pgtable-3level_types.h>
18246-# define PMD_SIZE (1UL << PMD_SHIFT)
18247+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18248 # define PMD_MASK (~(PMD_SIZE - 1))
18249 #else
18250 # include <asm/pgtable-2level_types.h>
18251@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18252 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18253 #endif
18254
18255+#ifdef CONFIG_PAX_KERNEXEC
18256+#ifndef __ASSEMBLY__
18257+extern unsigned char MODULES_EXEC_VADDR[];
18258+extern unsigned char MODULES_EXEC_END[];
18259+#endif
18260+#include <asm/boot.h>
18261+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18262+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18263+#else
18264+#define ktla_ktva(addr) (addr)
18265+#define ktva_ktla(addr) (addr)
18266+#endif
18267+
18268 #define MODULES_VADDR VMALLOC_START
18269 #define MODULES_END VMALLOC_END
18270 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18271diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18272index 4572b2f..4430113 100644
18273--- a/arch/x86/include/asm/pgtable_64.h
18274+++ b/arch/x86/include/asm/pgtable_64.h
18275@@ -16,11 +16,16 @@
18276
18277 extern pud_t level3_kernel_pgt[512];
18278 extern pud_t level3_ident_pgt[512];
18279+extern pud_t level3_vmalloc_start_pgt[512];
18280+extern pud_t level3_vmalloc_end_pgt[512];
18281+extern pud_t level3_vmemmap_pgt[512];
18282+extern pud_t level2_vmemmap_pgt[512];
18283 extern pmd_t level2_kernel_pgt[512];
18284 extern pmd_t level2_fixmap_pgt[512];
18285-extern pmd_t level2_ident_pgt[512];
18286+extern pmd_t level2_ident_pgt[512*2];
18287 extern pte_t level1_fixmap_pgt[512];
18288-extern pgd_t init_level4_pgt[];
18289+extern pte_t level1_vsyscall_pgt[512];
18290+extern pgd_t init_level4_pgt[512];
18291
18292 #define swapper_pg_dir init_level4_pgt
18293
18294@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18295
18296 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18297 {
18298+ pax_open_kernel();
18299 *pmdp = pmd;
18300+ pax_close_kernel();
18301 }
18302
18303 static inline void native_pmd_clear(pmd_t *pmd)
18304@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18305
18306 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18307 {
18308+ pax_open_kernel();
18309 *pudp = pud;
18310+ pax_close_kernel();
18311 }
18312
18313 static inline void native_pud_clear(pud_t *pud)
18314@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18315
18316 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18317 {
18318+ pax_open_kernel();
18319+ *pgdp = pgd;
18320+ pax_close_kernel();
18321+}
18322+
18323+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18324+{
18325 *pgdp = pgd;
18326 }
18327
18328diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18329index 602b602..acb53ed 100644
18330--- a/arch/x86/include/asm/pgtable_64_types.h
18331+++ b/arch/x86/include/asm/pgtable_64_types.h
18332@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18333 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18334 #define MODULES_END _AC(0xffffffffff000000, UL)
18335 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18336+#define MODULES_EXEC_VADDR MODULES_VADDR
18337+#define MODULES_EXEC_END MODULES_END
18338 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18339 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18340 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18341 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18342
18343+#define ktla_ktva(addr) (addr)
18344+#define ktva_ktla(addr) (addr)
18345+
18346 #define EARLY_DYNAMIC_PAGE_TABLES 64
18347
18348 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18349diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18350index 25bcd4a..bf3f815 100644
18351--- a/arch/x86/include/asm/pgtable_types.h
18352+++ b/arch/x86/include/asm/pgtable_types.h
18353@@ -110,8 +110,10 @@
18354
18355 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18356 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18357-#else
18358+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18359 #define _PAGE_NX (_AT(pteval_t, 0))
18360+#else
18361+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18362 #endif
18363
18364 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18365@@ -167,6 +169,9 @@ enum page_cache_mode {
18366 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18367 _PAGE_ACCESSED)
18368
18369+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18370+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18371+
18372 #define __PAGE_KERNEL_EXEC \
18373 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18374 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18375@@ -174,7 +179,7 @@ enum page_cache_mode {
18376 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18377 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18378 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18379-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18380+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18381 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18382 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18383 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18384@@ -220,7 +225,7 @@ enum page_cache_mode {
18385 #ifdef CONFIG_X86_64
18386 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18387 #else
18388-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18389+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18390 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18391 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18392 #endif
18393@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18394 {
18395 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18396 }
18397+#endif
18398
18399+#if PAGETABLE_LEVELS == 3
18400+#include <asm-generic/pgtable-nopud.h>
18401+#endif
18402+
18403+#if PAGETABLE_LEVELS == 2
18404+#include <asm-generic/pgtable-nopmd.h>
18405+#endif
18406+
18407+#ifndef __ASSEMBLY__
18408 #if PAGETABLE_LEVELS > 3
18409 typedef struct { pudval_t pud; } pud_t;
18410
18411@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18412 return pud.pud;
18413 }
18414 #else
18415-#include <asm-generic/pgtable-nopud.h>
18416-
18417 static inline pudval_t native_pud_val(pud_t pud)
18418 {
18419 return native_pgd_val(pud.pgd);
18420@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18421 return pmd.pmd;
18422 }
18423 #else
18424-#include <asm-generic/pgtable-nopmd.h>
18425-
18426 static inline pmdval_t native_pmd_val(pmd_t pmd)
18427 {
18428 return native_pgd_val(pmd.pud.pgd);
18429@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18430
18431 extern pteval_t __supported_pte_mask;
18432 extern void set_nx(void);
18433-extern int nx_enabled;
18434
18435 #define pgprot_writecombine pgprot_writecombine
18436 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18437diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18438index 8f327184..368fb29 100644
18439--- a/arch/x86/include/asm/preempt.h
18440+++ b/arch/x86/include/asm/preempt.h
18441@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18442 */
18443 static __always_inline bool __preempt_count_dec_and_test(void)
18444 {
18445- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18446+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18447 }
18448
18449 /*
18450diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18451index a092a0c..8e9640b 100644
18452--- a/arch/x86/include/asm/processor.h
18453+++ b/arch/x86/include/asm/processor.h
18454@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18455 /* Index into per_cpu list: */
18456 u16 cpu_index;
18457 u32 microcode;
18458-};
18459+} __randomize_layout;
18460
18461 #define X86_VENDOR_INTEL 0
18462 #define X86_VENDOR_CYRIX 1
18463@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18464 : "memory");
18465 }
18466
18467+/* invpcid (%rdx),%rax */
18468+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18469+
18470+#define INVPCID_SINGLE_ADDRESS 0UL
18471+#define INVPCID_SINGLE_CONTEXT 1UL
18472+#define INVPCID_ALL_GLOBAL 2UL
18473+#define INVPCID_ALL_NONGLOBAL 3UL
18474+
18475+#define PCID_KERNEL 0UL
18476+#define PCID_USER 1UL
18477+#define PCID_NOFLUSH (1UL << 63)
18478+
18479 static inline void load_cr3(pgd_t *pgdir)
18480 {
18481- write_cr3(__pa(pgdir));
18482+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18483 }
18484
18485 #ifdef CONFIG_X86_32
18486@@ -282,7 +294,7 @@ struct tss_struct {
18487
18488 } ____cacheline_aligned;
18489
18490-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18491+extern struct tss_struct init_tss[NR_CPUS];
18492
18493 /*
18494 * Save the original ist values for checking stack pointers during debugging
18495@@ -479,6 +491,7 @@ struct thread_struct {
18496 unsigned short ds;
18497 unsigned short fsindex;
18498 unsigned short gsindex;
18499+ unsigned short ss;
18500 #endif
18501 #ifdef CONFIG_X86_32
18502 unsigned long ip;
18503@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18504 extern unsigned long mmu_cr4_features;
18505 extern u32 *trampoline_cr4_features;
18506
18507-static inline void set_in_cr4(unsigned long mask)
18508-{
18509- unsigned long cr4;
18510-
18511- mmu_cr4_features |= mask;
18512- if (trampoline_cr4_features)
18513- *trampoline_cr4_features = mmu_cr4_features;
18514- cr4 = read_cr4();
18515- cr4 |= mask;
18516- write_cr4(cr4);
18517-}
18518-
18519-static inline void clear_in_cr4(unsigned long mask)
18520-{
18521- unsigned long cr4;
18522-
18523- mmu_cr4_features &= ~mask;
18524- if (trampoline_cr4_features)
18525- *trampoline_cr4_features = mmu_cr4_features;
18526- cr4 = read_cr4();
18527- cr4 &= ~mask;
18528- write_cr4(cr4);
18529-}
18530+extern void set_in_cr4(unsigned long mask);
18531+extern void clear_in_cr4(unsigned long mask);
18532
18533 typedef struct {
18534 unsigned long seg;
18535@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18536 */
18537 #define TASK_SIZE PAGE_OFFSET
18538 #define TASK_SIZE_MAX TASK_SIZE
18539+
18540+#ifdef CONFIG_PAX_SEGMEXEC
18541+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18542+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18543+#else
18544 #define STACK_TOP TASK_SIZE
18545-#define STACK_TOP_MAX STACK_TOP
18546+#endif
18547+
18548+#define STACK_TOP_MAX TASK_SIZE
18549
18550 #define INIT_THREAD { \
18551- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18552+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18553 .vm86_info = NULL, \
18554 .sysenter_cs = __KERNEL_CS, \
18555 .io_bitmap_ptr = NULL, \
18556@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18557 */
18558 #define INIT_TSS { \
18559 .x86_tss = { \
18560- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18561+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18562 .ss0 = __KERNEL_DS, \
18563 .ss1 = __KERNEL_CS, \
18564 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18565@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18566 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18567
18568 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18569-#define KSTK_TOP(info) \
18570-({ \
18571- unsigned long *__ptr = (unsigned long *)(info); \
18572- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18573-})
18574+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18575
18576 /*
18577 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18578@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18579 #define task_pt_regs(task) \
18580 ({ \
18581 struct pt_regs *__regs__; \
18582- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18583+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18584 __regs__ - 1; \
18585 })
18586
18587@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18588 * particular problem by preventing anything from being mapped
18589 * at the maximum canonical address.
18590 */
18591-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18592+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18593
18594 /* This decides where the kernel will search for a free chunk of vm
18595 * space during mmap's.
18596 */
18597 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18598- 0xc0000000 : 0xFFFFe000)
18599+ 0xc0000000 : 0xFFFFf000)
18600
18601 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18602 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18603@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18604 #define STACK_TOP_MAX TASK_SIZE_MAX
18605
18606 #define INIT_THREAD { \
18607- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18608+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18609 }
18610
18611 #define INIT_TSS { \
18612- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18613+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18614 }
18615
18616 /*
18617@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18618 */
18619 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18620
18621+#ifdef CONFIG_PAX_SEGMEXEC
18622+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18623+#endif
18624+
18625 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18626
18627 /* Get/set a process' ability to use the timestamp counter instruction */
18628@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18629 return 0;
18630 }
18631
18632-extern unsigned long arch_align_stack(unsigned long sp);
18633+#define arch_align_stack(x) ((x) & ~0xfUL)
18634 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18635
18636 void default_idle(void);
18637@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18638 #define xen_set_default_idle 0
18639 #endif
18640
18641-void stop_this_cpu(void *dummy);
18642+void stop_this_cpu(void *dummy) __noreturn;
18643 void df_debug(struct pt_regs *regs, long error_code);
18644 #endif /* _ASM_X86_PROCESSOR_H */
18645diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18646index 86fc2bb..bd5049a 100644
18647--- a/arch/x86/include/asm/ptrace.h
18648+++ b/arch/x86/include/asm/ptrace.h
18649@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18650 }
18651
18652 /*
18653- * user_mode_vm(regs) determines whether a register set came from user mode.
18654+ * user_mode(regs) determines whether a register set came from user mode.
18655 * This is true if V8086 mode was enabled OR if the register set was from
18656 * protected mode with RPL-3 CS value. This tricky test checks that with
18657 * one comparison. Many places in the kernel can bypass this full check
18658- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18659+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18660+ * be used.
18661 */
18662-static inline int user_mode(struct pt_regs *regs)
18663+static inline int user_mode_novm(struct pt_regs *regs)
18664 {
18665 #ifdef CONFIG_X86_32
18666 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18667 #else
18668- return !!(regs->cs & 3);
18669+ return !!(regs->cs & SEGMENT_RPL_MASK);
18670 #endif
18671 }
18672
18673-static inline int user_mode_vm(struct pt_regs *regs)
18674+static inline int user_mode(struct pt_regs *regs)
18675 {
18676 #ifdef CONFIG_X86_32
18677 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18678 USER_RPL;
18679 #else
18680- return user_mode(regs);
18681+ return user_mode_novm(regs);
18682 #endif
18683 }
18684
18685@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18686 #ifdef CONFIG_X86_64
18687 static inline bool user_64bit_mode(struct pt_regs *regs)
18688 {
18689+ unsigned long cs = regs->cs & 0xffff;
18690 #ifndef CONFIG_PARAVIRT
18691 /*
18692 * On non-paravirt systems, this is the only long mode CPL 3
18693 * selector. We do not allow long mode selectors in the LDT.
18694 */
18695- return regs->cs == __USER_CS;
18696+ return cs == __USER_CS;
18697 #else
18698 /* Headers are too twisted for this to go in paravirt.h. */
18699- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18700+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18701 #endif
18702 }
18703
18704@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18705 * Traps from the kernel do not save sp and ss.
18706 * Use the helper function to retrieve sp.
18707 */
18708- if (offset == offsetof(struct pt_regs, sp) &&
18709- regs->cs == __KERNEL_CS)
18710- return kernel_stack_pointer(regs);
18711+ if (offset == offsetof(struct pt_regs, sp)) {
18712+ unsigned long cs = regs->cs & 0xffff;
18713+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18714+ return kernel_stack_pointer(regs);
18715+ }
18716 #endif
18717 return *(unsigned long *)((unsigned long)regs + offset);
18718 }
18719diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18720index ae0e241..e80b10b 100644
18721--- a/arch/x86/include/asm/qrwlock.h
18722+++ b/arch/x86/include/asm/qrwlock.h
18723@@ -7,8 +7,8 @@
18724 #define queue_write_unlock queue_write_unlock
18725 static inline void queue_write_unlock(struct qrwlock *lock)
18726 {
18727- barrier();
18728- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18729+ barrier();
18730+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18731 }
18732 #endif
18733
18734diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18735index 9c6b890..5305f53 100644
18736--- a/arch/x86/include/asm/realmode.h
18737+++ b/arch/x86/include/asm/realmode.h
18738@@ -22,16 +22,14 @@ struct real_mode_header {
18739 #endif
18740 /* APM/BIOS reboot */
18741 u32 machine_real_restart_asm;
18742-#ifdef CONFIG_X86_64
18743 u32 machine_real_restart_seg;
18744-#endif
18745 };
18746
18747 /* This must match data at trampoline_32/64.S */
18748 struct trampoline_header {
18749 #ifdef CONFIG_X86_32
18750 u32 start;
18751- u16 gdt_pad;
18752+ u16 boot_cs;
18753 u16 gdt_limit;
18754 u32 gdt_base;
18755 #else
18756diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18757index a82c4f1..ac45053 100644
18758--- a/arch/x86/include/asm/reboot.h
18759+++ b/arch/x86/include/asm/reboot.h
18760@@ -6,13 +6,13 @@
18761 struct pt_regs;
18762
18763 struct machine_ops {
18764- void (*restart)(char *cmd);
18765- void (*halt)(void);
18766- void (*power_off)(void);
18767+ void (* __noreturn restart)(char *cmd);
18768+ void (* __noreturn halt)(void);
18769+ void (* __noreturn power_off)(void);
18770 void (*shutdown)(void);
18771 void (*crash_shutdown)(struct pt_regs *);
18772- void (*emergency_restart)(void);
18773-};
18774+ void (* __noreturn emergency_restart)(void);
18775+} __no_const;
18776
18777 extern struct machine_ops machine_ops;
18778
18779diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18780index 8f7866a..e442f20 100644
18781--- a/arch/x86/include/asm/rmwcc.h
18782+++ b/arch/x86/include/asm/rmwcc.h
18783@@ -3,7 +3,34 @@
18784
18785 #ifdef CC_HAVE_ASM_GOTO
18786
18787-#define __GEN_RMWcc(fullop, var, cc, ...) \
18788+#ifdef CONFIG_PAX_REFCOUNT
18789+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18790+do { \
18791+ asm_volatile_goto (fullop \
18792+ ";jno 0f\n" \
18793+ fullantiop \
18794+ ";int $4\n0:\n" \
18795+ _ASM_EXTABLE(0b, 0b) \
18796+ ";j" cc " %l[cc_label]" \
18797+ : : "m" (var), ## __VA_ARGS__ \
18798+ : "memory" : cc_label); \
18799+ return 0; \
18800+cc_label: \
18801+ return 1; \
18802+} while (0)
18803+#else
18804+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18805+do { \
18806+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18807+ : : "m" (var), ## __VA_ARGS__ \
18808+ : "memory" : cc_label); \
18809+ return 0; \
18810+cc_label: \
18811+ return 1; \
18812+} while (0)
18813+#endif
18814+
18815+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18816 do { \
18817 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18818 : : "m" (var), ## __VA_ARGS__ \
18819@@ -13,15 +40,46 @@ cc_label: \
18820 return 1; \
18821 } while (0)
18822
18823-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18824- __GEN_RMWcc(op " " arg0, var, cc)
18825+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18826+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18827
18828-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18829- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18830+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18831+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18832+
18833+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18834+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18835+
18836+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18837+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18838
18839 #else /* !CC_HAVE_ASM_GOTO */
18840
18841-#define __GEN_RMWcc(fullop, var, cc, ...) \
18842+#ifdef CONFIG_PAX_REFCOUNT
18843+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18844+do { \
18845+ char c; \
18846+ asm volatile (fullop \
18847+ ";jno 0f\n" \
18848+ fullantiop \
18849+ ";int $4\n0:\n" \
18850+ _ASM_EXTABLE(0b, 0b) \
18851+ "; set" cc " %1" \
18852+ : "+m" (var), "=qm" (c) \
18853+ : __VA_ARGS__ : "memory"); \
18854+ return c != 0; \
18855+} while (0)
18856+#else
18857+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18858+do { \
18859+ char c; \
18860+ asm volatile (fullop "; set" cc " %1" \
18861+ : "+m" (var), "=qm" (c) \
18862+ : __VA_ARGS__ : "memory"); \
18863+ return c != 0; \
18864+} while (0)
18865+#endif
18866+
18867+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18868 do { \
18869 char c; \
18870 asm volatile (fullop "; set" cc " %1" \
18871@@ -30,11 +88,17 @@ do { \
18872 return c != 0; \
18873 } while (0)
18874
18875-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18876- __GEN_RMWcc(op " " arg0, var, cc)
18877+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18878+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18879+
18880+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18881+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18882+
18883+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18884+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18885
18886-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18887- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18888+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18889+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18890
18891 #endif /* CC_HAVE_ASM_GOTO */
18892
18893diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18894index cad82c9..2e5c5c1 100644
18895--- a/arch/x86/include/asm/rwsem.h
18896+++ b/arch/x86/include/asm/rwsem.h
18897@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18898 {
18899 asm volatile("# beginning down_read\n\t"
18900 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18901+
18902+#ifdef CONFIG_PAX_REFCOUNT
18903+ "jno 0f\n"
18904+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18905+ "int $4\n0:\n"
18906+ _ASM_EXTABLE(0b, 0b)
18907+#endif
18908+
18909 /* adds 0x00000001 */
18910 " jns 1f\n"
18911 " call call_rwsem_down_read_failed\n"
18912@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18913 "1:\n\t"
18914 " mov %1,%2\n\t"
18915 " add %3,%2\n\t"
18916+
18917+#ifdef CONFIG_PAX_REFCOUNT
18918+ "jno 0f\n"
18919+ "sub %3,%2\n"
18920+ "int $4\n0:\n"
18921+ _ASM_EXTABLE(0b, 0b)
18922+#endif
18923+
18924 " jle 2f\n\t"
18925 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18926 " jnz 1b\n\t"
18927@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18928 long tmp;
18929 asm volatile("# beginning down_write\n\t"
18930 LOCK_PREFIX " xadd %1,(%2)\n\t"
18931+
18932+#ifdef CONFIG_PAX_REFCOUNT
18933+ "jno 0f\n"
18934+ "mov %1,(%2)\n"
18935+ "int $4\n0:\n"
18936+ _ASM_EXTABLE(0b, 0b)
18937+#endif
18938+
18939 /* adds 0xffff0001, returns the old value */
18940 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18941 /* was the active mask 0 before? */
18942@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18943 long tmp;
18944 asm volatile("# beginning __up_read\n\t"
18945 LOCK_PREFIX " xadd %1,(%2)\n\t"
18946+
18947+#ifdef CONFIG_PAX_REFCOUNT
18948+ "jno 0f\n"
18949+ "mov %1,(%2)\n"
18950+ "int $4\n0:\n"
18951+ _ASM_EXTABLE(0b, 0b)
18952+#endif
18953+
18954 /* subtracts 1, returns the old value */
18955 " jns 1f\n\t"
18956 " call call_rwsem_wake\n" /* expects old value in %edx */
18957@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18958 long tmp;
18959 asm volatile("# beginning __up_write\n\t"
18960 LOCK_PREFIX " xadd %1,(%2)\n\t"
18961+
18962+#ifdef CONFIG_PAX_REFCOUNT
18963+ "jno 0f\n"
18964+ "mov %1,(%2)\n"
18965+ "int $4\n0:\n"
18966+ _ASM_EXTABLE(0b, 0b)
18967+#endif
18968+
18969 /* subtracts 0xffff0001, returns the old value */
18970 " jns 1f\n\t"
18971 " call call_rwsem_wake\n" /* expects old value in %edx */
18972@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18973 {
18974 asm volatile("# beginning __downgrade_write\n\t"
18975 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18976+
18977+#ifdef CONFIG_PAX_REFCOUNT
18978+ "jno 0f\n"
18979+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18980+ "int $4\n0:\n"
18981+ _ASM_EXTABLE(0b, 0b)
18982+#endif
18983+
18984 /*
18985 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18986 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18987@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18988 */
18989 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18990 {
18991- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18992+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18993+
18994+#ifdef CONFIG_PAX_REFCOUNT
18995+ "jno 0f\n"
18996+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18997+ "int $4\n0:\n"
18998+ _ASM_EXTABLE(0b, 0b)
18999+#endif
19000+
19001 : "+m" (sem->count)
19002 : "er" (delta));
19003 }
19004@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19005 */
19006 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19007 {
19008- return delta + xadd(&sem->count, delta);
19009+ return delta + xadd_check_overflow(&sem->count, delta);
19010 }
19011
19012 #endif /* __KERNEL__ */
19013diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19014index db257a5..b91bc77 100644
19015--- a/arch/x86/include/asm/segment.h
19016+++ b/arch/x86/include/asm/segment.h
19017@@ -73,10 +73,15 @@
19018 * 26 - ESPFIX small SS
19019 * 27 - per-cpu [ offset to per-cpu data area ]
19020 * 28 - stack_canary-20 [ for stack protector ]
19021- * 29 - unused
19022- * 30 - unused
19023+ * 29 - PCI BIOS CS
19024+ * 30 - PCI BIOS DS
19025 * 31 - TSS for double fault handler
19026 */
19027+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19028+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19029+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19030+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19031+
19032 #define GDT_ENTRY_TLS_MIN 6
19033 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19034
19035@@ -88,6 +93,8 @@
19036
19037 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19038
19039+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19040+
19041 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19042
19043 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19044@@ -113,6 +120,12 @@
19045 #define __KERNEL_STACK_CANARY 0
19046 #endif
19047
19048+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19049+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19050+
19051+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19052+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19053+
19054 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19055
19056 /*
19057@@ -140,7 +153,7 @@
19058 */
19059
19060 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19061-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19062+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19063
19064
19065 #else
19066@@ -164,6 +177,8 @@
19067 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19068 #define __USER32_DS __USER_DS
19069
19070+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19071+
19072 #define GDT_ENTRY_TSS 8 /* needs two entries */
19073 #define GDT_ENTRY_LDT 10 /* needs two entries */
19074 #define GDT_ENTRY_TLS_MIN 12
19075@@ -172,6 +187,8 @@
19076 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19077 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19078
19079+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19080+
19081 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19082 #define FS_TLS 0
19083 #define GS_TLS 1
19084@@ -179,12 +196,14 @@
19085 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19086 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19087
19088-#define GDT_ENTRIES 16
19089+#define GDT_ENTRIES 17
19090
19091 #endif
19092
19093 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19094+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19095 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19096+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19097 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19098 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19099 #ifndef CONFIG_PARAVIRT
19100@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19101 {
19102 unsigned long __limit;
19103 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19104- return __limit + 1;
19105+ return __limit;
19106 }
19107
19108 #endif /* !__ASSEMBLY__ */
19109diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19110index 8d3120f..352b440 100644
19111--- a/arch/x86/include/asm/smap.h
19112+++ b/arch/x86/include/asm/smap.h
19113@@ -25,11 +25,40 @@
19114
19115 #include <asm/alternative-asm.h>
19116
19117+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19118+#define ASM_PAX_OPEN_USERLAND \
19119+ 661: jmp 663f; \
19120+ .pushsection .altinstr_replacement, "a" ; \
19121+ 662: pushq %rax; nop; \
19122+ .popsection ; \
19123+ .pushsection .altinstructions, "a" ; \
19124+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19125+ .popsection ; \
19126+ call __pax_open_userland; \
19127+ popq %rax; \
19128+ 663:
19129+
19130+#define ASM_PAX_CLOSE_USERLAND \
19131+ 661: jmp 663f; \
19132+ .pushsection .altinstr_replacement, "a" ; \
19133+ 662: pushq %rax; nop; \
19134+ .popsection; \
19135+ .pushsection .altinstructions, "a" ; \
19136+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19137+ .popsection; \
19138+ call __pax_close_userland; \
19139+ popq %rax; \
19140+ 663:
19141+#else
19142+#define ASM_PAX_OPEN_USERLAND
19143+#define ASM_PAX_CLOSE_USERLAND
19144+#endif
19145+
19146 #ifdef CONFIG_X86_SMAP
19147
19148 #define ASM_CLAC \
19149 661: ASM_NOP3 ; \
19150- .pushsection .altinstr_replacement, "ax" ; \
19151+ .pushsection .altinstr_replacement, "a" ; \
19152 662: __ASM_CLAC ; \
19153 .popsection ; \
19154 .pushsection .altinstructions, "a" ; \
19155@@ -38,7 +67,7 @@
19156
19157 #define ASM_STAC \
19158 661: ASM_NOP3 ; \
19159- .pushsection .altinstr_replacement, "ax" ; \
19160+ .pushsection .altinstr_replacement, "a" ; \
19161 662: __ASM_STAC ; \
19162 .popsection ; \
19163 .pushsection .altinstructions, "a" ; \
19164@@ -56,6 +85,37 @@
19165
19166 #include <asm/alternative.h>
19167
19168+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19169+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19170+
19171+extern void __pax_open_userland(void);
19172+static __always_inline unsigned long pax_open_userland(void)
19173+{
19174+
19175+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19176+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19177+ :
19178+ : [open] "i" (__pax_open_userland)
19179+ : "memory", "rax");
19180+#endif
19181+
19182+ return 0;
19183+}
19184+
19185+extern void __pax_close_userland(void);
19186+static __always_inline unsigned long pax_close_userland(void)
19187+{
19188+
19189+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19190+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19191+ :
19192+ : [close] "i" (__pax_close_userland)
19193+ : "memory", "rax");
19194+#endif
19195+
19196+ return 0;
19197+}
19198+
19199 #ifdef CONFIG_X86_SMAP
19200
19201 static __always_inline void clac(void)
19202diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19203index 8cd1cc3..827e09e 100644
19204--- a/arch/x86/include/asm/smp.h
19205+++ b/arch/x86/include/asm/smp.h
19206@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19207 /* cpus sharing the last level cache: */
19208 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19209 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19210-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19211+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19212
19213 static inline struct cpumask *cpu_sibling_mask(int cpu)
19214 {
19215@@ -78,7 +78,7 @@ struct smp_ops {
19216
19217 void (*send_call_func_ipi)(const struct cpumask *mask);
19218 void (*send_call_func_single_ipi)(int cpu);
19219-};
19220+} __no_const;
19221
19222 /* Globals due to paravirt */
19223 extern void set_cpu_sibling_map(int cpu);
19224@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19225 extern int safe_smp_processor_id(void);
19226
19227 #elif defined(CONFIG_X86_64_SMP)
19228-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19229-
19230-#define stack_smp_processor_id() \
19231-({ \
19232- struct thread_info *ti; \
19233- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19234- ti->cpu; \
19235-})
19236+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19237+#define stack_smp_processor_id() raw_smp_processor_id()
19238 #define safe_smp_processor_id() smp_processor_id()
19239
19240 #endif
19241diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19242index 6a99859..03cb807 100644
19243--- a/arch/x86/include/asm/stackprotector.h
19244+++ b/arch/x86/include/asm/stackprotector.h
19245@@ -47,7 +47,7 @@
19246 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19247 */
19248 #define GDT_STACK_CANARY_INIT \
19249- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19250+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19251
19252 /*
19253 * Initialize the stackprotector canary value.
19254@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19255
19256 static inline void load_stack_canary_segment(void)
19257 {
19258-#ifdef CONFIG_X86_32
19259+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19260 asm volatile ("mov %0, %%gs" : : "r" (0));
19261 #endif
19262 }
19263diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19264index 70bbe39..4ae2bd4 100644
19265--- a/arch/x86/include/asm/stacktrace.h
19266+++ b/arch/x86/include/asm/stacktrace.h
19267@@ -11,28 +11,20 @@
19268
19269 extern int kstack_depth_to_print;
19270
19271-struct thread_info;
19272+struct task_struct;
19273 struct stacktrace_ops;
19274
19275-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19276- unsigned long *stack,
19277- unsigned long bp,
19278- const struct stacktrace_ops *ops,
19279- void *data,
19280- unsigned long *end,
19281- int *graph);
19282+typedef unsigned long walk_stack_t(struct task_struct *task,
19283+ void *stack_start,
19284+ unsigned long *stack,
19285+ unsigned long bp,
19286+ const struct stacktrace_ops *ops,
19287+ void *data,
19288+ unsigned long *end,
19289+ int *graph);
19290
19291-extern unsigned long
19292-print_context_stack(struct thread_info *tinfo,
19293- unsigned long *stack, unsigned long bp,
19294- const struct stacktrace_ops *ops, void *data,
19295- unsigned long *end, int *graph);
19296-
19297-extern unsigned long
19298-print_context_stack_bp(struct thread_info *tinfo,
19299- unsigned long *stack, unsigned long bp,
19300- const struct stacktrace_ops *ops, void *data,
19301- unsigned long *end, int *graph);
19302+extern walk_stack_t print_context_stack;
19303+extern walk_stack_t print_context_stack_bp;
19304
19305 /* Generic stack tracer with callbacks */
19306
19307@@ -40,7 +32,7 @@ struct stacktrace_ops {
19308 void (*address)(void *data, unsigned long address, int reliable);
19309 /* On negative return stop dumping */
19310 int (*stack)(void *data, char *name);
19311- walk_stack_t walk_stack;
19312+ walk_stack_t *walk_stack;
19313 };
19314
19315 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19316diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19317index 751bf4b..a1278b5 100644
19318--- a/arch/x86/include/asm/switch_to.h
19319+++ b/arch/x86/include/asm/switch_to.h
19320@@ -112,7 +112,7 @@ do { \
19321 "call __switch_to\n\t" \
19322 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19323 __switch_canary \
19324- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19325+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19326 "movq %%rax,%%rdi\n\t" \
19327 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19328 "jnz ret_from_fork\n\t" \
19329@@ -123,7 +123,7 @@ do { \
19330 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19331 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19332 [_tif_fork] "i" (_TIF_FORK), \
19333- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19334+ [thread_info] "m" (current_tinfo), \
19335 [current_task] "m" (current_task) \
19336 __switch_canary_iparam \
19337 : "memory", "cc" __EXTRA_CLOBBER)
19338diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19339index 547e344..6be1175 100644
19340--- a/arch/x86/include/asm/thread_info.h
19341+++ b/arch/x86/include/asm/thread_info.h
19342@@ -24,7 +24,6 @@ struct exec_domain;
19343 #include <linux/atomic.h>
19344
19345 struct thread_info {
19346- struct task_struct *task; /* main task structure */
19347 struct exec_domain *exec_domain; /* execution domain */
19348 __u32 flags; /* low level flags */
19349 __u32 status; /* thread synchronous flags */
19350@@ -33,13 +32,13 @@ struct thread_info {
19351 mm_segment_t addr_limit;
19352 struct restart_block restart_block;
19353 void __user *sysenter_return;
19354+ unsigned long lowest_stack;
19355 unsigned int sig_on_uaccess_error:1;
19356 unsigned int uaccess_err:1; /* uaccess failed */
19357 };
19358
19359-#define INIT_THREAD_INFO(tsk) \
19360+#define INIT_THREAD_INFO \
19361 { \
19362- .task = &tsk, \
19363 .exec_domain = &default_exec_domain, \
19364 .flags = 0, \
19365 .cpu = 0, \
19366@@ -50,7 +49,7 @@ struct thread_info {
19367 }, \
19368 }
19369
19370-#define init_thread_info (init_thread_union.thread_info)
19371+#define init_thread_info (init_thread_union.stack)
19372 #define init_stack (init_thread_union.stack)
19373
19374 #else /* !__ASSEMBLY__ */
19375@@ -91,6 +90,7 @@ struct thread_info {
19376 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19377 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19378 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19379+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19380
19381 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19382 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19383@@ -115,17 +115,18 @@ struct thread_info {
19384 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19385 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19386 #define _TIF_X32 (1 << TIF_X32)
19387+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19388
19389 /* work to do in syscall_trace_enter() */
19390 #define _TIF_WORK_SYSCALL_ENTRY \
19391 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19392 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19393- _TIF_NOHZ)
19394+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19395
19396 /* work to do in syscall_trace_leave() */
19397 #define _TIF_WORK_SYSCALL_EXIT \
19398 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19399- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19400+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19401
19402 /* work to do on interrupt/exception return */
19403 #define _TIF_WORK_MASK \
19404@@ -136,7 +137,7 @@ struct thread_info {
19405 /* work to do on any return to user space */
19406 #define _TIF_ALLWORK_MASK \
19407 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19408- _TIF_NOHZ)
19409+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19410
19411 /* Only used for 64 bit */
19412 #define _TIF_DO_NOTIFY_MASK \
19413@@ -151,7 +152,6 @@ struct thread_info {
19414 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19415
19416 #define STACK_WARN (THREAD_SIZE/8)
19417-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19418
19419 /*
19420 * macros/functions for gaining access to the thread information structure
19421@@ -162,26 +162,18 @@ struct thread_info {
19422
19423 DECLARE_PER_CPU(unsigned long, kernel_stack);
19424
19425+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19426+
19427 static inline struct thread_info *current_thread_info(void)
19428 {
19429- struct thread_info *ti;
19430- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19431- KERNEL_STACK_OFFSET - THREAD_SIZE);
19432- return ti;
19433+ return this_cpu_read_stable(current_tinfo);
19434 }
19435
19436 #else /* !__ASSEMBLY__ */
19437
19438 /* how to get the thread information struct from ASM */
19439 #define GET_THREAD_INFO(reg) \
19440- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19441- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19442-
19443-/*
19444- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19445- * a certain register (to be used in assembler memory operands).
19446- */
19447-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19448+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19449
19450 #endif
19451
19452@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19453 extern void arch_task_cache_init(void);
19454 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19455 extern void arch_release_task_struct(struct task_struct *tsk);
19456+
19457+#define __HAVE_THREAD_FUNCTIONS
19458+#define task_thread_info(task) (&(task)->tinfo)
19459+#define task_stack_page(task) ((task)->stack)
19460+#define setup_thread_stack(p, org) do {} while (0)
19461+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19462+
19463 #endif
19464 #endif /* _ASM_X86_THREAD_INFO_H */
19465diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19466index 04905bf..1178cdf 100644
19467--- a/arch/x86/include/asm/tlbflush.h
19468+++ b/arch/x86/include/asm/tlbflush.h
19469@@ -17,18 +17,44 @@
19470
19471 static inline void __native_flush_tlb(void)
19472 {
19473+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19474+ u64 descriptor[2];
19475+
19476+ descriptor[0] = PCID_KERNEL;
19477+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19478+ return;
19479+ }
19480+
19481+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19482+ if (static_cpu_has(X86_FEATURE_PCID)) {
19483+ unsigned int cpu = raw_get_cpu();
19484+
19485+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19486+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19487+ raw_put_cpu_no_resched();
19488+ return;
19489+ }
19490+#endif
19491+
19492 native_write_cr3(native_read_cr3());
19493 }
19494
19495 static inline void __native_flush_tlb_global_irq_disabled(void)
19496 {
19497- unsigned long cr4;
19498+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19499+ u64 descriptor[2];
19500
19501- cr4 = native_read_cr4();
19502- /* clear PGE */
19503- native_write_cr4(cr4 & ~X86_CR4_PGE);
19504- /* write old PGE again and flush TLBs */
19505- native_write_cr4(cr4);
19506+ descriptor[0] = PCID_KERNEL;
19507+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19508+ } else {
19509+ unsigned long cr4;
19510+
19511+ cr4 = native_read_cr4();
19512+ /* clear PGE */
19513+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19514+ /* write old PGE again and flush TLBs */
19515+ native_write_cr4(cr4);
19516+ }
19517 }
19518
19519 static inline void __native_flush_tlb_global(void)
19520@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19521
19522 static inline void __native_flush_tlb_single(unsigned long addr)
19523 {
19524+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19525+ u64 descriptor[2];
19526+
19527+ descriptor[0] = PCID_KERNEL;
19528+ descriptor[1] = addr;
19529+
19530+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19531+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19532+ if (addr < TASK_SIZE_MAX)
19533+ descriptor[1] += pax_user_shadow_base;
19534+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19535+ }
19536+
19537+ descriptor[0] = PCID_USER;
19538+ descriptor[1] = addr;
19539+#endif
19540+
19541+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19542+ return;
19543+ }
19544+
19545+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19546+ if (static_cpu_has(X86_FEATURE_PCID)) {
19547+ unsigned int cpu = raw_get_cpu();
19548+
19549+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19550+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19551+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19552+ raw_put_cpu_no_resched();
19553+
19554+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19555+ addr += pax_user_shadow_base;
19556+ }
19557+#endif
19558+
19559 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19560 }
19561
19562diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19563index 0d592e0..7430aad 100644
19564--- a/arch/x86/include/asm/uaccess.h
19565+++ b/arch/x86/include/asm/uaccess.h
19566@@ -7,6 +7,7 @@
19567 #include <linux/compiler.h>
19568 #include <linux/thread_info.h>
19569 #include <linux/string.h>
19570+#include <linux/spinlock.h>
19571 #include <asm/asm.h>
19572 #include <asm/page.h>
19573 #include <asm/smap.h>
19574@@ -29,7 +30,12 @@
19575
19576 #define get_ds() (KERNEL_DS)
19577 #define get_fs() (current_thread_info()->addr_limit)
19578+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19579+void __set_fs(mm_segment_t x);
19580+void set_fs(mm_segment_t x);
19581+#else
19582 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19583+#endif
19584
19585 #define segment_eq(a, b) ((a).seg == (b).seg)
19586
19587@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19588 * checks that the pointer is in the user space range - after calling
19589 * this function, memory access functions may still return -EFAULT.
19590 */
19591-#define access_ok(type, addr, size) \
19592- likely(!__range_not_ok(addr, size, user_addr_max()))
19593+extern int _cond_resched(void);
19594+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19595+#define access_ok(type, addr, size) \
19596+({ \
19597+ unsigned long __size = size; \
19598+ unsigned long __addr = (unsigned long)addr; \
19599+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19600+ if (__ret_ao && __size) { \
19601+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19602+ unsigned long __end_ao = __addr + __size - 1; \
19603+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19604+ while (__addr_ao <= __end_ao) { \
19605+ char __c_ao; \
19606+ __addr_ao += PAGE_SIZE; \
19607+ if (__size > PAGE_SIZE) \
19608+ _cond_resched(); \
19609+ if (__get_user(__c_ao, (char __user *)__addr)) \
19610+ break; \
19611+ if (type != VERIFY_WRITE) { \
19612+ __addr = __addr_ao; \
19613+ continue; \
19614+ } \
19615+ if (__put_user(__c_ao, (char __user *)__addr)) \
19616+ break; \
19617+ __addr = __addr_ao; \
19618+ } \
19619+ } \
19620+ } \
19621+ __ret_ao; \
19622+})
19623
19624 /*
19625 * The exception table consists of pairs of addresses relative to the
19626@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19627 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19628 __chk_user_ptr(ptr); \
19629 might_fault(); \
19630+ pax_open_userland(); \
19631 asm volatile("call __get_user_%P3" \
19632 : "=a" (__ret_gu), "=r" (__val_gu) \
19633 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19634 (x) = (__typeof__(*(ptr))) __val_gu; \
19635+ pax_close_userland(); \
19636 __ret_gu; \
19637 })
19638
19639@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19640 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19641 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19642
19643-
19644+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19645+#define __copyuser_seg "gs;"
19646+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19647+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19648+#else
19649+#define __copyuser_seg
19650+#define __COPYUSER_SET_ES
19651+#define __COPYUSER_RESTORE_ES
19652+#endif
19653
19654 #ifdef CONFIG_X86_32
19655 #define __put_user_asm_u64(x, addr, err, errret) \
19656 asm volatile(ASM_STAC "\n" \
19657- "1: movl %%eax,0(%2)\n" \
19658- "2: movl %%edx,4(%2)\n" \
19659+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19660+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19661 "3: " ASM_CLAC "\n" \
19662 ".section .fixup,\"ax\"\n" \
19663 "4: movl %3,%0\n" \
19664@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19665
19666 #define __put_user_asm_ex_u64(x, addr) \
19667 asm volatile(ASM_STAC "\n" \
19668- "1: movl %%eax,0(%1)\n" \
19669- "2: movl %%edx,4(%1)\n" \
19670+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19671+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19672 "3: " ASM_CLAC "\n" \
19673 _ASM_EXTABLE_EX(1b, 2b) \
19674 _ASM_EXTABLE_EX(2b, 3b) \
19675@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19676 __typeof__(*(ptr)) __pu_val; \
19677 __chk_user_ptr(ptr); \
19678 might_fault(); \
19679- __pu_val = x; \
19680+ __pu_val = (x); \
19681+ pax_open_userland(); \
19682 switch (sizeof(*(ptr))) { \
19683 case 1: \
19684 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19685@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19686 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19687 break; \
19688 } \
19689+ pax_close_userland(); \
19690 __ret_pu; \
19691 })
19692
19693@@ -355,8 +401,10 @@ do { \
19694 } while (0)
19695
19696 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19697+do { \
19698+ pax_open_userland(); \
19699 asm volatile(ASM_STAC "\n" \
19700- "1: mov"itype" %2,%"rtype"1\n" \
19701+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19702 "2: " ASM_CLAC "\n" \
19703 ".section .fixup,\"ax\"\n" \
19704 "3: mov %3,%0\n" \
19705@@ -364,8 +412,10 @@ do { \
19706 " jmp 2b\n" \
19707 ".previous\n" \
19708 _ASM_EXTABLE(1b, 3b) \
19709- : "=r" (err), ltype(x) \
19710- : "m" (__m(addr)), "i" (errret), "0" (err))
19711+ : "=r" (err), ltype (x) \
19712+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19713+ pax_close_userland(); \
19714+} while (0)
19715
19716 #define __get_user_size_ex(x, ptr, size) \
19717 do { \
19718@@ -389,7 +439,7 @@ do { \
19719 } while (0)
19720
19721 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19722- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19723+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19724 "2:\n" \
19725 _ASM_EXTABLE_EX(1b, 2b) \
19726 : ltype(x) : "m" (__m(addr)))
19727@@ -406,13 +456,24 @@ do { \
19728 int __gu_err; \
19729 unsigned long __gu_val; \
19730 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19731- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19732+ (x) = (__typeof__(*(ptr)))__gu_val; \
19733 __gu_err; \
19734 })
19735
19736 /* FIXME: this hack is definitely wrong -AK */
19737 struct __large_struct { unsigned long buf[100]; };
19738-#define __m(x) (*(struct __large_struct __user *)(x))
19739+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19740+#define ____m(x) \
19741+({ \
19742+ unsigned long ____x = (unsigned long)(x); \
19743+ if (____x < pax_user_shadow_base) \
19744+ ____x += pax_user_shadow_base; \
19745+ (typeof(x))____x; \
19746+})
19747+#else
19748+#define ____m(x) (x)
19749+#endif
19750+#define __m(x) (*(struct __large_struct __user *)____m(x))
19751
19752 /*
19753 * Tell gcc we read from memory instead of writing: this is because
19754@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19755 * aliasing issues.
19756 */
19757 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19758+do { \
19759+ pax_open_userland(); \
19760 asm volatile(ASM_STAC "\n" \
19761- "1: mov"itype" %"rtype"1,%2\n" \
19762+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19763 "2: " ASM_CLAC "\n" \
19764 ".section .fixup,\"ax\"\n" \
19765 "3: mov %3,%0\n" \
19766@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19767 ".previous\n" \
19768 _ASM_EXTABLE(1b, 3b) \
19769 : "=r"(err) \
19770- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19771+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19772+ pax_close_userland(); \
19773+} while (0)
19774
19775 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19776- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19777+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19778 "2:\n" \
19779 _ASM_EXTABLE_EX(1b, 2b) \
19780 : : ltype(x), "m" (__m(addr)))
19781@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19782 */
19783 #define uaccess_try do { \
19784 current_thread_info()->uaccess_err = 0; \
19785+ pax_open_userland(); \
19786 stac(); \
19787 barrier();
19788
19789 #define uaccess_catch(err) \
19790 clac(); \
19791+ pax_close_userland(); \
19792 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19793 } while (0)
19794
19795@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
19796 * On error, the variable @x is set to zero.
19797 */
19798
19799+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19800+#define __get_user(x, ptr) get_user((x), (ptr))
19801+#else
19802 #define __get_user(x, ptr) \
19803 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19804+#endif
19805
19806 /**
19807 * __put_user: - Write a simple value into user space, with less checking.
19808@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
19809 * Returns zero on success, or -EFAULT on error.
19810 */
19811
19812+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19813+#define __put_user(x, ptr) put_user((x), (ptr))
19814+#else
19815 #define __put_user(x, ptr) \
19816 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19817+#endif
19818
19819 #define __get_user_unaligned __get_user
19820 #define __put_user_unaligned __put_user
19821@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
19822 #define get_user_ex(x, ptr) do { \
19823 unsigned long __gue_val; \
19824 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19825- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19826+ (x) = (__typeof__(*(ptr)))__gue_val; \
19827 } while (0)
19828
19829 #define put_user_try uaccess_try
19830@@ -531,7 +606,7 @@ extern __must_check long strlen_user(const char __user *str);
19831 extern __must_check long strnlen_user(const char __user *str, long n);
19832
19833 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19834-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19835+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19836
19837 extern void __cmpxchg_wrong_size(void)
19838 __compiletime_error("Bad argument size for cmpxchg");
19839@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
19840 __typeof__(ptr) __uval = (uval); \
19841 __typeof__(*(ptr)) __old = (old); \
19842 __typeof__(*(ptr)) __new = (new); \
19843+ pax_open_userland(); \
19844 switch (size) { \
19845 case 1: \
19846 { \
19847 asm volatile("\t" ASM_STAC "\n" \
19848- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19849+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19850 "2:\t" ASM_CLAC "\n" \
19851 "\t.section .fixup, \"ax\"\n" \
19852 "3:\tmov %3, %0\n" \
19853 "\tjmp 2b\n" \
19854 "\t.previous\n" \
19855 _ASM_EXTABLE(1b, 3b) \
19856- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19857+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19858 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19859 : "memory" \
19860 ); \
19861@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
19862 case 2: \
19863 { \
19864 asm volatile("\t" ASM_STAC "\n" \
19865- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19866+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19867 "2:\t" ASM_CLAC "\n" \
19868 "\t.section .fixup, \"ax\"\n" \
19869 "3:\tmov %3, %0\n" \
19870 "\tjmp 2b\n" \
19871 "\t.previous\n" \
19872 _ASM_EXTABLE(1b, 3b) \
19873- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19874+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19875 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19876 : "memory" \
19877 ); \
19878@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
19879 case 4: \
19880 { \
19881 asm volatile("\t" ASM_STAC "\n" \
19882- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19883+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19884 "2:\t" ASM_CLAC "\n" \
19885 "\t.section .fixup, \"ax\"\n" \
19886 "3:\tmov %3, %0\n" \
19887 "\tjmp 2b\n" \
19888 "\t.previous\n" \
19889 _ASM_EXTABLE(1b, 3b) \
19890- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19891+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19892 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19893 : "memory" \
19894 ); \
19895@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
19896 __cmpxchg_wrong_size(); \
19897 \
19898 asm volatile("\t" ASM_STAC "\n" \
19899- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19900+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19901 "2:\t" ASM_CLAC "\n" \
19902 "\t.section .fixup, \"ax\"\n" \
19903 "3:\tmov %3, %0\n" \
19904 "\tjmp 2b\n" \
19905 "\t.previous\n" \
19906 _ASM_EXTABLE(1b, 3b) \
19907- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19908+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19909 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19910 : "memory" \
19911 ); \
19912@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
19913 default: \
19914 __cmpxchg_wrong_size(); \
19915 } \
19916+ pax_close_userland(); \
19917 *__uval = __old; \
19918 __ret; \
19919 })
19920@@ -636,17 +713,6 @@ extern struct movsl_mask {
19921
19922 #define ARCH_HAS_NOCACHE_UACCESS 1
19923
19924-#ifdef CONFIG_X86_32
19925-# include <asm/uaccess_32.h>
19926-#else
19927-# include <asm/uaccess_64.h>
19928-#endif
19929-
19930-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19931- unsigned n);
19932-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19933- unsigned n);
19934-
19935 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19936 # define copy_user_diag __compiletime_error
19937 #else
19938@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19939 extern void copy_user_diag("copy_from_user() buffer size is too small")
19940 copy_from_user_overflow(void);
19941 extern void copy_user_diag("copy_to_user() buffer size is too small")
19942-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19943+copy_to_user_overflow(void);
19944
19945 #undef copy_user_diag
19946
19947@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19948
19949 extern void
19950 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19951-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19952+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19953 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19954
19955 #else
19956@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19957
19958 #endif
19959
19960+#ifdef CONFIG_X86_32
19961+# include <asm/uaccess_32.h>
19962+#else
19963+# include <asm/uaccess_64.h>
19964+#endif
19965+
19966 static inline unsigned long __must_check
19967 copy_from_user(void *to, const void __user *from, unsigned long n)
19968 {
19969- int sz = __compiletime_object_size(to);
19970+ size_t sz = __compiletime_object_size(to);
19971
19972 might_fault();
19973
19974@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19975 * case, and do only runtime checking for non-constant sizes.
19976 */
19977
19978- if (likely(sz < 0 || sz >= n))
19979- n = _copy_from_user(to, from, n);
19980- else if(__builtin_constant_p(n))
19981- copy_from_user_overflow();
19982- else
19983- __copy_from_user_overflow(sz, n);
19984+ if (likely(sz != (size_t)-1 && sz < n)) {
19985+ if(__builtin_constant_p(n))
19986+ copy_from_user_overflow();
19987+ else
19988+ __copy_from_user_overflow(sz, n);
19989+ } else if (access_ok(VERIFY_READ, from, n))
19990+ n = __copy_from_user(to, from, n);
19991+ else if ((long)n > 0)
19992+ memset(to, 0, n);
19993
19994 return n;
19995 }
19996@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19997 static inline unsigned long __must_check
19998 copy_to_user(void __user *to, const void *from, unsigned long n)
19999 {
20000- int sz = __compiletime_object_size(from);
20001+ size_t sz = __compiletime_object_size(from);
20002
20003 might_fault();
20004
20005 /* See the comment in copy_from_user() above. */
20006- if (likely(sz < 0 || sz >= n))
20007- n = _copy_to_user(to, from, n);
20008- else if(__builtin_constant_p(n))
20009- copy_to_user_overflow();
20010- else
20011- __copy_to_user_overflow(sz, n);
20012+ if (likely(sz != (size_t)-1 && sz < n)) {
20013+ if(__builtin_constant_p(n))
20014+ copy_to_user_overflow();
20015+ else
20016+ __copy_to_user_overflow(sz, n);
20017+ } else if (access_ok(VERIFY_WRITE, to, n))
20018+ n = __copy_to_user(to, from, n);
20019
20020 return n;
20021 }
20022diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20023index 3c03a5d..edb68ae 100644
20024--- a/arch/x86/include/asm/uaccess_32.h
20025+++ b/arch/x86/include/asm/uaccess_32.h
20026@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20027 * anything, so this is accurate.
20028 */
20029
20030-static __always_inline unsigned long __must_check
20031+static __always_inline __size_overflow(3) unsigned long __must_check
20032 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20033 {
20034+ if ((long)n < 0)
20035+ return n;
20036+
20037+ check_object_size(from, n, true);
20038+
20039 if (__builtin_constant_p(n)) {
20040 unsigned long ret;
20041
20042@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20043 __copy_to_user(void __user *to, const void *from, unsigned long n)
20044 {
20045 might_fault();
20046+
20047 return __copy_to_user_inatomic(to, from, n);
20048 }
20049
20050-static __always_inline unsigned long
20051+static __always_inline __size_overflow(3) unsigned long
20052 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20053 {
20054+ if ((long)n < 0)
20055+ return n;
20056+
20057 /* Avoid zeroing the tail if the copy fails..
20058 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20059 * but as the zeroing behaviour is only significant when n is not
20060@@ -137,6 +146,12 @@ static __always_inline unsigned long
20061 __copy_from_user(void *to, const void __user *from, unsigned long n)
20062 {
20063 might_fault();
20064+
20065+ if ((long)n < 0)
20066+ return n;
20067+
20068+ check_object_size(to, n, false);
20069+
20070 if (__builtin_constant_p(n)) {
20071 unsigned long ret;
20072
20073@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20074 const void __user *from, unsigned long n)
20075 {
20076 might_fault();
20077+
20078+ if ((long)n < 0)
20079+ return n;
20080+
20081 if (__builtin_constant_p(n)) {
20082 unsigned long ret;
20083
20084@@ -181,7 +200,10 @@ static __always_inline unsigned long
20085 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20086 unsigned long n)
20087 {
20088- return __copy_from_user_ll_nocache_nozero(to, from, n);
20089+ if ((long)n < 0)
20090+ return n;
20091+
20092+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20093 }
20094
20095 #endif /* _ASM_X86_UACCESS_32_H */
20096diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20097index 12a26b9..c36fff5 100644
20098--- a/arch/x86/include/asm/uaccess_64.h
20099+++ b/arch/x86/include/asm/uaccess_64.h
20100@@ -10,6 +10,9 @@
20101 #include <asm/alternative.h>
20102 #include <asm/cpufeature.h>
20103 #include <asm/page.h>
20104+#include <asm/pgtable.h>
20105+
20106+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20107
20108 /*
20109 * Copy To/From Userspace
20110@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20111 __must_check unsigned long
20112 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20113
20114-static __always_inline __must_check unsigned long
20115-copy_user_generic(void *to, const void *from, unsigned len)
20116+static __always_inline __must_check __size_overflow(3) unsigned long
20117+copy_user_generic(void *to, const void *from, unsigned long len)
20118 {
20119 unsigned ret;
20120
20121@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20122 }
20123
20124 __must_check unsigned long
20125-copy_in_user(void __user *to, const void __user *from, unsigned len);
20126+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20127
20128 static __always_inline __must_check
20129-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20130+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20131 {
20132- int ret = 0;
20133+ size_t sz = __compiletime_object_size(dst);
20134+ unsigned ret = 0;
20135+
20136+ if (size > INT_MAX)
20137+ return size;
20138+
20139+ check_object_size(dst, size, false);
20140+
20141+#ifdef CONFIG_PAX_MEMORY_UDEREF
20142+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20143+ return size;
20144+#endif
20145+
20146+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20147+ if(__builtin_constant_p(size))
20148+ copy_from_user_overflow();
20149+ else
20150+ __copy_from_user_overflow(sz, size);
20151+ return size;
20152+ }
20153
20154 if (!__builtin_constant_p(size))
20155- return copy_user_generic(dst, (__force void *)src, size);
20156+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20157 switch (size) {
20158- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20159+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20160 ret, "b", "b", "=q", 1);
20161 return ret;
20162- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20163+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20164 ret, "w", "w", "=r", 2);
20165 return ret;
20166- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20167+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20168 ret, "l", "k", "=r", 4);
20169 return ret;
20170- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20171+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20172 ret, "q", "", "=r", 8);
20173 return ret;
20174 case 10:
20175- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20176+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20177 ret, "q", "", "=r", 10);
20178 if (unlikely(ret))
20179 return ret;
20180 __get_user_asm(*(u16 *)(8 + (char *)dst),
20181- (u16 __user *)(8 + (char __user *)src),
20182+ (const u16 __user *)(8 + (const char __user *)src),
20183 ret, "w", "w", "=r", 2);
20184 return ret;
20185 case 16:
20186- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20187+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20188 ret, "q", "", "=r", 16);
20189 if (unlikely(ret))
20190 return ret;
20191 __get_user_asm(*(u64 *)(8 + (char *)dst),
20192- (u64 __user *)(8 + (char __user *)src),
20193+ (const u64 __user *)(8 + (const char __user *)src),
20194 ret, "q", "", "=r", 8);
20195 return ret;
20196 default:
20197- return copy_user_generic(dst, (__force void *)src, size);
20198+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20199 }
20200 }
20201
20202 static __always_inline __must_check
20203-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20204+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20205 {
20206 might_fault();
20207 return __copy_from_user_nocheck(dst, src, size);
20208 }
20209
20210 static __always_inline __must_check
20211-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20212+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20213 {
20214- int ret = 0;
20215+ size_t sz = __compiletime_object_size(src);
20216+ unsigned ret = 0;
20217+
20218+ if (size > INT_MAX)
20219+ return size;
20220+
20221+ check_object_size(src, size, true);
20222+
20223+#ifdef CONFIG_PAX_MEMORY_UDEREF
20224+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20225+ return size;
20226+#endif
20227+
20228+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20229+ if(__builtin_constant_p(size))
20230+ copy_to_user_overflow();
20231+ else
20232+ __copy_to_user_overflow(sz, size);
20233+ return size;
20234+ }
20235
20236 if (!__builtin_constant_p(size))
20237- return copy_user_generic((__force void *)dst, src, size);
20238+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20239 switch (size) {
20240- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20241+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20242 ret, "b", "b", "iq", 1);
20243 return ret;
20244- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20245+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20246 ret, "w", "w", "ir", 2);
20247 return ret;
20248- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20249+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20250 ret, "l", "k", "ir", 4);
20251 return ret;
20252- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20253+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20254 ret, "q", "", "er", 8);
20255 return ret;
20256 case 10:
20257- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20258+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20259 ret, "q", "", "er", 10);
20260 if (unlikely(ret))
20261 return ret;
20262 asm("":::"memory");
20263- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20264+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20265 ret, "w", "w", "ir", 2);
20266 return ret;
20267 case 16:
20268- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20269+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20270 ret, "q", "", "er", 16);
20271 if (unlikely(ret))
20272 return ret;
20273 asm("":::"memory");
20274- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20275+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20276 ret, "q", "", "er", 8);
20277 return ret;
20278 default:
20279- return copy_user_generic((__force void *)dst, src, size);
20280+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20281 }
20282 }
20283
20284 static __always_inline __must_check
20285-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20286+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20287 {
20288 might_fault();
20289 return __copy_to_user_nocheck(dst, src, size);
20290 }
20291
20292 static __always_inline __must_check
20293-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20294+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20295 {
20296- int ret = 0;
20297+ unsigned ret = 0;
20298
20299 might_fault();
20300+
20301+ if (size > INT_MAX)
20302+ return size;
20303+
20304+#ifdef CONFIG_PAX_MEMORY_UDEREF
20305+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20306+ return size;
20307+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20308+ return size;
20309+#endif
20310+
20311 if (!__builtin_constant_p(size))
20312- return copy_user_generic((__force void *)dst,
20313- (__force void *)src, size);
20314+ return copy_user_generic((__force_kernel void *)____m(dst),
20315+ (__force_kernel const void *)____m(src), size);
20316 switch (size) {
20317 case 1: {
20318 u8 tmp;
20319- __get_user_asm(tmp, (u8 __user *)src,
20320+ __get_user_asm(tmp, (const u8 __user *)src,
20321 ret, "b", "b", "=q", 1);
20322 if (likely(!ret))
20323 __put_user_asm(tmp, (u8 __user *)dst,
20324@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20325 }
20326 case 2: {
20327 u16 tmp;
20328- __get_user_asm(tmp, (u16 __user *)src,
20329+ __get_user_asm(tmp, (const u16 __user *)src,
20330 ret, "w", "w", "=r", 2);
20331 if (likely(!ret))
20332 __put_user_asm(tmp, (u16 __user *)dst,
20333@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20334
20335 case 4: {
20336 u32 tmp;
20337- __get_user_asm(tmp, (u32 __user *)src,
20338+ __get_user_asm(tmp, (const u32 __user *)src,
20339 ret, "l", "k", "=r", 4);
20340 if (likely(!ret))
20341 __put_user_asm(tmp, (u32 __user *)dst,
20342@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20343 }
20344 case 8: {
20345 u64 tmp;
20346- __get_user_asm(tmp, (u64 __user *)src,
20347+ __get_user_asm(tmp, (const u64 __user *)src,
20348 ret, "q", "", "=r", 8);
20349 if (likely(!ret))
20350 __put_user_asm(tmp, (u64 __user *)dst,
20351@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20352 return ret;
20353 }
20354 default:
20355- return copy_user_generic((__force void *)dst,
20356- (__force void *)src, size);
20357+ return copy_user_generic((__force_kernel void *)____m(dst),
20358+ (__force_kernel const void *)____m(src), size);
20359 }
20360 }
20361
20362-static __must_check __always_inline int
20363-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20364+static __must_check __always_inline unsigned long
20365+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20366 {
20367 return __copy_from_user_nocheck(dst, src, size);
20368 }
20369
20370-static __must_check __always_inline int
20371-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20372+static __must_check __always_inline unsigned long
20373+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20374 {
20375 return __copy_to_user_nocheck(dst, src, size);
20376 }
20377
20378-extern long __copy_user_nocache(void *dst, const void __user *src,
20379- unsigned size, int zerorest);
20380+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20381+ unsigned long size, int zerorest);
20382
20383-static inline int
20384-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20385+static inline unsigned long
20386+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20387 {
20388 might_fault();
20389+
20390+ if (size > INT_MAX)
20391+ return size;
20392+
20393+#ifdef CONFIG_PAX_MEMORY_UDEREF
20394+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20395+ return size;
20396+#endif
20397+
20398 return __copy_user_nocache(dst, src, size, 1);
20399 }
20400
20401-static inline int
20402+static inline unsigned long
20403 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20404- unsigned size)
20405+ unsigned long size)
20406 {
20407+ if (size > INT_MAX)
20408+ return size;
20409+
20410+#ifdef CONFIG_PAX_MEMORY_UDEREF
20411+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20412+ return size;
20413+#endif
20414+
20415 return __copy_user_nocache(dst, src, size, 0);
20416 }
20417
20418 unsigned long
20419-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20420+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20421
20422 #endif /* _ASM_X86_UACCESS_64_H */
20423diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20424index 5b238981..77fdd78 100644
20425--- a/arch/x86/include/asm/word-at-a-time.h
20426+++ b/arch/x86/include/asm/word-at-a-time.h
20427@@ -11,7 +11,7 @@
20428 * and shift, for example.
20429 */
20430 struct word_at_a_time {
20431- const unsigned long one_bits, high_bits;
20432+ unsigned long one_bits, high_bits;
20433 };
20434
20435 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20436diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20437index f58a9c7..dc378042a 100644
20438--- a/arch/x86/include/asm/x86_init.h
20439+++ b/arch/x86/include/asm/x86_init.h
20440@@ -129,7 +129,7 @@ struct x86_init_ops {
20441 struct x86_init_timers timers;
20442 struct x86_init_iommu iommu;
20443 struct x86_init_pci pci;
20444-};
20445+} __no_const;
20446
20447 /**
20448 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20449@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20450 void (*setup_percpu_clockev)(void);
20451 void (*early_percpu_clock_init)(void);
20452 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20453-};
20454+} __no_const;
20455
20456 struct timespec;
20457
20458@@ -168,7 +168,7 @@ struct x86_platform_ops {
20459 void (*save_sched_clock_state)(void);
20460 void (*restore_sched_clock_state)(void);
20461 void (*apic_post_init)(void);
20462-};
20463+} __no_const;
20464
20465 struct pci_dev;
20466 struct msi_msg;
20467@@ -182,7 +182,7 @@ struct x86_msi_ops {
20468 void (*teardown_msi_irqs)(struct pci_dev *dev);
20469 void (*restore_msi_irqs)(struct pci_dev *dev);
20470 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20471-};
20472+} __no_const;
20473
20474 struct IO_APIC_route_entry;
20475 struct io_apic_irq_attr;
20476@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20477 unsigned int destination, int vector,
20478 struct io_apic_irq_attr *attr);
20479 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20480-};
20481+} __no_const;
20482
20483 extern struct x86_init_ops x86_init;
20484 extern struct x86_cpuinit_ops x86_cpuinit;
20485diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20486index 5eea099..ff7ef8d 100644
20487--- a/arch/x86/include/asm/xen/page.h
20488+++ b/arch/x86/include/asm/xen/page.h
20489@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20490 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20491 * cases needing an extended handling.
20492 */
20493-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20494+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20495 {
20496 unsigned long mfn;
20497
20498diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20499index c9a6d68..cb57f42 100644
20500--- a/arch/x86/include/asm/xsave.h
20501+++ b/arch/x86/include/asm/xsave.h
20502@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20503 if (unlikely(err))
20504 return -EFAULT;
20505
20506+ pax_open_userland();
20507 __asm__ __volatile__(ASM_STAC "\n"
20508- "1:"XSAVE"\n"
20509+ "1:"
20510+ __copyuser_seg
20511+ XSAVE"\n"
20512 "2: " ASM_CLAC "\n"
20513 xstate_fault
20514 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20515 : "memory");
20516+ pax_close_userland();
20517 return err;
20518 }
20519
20520@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20521 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20522 {
20523 int err = 0;
20524- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20525+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20526 u32 lmask = mask;
20527 u32 hmask = mask >> 32;
20528
20529+ pax_open_userland();
20530 __asm__ __volatile__(ASM_STAC "\n"
20531- "1:"XRSTOR"\n"
20532+ "1:"
20533+ __copyuser_seg
20534+ XRSTOR"\n"
20535 "2: " ASM_CLAC "\n"
20536 xstate_fault
20537 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20538 : "memory"); /* memory required? */
20539+ pax_close_userland();
20540 return err;
20541 }
20542
20543diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20544index d993e33..8db1b18 100644
20545--- a/arch/x86/include/uapi/asm/e820.h
20546+++ b/arch/x86/include/uapi/asm/e820.h
20547@@ -58,7 +58,7 @@ struct e820map {
20548 #define ISA_START_ADDRESS 0xa0000
20549 #define ISA_END_ADDRESS 0x100000
20550
20551-#define BIOS_BEGIN 0x000a0000
20552+#define BIOS_BEGIN 0x000c0000
20553 #define BIOS_END 0x00100000
20554
20555 #define BIOS_ROM_BASE 0xffe00000
20556diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20557index 7b0a55a..ad115bf 100644
20558--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20559+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20560@@ -49,7 +49,6 @@
20561 #define EFLAGS 144
20562 #define RSP 152
20563 #define SS 160
20564-#define ARGOFFSET R11
20565 #endif /* __ASSEMBLY__ */
20566
20567 /* top of stack page */
20568diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20569index 5d4502c..a567e09 100644
20570--- a/arch/x86/kernel/Makefile
20571+++ b/arch/x86/kernel/Makefile
20572@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20573 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20574 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20575 obj-y += probe_roms.o
20576-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20577+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20578 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20579 obj-$(CONFIG_X86_64) += mcount_64.o
20580 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20581diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20582index b5ddc96..490b4e4 100644
20583--- a/arch/x86/kernel/acpi/boot.c
20584+++ b/arch/x86/kernel/acpi/boot.c
20585@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20586 * If your system is blacklisted here, but you find that acpi=force
20587 * works for you, please contact linux-acpi@vger.kernel.org
20588 */
20589-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20590+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20591 /*
20592 * Boxes that need ACPI disabled
20593 */
20594@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20595 };
20596
20597 /* second table for DMI checks that should run after early-quirks */
20598-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20599+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20600 /*
20601 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20602 * which includes some code which overrides all temperature
20603diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20604index 3136820..e2c6577 100644
20605--- a/arch/x86/kernel/acpi/sleep.c
20606+++ b/arch/x86/kernel/acpi/sleep.c
20607@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20608 #else /* CONFIG_64BIT */
20609 #ifdef CONFIG_SMP
20610 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20611+
20612+ pax_open_kernel();
20613 early_gdt_descr.address =
20614 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20615+ pax_close_kernel();
20616+
20617 initial_gs = per_cpu_offset(smp_processor_id());
20618 #endif
20619 initial_code = (unsigned long)wakeup_long64;
20620diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20621index 665c6b7..eae4d56 100644
20622--- a/arch/x86/kernel/acpi/wakeup_32.S
20623+++ b/arch/x86/kernel/acpi/wakeup_32.S
20624@@ -29,13 +29,11 @@ wakeup_pmode_return:
20625 # and restore the stack ... but you need gdt for this to work
20626 movl saved_context_esp, %esp
20627
20628- movl %cs:saved_magic, %eax
20629- cmpl $0x12345678, %eax
20630+ cmpl $0x12345678, saved_magic
20631 jne bogus_magic
20632
20633 # jump to place where we left off
20634- movl saved_eip, %eax
20635- jmp *%eax
20636+ jmp *(saved_eip)
20637
20638 bogus_magic:
20639 jmp bogus_magic
20640diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20641index 703130f..27a155d 100644
20642--- a/arch/x86/kernel/alternative.c
20643+++ b/arch/x86/kernel/alternative.c
20644@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20645 */
20646 for (a = start; a < end; a++) {
20647 instr = (u8 *)&a->instr_offset + a->instr_offset;
20648+
20649+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20650+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20651+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20652+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20653+#endif
20654+
20655 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20656 BUG_ON(a->replacementlen > a->instrlen);
20657 BUG_ON(a->instrlen > sizeof(insnbuf));
20658@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20659 add_nops(insnbuf + a->replacementlen,
20660 a->instrlen - a->replacementlen);
20661
20662+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20663+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20664+ instr = ktva_ktla(instr);
20665+#endif
20666+
20667 text_poke_early(instr, insnbuf, a->instrlen);
20668 }
20669 }
20670@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20671 for (poff = start; poff < end; poff++) {
20672 u8 *ptr = (u8 *)poff + *poff;
20673
20674+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20675+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20676+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20677+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20678+#endif
20679+
20680 if (!*poff || ptr < text || ptr >= text_end)
20681 continue;
20682 /* turn DS segment override prefix into lock prefix */
20683- if (*ptr == 0x3e)
20684+ if (*ktla_ktva(ptr) == 0x3e)
20685 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20686 }
20687 mutex_unlock(&text_mutex);
20688@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20689 for (poff = start; poff < end; poff++) {
20690 u8 *ptr = (u8 *)poff + *poff;
20691
20692+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20693+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20694+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20695+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20696+#endif
20697+
20698 if (!*poff || ptr < text || ptr >= text_end)
20699 continue;
20700 /* turn lock prefix into DS segment override prefix */
20701- if (*ptr == 0xf0)
20702+ if (*ktla_ktva(ptr) == 0xf0)
20703 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20704 }
20705 mutex_unlock(&text_mutex);
20706@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20707
20708 BUG_ON(p->len > MAX_PATCH_LEN);
20709 /* prep the buffer with the original instructions */
20710- memcpy(insnbuf, p->instr, p->len);
20711+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20712 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20713 (unsigned long)p->instr, p->len);
20714
20715@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20716 if (!uniproc_patched || num_possible_cpus() == 1)
20717 free_init_pages("SMP alternatives",
20718 (unsigned long)__smp_locks,
20719- (unsigned long)__smp_locks_end);
20720+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20721 #endif
20722
20723 apply_paravirt(__parainstructions, __parainstructions_end);
20724@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20725 * instructions. And on the local CPU you need to be protected again NMI or MCE
20726 * handlers seeing an inconsistent instruction while you patch.
20727 */
20728-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20729+void *__kprobes text_poke_early(void *addr, const void *opcode,
20730 size_t len)
20731 {
20732 unsigned long flags;
20733 local_irq_save(flags);
20734- memcpy(addr, opcode, len);
20735+
20736+ pax_open_kernel();
20737+ memcpy(ktla_ktva(addr), opcode, len);
20738 sync_core();
20739+ pax_close_kernel();
20740+
20741 local_irq_restore(flags);
20742 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20743 that causes hangs on some VIA CPUs. */
20744@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20745 */
20746 void *text_poke(void *addr, const void *opcode, size_t len)
20747 {
20748- unsigned long flags;
20749- char *vaddr;
20750+ unsigned char *vaddr = ktla_ktva(addr);
20751 struct page *pages[2];
20752- int i;
20753+ size_t i;
20754
20755 if (!core_kernel_text((unsigned long)addr)) {
20756- pages[0] = vmalloc_to_page(addr);
20757- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20758+ pages[0] = vmalloc_to_page(vaddr);
20759+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20760 } else {
20761- pages[0] = virt_to_page(addr);
20762+ pages[0] = virt_to_page(vaddr);
20763 WARN_ON(!PageReserved(pages[0]));
20764- pages[1] = virt_to_page(addr + PAGE_SIZE);
20765+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20766 }
20767 BUG_ON(!pages[0]);
20768- local_irq_save(flags);
20769- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20770- if (pages[1])
20771- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20772- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20773- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20774- clear_fixmap(FIX_TEXT_POKE0);
20775- if (pages[1])
20776- clear_fixmap(FIX_TEXT_POKE1);
20777- local_flush_tlb();
20778- sync_core();
20779- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20780- that causes hangs on some VIA CPUs. */
20781+ text_poke_early(addr, opcode, len);
20782 for (i = 0; i < len; i++)
20783- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20784- local_irq_restore(flags);
20785+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20786 return addr;
20787 }
20788
20789@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20790 if (likely(!bp_patching_in_progress))
20791 return 0;
20792
20793- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20794+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20795 return 0;
20796
20797 /* set up the specified breakpoint handler */
20798@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20799 */
20800 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20801 {
20802- unsigned char int3 = 0xcc;
20803+ const unsigned char int3 = 0xcc;
20804
20805 bp_int3_handler = handler;
20806 bp_int3_addr = (u8 *)addr + sizeof(int3);
20807diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20808index 29b5b18..3bdfc29 100644
20809--- a/arch/x86/kernel/apic/apic.c
20810+++ b/arch/x86/kernel/apic/apic.c
20811@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20812 /*
20813 * Debug level, exported for io_apic.c
20814 */
20815-unsigned int apic_verbosity;
20816+int apic_verbosity;
20817
20818 int pic_mode;
20819
20820@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20821 apic_write(APIC_ESR, 0);
20822 v = apic_read(APIC_ESR);
20823 ack_APIC_irq();
20824- atomic_inc(&irq_err_count);
20825+ atomic_inc_unchecked(&irq_err_count);
20826
20827 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20828 smp_processor_id(), v);
20829diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20830index de918c4..32eed23 100644
20831--- a/arch/x86/kernel/apic/apic_flat_64.c
20832+++ b/arch/x86/kernel/apic/apic_flat_64.c
20833@@ -154,7 +154,7 @@ static int flat_probe(void)
20834 return 1;
20835 }
20836
20837-static struct apic apic_flat = {
20838+static struct apic apic_flat __read_only = {
20839 .name = "flat",
20840 .probe = flat_probe,
20841 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20842@@ -260,7 +260,7 @@ static int physflat_probe(void)
20843 return 0;
20844 }
20845
20846-static struct apic apic_physflat = {
20847+static struct apic apic_physflat __read_only = {
20848
20849 .name = "physical flat",
20850 .probe = physflat_probe,
20851diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20852index b205cdb..d8503ff 100644
20853--- a/arch/x86/kernel/apic/apic_noop.c
20854+++ b/arch/x86/kernel/apic/apic_noop.c
20855@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20856 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20857 }
20858
20859-struct apic apic_noop = {
20860+struct apic apic_noop __read_only = {
20861 .name = "noop",
20862 .probe = noop_probe,
20863 .acpi_madt_oem_check = NULL,
20864diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20865index c4a8d63..fe893ac 100644
20866--- a/arch/x86/kernel/apic/bigsmp_32.c
20867+++ b/arch/x86/kernel/apic/bigsmp_32.c
20868@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20869 return dmi_bigsmp;
20870 }
20871
20872-static struct apic apic_bigsmp = {
20873+static struct apic apic_bigsmp __read_only = {
20874
20875 .name = "bigsmp",
20876 .probe = probe_bigsmp,
20877diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20878index 3f5f604..309c0e6 100644
20879--- a/arch/x86/kernel/apic/io_apic.c
20880+++ b/arch/x86/kernel/apic/io_apic.c
20881@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20882 return ret;
20883 }
20884
20885-atomic_t irq_mis_count;
20886+atomic_unchecked_t irq_mis_count;
20887
20888 #ifdef CONFIG_GENERIC_PENDING_IRQ
20889 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20890@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20891 * at the cpu.
20892 */
20893 if (!(v & (1 << (i & 0x1f)))) {
20894- atomic_inc(&irq_mis_count);
20895+ atomic_inc_unchecked(&irq_mis_count);
20896
20897 eoi_ioapic_irq(irq, cfg);
20898 }
20899diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20900index bda4886..f9c7195 100644
20901--- a/arch/x86/kernel/apic/probe_32.c
20902+++ b/arch/x86/kernel/apic/probe_32.c
20903@@ -72,7 +72,7 @@ static int probe_default(void)
20904 return 1;
20905 }
20906
20907-static struct apic apic_default = {
20908+static struct apic apic_default __read_only = {
20909
20910 .name = "default",
20911 .probe = probe_default,
20912diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20913index 6cedd79..023ff8e 100644
20914--- a/arch/x86/kernel/apic/vector.c
20915+++ b/arch/x86/kernel/apic/vector.c
20916@@ -21,7 +21,7 @@
20917
20918 static DEFINE_RAW_SPINLOCK(vector_lock);
20919
20920-void lock_vector_lock(void)
20921+void lock_vector_lock(void) __acquires(vector_lock)
20922 {
20923 /* Used to the online set of cpus does not change
20924 * during assign_irq_vector.
20925@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20926 raw_spin_lock(&vector_lock);
20927 }
20928
20929-void unlock_vector_lock(void)
20930+void unlock_vector_lock(void) __releases(vector_lock)
20931 {
20932 raw_spin_unlock(&vector_lock);
20933 }
20934diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20935index e658f21..b695a1a 100644
20936--- a/arch/x86/kernel/apic/x2apic_cluster.c
20937+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20938@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20939 return notifier_from_errno(err);
20940 }
20941
20942-static struct notifier_block __refdata x2apic_cpu_notifier = {
20943+static struct notifier_block x2apic_cpu_notifier = {
20944 .notifier_call = update_clusterinfo,
20945 };
20946
20947@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20948 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20949 }
20950
20951-static struct apic apic_x2apic_cluster = {
20952+static struct apic apic_x2apic_cluster __read_only = {
20953
20954 .name = "cluster x2apic",
20955 .probe = x2apic_cluster_probe,
20956diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20957index 6fae733..5ca17af 100644
20958--- a/arch/x86/kernel/apic/x2apic_phys.c
20959+++ b/arch/x86/kernel/apic/x2apic_phys.c
20960@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20961 return apic == &apic_x2apic_phys;
20962 }
20963
20964-static struct apic apic_x2apic_phys = {
20965+static struct apic apic_x2apic_phys __read_only = {
20966
20967 .name = "physical x2apic",
20968 .probe = x2apic_phys_probe,
20969diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20970index 8e9dcfd..c61b3e4 100644
20971--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20972+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20973@@ -348,7 +348,7 @@ static int uv_probe(void)
20974 return apic == &apic_x2apic_uv_x;
20975 }
20976
20977-static struct apic __refdata apic_x2apic_uv_x = {
20978+static struct apic apic_x2apic_uv_x __read_only = {
20979
20980 .name = "UV large system",
20981 .probe = uv_probe,
20982diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20983index 927ec92..0dc3bd4 100644
20984--- a/arch/x86/kernel/apm_32.c
20985+++ b/arch/x86/kernel/apm_32.c
20986@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20987 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20988 * even though they are called in protected mode.
20989 */
20990-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20991+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20992 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20993
20994 static const char driver_version[] = "1.16ac"; /* no spaces */
20995@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20996 BUG_ON(cpu != 0);
20997 gdt = get_cpu_gdt_table(cpu);
20998 save_desc_40 = gdt[0x40 / 8];
20999+
21000+ pax_open_kernel();
21001 gdt[0x40 / 8] = bad_bios_desc;
21002+ pax_close_kernel();
21003
21004 apm_irq_save(flags);
21005 APM_DO_SAVE_SEGS;
21006@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21007 &call->esi);
21008 APM_DO_RESTORE_SEGS;
21009 apm_irq_restore(flags);
21010+
21011+ pax_open_kernel();
21012 gdt[0x40 / 8] = save_desc_40;
21013+ pax_close_kernel();
21014+
21015 put_cpu();
21016
21017 return call->eax & 0xff;
21018@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21019 BUG_ON(cpu != 0);
21020 gdt = get_cpu_gdt_table(cpu);
21021 save_desc_40 = gdt[0x40 / 8];
21022+
21023+ pax_open_kernel();
21024 gdt[0x40 / 8] = bad_bios_desc;
21025+ pax_close_kernel();
21026
21027 apm_irq_save(flags);
21028 APM_DO_SAVE_SEGS;
21029@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21030 &call->eax);
21031 APM_DO_RESTORE_SEGS;
21032 apm_irq_restore(flags);
21033+
21034+ pax_open_kernel();
21035 gdt[0x40 / 8] = save_desc_40;
21036+ pax_close_kernel();
21037+
21038 put_cpu();
21039 return error;
21040 }
21041@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21042 * code to that CPU.
21043 */
21044 gdt = get_cpu_gdt_table(0);
21045+
21046+ pax_open_kernel();
21047 set_desc_base(&gdt[APM_CS >> 3],
21048 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21049 set_desc_base(&gdt[APM_CS_16 >> 3],
21050 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21051 set_desc_base(&gdt[APM_DS >> 3],
21052 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21053+ pax_close_kernel();
21054
21055 proc_create("apm", 0, NULL, &apm_file_ops);
21056
21057diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21058index 9f6b934..cf5ffb3 100644
21059--- a/arch/x86/kernel/asm-offsets.c
21060+++ b/arch/x86/kernel/asm-offsets.c
21061@@ -32,6 +32,8 @@ void common(void) {
21062 OFFSET(TI_flags, thread_info, flags);
21063 OFFSET(TI_status, thread_info, status);
21064 OFFSET(TI_addr_limit, thread_info, addr_limit);
21065+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21066+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21067
21068 BLANK();
21069 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21070@@ -52,8 +54,26 @@ void common(void) {
21071 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21072 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21073 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21074+
21075+#ifdef CONFIG_PAX_KERNEXEC
21076+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21077 #endif
21078
21079+#ifdef CONFIG_PAX_MEMORY_UDEREF
21080+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21081+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21082+#ifdef CONFIG_X86_64
21083+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21084+#endif
21085+#endif
21086+
21087+#endif
21088+
21089+ BLANK();
21090+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21091+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21092+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21093+
21094 #ifdef CONFIG_XEN
21095 BLANK();
21096 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21097diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21098index fdcbb4d..036dd93 100644
21099--- a/arch/x86/kernel/asm-offsets_64.c
21100+++ b/arch/x86/kernel/asm-offsets_64.c
21101@@ -80,6 +80,7 @@ int main(void)
21102 BLANK();
21103 #undef ENTRY
21104
21105+ DEFINE(TSS_size, sizeof(struct tss_struct));
21106 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21107 BLANK();
21108
21109diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21110index 80091ae..0c5184f 100644
21111--- a/arch/x86/kernel/cpu/Makefile
21112+++ b/arch/x86/kernel/cpu/Makefile
21113@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21114 CFLAGS_REMOVE_perf_event.o = -pg
21115 endif
21116
21117-# Make sure load_percpu_segment has no stackprotector
21118-nostackp := $(call cc-option, -fno-stack-protector)
21119-CFLAGS_common.o := $(nostackp)
21120-
21121 obj-y := intel_cacheinfo.o scattered.o topology.o
21122 obj-y += common.o
21123 obj-y += rdrand.o
21124diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21125index 15c5df9..d9a604a 100644
21126--- a/arch/x86/kernel/cpu/amd.c
21127+++ b/arch/x86/kernel/cpu/amd.c
21128@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21129 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21130 {
21131 /* AMD errata T13 (order #21922) */
21132- if ((c->x86 == 6)) {
21133+ if (c->x86 == 6) {
21134 /* Duron Rev A0 */
21135 if (c->x86_model == 3 && c->x86_mask == 0)
21136 size = 64;
21137diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21138index c604965..0b0e28a 100644
21139--- a/arch/x86/kernel/cpu/common.c
21140+++ b/arch/x86/kernel/cpu/common.c
21141@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21142
21143 static const struct cpu_dev *this_cpu = &default_cpu;
21144
21145-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21146-#ifdef CONFIG_X86_64
21147- /*
21148- * We need valid kernel segments for data and code in long mode too
21149- * IRET will check the segment types kkeil 2000/10/28
21150- * Also sysret mandates a special GDT layout
21151- *
21152- * TLS descriptors are currently at a different place compared to i386.
21153- * Hopefully nobody expects them at a fixed place (Wine?)
21154- */
21155- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21156- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21157- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21158- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21159- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21160- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21161-#else
21162- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21163- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21164- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21165- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21166- /*
21167- * Segments used for calling PnP BIOS have byte granularity.
21168- * They code segments and data segments have fixed 64k limits,
21169- * the transfer segment sizes are set at run time.
21170- */
21171- /* 32-bit code */
21172- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21173- /* 16-bit code */
21174- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21175- /* 16-bit data */
21176- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21177- /* 16-bit data */
21178- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21179- /* 16-bit data */
21180- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21181- /*
21182- * The APM segments have byte granularity and their bases
21183- * are set at run time. All have 64k limits.
21184- */
21185- /* 32-bit code */
21186- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21187- /* 16-bit code */
21188- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21189- /* data */
21190- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21191-
21192- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21193- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21194- GDT_STACK_CANARY_INIT
21195-#endif
21196-} };
21197-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21198-
21199 static int __init x86_xsave_setup(char *s)
21200 {
21201 if (strlen(s))
21202@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21203 }
21204 }
21205
21206+#ifdef CONFIG_X86_64
21207+static __init int setup_disable_pcid(char *arg)
21208+{
21209+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21210+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21211+
21212+#ifdef CONFIG_PAX_MEMORY_UDEREF
21213+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21214+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21215+#endif
21216+
21217+ return 1;
21218+}
21219+__setup("nopcid", setup_disable_pcid);
21220+
21221+static void setup_pcid(struct cpuinfo_x86 *c)
21222+{
21223+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21224+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21225+
21226+#ifdef CONFIG_PAX_MEMORY_UDEREF
21227+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21228+ pax_open_kernel();
21229+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21230+ pax_close_kernel();
21231+ printk("PAX: slow and weak UDEREF enabled\n");
21232+ } else
21233+ printk("PAX: UDEREF disabled\n");
21234+#endif
21235+
21236+ return;
21237+ }
21238+
21239+ printk("PAX: PCID detected\n");
21240+ set_in_cr4(X86_CR4_PCIDE);
21241+
21242+#ifdef CONFIG_PAX_MEMORY_UDEREF
21243+ pax_open_kernel();
21244+ clone_pgd_mask = ~(pgdval_t)0UL;
21245+ pax_close_kernel();
21246+ if (pax_user_shadow_base)
21247+ printk("PAX: weak UDEREF enabled\n");
21248+ else {
21249+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21250+ printk("PAX: strong UDEREF enabled\n");
21251+ }
21252+#endif
21253+
21254+ if (cpu_has(c, X86_FEATURE_INVPCID))
21255+ printk("PAX: INVPCID detected\n");
21256+}
21257+#endif
21258+
21259 /*
21260 * Some CPU features depend on higher CPUID levels, which may not always
21261 * be available due to CPUID level capping or broken virtualization
21262@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21263 {
21264 struct desc_ptr gdt_descr;
21265
21266- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21267+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21268 gdt_descr.size = GDT_SIZE - 1;
21269 load_gdt(&gdt_descr);
21270 /* Reload the per-cpu base */
21271@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21272 setup_smep(c);
21273 setup_smap(c);
21274
21275+#ifdef CONFIG_X86_64
21276+ setup_pcid(c);
21277+#endif
21278+
21279 /*
21280 * The vendor-specific functions might have changed features.
21281 * Now we do "generic changes."
21282@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21283 /* Filter out anything that depends on CPUID levels we don't have */
21284 filter_cpuid_features(c, true);
21285
21286+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21287+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21288+#endif
21289+
21290 /* If the model name is still unset, do table lookup. */
21291 if (!c->x86_model_id[0]) {
21292 const char *p;
21293@@ -977,7 +984,7 @@ static void syscall32_cpu_init(void)
21294 void enable_sep_cpu(void)
21295 {
21296 int cpu = get_cpu();
21297- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21298+ struct tss_struct *tss = init_tss + cpu;
21299
21300 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21301 put_cpu();
21302@@ -1115,14 +1122,16 @@ static __init int setup_disablecpuid(char *arg)
21303 }
21304 __setup("clearcpuid=", setup_disablecpuid);
21305
21306+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21307+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21308+
21309 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21310- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21311+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21312 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21313
21314 #ifdef CONFIG_X86_64
21315-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21316-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21317- (unsigned long) debug_idt_table };
21318+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21319+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21320
21321 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21322 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21323@@ -1299,7 +1308,7 @@ void cpu_init(void)
21324 */
21325 load_ucode_ap();
21326
21327- t = &per_cpu(init_tss, cpu);
21328+ t = init_tss + cpu;
21329 oist = &per_cpu(orig_ist, cpu);
21330
21331 #ifdef CONFIG_NUMA
21332@@ -1331,7 +1340,6 @@ void cpu_init(void)
21333 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21334 barrier();
21335
21336- x86_configure_nx();
21337 enable_x2apic();
21338
21339 /*
21340@@ -1383,7 +1391,7 @@ void cpu_init(void)
21341 {
21342 int cpu = smp_processor_id();
21343 struct task_struct *curr = current;
21344- struct tss_struct *t = &per_cpu(init_tss, cpu);
21345+ struct tss_struct *t = init_tss + cpu;
21346 struct thread_struct *thread = &curr->thread;
21347
21348 wait_for_master_cpu(cpu);
21349diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21350index c703507..28535e3 100644
21351--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21352+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21353@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21354 };
21355
21356 #ifdef CONFIG_AMD_NB
21357+static struct attribute *default_attrs_amd_nb[] = {
21358+ &type.attr,
21359+ &level.attr,
21360+ &coherency_line_size.attr,
21361+ &physical_line_partition.attr,
21362+ &ways_of_associativity.attr,
21363+ &number_of_sets.attr,
21364+ &size.attr,
21365+ &shared_cpu_map.attr,
21366+ &shared_cpu_list.attr,
21367+ NULL,
21368+ NULL,
21369+ NULL,
21370+ NULL
21371+};
21372+
21373 static struct attribute **amd_l3_attrs(void)
21374 {
21375 static struct attribute **attrs;
21376@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21377
21378 n = ARRAY_SIZE(default_attrs);
21379
21380- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21381- n += 2;
21382-
21383- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21384- n += 1;
21385-
21386- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21387- if (attrs == NULL)
21388- return attrs = default_attrs;
21389-
21390- for (n = 0; default_attrs[n]; n++)
21391- attrs[n] = default_attrs[n];
21392+ attrs = default_attrs_amd_nb;
21393
21394 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21395 attrs[n++] = &cache_disable_0.attr;
21396@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21397 .default_attrs = default_attrs,
21398 };
21399
21400+#ifdef CONFIG_AMD_NB
21401+static struct kobj_type ktype_cache_amd_nb = {
21402+ .sysfs_ops = &sysfs_ops,
21403+ .default_attrs = default_attrs_amd_nb,
21404+};
21405+#endif
21406+
21407 static struct kobj_type ktype_percpu_entry = {
21408 .sysfs_ops = &sysfs_ops,
21409 };
21410@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21411 return retval;
21412 }
21413
21414+#ifdef CONFIG_AMD_NB
21415+ amd_l3_attrs();
21416+#endif
21417+
21418 for (i = 0; i < num_cache_leaves; i++) {
21419+ struct kobj_type *ktype;
21420+
21421 this_object = INDEX_KOBJECT_PTR(cpu, i);
21422 this_object->cpu = cpu;
21423 this_object->index = i;
21424
21425 this_leaf = CPUID4_INFO_IDX(cpu, i);
21426
21427- ktype_cache.default_attrs = default_attrs;
21428+ ktype = &ktype_cache;
21429 #ifdef CONFIG_AMD_NB
21430 if (this_leaf->base.nb)
21431- ktype_cache.default_attrs = amd_l3_attrs();
21432+ ktype = &ktype_cache_amd_nb;
21433 #endif
21434 retval = kobject_init_and_add(&(this_object->kobj),
21435- &ktype_cache,
21436+ ktype,
21437 per_cpu(ici_cache_kobject, cpu),
21438 "index%1lu", i);
21439 if (unlikely(retval)) {
21440diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21441index d2c6116..62fd7aa 100644
21442--- a/arch/x86/kernel/cpu/mcheck/mce.c
21443+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21444@@ -45,6 +45,7 @@
21445 #include <asm/processor.h>
21446 #include <asm/mce.h>
21447 #include <asm/msr.h>
21448+#include <asm/local.h>
21449
21450 #include "mce-internal.h"
21451
21452@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21453 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21454 m->cs, m->ip);
21455
21456- if (m->cs == __KERNEL_CS)
21457+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21458 print_symbol("{%s}", m->ip);
21459 pr_cont("\n");
21460 }
21461@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21462
21463 #define PANIC_TIMEOUT 5 /* 5 seconds */
21464
21465-static atomic_t mce_panicked;
21466+static atomic_unchecked_t mce_panicked;
21467
21468 static int fake_panic;
21469-static atomic_t mce_fake_panicked;
21470+static atomic_unchecked_t mce_fake_panicked;
21471
21472 /* Panic in progress. Enable interrupts and wait for final IPI */
21473 static void wait_for_panic(void)
21474@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21475 /*
21476 * Make sure only one CPU runs in machine check panic
21477 */
21478- if (atomic_inc_return(&mce_panicked) > 1)
21479+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21480 wait_for_panic();
21481 barrier();
21482
21483@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21484 console_verbose();
21485 } else {
21486 /* Don't log too much for fake panic */
21487- if (atomic_inc_return(&mce_fake_panicked) > 1)
21488+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21489 return;
21490 }
21491 /* First print corrected ones that are still unlogged */
21492@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21493 if (!fake_panic) {
21494 if (panic_timeout == 0)
21495 panic_timeout = mca_cfg.panic_timeout;
21496- panic(msg);
21497+ panic("%s", msg);
21498 } else
21499 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21500 }
21501@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21502 * might have been modified by someone else.
21503 */
21504 rmb();
21505- if (atomic_read(&mce_panicked))
21506+ if (atomic_read_unchecked(&mce_panicked))
21507 wait_for_panic();
21508 if (!mca_cfg.monarch_timeout)
21509 goto out;
21510@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21511 }
21512
21513 /* Call the installed machine check handler for this CPU setup. */
21514-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21515+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21516 unexpected_machine_check;
21517
21518 /*
21519@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21520 return;
21521 }
21522
21523+ pax_open_kernel();
21524 machine_check_vector = do_machine_check;
21525+ pax_close_kernel();
21526
21527 __mcheck_cpu_init_generic();
21528 __mcheck_cpu_init_vendor(c);
21529@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21530 */
21531
21532 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21533-static int mce_chrdev_open_count; /* #times opened */
21534+static local_t mce_chrdev_open_count; /* #times opened */
21535 static int mce_chrdev_open_exclu; /* already open exclusive? */
21536
21537 static int mce_chrdev_open(struct inode *inode, struct file *file)
21538@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21539 spin_lock(&mce_chrdev_state_lock);
21540
21541 if (mce_chrdev_open_exclu ||
21542- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21543+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21544 spin_unlock(&mce_chrdev_state_lock);
21545
21546 return -EBUSY;
21547@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21548
21549 if (file->f_flags & O_EXCL)
21550 mce_chrdev_open_exclu = 1;
21551- mce_chrdev_open_count++;
21552+ local_inc(&mce_chrdev_open_count);
21553
21554 spin_unlock(&mce_chrdev_state_lock);
21555
21556@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21557 {
21558 spin_lock(&mce_chrdev_state_lock);
21559
21560- mce_chrdev_open_count--;
21561+ local_dec(&mce_chrdev_open_count);
21562 mce_chrdev_open_exclu = 0;
21563
21564 spin_unlock(&mce_chrdev_state_lock);
21565@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21566
21567 for (i = 0; i < mca_cfg.banks; i++) {
21568 struct mce_bank *b = &mce_banks[i];
21569- struct device_attribute *a = &b->attr;
21570+ device_attribute_no_const *a = &b->attr;
21571
21572 sysfs_attr_init(&a->attr);
21573 a->attr.name = b->attrname;
21574@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21575 static void mce_reset(void)
21576 {
21577 cpu_missing = 0;
21578- atomic_set(&mce_fake_panicked, 0);
21579+ atomic_set_unchecked(&mce_fake_panicked, 0);
21580 atomic_set(&mce_executing, 0);
21581 atomic_set(&mce_callin, 0);
21582 atomic_set(&global_nwo, 0);
21583diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21584index a304298..49b6d06 100644
21585--- a/arch/x86/kernel/cpu/mcheck/p5.c
21586+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21587@@ -10,6 +10,7 @@
21588 #include <asm/processor.h>
21589 #include <asm/mce.h>
21590 #include <asm/msr.h>
21591+#include <asm/pgtable.h>
21592
21593 /* By default disabled */
21594 int mce_p5_enabled __read_mostly;
21595@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21596 if (!cpu_has(c, X86_FEATURE_MCE))
21597 return;
21598
21599+ pax_open_kernel();
21600 machine_check_vector = pentium_machine_check;
21601+ pax_close_kernel();
21602 /* Make sure the vector pointer is visible before we enable MCEs: */
21603 wmb();
21604
21605diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21606index 7dc5564..1273569 100644
21607--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21608+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21609@@ -9,6 +9,7 @@
21610 #include <asm/processor.h>
21611 #include <asm/mce.h>
21612 #include <asm/msr.h>
21613+#include <asm/pgtable.h>
21614
21615 /* Machine check handler for WinChip C6: */
21616 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21617@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21618 {
21619 u32 lo, hi;
21620
21621+ pax_open_kernel();
21622 machine_check_vector = winchip_machine_check;
21623+ pax_close_kernel();
21624 /* Make sure the vector pointer is visible before we enable MCEs: */
21625 wmb();
21626
21627diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21628index 36a8361..e7058c2 100644
21629--- a/arch/x86/kernel/cpu/microcode/core.c
21630+++ b/arch/x86/kernel/cpu/microcode/core.c
21631@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21632 return NOTIFY_OK;
21633 }
21634
21635-static struct notifier_block __refdata mc_cpu_notifier = {
21636+static struct notifier_block mc_cpu_notifier = {
21637 .notifier_call = mc_cpu_callback,
21638 };
21639
21640diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21641index c6826d1..8dc677e 100644
21642--- a/arch/x86/kernel/cpu/microcode/intel.c
21643+++ b/arch/x86/kernel/cpu/microcode/intel.c
21644@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21645 struct microcode_header_intel mc_header;
21646 unsigned int mc_size;
21647
21648+ if (leftover < sizeof(mc_header)) {
21649+ pr_err("error! Truncated header in microcode data file\n");
21650+ break;
21651+ }
21652+
21653 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21654 break;
21655
21656@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21657
21658 static int get_ucode_user(void *to, const void *from, size_t n)
21659 {
21660- return copy_from_user(to, from, n);
21661+ return copy_from_user(to, (const void __force_user *)from, n);
21662 }
21663
21664 static enum ucode_state
21665 request_microcode_user(int cpu, const void __user *buf, size_t size)
21666 {
21667- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21668+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21669 }
21670
21671 static void microcode_fini_cpu(int cpu)
21672diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21673index ec9df6f..420eb93 100644
21674--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21675+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21676@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21677 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21678 int i;
21679
21680- while (leftover) {
21681+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21682+
21683+ if (leftover < sizeof(mc_header))
21684+ break;
21685+
21686 mc_header = (struct microcode_header_intel *)ucode_ptr;
21687
21688 mc_size = get_totalsize(mc_header);
21689diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21690index ea5f363..cb0e905 100644
21691--- a/arch/x86/kernel/cpu/mtrr/main.c
21692+++ b/arch/x86/kernel/cpu/mtrr/main.c
21693@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21694 u64 size_or_mask, size_and_mask;
21695 static bool mtrr_aps_delayed_init;
21696
21697-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21698+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21699
21700 const struct mtrr_ops *mtrr_if;
21701
21702diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21703index df5e41f..816c719 100644
21704--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21705+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21706@@ -25,7 +25,7 @@ struct mtrr_ops {
21707 int (*validate_add_page)(unsigned long base, unsigned long size,
21708 unsigned int type);
21709 int (*have_wrcomb)(void);
21710-};
21711+} __do_const;
21712
21713 extern int generic_get_free_region(unsigned long base, unsigned long size,
21714 int replace_reg);
21715diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21716index 143e5f5..5825081 100644
21717--- a/arch/x86/kernel/cpu/perf_event.c
21718+++ b/arch/x86/kernel/cpu/perf_event.c
21719@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21720
21721 }
21722
21723-static struct attribute_group x86_pmu_format_group = {
21724+static attribute_group_no_const x86_pmu_format_group = {
21725 .name = "format",
21726 .attrs = NULL,
21727 };
21728@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21729 NULL,
21730 };
21731
21732-static struct attribute_group x86_pmu_events_group = {
21733+static attribute_group_no_const x86_pmu_events_group = {
21734 .name = "events",
21735 .attrs = events_attr,
21736 };
21737@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21738 if (idx > GDT_ENTRIES)
21739 return 0;
21740
21741- desc = raw_cpu_ptr(gdt_page.gdt);
21742+ desc = get_cpu_gdt_table(smp_processor_id());
21743 }
21744
21745 return get_desc_base(desc + idx);
21746@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21747 break;
21748
21749 perf_callchain_store(entry, frame.return_address);
21750- fp = frame.next_frame;
21751+ fp = (const void __force_user *)frame.next_frame;
21752 }
21753 }
21754
21755diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21756index 97242a9..cf9c30e 100644
21757--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21758+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21759@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21760 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21761 {
21762 struct attribute **attrs;
21763- struct attribute_group *attr_group;
21764+ attribute_group_no_const *attr_group;
21765 int i = 0, j;
21766
21767 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21768diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21769index 498b6d9..4126515 100644
21770--- a/arch/x86/kernel/cpu/perf_event_intel.c
21771+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21772@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21773 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21774
21775 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21776- u64 capabilities;
21777+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21778
21779- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21780- x86_pmu.intel_cap.capabilities = capabilities;
21781+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21782+ x86_pmu.intel_cap.capabilities = capabilities;
21783 }
21784
21785 intel_ds_init();
21786diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21787index c4bb8b8..9f7384d 100644
21788--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21789+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21790@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21791 NULL,
21792 };
21793
21794-static struct attribute_group rapl_pmu_events_group = {
21795+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21796 .name = "events",
21797 .attrs = NULL, /* patched at runtime */
21798 };
21799diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21800index c635b8b..b78835e 100644
21801--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21802+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21803@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21804 static int __init uncore_type_init(struct intel_uncore_type *type)
21805 {
21806 struct intel_uncore_pmu *pmus;
21807- struct attribute_group *attr_group;
21808+ attribute_group_no_const *attr_group;
21809 struct attribute **attrs;
21810 int i, j;
21811
21812diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21813index 6c8c1e7..515b98a 100644
21814--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21815+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21816@@ -114,7 +114,7 @@ struct intel_uncore_box {
21817 struct uncore_event_desc {
21818 struct kobj_attribute attr;
21819 const char *config;
21820-};
21821+} __do_const;
21822
21823 ssize_t uncore_event_show(struct kobject *kobj,
21824 struct kobj_attribute *attr, char *buf);
21825diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21826index 83741a7..bd3507d 100644
21827--- a/arch/x86/kernel/cpuid.c
21828+++ b/arch/x86/kernel/cpuid.c
21829@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21830 return notifier_from_errno(err);
21831 }
21832
21833-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21834+static struct notifier_block cpuid_class_cpu_notifier =
21835 {
21836 .notifier_call = cpuid_class_cpu_callback,
21837 };
21838diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21839index aceb2f9..c76d3e3 100644
21840--- a/arch/x86/kernel/crash.c
21841+++ b/arch/x86/kernel/crash.c
21842@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21843 #ifdef CONFIG_X86_32
21844 struct pt_regs fixed_regs;
21845
21846- if (!user_mode_vm(regs)) {
21847+ if (!user_mode(regs)) {
21848 crash_fixup_ss_esp(&fixed_regs, regs);
21849 regs = &fixed_regs;
21850 }
21851diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21852index afa64ad..dce67dd 100644
21853--- a/arch/x86/kernel/crash_dump_64.c
21854+++ b/arch/x86/kernel/crash_dump_64.c
21855@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21856 return -ENOMEM;
21857
21858 if (userbuf) {
21859- if (copy_to_user(buf, vaddr + offset, csize)) {
21860+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21861 iounmap(vaddr);
21862 return -EFAULT;
21863 }
21864diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21865index f6dfd93..892ade4 100644
21866--- a/arch/x86/kernel/doublefault.c
21867+++ b/arch/x86/kernel/doublefault.c
21868@@ -12,7 +12,7 @@
21869
21870 #define DOUBLEFAULT_STACKSIZE (1024)
21871 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21872-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21873+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21874
21875 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21876
21877@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21878 unsigned long gdt, tss;
21879
21880 native_store_gdt(&gdt_desc);
21881- gdt = gdt_desc.address;
21882+ gdt = (unsigned long)gdt_desc.address;
21883
21884 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21885
21886@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21887 /* 0x2 bit is always set */
21888 .flags = X86_EFLAGS_SF | 0x2,
21889 .sp = STACK_START,
21890- .es = __USER_DS,
21891+ .es = __KERNEL_DS,
21892 .cs = __KERNEL_CS,
21893 .ss = __KERNEL_DS,
21894- .ds = __USER_DS,
21895+ .ds = __KERNEL_DS,
21896 .fs = __KERNEL_PERCPU,
21897
21898 .__cr3 = __pa_nodebug(swapper_pg_dir),
21899diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21900index b74ebc7..2c95874 100644
21901--- a/arch/x86/kernel/dumpstack.c
21902+++ b/arch/x86/kernel/dumpstack.c
21903@@ -2,6 +2,9 @@
21904 * Copyright (C) 1991, 1992 Linus Torvalds
21905 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21906 */
21907+#ifdef CONFIG_GRKERNSEC_HIDESYM
21908+#define __INCLUDED_BY_HIDESYM 1
21909+#endif
21910 #include <linux/kallsyms.h>
21911 #include <linux/kprobes.h>
21912 #include <linux/uaccess.h>
21913@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21914
21915 void printk_address(unsigned long address)
21916 {
21917- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21918+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21919 }
21920
21921 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21922 static void
21923 print_ftrace_graph_addr(unsigned long addr, void *data,
21924 const struct stacktrace_ops *ops,
21925- struct thread_info *tinfo, int *graph)
21926+ struct task_struct *task, int *graph)
21927 {
21928- struct task_struct *task;
21929 unsigned long ret_addr;
21930 int index;
21931
21932 if (addr != (unsigned long)return_to_handler)
21933 return;
21934
21935- task = tinfo->task;
21936 index = task->curr_ret_stack;
21937
21938 if (!task->ret_stack || index < *graph)
21939@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21940 static inline void
21941 print_ftrace_graph_addr(unsigned long addr, void *data,
21942 const struct stacktrace_ops *ops,
21943- struct thread_info *tinfo, int *graph)
21944+ struct task_struct *task, int *graph)
21945 { }
21946 #endif
21947
21948@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21949 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21950 */
21951
21952-static inline int valid_stack_ptr(struct thread_info *tinfo,
21953- void *p, unsigned int size, void *end)
21954+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21955 {
21956- void *t = tinfo;
21957 if (end) {
21958 if (p < end && p >= (end-THREAD_SIZE))
21959 return 1;
21960@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21961 }
21962
21963 unsigned long
21964-print_context_stack(struct thread_info *tinfo,
21965+print_context_stack(struct task_struct *task, void *stack_start,
21966 unsigned long *stack, unsigned long bp,
21967 const struct stacktrace_ops *ops, void *data,
21968 unsigned long *end, int *graph)
21969 {
21970 struct stack_frame *frame = (struct stack_frame *)bp;
21971
21972- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21973+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21974 unsigned long addr;
21975
21976 addr = *stack;
21977@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21978 } else {
21979 ops->address(data, addr, 0);
21980 }
21981- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21982+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21983 }
21984 stack++;
21985 }
21986@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21987 EXPORT_SYMBOL_GPL(print_context_stack);
21988
21989 unsigned long
21990-print_context_stack_bp(struct thread_info *tinfo,
21991+print_context_stack_bp(struct task_struct *task, void *stack_start,
21992 unsigned long *stack, unsigned long bp,
21993 const struct stacktrace_ops *ops, void *data,
21994 unsigned long *end, int *graph)
21995@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21996 struct stack_frame *frame = (struct stack_frame *)bp;
21997 unsigned long *ret_addr = &frame->return_address;
21998
21999- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22000+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22001 unsigned long addr = *ret_addr;
22002
22003 if (!__kernel_text_address(addr))
22004@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22005 ops->address(data, addr, 1);
22006 frame = frame->next_frame;
22007 ret_addr = &frame->return_address;
22008- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22009+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22010 }
22011
22012 return (unsigned long)frame;
22013@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22014 static void print_trace_address(void *data, unsigned long addr, int reliable)
22015 {
22016 touch_nmi_watchdog();
22017- printk(data);
22018+ printk("%s", (char *)data);
22019 printk_stack_address(addr, reliable);
22020 }
22021
22022@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22023 EXPORT_SYMBOL_GPL(oops_begin);
22024 NOKPROBE_SYMBOL(oops_begin);
22025
22026+extern void gr_handle_kernel_exploit(void);
22027+
22028 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22029 {
22030 if (regs && kexec_should_crash(current))
22031@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22032 panic("Fatal exception in interrupt");
22033 if (panic_on_oops)
22034 panic("Fatal exception");
22035- do_exit(signr);
22036+
22037+ gr_handle_kernel_exploit();
22038+
22039+ do_group_exit(signr);
22040 }
22041 NOKPROBE_SYMBOL(oops_end);
22042
22043@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22044 print_modules();
22045 show_regs(regs);
22046 #ifdef CONFIG_X86_32
22047- if (user_mode_vm(regs)) {
22048+ if (user_mode(regs)) {
22049 sp = regs->sp;
22050 ss = regs->ss & 0xffff;
22051 } else {
22052@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22053 unsigned long flags = oops_begin();
22054 int sig = SIGSEGV;
22055
22056- if (!user_mode_vm(regs))
22057+ if (!user_mode(regs))
22058 report_bug(regs->ip, regs);
22059
22060 if (__die(str, regs, err))
22061diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22062index 5abd4cd..c65733b 100644
22063--- a/arch/x86/kernel/dumpstack_32.c
22064+++ b/arch/x86/kernel/dumpstack_32.c
22065@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22066 bp = stack_frame(task, regs);
22067
22068 for (;;) {
22069- struct thread_info *context;
22070+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22071 void *end_stack;
22072
22073 end_stack = is_hardirq_stack(stack, cpu);
22074 if (!end_stack)
22075 end_stack = is_softirq_stack(stack, cpu);
22076
22077- context = task_thread_info(task);
22078- bp = ops->walk_stack(context, stack, bp, ops, data,
22079+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22080 end_stack, &graph);
22081
22082 /* Stop if not on irq stack */
22083@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22084 int i;
22085
22086 show_regs_print_info(KERN_EMERG);
22087- __show_regs(regs, !user_mode_vm(regs));
22088+ __show_regs(regs, !user_mode(regs));
22089
22090 /*
22091 * When in-kernel, we also print out the stack and code at the
22092 * time of the fault..
22093 */
22094- if (!user_mode_vm(regs)) {
22095+ if (!user_mode(regs)) {
22096 unsigned int code_prologue = code_bytes * 43 / 64;
22097 unsigned int code_len = code_bytes;
22098 unsigned char c;
22099 u8 *ip;
22100+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22101
22102 pr_emerg("Stack:\n");
22103 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22104
22105 pr_emerg("Code:");
22106
22107- ip = (u8 *)regs->ip - code_prologue;
22108+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22109 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22110 /* try starting at IP */
22111- ip = (u8 *)regs->ip;
22112+ ip = (u8 *)regs->ip + cs_base;
22113 code_len = code_len - code_prologue + 1;
22114 }
22115 for (i = 0; i < code_len; i++, ip++) {
22116@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22117 pr_cont(" Bad EIP value.");
22118 break;
22119 }
22120- if (ip == (u8 *)regs->ip)
22121+ if (ip == (u8 *)regs->ip + cs_base)
22122 pr_cont(" <%02x>", c);
22123 else
22124 pr_cont(" %02x", c);
22125@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22126 {
22127 unsigned short ud2;
22128
22129+ ip = ktla_ktva(ip);
22130 if (ip < PAGE_OFFSET)
22131 return 0;
22132 if (probe_kernel_address((unsigned short *)ip, ud2))
22133@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22134
22135 return ud2 == 0x0b0f;
22136 }
22137+
22138+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22139+void pax_check_alloca(unsigned long size)
22140+{
22141+ unsigned long sp = (unsigned long)&sp, stack_left;
22142+
22143+ /* all kernel stacks are of the same size */
22144+ stack_left = sp & (THREAD_SIZE - 1);
22145+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22146+}
22147+EXPORT_SYMBOL(pax_check_alloca);
22148+#endif
22149diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22150index ff86f19..73eabf4 100644
22151--- a/arch/x86/kernel/dumpstack_64.c
22152+++ b/arch/x86/kernel/dumpstack_64.c
22153@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22154 const struct stacktrace_ops *ops, void *data)
22155 {
22156 const unsigned cpu = get_cpu();
22157- struct thread_info *tinfo;
22158 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22159 unsigned long dummy;
22160 unsigned used = 0;
22161 int graph = 0;
22162 int done = 0;
22163+ void *stack_start;
22164
22165 if (!task)
22166 task = current;
22167@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22168 * current stack address. If the stacks consist of nested
22169 * exceptions
22170 */
22171- tinfo = task_thread_info(task);
22172 while (!done) {
22173 unsigned long *stack_end;
22174 enum stack_type stype;
22175@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22176 if (ops->stack(data, id) < 0)
22177 break;
22178
22179- bp = ops->walk_stack(tinfo, stack, bp, ops,
22180+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22181 data, stack_end, &graph);
22182 ops->stack(data, "<EOE>");
22183 /*
22184@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22185 * second-to-last pointer (index -2 to end) in the
22186 * exception stack:
22187 */
22188+ if ((u16)stack_end[-1] != __KERNEL_DS)
22189+ goto out;
22190 stack = (unsigned long *) stack_end[-2];
22191 done = 0;
22192 break;
22193@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22194
22195 if (ops->stack(data, "IRQ") < 0)
22196 break;
22197- bp = ops->walk_stack(tinfo, stack, bp,
22198+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22199 ops, data, stack_end, &graph);
22200 /*
22201 * We link to the next stack (which would be
22202@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22203 /*
22204 * This handles the process stack:
22205 */
22206- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22207+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22208+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22209+out:
22210 put_cpu();
22211 }
22212 EXPORT_SYMBOL(dump_trace);
22213@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22214 {
22215 unsigned short ud2;
22216
22217- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22218+ if (probe_kernel_address((unsigned short *)ip, ud2))
22219 return 0;
22220
22221 return ud2 == 0x0b0f;
22222 }
22223+
22224+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22225+void pax_check_alloca(unsigned long size)
22226+{
22227+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22228+ unsigned cpu, used;
22229+ char *id;
22230+
22231+ /* check the process stack first */
22232+ stack_start = (unsigned long)task_stack_page(current);
22233+ stack_end = stack_start + THREAD_SIZE;
22234+ if (likely(stack_start <= sp && sp < stack_end)) {
22235+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22236+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22237+ return;
22238+ }
22239+
22240+ cpu = get_cpu();
22241+
22242+ /* check the irq stacks */
22243+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22244+ stack_start = stack_end - IRQ_STACK_SIZE;
22245+ if (stack_start <= sp && sp < stack_end) {
22246+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22247+ put_cpu();
22248+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22249+ return;
22250+ }
22251+
22252+ /* check the exception stacks */
22253+ used = 0;
22254+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22255+ stack_start = stack_end - EXCEPTION_STKSZ;
22256+ if (stack_end && stack_start <= sp && sp < stack_end) {
22257+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22258+ put_cpu();
22259+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22260+ return;
22261+ }
22262+
22263+ put_cpu();
22264+
22265+ /* unknown stack */
22266+ BUG();
22267+}
22268+EXPORT_SYMBOL(pax_check_alloca);
22269+#endif
22270diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22271index dd2f07a..845dc05 100644
22272--- a/arch/x86/kernel/e820.c
22273+++ b/arch/x86/kernel/e820.c
22274@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22275
22276 static void early_panic(char *msg)
22277 {
22278- early_printk(msg);
22279- panic(msg);
22280+ early_printk("%s", msg);
22281+ panic("%s", msg);
22282 }
22283
22284 static int userdef __initdata;
22285diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22286index 01d1c18..8073693 100644
22287--- a/arch/x86/kernel/early_printk.c
22288+++ b/arch/x86/kernel/early_printk.c
22289@@ -7,6 +7,7 @@
22290 #include <linux/pci_regs.h>
22291 #include <linux/pci_ids.h>
22292 #include <linux/errno.h>
22293+#include <linux/sched.h>
22294 #include <asm/io.h>
22295 #include <asm/processor.h>
22296 #include <asm/fcntl.h>
22297diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22298index 000d419..8f66802 100644
22299--- a/arch/x86/kernel/entry_32.S
22300+++ b/arch/x86/kernel/entry_32.S
22301@@ -177,13 +177,154 @@
22302 /*CFI_REL_OFFSET gs, PT_GS*/
22303 .endm
22304 .macro SET_KERNEL_GS reg
22305+
22306+#ifdef CONFIG_CC_STACKPROTECTOR
22307 movl $(__KERNEL_STACK_CANARY), \reg
22308+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22309+ movl $(__USER_DS), \reg
22310+#else
22311+ xorl \reg, \reg
22312+#endif
22313+
22314 movl \reg, %gs
22315 .endm
22316
22317 #endif /* CONFIG_X86_32_LAZY_GS */
22318
22319-.macro SAVE_ALL
22320+.macro pax_enter_kernel
22321+#ifdef CONFIG_PAX_KERNEXEC
22322+ call pax_enter_kernel
22323+#endif
22324+.endm
22325+
22326+.macro pax_exit_kernel
22327+#ifdef CONFIG_PAX_KERNEXEC
22328+ call pax_exit_kernel
22329+#endif
22330+.endm
22331+
22332+#ifdef CONFIG_PAX_KERNEXEC
22333+ENTRY(pax_enter_kernel)
22334+#ifdef CONFIG_PARAVIRT
22335+ pushl %eax
22336+ pushl %ecx
22337+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22338+ mov %eax, %esi
22339+#else
22340+ mov %cr0, %esi
22341+#endif
22342+ bts $16, %esi
22343+ jnc 1f
22344+ mov %cs, %esi
22345+ cmp $__KERNEL_CS, %esi
22346+ jz 3f
22347+ ljmp $__KERNEL_CS, $3f
22348+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22349+2:
22350+#ifdef CONFIG_PARAVIRT
22351+ mov %esi, %eax
22352+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22353+#else
22354+ mov %esi, %cr0
22355+#endif
22356+3:
22357+#ifdef CONFIG_PARAVIRT
22358+ popl %ecx
22359+ popl %eax
22360+#endif
22361+ ret
22362+ENDPROC(pax_enter_kernel)
22363+
22364+ENTRY(pax_exit_kernel)
22365+#ifdef CONFIG_PARAVIRT
22366+ pushl %eax
22367+ pushl %ecx
22368+#endif
22369+ mov %cs, %esi
22370+ cmp $__KERNEXEC_KERNEL_CS, %esi
22371+ jnz 2f
22372+#ifdef CONFIG_PARAVIRT
22373+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22374+ mov %eax, %esi
22375+#else
22376+ mov %cr0, %esi
22377+#endif
22378+ btr $16, %esi
22379+ ljmp $__KERNEL_CS, $1f
22380+1:
22381+#ifdef CONFIG_PARAVIRT
22382+ mov %esi, %eax
22383+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22384+#else
22385+ mov %esi, %cr0
22386+#endif
22387+2:
22388+#ifdef CONFIG_PARAVIRT
22389+ popl %ecx
22390+ popl %eax
22391+#endif
22392+ ret
22393+ENDPROC(pax_exit_kernel)
22394+#endif
22395+
22396+ .macro pax_erase_kstack
22397+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22398+ call pax_erase_kstack
22399+#endif
22400+ .endm
22401+
22402+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22403+/*
22404+ * ebp: thread_info
22405+ */
22406+ENTRY(pax_erase_kstack)
22407+ pushl %edi
22408+ pushl %ecx
22409+ pushl %eax
22410+
22411+ mov TI_lowest_stack(%ebp), %edi
22412+ mov $-0xBEEF, %eax
22413+ std
22414+
22415+1: mov %edi, %ecx
22416+ and $THREAD_SIZE_asm - 1, %ecx
22417+ shr $2, %ecx
22418+ repne scasl
22419+ jecxz 2f
22420+
22421+ cmp $2*16, %ecx
22422+ jc 2f
22423+
22424+ mov $2*16, %ecx
22425+ repe scasl
22426+ jecxz 2f
22427+ jne 1b
22428+
22429+2: cld
22430+ or $2*4, %edi
22431+ mov %esp, %ecx
22432+ sub %edi, %ecx
22433+
22434+ cmp $THREAD_SIZE_asm, %ecx
22435+ jb 3f
22436+ ud2
22437+3:
22438+
22439+ shr $2, %ecx
22440+ rep stosl
22441+
22442+ mov TI_task_thread_sp0(%ebp), %edi
22443+ sub $128, %edi
22444+ mov %edi, TI_lowest_stack(%ebp)
22445+
22446+ popl %eax
22447+ popl %ecx
22448+ popl %edi
22449+ ret
22450+ENDPROC(pax_erase_kstack)
22451+#endif
22452+
22453+.macro __SAVE_ALL _DS
22454 cld
22455 PUSH_GS
22456 pushl_cfi %fs
22457@@ -206,7 +347,7 @@
22458 CFI_REL_OFFSET ecx, 0
22459 pushl_cfi %ebx
22460 CFI_REL_OFFSET ebx, 0
22461- movl $(__USER_DS), %edx
22462+ movl $\_DS, %edx
22463 movl %edx, %ds
22464 movl %edx, %es
22465 movl $(__KERNEL_PERCPU), %edx
22466@@ -214,6 +355,15 @@
22467 SET_KERNEL_GS %edx
22468 .endm
22469
22470+.macro SAVE_ALL
22471+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22472+ __SAVE_ALL __KERNEL_DS
22473+ pax_enter_kernel
22474+#else
22475+ __SAVE_ALL __USER_DS
22476+#endif
22477+.endm
22478+
22479 .macro RESTORE_INT_REGS
22480 popl_cfi %ebx
22481 CFI_RESTORE ebx
22482@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22483 popfl_cfi
22484 jmp syscall_exit
22485 CFI_ENDPROC
22486-END(ret_from_fork)
22487+ENDPROC(ret_from_fork)
22488
22489 ENTRY(ret_from_kernel_thread)
22490 CFI_STARTPROC
22491@@ -340,7 +490,15 @@ ret_from_intr:
22492 andl $SEGMENT_RPL_MASK, %eax
22493 #endif
22494 cmpl $USER_RPL, %eax
22495+
22496+#ifdef CONFIG_PAX_KERNEXEC
22497+ jae resume_userspace
22498+
22499+ pax_exit_kernel
22500+ jmp resume_kernel
22501+#else
22502 jb resume_kernel # not returning to v8086 or userspace
22503+#endif
22504
22505 ENTRY(resume_userspace)
22506 LOCKDEP_SYS_EXIT
22507@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22508 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22509 # int/exception return?
22510 jne work_pending
22511- jmp restore_all
22512-END(ret_from_exception)
22513+ jmp restore_all_pax
22514+ENDPROC(ret_from_exception)
22515
22516 #ifdef CONFIG_PREEMPT
22517 ENTRY(resume_kernel)
22518@@ -365,7 +523,7 @@ need_resched:
22519 jz restore_all
22520 call preempt_schedule_irq
22521 jmp need_resched
22522-END(resume_kernel)
22523+ENDPROC(resume_kernel)
22524 #endif
22525 CFI_ENDPROC
22526
22527@@ -395,30 +553,45 @@ sysenter_past_esp:
22528 /*CFI_REL_OFFSET cs, 0*/
22529 /*
22530 * Push current_thread_info()->sysenter_return to the stack.
22531- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22532- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22533 */
22534- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22535+ pushl_cfi $0
22536 CFI_REL_OFFSET eip, 0
22537
22538 pushl_cfi %eax
22539 SAVE_ALL
22540+ GET_THREAD_INFO(%ebp)
22541+ movl TI_sysenter_return(%ebp),%ebp
22542+ movl %ebp,PT_EIP(%esp)
22543 ENABLE_INTERRUPTS(CLBR_NONE)
22544
22545 /*
22546 * Load the potential sixth argument from user stack.
22547 * Careful about security.
22548 */
22549+ movl PT_OLDESP(%esp),%ebp
22550+
22551+#ifdef CONFIG_PAX_MEMORY_UDEREF
22552+ mov PT_OLDSS(%esp),%ds
22553+1: movl %ds:(%ebp),%ebp
22554+ push %ss
22555+ pop %ds
22556+#else
22557 cmpl $__PAGE_OFFSET-3,%ebp
22558 jae syscall_fault
22559 ASM_STAC
22560 1: movl (%ebp),%ebp
22561 ASM_CLAC
22562+#endif
22563+
22564 movl %ebp,PT_EBP(%esp)
22565 _ASM_EXTABLE(1b,syscall_fault)
22566
22567 GET_THREAD_INFO(%ebp)
22568
22569+#ifdef CONFIG_PAX_RANDKSTACK
22570+ pax_erase_kstack
22571+#endif
22572+
22573 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22574 jnz sysenter_audit
22575 sysenter_do_call:
22576@@ -434,12 +607,24 @@ sysenter_after_call:
22577 testl $_TIF_ALLWORK_MASK, %ecx
22578 jne sysexit_audit
22579 sysenter_exit:
22580+
22581+#ifdef CONFIG_PAX_RANDKSTACK
22582+ pushl_cfi %eax
22583+ movl %esp, %eax
22584+ call pax_randomize_kstack
22585+ popl_cfi %eax
22586+#endif
22587+
22588+ pax_erase_kstack
22589+
22590 /* if something modifies registers it must also disable sysexit */
22591 movl PT_EIP(%esp), %edx
22592 movl PT_OLDESP(%esp), %ecx
22593 xorl %ebp,%ebp
22594 TRACE_IRQS_ON
22595 1: mov PT_FS(%esp), %fs
22596+2: mov PT_DS(%esp), %ds
22597+3: mov PT_ES(%esp), %es
22598 PTGS_TO_GS
22599 ENABLE_INTERRUPTS_SYSEXIT
22600
22601@@ -453,6 +638,9 @@ sysenter_audit:
22602 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22603 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22604 call __audit_syscall_entry
22605+
22606+ pax_erase_kstack
22607+
22608 popl_cfi %ecx /* get that remapped edx off the stack */
22609 popl_cfi %ecx /* get that remapped esi off the stack */
22610 movl PT_EAX(%esp),%eax /* reload syscall number */
22611@@ -479,10 +667,16 @@ sysexit_audit:
22612
22613 CFI_ENDPROC
22614 .pushsection .fixup,"ax"
22615-2: movl $0,PT_FS(%esp)
22616+4: movl $0,PT_FS(%esp)
22617+ jmp 1b
22618+5: movl $0,PT_DS(%esp)
22619+ jmp 1b
22620+6: movl $0,PT_ES(%esp)
22621 jmp 1b
22622 .popsection
22623- _ASM_EXTABLE(1b,2b)
22624+ _ASM_EXTABLE(1b,4b)
22625+ _ASM_EXTABLE(2b,5b)
22626+ _ASM_EXTABLE(3b,6b)
22627 PTGS_TO_GS_EX
22628 ENDPROC(ia32_sysenter_target)
22629
22630@@ -493,6 +687,11 @@ ENTRY(system_call)
22631 pushl_cfi %eax # save orig_eax
22632 SAVE_ALL
22633 GET_THREAD_INFO(%ebp)
22634+
22635+#ifdef CONFIG_PAX_RANDKSTACK
22636+ pax_erase_kstack
22637+#endif
22638+
22639 # system call tracing in operation / emulation
22640 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22641 jnz syscall_trace_entry
22642@@ -512,6 +711,15 @@ syscall_exit:
22643 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22644 jne syscall_exit_work
22645
22646+restore_all_pax:
22647+
22648+#ifdef CONFIG_PAX_RANDKSTACK
22649+ movl %esp, %eax
22650+ call pax_randomize_kstack
22651+#endif
22652+
22653+ pax_erase_kstack
22654+
22655 restore_all:
22656 TRACE_IRQS_IRET
22657 restore_all_notrace:
22658@@ -566,14 +774,34 @@ ldt_ss:
22659 * compensating for the offset by changing to the ESPFIX segment with
22660 * a base address that matches for the difference.
22661 */
22662-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22663+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22664 mov %esp, %edx /* load kernel esp */
22665 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22666 mov %dx, %ax /* eax: new kernel esp */
22667 sub %eax, %edx /* offset (low word is 0) */
22668+#ifdef CONFIG_SMP
22669+ movl PER_CPU_VAR(cpu_number), %ebx
22670+ shll $PAGE_SHIFT_asm, %ebx
22671+ addl $cpu_gdt_table, %ebx
22672+#else
22673+ movl $cpu_gdt_table, %ebx
22674+#endif
22675 shr $16, %edx
22676- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22677- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22678+
22679+#ifdef CONFIG_PAX_KERNEXEC
22680+ mov %cr0, %esi
22681+ btr $16, %esi
22682+ mov %esi, %cr0
22683+#endif
22684+
22685+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22686+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22687+
22688+#ifdef CONFIG_PAX_KERNEXEC
22689+ bts $16, %esi
22690+ mov %esi, %cr0
22691+#endif
22692+
22693 pushl_cfi $__ESPFIX_SS
22694 pushl_cfi %eax /* new kernel esp */
22695 /* Disable interrupts, but do not irqtrace this section: we
22696@@ -603,20 +831,18 @@ work_resched:
22697 movl TI_flags(%ebp), %ecx
22698 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22699 # than syscall tracing?
22700- jz restore_all
22701+ jz restore_all_pax
22702 testb $_TIF_NEED_RESCHED, %cl
22703 jnz work_resched
22704
22705 work_notifysig: # deal with pending signals and
22706 # notify-resume requests
22707+ movl %esp, %eax
22708 #ifdef CONFIG_VM86
22709 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22710- movl %esp, %eax
22711 jne work_notifysig_v86 # returning to kernel-space or
22712 # vm86-space
22713 1:
22714-#else
22715- movl %esp, %eax
22716 #endif
22717 TRACE_IRQS_ON
22718 ENABLE_INTERRUPTS(CLBR_NONE)
22719@@ -637,7 +863,7 @@ work_notifysig_v86:
22720 movl %eax, %esp
22721 jmp 1b
22722 #endif
22723-END(work_pending)
22724+ENDPROC(work_pending)
22725
22726 # perform syscall exit tracing
22727 ALIGN
22728@@ -645,11 +871,14 @@ syscall_trace_entry:
22729 movl $-ENOSYS,PT_EAX(%esp)
22730 movl %esp, %eax
22731 call syscall_trace_enter
22732+
22733+ pax_erase_kstack
22734+
22735 /* What it returned is what we'll actually use. */
22736 cmpl $(NR_syscalls), %eax
22737 jnae syscall_call
22738 jmp syscall_exit
22739-END(syscall_trace_entry)
22740+ENDPROC(syscall_trace_entry)
22741
22742 # perform syscall exit tracing
22743 ALIGN
22744@@ -662,26 +891,30 @@ syscall_exit_work:
22745 movl %esp, %eax
22746 call syscall_trace_leave
22747 jmp resume_userspace
22748-END(syscall_exit_work)
22749+ENDPROC(syscall_exit_work)
22750 CFI_ENDPROC
22751
22752 RING0_INT_FRAME # can't unwind into user space anyway
22753 syscall_fault:
22754+#ifdef CONFIG_PAX_MEMORY_UDEREF
22755+ push %ss
22756+ pop %ds
22757+#endif
22758 ASM_CLAC
22759 GET_THREAD_INFO(%ebp)
22760 movl $-EFAULT,PT_EAX(%esp)
22761 jmp resume_userspace
22762-END(syscall_fault)
22763+ENDPROC(syscall_fault)
22764
22765 syscall_badsys:
22766 movl $-ENOSYS,%eax
22767 jmp syscall_after_call
22768-END(syscall_badsys)
22769+ENDPROC(syscall_badsys)
22770
22771 sysenter_badsys:
22772 movl $-ENOSYS,%eax
22773 jmp sysenter_after_call
22774-END(sysenter_badsys)
22775+ENDPROC(sysenter_badsys)
22776 CFI_ENDPROC
22777
22778 .macro FIXUP_ESPFIX_STACK
22779@@ -694,8 +927,15 @@ END(sysenter_badsys)
22780 */
22781 #ifdef CONFIG_X86_ESPFIX32
22782 /* fixup the stack */
22783- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22784- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22785+#ifdef CONFIG_SMP
22786+ movl PER_CPU_VAR(cpu_number), %ebx
22787+ shll $PAGE_SHIFT_asm, %ebx
22788+ addl $cpu_gdt_table, %ebx
22789+#else
22790+ movl $cpu_gdt_table, %ebx
22791+#endif
22792+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22793+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22794 shl $16, %eax
22795 addl %esp, %eax /* the adjusted stack pointer */
22796 pushl_cfi $__KERNEL_DS
22797@@ -751,7 +991,7 @@ vector=vector+1
22798 .endr
22799 2: jmp common_interrupt
22800 .endr
22801-END(irq_entries_start)
22802+ENDPROC(irq_entries_start)
22803
22804 .previous
22805 END(interrupt)
22806@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22807 pushl_cfi $do_coprocessor_error
22808 jmp error_code
22809 CFI_ENDPROC
22810-END(coprocessor_error)
22811+ENDPROC(coprocessor_error)
22812
22813 ENTRY(simd_coprocessor_error)
22814 RING0_INT_FRAME
22815@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22816 .section .altinstructions,"a"
22817 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22818 .previous
22819-.section .altinstr_replacement,"ax"
22820+.section .altinstr_replacement,"a"
22821 663: pushl $do_simd_coprocessor_error
22822 664:
22823 .previous
22824@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22825 #endif
22826 jmp error_code
22827 CFI_ENDPROC
22828-END(simd_coprocessor_error)
22829+ENDPROC(simd_coprocessor_error)
22830
22831 ENTRY(device_not_available)
22832 RING0_INT_FRAME
22833@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22834 pushl_cfi $do_device_not_available
22835 jmp error_code
22836 CFI_ENDPROC
22837-END(device_not_available)
22838+ENDPROC(device_not_available)
22839
22840 #ifdef CONFIG_PARAVIRT
22841 ENTRY(native_iret)
22842 iret
22843 _ASM_EXTABLE(native_iret, iret_exc)
22844-END(native_iret)
22845+ENDPROC(native_iret)
22846
22847 ENTRY(native_irq_enable_sysexit)
22848 sti
22849 sysexit
22850-END(native_irq_enable_sysexit)
22851+ENDPROC(native_irq_enable_sysexit)
22852 #endif
22853
22854 ENTRY(overflow)
22855@@ -860,7 +1100,7 @@ ENTRY(overflow)
22856 pushl_cfi $do_overflow
22857 jmp error_code
22858 CFI_ENDPROC
22859-END(overflow)
22860+ENDPROC(overflow)
22861
22862 ENTRY(bounds)
22863 RING0_INT_FRAME
22864@@ -869,7 +1109,7 @@ ENTRY(bounds)
22865 pushl_cfi $do_bounds
22866 jmp error_code
22867 CFI_ENDPROC
22868-END(bounds)
22869+ENDPROC(bounds)
22870
22871 ENTRY(invalid_op)
22872 RING0_INT_FRAME
22873@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22874 pushl_cfi $do_invalid_op
22875 jmp error_code
22876 CFI_ENDPROC
22877-END(invalid_op)
22878+ENDPROC(invalid_op)
22879
22880 ENTRY(coprocessor_segment_overrun)
22881 RING0_INT_FRAME
22882@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22883 pushl_cfi $do_coprocessor_segment_overrun
22884 jmp error_code
22885 CFI_ENDPROC
22886-END(coprocessor_segment_overrun)
22887+ENDPROC(coprocessor_segment_overrun)
22888
22889 ENTRY(invalid_TSS)
22890 RING0_EC_FRAME
22891@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22892 pushl_cfi $do_invalid_TSS
22893 jmp error_code
22894 CFI_ENDPROC
22895-END(invalid_TSS)
22896+ENDPROC(invalid_TSS)
22897
22898 ENTRY(segment_not_present)
22899 RING0_EC_FRAME
22900@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22901 pushl_cfi $do_segment_not_present
22902 jmp error_code
22903 CFI_ENDPROC
22904-END(segment_not_present)
22905+ENDPROC(segment_not_present)
22906
22907 ENTRY(stack_segment)
22908 RING0_EC_FRAME
22909@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22910 pushl_cfi $do_stack_segment
22911 jmp error_code
22912 CFI_ENDPROC
22913-END(stack_segment)
22914+ENDPROC(stack_segment)
22915
22916 ENTRY(alignment_check)
22917 RING0_EC_FRAME
22918@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22919 pushl_cfi $do_alignment_check
22920 jmp error_code
22921 CFI_ENDPROC
22922-END(alignment_check)
22923+ENDPROC(alignment_check)
22924
22925 ENTRY(divide_error)
22926 RING0_INT_FRAME
22927@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22928 pushl_cfi $do_divide_error
22929 jmp error_code
22930 CFI_ENDPROC
22931-END(divide_error)
22932+ENDPROC(divide_error)
22933
22934 #ifdef CONFIG_X86_MCE
22935 ENTRY(machine_check)
22936@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22937 pushl_cfi machine_check_vector
22938 jmp error_code
22939 CFI_ENDPROC
22940-END(machine_check)
22941+ENDPROC(machine_check)
22942 #endif
22943
22944 ENTRY(spurious_interrupt_bug)
22945@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22946 pushl_cfi $do_spurious_interrupt_bug
22947 jmp error_code
22948 CFI_ENDPROC
22949-END(spurious_interrupt_bug)
22950+ENDPROC(spurious_interrupt_bug)
22951
22952 #ifdef CONFIG_XEN
22953 /* Xen doesn't set %esp to be precisely what the normal sysenter
22954@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22955
22956 ENTRY(mcount)
22957 ret
22958-END(mcount)
22959+ENDPROC(mcount)
22960
22961 ENTRY(ftrace_caller)
22962 pushl %eax
22963@@ -1084,7 +1324,7 @@ ftrace_graph_call:
22964 .globl ftrace_stub
22965 ftrace_stub:
22966 ret
22967-END(ftrace_caller)
22968+ENDPROC(ftrace_caller)
22969
22970 ENTRY(ftrace_regs_caller)
22971 pushf /* push flags before compare (in cs location) */
22972@@ -1182,7 +1422,7 @@ trace:
22973 popl %ecx
22974 popl %eax
22975 jmp ftrace_stub
22976-END(mcount)
22977+ENDPROC(mcount)
22978 #endif /* CONFIG_DYNAMIC_FTRACE */
22979 #endif /* CONFIG_FUNCTION_TRACER */
22980
22981@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
22982 popl %ecx
22983 popl %eax
22984 ret
22985-END(ftrace_graph_caller)
22986+ENDPROC(ftrace_graph_caller)
22987
22988 .globl return_to_handler
22989 return_to_handler:
22990@@ -1261,15 +1501,18 @@ error_code:
22991 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22992 REG_TO_PTGS %ecx
22993 SET_KERNEL_GS %ecx
22994- movl $(__USER_DS), %ecx
22995+ movl $(__KERNEL_DS), %ecx
22996 movl %ecx, %ds
22997 movl %ecx, %es
22998+
22999+ pax_enter_kernel
23000+
23001 TRACE_IRQS_OFF
23002 movl %esp,%eax # pt_regs pointer
23003 call *%edi
23004 jmp ret_from_exception
23005 CFI_ENDPROC
23006-END(page_fault)
23007+ENDPROC(page_fault)
23008
23009 /*
23010 * Debug traps and NMI can happen at the one SYSENTER instruction
23011@@ -1312,7 +1555,7 @@ debug_stack_correct:
23012 call do_debug
23013 jmp ret_from_exception
23014 CFI_ENDPROC
23015-END(debug)
23016+ENDPROC(debug)
23017
23018 /*
23019 * NMI is doubly nasty. It can happen _while_ we're handling
23020@@ -1352,6 +1595,9 @@ nmi_stack_correct:
23021 xorl %edx,%edx # zero error code
23022 movl %esp,%eax # pt_regs pointer
23023 call do_nmi
23024+
23025+ pax_exit_kernel
23026+
23027 jmp restore_all_notrace
23028 CFI_ENDPROC
23029
23030@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
23031 FIXUP_ESPFIX_STACK # %eax == %esp
23032 xorl %edx,%edx # zero error code
23033 call do_nmi
23034+
23035+ pax_exit_kernel
23036+
23037 RESTORE_REGS
23038 lss 12+4(%esp), %esp # back to espfix stack
23039 CFI_ADJUST_CFA_OFFSET -24
23040 jmp irq_return
23041 #endif
23042 CFI_ENDPROC
23043-END(nmi)
23044+ENDPROC(nmi)
23045
23046 ENTRY(int3)
23047 RING0_INT_FRAME
23048@@ -1408,14 +1657,14 @@ ENTRY(int3)
23049 call do_int3
23050 jmp ret_from_exception
23051 CFI_ENDPROC
23052-END(int3)
23053+ENDPROC(int3)
23054
23055 ENTRY(general_protection)
23056 RING0_EC_FRAME
23057 pushl_cfi $do_general_protection
23058 jmp error_code
23059 CFI_ENDPROC
23060-END(general_protection)
23061+ENDPROC(general_protection)
23062
23063 #ifdef CONFIG_KVM_GUEST
23064 ENTRY(async_page_fault)
23065@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
23066 pushl_cfi $do_async_page_fault
23067 jmp error_code
23068 CFI_ENDPROC
23069-END(async_page_fault)
23070+ENDPROC(async_page_fault)
23071 #endif
23072
23073diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23074index 4ee9a23..c786610 100644
23075--- a/arch/x86/kernel/entry_64.S
23076+++ b/arch/x86/kernel/entry_64.S
23077@@ -59,6 +59,8 @@
23078 #include <asm/smap.h>
23079 #include <asm/pgtable_types.h>
23080 #include <linux/err.h>
23081+#include <asm/pgtable.h>
23082+#include <asm/alternative-asm.h>
23083
23084 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23085 #include <linux/elf-em.h>
23086@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23087 ENDPROC(native_usergs_sysret64)
23088 #endif /* CONFIG_PARAVIRT */
23089
23090+ .macro ljmpq sel, off
23091+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23092+ .byte 0x48; ljmp *1234f(%rip)
23093+ .pushsection .rodata
23094+ .align 16
23095+ 1234: .quad \off; .word \sel
23096+ .popsection
23097+#else
23098+ pushq $\sel
23099+ pushq $\off
23100+ lretq
23101+#endif
23102+ .endm
23103+
23104+ .macro pax_enter_kernel
23105+ pax_set_fptr_mask
23106+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23107+ call pax_enter_kernel
23108+#endif
23109+ .endm
23110+
23111+ .macro pax_exit_kernel
23112+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23113+ call pax_exit_kernel
23114+#endif
23115+
23116+ .endm
23117+
23118+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23119+ENTRY(pax_enter_kernel)
23120+ pushq %rdi
23121+
23122+#ifdef CONFIG_PARAVIRT
23123+ PV_SAVE_REGS(CLBR_RDI)
23124+#endif
23125+
23126+#ifdef CONFIG_PAX_KERNEXEC
23127+ GET_CR0_INTO_RDI
23128+ bts $16,%rdi
23129+ jnc 3f
23130+ mov %cs,%edi
23131+ cmp $__KERNEL_CS,%edi
23132+ jnz 2f
23133+1:
23134+#endif
23135+
23136+#ifdef CONFIG_PAX_MEMORY_UDEREF
23137+ 661: jmp 111f
23138+ .pushsection .altinstr_replacement, "a"
23139+ 662: ASM_NOP2
23140+ .popsection
23141+ .pushsection .altinstructions, "a"
23142+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23143+ .popsection
23144+ GET_CR3_INTO_RDI
23145+ cmp $0,%dil
23146+ jnz 112f
23147+ mov $__KERNEL_DS,%edi
23148+ mov %edi,%ss
23149+ jmp 111f
23150+112: cmp $1,%dil
23151+ jz 113f
23152+ ud2
23153+113: sub $4097,%rdi
23154+ bts $63,%rdi
23155+ SET_RDI_INTO_CR3
23156+ mov $__UDEREF_KERNEL_DS,%edi
23157+ mov %edi,%ss
23158+111:
23159+#endif
23160+
23161+#ifdef CONFIG_PARAVIRT
23162+ PV_RESTORE_REGS(CLBR_RDI)
23163+#endif
23164+
23165+ popq %rdi
23166+ pax_force_retaddr
23167+ retq
23168+
23169+#ifdef CONFIG_PAX_KERNEXEC
23170+2: ljmpq __KERNEL_CS,1b
23171+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23172+4: SET_RDI_INTO_CR0
23173+ jmp 1b
23174+#endif
23175+ENDPROC(pax_enter_kernel)
23176+
23177+ENTRY(pax_exit_kernel)
23178+ pushq %rdi
23179+
23180+#ifdef CONFIG_PARAVIRT
23181+ PV_SAVE_REGS(CLBR_RDI)
23182+#endif
23183+
23184+#ifdef CONFIG_PAX_KERNEXEC
23185+ mov %cs,%rdi
23186+ cmp $__KERNEXEC_KERNEL_CS,%edi
23187+ jz 2f
23188+ GET_CR0_INTO_RDI
23189+ bts $16,%rdi
23190+ jnc 4f
23191+1:
23192+#endif
23193+
23194+#ifdef CONFIG_PAX_MEMORY_UDEREF
23195+ 661: jmp 111f
23196+ .pushsection .altinstr_replacement, "a"
23197+ 662: ASM_NOP2
23198+ .popsection
23199+ .pushsection .altinstructions, "a"
23200+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23201+ .popsection
23202+ mov %ss,%edi
23203+ cmp $__UDEREF_KERNEL_DS,%edi
23204+ jnz 111f
23205+ GET_CR3_INTO_RDI
23206+ cmp $0,%dil
23207+ jz 112f
23208+ ud2
23209+112: add $4097,%rdi
23210+ bts $63,%rdi
23211+ SET_RDI_INTO_CR3
23212+ mov $__KERNEL_DS,%edi
23213+ mov %edi,%ss
23214+111:
23215+#endif
23216+
23217+#ifdef CONFIG_PARAVIRT
23218+ PV_RESTORE_REGS(CLBR_RDI);
23219+#endif
23220+
23221+ popq %rdi
23222+ pax_force_retaddr
23223+ retq
23224+
23225+#ifdef CONFIG_PAX_KERNEXEC
23226+2: GET_CR0_INTO_RDI
23227+ btr $16,%rdi
23228+ jnc 4f
23229+ ljmpq __KERNEL_CS,3f
23230+3: SET_RDI_INTO_CR0
23231+ jmp 1b
23232+4: ud2
23233+ jmp 4b
23234+#endif
23235+ENDPROC(pax_exit_kernel)
23236+#endif
23237+
23238+ .macro pax_enter_kernel_user
23239+ pax_set_fptr_mask
23240+#ifdef CONFIG_PAX_MEMORY_UDEREF
23241+ call pax_enter_kernel_user
23242+#endif
23243+ .endm
23244+
23245+ .macro pax_exit_kernel_user
23246+#ifdef CONFIG_PAX_MEMORY_UDEREF
23247+ call pax_exit_kernel_user
23248+#endif
23249+#ifdef CONFIG_PAX_RANDKSTACK
23250+ pushq %rax
23251+ pushq %r11
23252+ call pax_randomize_kstack
23253+ popq %r11
23254+ popq %rax
23255+#endif
23256+ .endm
23257+
23258+#ifdef CONFIG_PAX_MEMORY_UDEREF
23259+ENTRY(pax_enter_kernel_user)
23260+ pushq %rdi
23261+ pushq %rbx
23262+
23263+#ifdef CONFIG_PARAVIRT
23264+ PV_SAVE_REGS(CLBR_RDI)
23265+#endif
23266+
23267+ 661: jmp 111f
23268+ .pushsection .altinstr_replacement, "a"
23269+ 662: ASM_NOP2
23270+ .popsection
23271+ .pushsection .altinstructions, "a"
23272+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23273+ .popsection
23274+ GET_CR3_INTO_RDI
23275+ cmp $1,%dil
23276+ jnz 4f
23277+ sub $4097,%rdi
23278+ bts $63,%rdi
23279+ SET_RDI_INTO_CR3
23280+ jmp 3f
23281+111:
23282+
23283+ GET_CR3_INTO_RDI
23284+ mov %rdi,%rbx
23285+ add $__START_KERNEL_map,%rbx
23286+ sub phys_base(%rip),%rbx
23287+
23288+#ifdef CONFIG_PARAVIRT
23289+ cmpl $0, pv_info+PARAVIRT_enabled
23290+ jz 1f
23291+ pushq %rdi
23292+ i = 0
23293+ .rept USER_PGD_PTRS
23294+ mov i*8(%rbx),%rsi
23295+ mov $0,%sil
23296+ lea i*8(%rbx),%rdi
23297+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23298+ i = i + 1
23299+ .endr
23300+ popq %rdi
23301+ jmp 2f
23302+1:
23303+#endif
23304+
23305+ i = 0
23306+ .rept USER_PGD_PTRS
23307+ movb $0,i*8(%rbx)
23308+ i = i + 1
23309+ .endr
23310+
23311+2: SET_RDI_INTO_CR3
23312+
23313+#ifdef CONFIG_PAX_KERNEXEC
23314+ GET_CR0_INTO_RDI
23315+ bts $16,%rdi
23316+ SET_RDI_INTO_CR0
23317+#endif
23318+
23319+3:
23320+
23321+#ifdef CONFIG_PARAVIRT
23322+ PV_RESTORE_REGS(CLBR_RDI)
23323+#endif
23324+
23325+ popq %rbx
23326+ popq %rdi
23327+ pax_force_retaddr
23328+ retq
23329+4: ud2
23330+ENDPROC(pax_enter_kernel_user)
23331+
23332+ENTRY(pax_exit_kernel_user)
23333+ pushq %rdi
23334+ pushq %rbx
23335+
23336+#ifdef CONFIG_PARAVIRT
23337+ PV_SAVE_REGS(CLBR_RDI)
23338+#endif
23339+
23340+ GET_CR3_INTO_RDI
23341+ 661: jmp 1f
23342+ .pushsection .altinstr_replacement, "a"
23343+ 662: ASM_NOP2
23344+ .popsection
23345+ .pushsection .altinstructions, "a"
23346+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23347+ .popsection
23348+ cmp $0,%dil
23349+ jnz 3f
23350+ add $4097,%rdi
23351+ bts $63,%rdi
23352+ SET_RDI_INTO_CR3
23353+ jmp 2f
23354+1:
23355+
23356+ mov %rdi,%rbx
23357+
23358+#ifdef CONFIG_PAX_KERNEXEC
23359+ GET_CR0_INTO_RDI
23360+ btr $16,%rdi
23361+ jnc 3f
23362+ SET_RDI_INTO_CR0
23363+#endif
23364+
23365+ add $__START_KERNEL_map,%rbx
23366+ sub phys_base(%rip),%rbx
23367+
23368+#ifdef CONFIG_PARAVIRT
23369+ cmpl $0, pv_info+PARAVIRT_enabled
23370+ jz 1f
23371+ i = 0
23372+ .rept USER_PGD_PTRS
23373+ mov i*8(%rbx),%rsi
23374+ mov $0x67,%sil
23375+ lea i*8(%rbx),%rdi
23376+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23377+ i = i + 1
23378+ .endr
23379+ jmp 2f
23380+1:
23381+#endif
23382+
23383+ i = 0
23384+ .rept USER_PGD_PTRS
23385+ movb $0x67,i*8(%rbx)
23386+ i = i + 1
23387+ .endr
23388+2:
23389+
23390+#ifdef CONFIG_PARAVIRT
23391+ PV_RESTORE_REGS(CLBR_RDI)
23392+#endif
23393+
23394+ popq %rbx
23395+ popq %rdi
23396+ pax_force_retaddr
23397+ retq
23398+3: ud2
23399+ENDPROC(pax_exit_kernel_user)
23400+#endif
23401+
23402+ .macro pax_enter_kernel_nmi
23403+ pax_set_fptr_mask
23404+
23405+#ifdef CONFIG_PAX_KERNEXEC
23406+ GET_CR0_INTO_RDI
23407+ bts $16,%rdi
23408+ jc 110f
23409+ SET_RDI_INTO_CR0
23410+ or $2,%ebx
23411+110:
23412+#endif
23413+
23414+#ifdef CONFIG_PAX_MEMORY_UDEREF
23415+ 661: jmp 111f
23416+ .pushsection .altinstr_replacement, "a"
23417+ 662: ASM_NOP2
23418+ .popsection
23419+ .pushsection .altinstructions, "a"
23420+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23421+ .popsection
23422+ GET_CR3_INTO_RDI
23423+ cmp $0,%dil
23424+ jz 111f
23425+ sub $4097,%rdi
23426+ or $4,%ebx
23427+ bts $63,%rdi
23428+ SET_RDI_INTO_CR3
23429+ mov $__UDEREF_KERNEL_DS,%edi
23430+ mov %edi,%ss
23431+111:
23432+#endif
23433+ .endm
23434+
23435+ .macro pax_exit_kernel_nmi
23436+#ifdef CONFIG_PAX_KERNEXEC
23437+ btr $1,%ebx
23438+ jnc 110f
23439+ GET_CR0_INTO_RDI
23440+ btr $16,%rdi
23441+ SET_RDI_INTO_CR0
23442+110:
23443+#endif
23444+
23445+#ifdef CONFIG_PAX_MEMORY_UDEREF
23446+ btr $2,%ebx
23447+ jnc 111f
23448+ GET_CR3_INTO_RDI
23449+ add $4097,%rdi
23450+ bts $63,%rdi
23451+ SET_RDI_INTO_CR3
23452+ mov $__KERNEL_DS,%edi
23453+ mov %edi,%ss
23454+111:
23455+#endif
23456+ .endm
23457+
23458+ .macro pax_erase_kstack
23459+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23460+ call pax_erase_kstack
23461+#endif
23462+ .endm
23463+
23464+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23465+ENTRY(pax_erase_kstack)
23466+ pushq %rdi
23467+ pushq %rcx
23468+ pushq %rax
23469+ pushq %r11
23470+
23471+ GET_THREAD_INFO(%r11)
23472+ mov TI_lowest_stack(%r11), %rdi
23473+ mov $-0xBEEF, %rax
23474+ std
23475+
23476+1: mov %edi, %ecx
23477+ and $THREAD_SIZE_asm - 1, %ecx
23478+ shr $3, %ecx
23479+ repne scasq
23480+ jecxz 2f
23481+
23482+ cmp $2*8, %ecx
23483+ jc 2f
23484+
23485+ mov $2*8, %ecx
23486+ repe scasq
23487+ jecxz 2f
23488+ jne 1b
23489+
23490+2: cld
23491+ or $2*8, %rdi
23492+ mov %esp, %ecx
23493+ sub %edi, %ecx
23494+
23495+ cmp $THREAD_SIZE_asm, %rcx
23496+ jb 3f
23497+ ud2
23498+3:
23499+
23500+ shr $3, %ecx
23501+ rep stosq
23502+
23503+ mov TI_task_thread_sp0(%r11), %rdi
23504+ sub $256, %rdi
23505+ mov %rdi, TI_lowest_stack(%r11)
23506+
23507+ popq %r11
23508+ popq %rax
23509+ popq %rcx
23510+ popq %rdi
23511+ pax_force_retaddr
23512+ ret
23513+ENDPROC(pax_erase_kstack)
23514+#endif
23515
23516 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23517 #ifdef CONFIG_TRACE_IRQFLAGS
23518@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23519 .endm
23520
23521 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23522- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23523+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23524 jnc 1f
23525 TRACE_IRQS_ON_DEBUG
23526 1:
23527@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23528 movq \tmp,R11+\offset(%rsp)
23529 .endm
23530
23531- .macro FAKE_STACK_FRAME child_rip
23532- /* push in order ss, rsp, eflags, cs, rip */
23533- xorl %eax, %eax
23534- pushq_cfi $__KERNEL_DS /* ss */
23535- /*CFI_REL_OFFSET ss,0*/
23536- pushq_cfi %rax /* rsp */
23537- CFI_REL_OFFSET rsp,0
23538- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23539- /*CFI_REL_OFFSET rflags,0*/
23540- pushq_cfi $__KERNEL_CS /* cs */
23541- /*CFI_REL_OFFSET cs,0*/
23542- pushq_cfi \child_rip /* rip */
23543- CFI_REL_OFFSET rip,0
23544- pushq_cfi %rax /* orig rax */
23545- .endm
23546-
23547- .macro UNFAKE_STACK_FRAME
23548- addq $8*6, %rsp
23549- CFI_ADJUST_CFA_OFFSET -(6*8)
23550- .endm
23551-
23552 /*
23553 * initial frame state for interrupts (and exceptions without error code)
23554 */
23555@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23556 /* save partial stack frame */
23557 .macro SAVE_ARGS_IRQ
23558 cld
23559- /* start from rbp in pt_regs and jump over */
23560- movq_cfi rdi, (RDI-RBP)
23561- movq_cfi rsi, (RSI-RBP)
23562- movq_cfi rdx, (RDX-RBP)
23563- movq_cfi rcx, (RCX-RBP)
23564- movq_cfi rax, (RAX-RBP)
23565- movq_cfi r8, (R8-RBP)
23566- movq_cfi r9, (R9-RBP)
23567- movq_cfi r10, (R10-RBP)
23568- movq_cfi r11, (R11-RBP)
23569+ /* start from r15 in pt_regs and jump over */
23570+ movq_cfi rdi, RDI
23571+ movq_cfi rsi, RSI
23572+ movq_cfi rdx, RDX
23573+ movq_cfi rcx, RCX
23574+ movq_cfi rax, RAX
23575+ movq_cfi r8, R8
23576+ movq_cfi r9, R9
23577+ movq_cfi r10, R10
23578+ movq_cfi r11, R11
23579+ movq_cfi r12, R12
23580
23581 /* Save rbp so that we can unwind from get_irq_regs() */
23582- movq_cfi rbp, 0
23583+ movq_cfi rbp, RBP
23584
23585 /* Save previous stack value */
23586 movq %rsp, %rsi
23587
23588- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23589- testl $3, CS-RBP(%rsi)
23590+ movq %rsp,%rdi /* arg1 for handler */
23591+ testb $3, CS(%rsi)
23592 je 1f
23593 SWAPGS
23594 /*
23595@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23596 0x06 /* DW_OP_deref */, \
23597 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23598 0x22 /* DW_OP_plus */
23599+
23600+#ifdef CONFIG_PAX_MEMORY_UDEREF
23601+ testb $3, CS(%rdi)
23602+ jnz 1f
23603+ pax_enter_kernel
23604+ jmp 2f
23605+1: pax_enter_kernel_user
23606+2:
23607+#else
23608+ pax_enter_kernel
23609+#endif
23610+
23611 /* We entered an interrupt context - irqs are off: */
23612 TRACE_IRQS_OFF
23613 .endm
23614@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23615 js 1f /* negative -> in kernel */
23616 SWAPGS
23617 xorl %ebx,%ebx
23618-1: ret
23619+1:
23620+#ifdef CONFIG_PAX_MEMORY_UDEREF
23621+ testb $3, CS+8(%rsp)
23622+ jnz 1f
23623+ pax_enter_kernel
23624+ jmp 2f
23625+1: pax_enter_kernel_user
23626+2:
23627+#else
23628+ pax_enter_kernel
23629+#endif
23630+ pax_force_retaddr
23631+ ret
23632 CFI_ENDPROC
23633-END(save_paranoid)
23634+ENDPROC(save_paranoid)
23635+
23636+ENTRY(save_paranoid_nmi)
23637+ XCPT_FRAME 1 RDI+8
23638+ cld
23639+ movq_cfi rdi, RDI+8
23640+ movq_cfi rsi, RSI+8
23641+ movq_cfi rdx, RDX+8
23642+ movq_cfi rcx, RCX+8
23643+ movq_cfi rax, RAX+8
23644+ movq_cfi r8, R8+8
23645+ movq_cfi r9, R9+8
23646+ movq_cfi r10, R10+8
23647+ movq_cfi r11, R11+8
23648+ movq_cfi rbx, RBX+8
23649+ movq_cfi rbp, RBP+8
23650+ movq_cfi r12, R12+8
23651+ movq_cfi r13, R13+8
23652+ movq_cfi r14, R14+8
23653+ movq_cfi r15, R15+8
23654+ movl $1,%ebx
23655+ movl $MSR_GS_BASE,%ecx
23656+ rdmsr
23657+ testl %edx,%edx
23658+ js 1f /* negative -> in kernel */
23659+ SWAPGS
23660+ xorl %ebx,%ebx
23661+1: pax_enter_kernel_nmi
23662+ pax_force_retaddr
23663+ ret
23664+ CFI_ENDPROC
23665+ENDPROC(save_paranoid_nmi)
23666
23667 /*
23668 * A newly forked process directly context switches into this address.
23669@@ -331,7 +793,7 @@ ENTRY(ret_from_fork)
23670
23671 RESTORE_REST
23672
23673- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23674+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23675 jz 1f
23676
23677 /*
23678@@ -344,15 +806,13 @@ ENTRY(ret_from_fork)
23679 jmp int_ret_from_sys_call
23680
23681 1:
23682- subq $REST_SKIP, %rsp # leave space for volatiles
23683- CFI_ADJUST_CFA_OFFSET REST_SKIP
23684 movq %rbp, %rdi
23685 call *%rbx
23686 movl $0, RAX(%rsp)
23687 RESTORE_REST
23688 jmp int_ret_from_sys_call
23689 CFI_ENDPROC
23690-END(ret_from_fork)
23691+ENDPROC(ret_from_fork)
23692
23693 /*
23694 * System call entry. Up to 6 arguments in registers are supported.
23695@@ -389,7 +849,7 @@ END(ret_from_fork)
23696 ENTRY(system_call)
23697 CFI_STARTPROC simple
23698 CFI_SIGNAL_FRAME
23699- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23700+ CFI_DEF_CFA rsp,0
23701 CFI_REGISTER rip,rcx
23702 /*CFI_REGISTER rflags,r11*/
23703 SWAPGS_UNSAFE_STACK
23704@@ -402,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23705
23706 movq %rsp,PER_CPU_VAR(old_rsp)
23707 movq PER_CPU_VAR(kernel_stack),%rsp
23708+ SAVE_ARGS 8*6, 0, rax_enosys=1
23709+ pax_enter_kernel_user
23710+
23711+#ifdef CONFIG_PAX_RANDKSTACK
23712+ pax_erase_kstack
23713+#endif
23714+
23715 /*
23716 * No need to follow this irqs off/on section - it's straight
23717 * and short:
23718 */
23719 ENABLE_INTERRUPTS(CLBR_NONE)
23720- SAVE_ARGS 8, 0, rax_enosys=1
23721 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23722 movq %rcx,RIP-ARGOFFSET(%rsp)
23723 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23724- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23725+ GET_THREAD_INFO(%rcx)
23726+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23727 jnz tracesys
23728 system_call_fastpath:
23729 #if __SYSCALL_MASK == ~0
23730@@ -435,10 +902,13 @@ sysret_check:
23731 LOCKDEP_SYS_EXIT
23732 DISABLE_INTERRUPTS(CLBR_NONE)
23733 TRACE_IRQS_OFF
23734- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23735+ GET_THREAD_INFO(%rcx)
23736+ movl TI_flags(%rcx),%edx
23737 andl %edi,%edx
23738 jnz sysret_careful
23739 CFI_REMEMBER_STATE
23740+ pax_exit_kernel_user
23741+ pax_erase_kstack
23742 /*
23743 * sysretq will re-enable interrupts:
23744 */
23745@@ -497,12 +967,15 @@ sysret_audit:
23746
23747 /* Do syscall tracing */
23748 tracesys:
23749- leaq -REST_SKIP(%rsp), %rdi
23750+ movq %rsp, %rdi
23751 movq $AUDIT_ARCH_X86_64, %rsi
23752 call syscall_trace_enter_phase1
23753 test %rax, %rax
23754 jnz tracesys_phase2 /* if needed, run the slow path */
23755- LOAD_ARGS 0 /* else restore clobbered regs */
23756+
23757+ pax_erase_kstack
23758+
23759+ LOAD_ARGS /* else restore clobbered regs */
23760 jmp system_call_fastpath /* and return to the fast path */
23761
23762 tracesys_phase2:
23763@@ -513,12 +986,14 @@ tracesys_phase2:
23764 movq %rax,%rdx
23765 call syscall_trace_enter_phase2
23766
23767+ pax_erase_kstack
23768+
23769 /*
23770 * Reload arg registers from stack in case ptrace changed them.
23771 * We don't reload %rax because syscall_trace_entry_phase2() returned
23772 * the value it wants us to use in the table lookup.
23773 */
23774- LOAD_ARGS ARGOFFSET, 1
23775+ LOAD_ARGS 1
23776 RESTORE_REST
23777 #if __SYSCALL_MASK == ~0
23778 cmpq $__NR_syscall_max,%rax
23779@@ -548,7 +1023,9 @@ GLOBAL(int_with_check)
23780 andl %edi,%edx
23781 jnz int_careful
23782 andl $~TS_COMPAT,TI_status(%rcx)
23783- jmp retint_swapgs
23784+ pax_exit_kernel_user
23785+ pax_erase_kstack
23786+ jmp retint_swapgs_pax
23787
23788 /* Either reschedule or signal or syscall exit tracking needed. */
23789 /* First do a reschedule test. */
23790@@ -594,7 +1071,7 @@ int_restore_rest:
23791 TRACE_IRQS_OFF
23792 jmp int_with_check
23793 CFI_ENDPROC
23794-END(system_call)
23795+ENDPROC(system_call)
23796
23797 .macro FORK_LIKE func
23798 ENTRY(stub_\func)
23799@@ -607,9 +1084,10 @@ ENTRY(stub_\func)
23800 DEFAULT_FRAME 0 8 /* offset 8: return address */
23801 call sys_\func
23802 RESTORE_TOP_OF_STACK %r11, 8
23803- ret $REST_SKIP /* pop extended registers */
23804+ pax_force_retaddr
23805+ ret
23806 CFI_ENDPROC
23807-END(stub_\func)
23808+ENDPROC(stub_\func)
23809 .endm
23810
23811 .macro FIXED_FRAME label,func
23812@@ -619,9 +1097,10 @@ ENTRY(\label)
23813 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23814 call \func
23815 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23816+ pax_force_retaddr
23817 ret
23818 CFI_ENDPROC
23819-END(\label)
23820+ENDPROC(\label)
23821 .endm
23822
23823 FORK_LIKE clone
23824@@ -629,19 +1108,6 @@ END(\label)
23825 FORK_LIKE vfork
23826 FIXED_FRAME stub_iopl, sys_iopl
23827
23828-ENTRY(ptregscall_common)
23829- DEFAULT_FRAME 1 8 /* offset 8: return address */
23830- RESTORE_TOP_OF_STACK %r11, 8
23831- movq_cfi_restore R15+8, r15
23832- movq_cfi_restore R14+8, r14
23833- movq_cfi_restore R13+8, r13
23834- movq_cfi_restore R12+8, r12
23835- movq_cfi_restore RBP+8, rbp
23836- movq_cfi_restore RBX+8, rbx
23837- ret $REST_SKIP /* pop extended registers */
23838- CFI_ENDPROC
23839-END(ptregscall_common)
23840-
23841 ENTRY(stub_execve)
23842 CFI_STARTPROC
23843 addq $8, %rsp
23844@@ -653,7 +1119,7 @@ ENTRY(stub_execve)
23845 RESTORE_REST
23846 jmp int_ret_from_sys_call
23847 CFI_ENDPROC
23848-END(stub_execve)
23849+ENDPROC(stub_execve)
23850
23851 ENTRY(stub_execveat)
23852 CFI_STARTPROC
23853@@ -667,7 +1133,7 @@ ENTRY(stub_execveat)
23854 RESTORE_REST
23855 jmp int_ret_from_sys_call
23856 CFI_ENDPROC
23857-END(stub_execveat)
23858+ENDPROC(stub_execveat)
23859
23860 /*
23861 * sigreturn is special because it needs to restore all registers on return.
23862@@ -684,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23863 RESTORE_REST
23864 jmp int_ret_from_sys_call
23865 CFI_ENDPROC
23866-END(stub_rt_sigreturn)
23867+ENDPROC(stub_rt_sigreturn)
23868
23869 #ifdef CONFIG_X86_X32_ABI
23870 ENTRY(stub_x32_rt_sigreturn)
23871@@ -698,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23872 RESTORE_REST
23873 jmp int_ret_from_sys_call
23874 CFI_ENDPROC
23875-END(stub_x32_rt_sigreturn)
23876+ENDPROC(stub_x32_rt_sigreturn)
23877
23878 ENTRY(stub_x32_execve)
23879 CFI_STARTPROC
23880@@ -763,7 +1229,7 @@ vector=vector+1
23881 2: jmp common_interrupt
23882 .endr
23883 CFI_ENDPROC
23884-END(irq_entries_start)
23885+ENDPROC(irq_entries_start)
23886
23887 .previous
23888 END(interrupt)
23889@@ -780,8 +1246,8 @@ END(interrupt)
23890 /* 0(%rsp): ~(interrupt number) */
23891 .macro interrupt func
23892 /* reserve pt_regs for scratch regs and rbp */
23893- subq $ORIG_RAX-RBP, %rsp
23894- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23895+ subq $ORIG_RAX, %rsp
23896+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23897 SAVE_ARGS_IRQ
23898 call \func
23899 .endm
23900@@ -804,14 +1270,14 @@ ret_from_intr:
23901
23902 /* Restore saved previous stack */
23903 popq %rsi
23904- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23905- leaq ARGOFFSET-RBP(%rsi), %rsp
23906+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23907+ movq %rsi, %rsp
23908 CFI_DEF_CFA_REGISTER rsp
23909- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23910+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23911
23912 exit_intr:
23913 GET_THREAD_INFO(%rcx)
23914- testl $3,CS-ARGOFFSET(%rsp)
23915+ testb $3,CS-ARGOFFSET(%rsp)
23916 je retint_kernel
23917
23918 /* Interrupt came from user space */
23919@@ -833,12 +1299,35 @@ retint_swapgs: /* return to user-space */
23920 * The iretq could re-enable interrupts:
23921 */
23922 DISABLE_INTERRUPTS(CLBR_ANY)
23923+ pax_exit_kernel_user
23924+retint_swapgs_pax:
23925 TRACE_IRQS_IRETQ
23926 SWAPGS
23927 jmp restore_args
23928
23929 retint_restore_args: /* return to kernel space */
23930 DISABLE_INTERRUPTS(CLBR_ANY)
23931+ pax_exit_kernel
23932+
23933+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23934+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23935+ * namely calling EFI runtime services with a phys mapping. We're
23936+ * starting off with NOPs and patch in the real instrumentation
23937+ * (BTS/OR) before starting any userland process; even before starting
23938+ * up the APs.
23939+ */
23940+ .pushsection .altinstr_replacement, "a"
23941+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23942+ 602:
23943+ .popsection
23944+ 603: .fill 602b-601b, 1, 0x90
23945+ .pushsection .altinstructions, "a"
23946+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23947+ .popsection
23948+#else
23949+ pax_force_retaddr (RIP-ARGOFFSET)
23950+#endif
23951+
23952 /*
23953 * The iretq could re-enable interrupts:
23954 */
23955@@ -876,15 +1365,15 @@ native_irq_return_ldt:
23956 SWAPGS
23957 movq PER_CPU_VAR(espfix_waddr),%rdi
23958 movq %rax,(0*8)(%rdi) /* RAX */
23959- movq (2*8)(%rsp),%rax /* RIP */
23960+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23961 movq %rax,(1*8)(%rdi)
23962- movq (3*8)(%rsp),%rax /* CS */
23963+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23964 movq %rax,(2*8)(%rdi)
23965- movq (4*8)(%rsp),%rax /* RFLAGS */
23966+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23967 movq %rax,(3*8)(%rdi)
23968- movq (6*8)(%rsp),%rax /* SS */
23969+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23970 movq %rax,(5*8)(%rdi)
23971- movq (5*8)(%rsp),%rax /* RSP */
23972+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23973 movq %rax,(4*8)(%rdi)
23974 andl $0xffff0000,%eax
23975 popq_cfi %rdi
23976@@ -938,7 +1427,7 @@ ENTRY(retint_kernel)
23977 jmp exit_intr
23978 #endif
23979 CFI_ENDPROC
23980-END(common_interrupt)
23981+ENDPROC(common_interrupt)
23982
23983 /*
23984 * APIC interrupts.
23985@@ -952,7 +1441,7 @@ ENTRY(\sym)
23986 interrupt \do_sym
23987 jmp ret_from_intr
23988 CFI_ENDPROC
23989-END(\sym)
23990+ENDPROC(\sym)
23991 .endm
23992
23993 #ifdef CONFIG_TRACING
23994@@ -1025,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23995 /*
23996 * Exception entry points.
23997 */
23998-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23999+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24000
24001 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24002 ENTRY(\sym)
24003@@ -1076,6 +1565,12 @@ ENTRY(\sym)
24004 .endif
24005
24006 .if \shift_ist != -1
24007+#ifdef CONFIG_SMP
24008+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24009+ lea init_tss(%r13), %r13
24010+#else
24011+ lea init_tss(%rip), %r13
24012+#endif
24013 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24014 .endif
24015
24016@@ -1092,7 +1587,7 @@ ENTRY(\sym)
24017 .endif
24018
24019 CFI_ENDPROC
24020-END(\sym)
24021+ENDPROC(\sym)
24022 .endm
24023
24024 #ifdef CONFIG_TRACING
24025@@ -1133,9 +1628,10 @@ gs_change:
24026 2: mfence /* workaround */
24027 SWAPGS
24028 popfq_cfi
24029+ pax_force_retaddr
24030 ret
24031 CFI_ENDPROC
24032-END(native_load_gs_index)
24033+ENDPROC(native_load_gs_index)
24034
24035 _ASM_EXTABLE(gs_change,bad_gs)
24036 .section .fixup,"ax"
24037@@ -1163,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
24038 CFI_DEF_CFA_REGISTER rsp
24039 CFI_ADJUST_CFA_OFFSET -8
24040 decl PER_CPU_VAR(irq_count)
24041+ pax_force_retaddr
24042 ret
24043 CFI_ENDPROC
24044-END(do_softirq_own_stack)
24045+ENDPROC(do_softirq_own_stack)
24046
24047 #ifdef CONFIG_XEN
24048 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24049@@ -1203,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24050 decl PER_CPU_VAR(irq_count)
24051 jmp error_exit
24052 CFI_ENDPROC
24053-END(xen_do_hypervisor_callback)
24054+ENDPROC(xen_do_hypervisor_callback)
24055
24056 /*
24057 * Hypervisor uses this for application faults while it executes.
24058@@ -1262,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
24059 SAVE_ALL
24060 jmp error_exit
24061 CFI_ENDPROC
24062-END(xen_failsafe_callback)
24063+ENDPROC(xen_failsafe_callback)
24064
24065 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24066 xen_hvm_callback_vector xen_evtchn_do_upcall
24067@@ -1309,18 +1806,33 @@ ENTRY(paranoid_exit)
24068 DEFAULT_FRAME
24069 DISABLE_INTERRUPTS(CLBR_NONE)
24070 TRACE_IRQS_OFF_DEBUG
24071- testl %ebx,%ebx /* swapgs needed? */
24072+ testl $1,%ebx /* swapgs needed? */
24073 jnz paranoid_restore
24074- testl $3,CS(%rsp)
24075+ testb $3,CS(%rsp)
24076 jnz paranoid_userspace
24077+#ifdef CONFIG_PAX_MEMORY_UDEREF
24078+ pax_exit_kernel
24079+ TRACE_IRQS_IRETQ 0
24080+ SWAPGS_UNSAFE_STACK
24081+ RESTORE_ALL 8
24082+ pax_force_retaddr_bts
24083+ jmp irq_return
24084+#endif
24085 paranoid_swapgs:
24086+#ifdef CONFIG_PAX_MEMORY_UDEREF
24087+ pax_exit_kernel_user
24088+#else
24089+ pax_exit_kernel
24090+#endif
24091 TRACE_IRQS_IRETQ 0
24092 SWAPGS_UNSAFE_STACK
24093 RESTORE_ALL 8
24094 jmp irq_return
24095 paranoid_restore:
24096+ pax_exit_kernel
24097 TRACE_IRQS_IRETQ_DEBUG 0
24098 RESTORE_ALL 8
24099+ pax_force_retaddr_bts
24100 jmp irq_return
24101 paranoid_userspace:
24102 GET_THREAD_INFO(%rcx)
24103@@ -1349,7 +1861,7 @@ paranoid_schedule:
24104 TRACE_IRQS_OFF
24105 jmp paranoid_userspace
24106 CFI_ENDPROC
24107-END(paranoid_exit)
24108+ENDPROC(paranoid_exit)
24109
24110 /*
24111 * Exception entry point. This expects an error code/orig_rax on the stack.
24112@@ -1376,12 +1888,23 @@ ENTRY(error_entry)
24113 movq %r14, R14+8(%rsp)
24114 movq %r15, R15+8(%rsp)
24115 xorl %ebx,%ebx
24116- testl $3,CS+8(%rsp)
24117+ testb $3,CS+8(%rsp)
24118 je error_kernelspace
24119 error_swapgs:
24120 SWAPGS
24121 error_sti:
24122+#ifdef CONFIG_PAX_MEMORY_UDEREF
24123+ testb $3, CS+8(%rsp)
24124+ jnz 1f
24125+ pax_enter_kernel
24126+ jmp 2f
24127+1: pax_enter_kernel_user
24128+2:
24129+#else
24130+ pax_enter_kernel
24131+#endif
24132 TRACE_IRQS_OFF
24133+ pax_force_retaddr
24134 ret
24135
24136 /*
24137@@ -1416,7 +1939,7 @@ error_bad_iret:
24138 decl %ebx /* Return to usergs */
24139 jmp error_sti
24140 CFI_ENDPROC
24141-END(error_entry)
24142+ENDPROC(error_entry)
24143
24144
24145 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24146@@ -1427,7 +1950,7 @@ ENTRY(error_exit)
24147 DISABLE_INTERRUPTS(CLBR_NONE)
24148 TRACE_IRQS_OFF
24149 GET_THREAD_INFO(%rcx)
24150- testl %eax,%eax
24151+ testl $1,%eax
24152 jne retint_kernel
24153 LOCKDEP_SYS_EXIT_IRQ
24154 movl TI_flags(%rcx),%edx
24155@@ -1436,7 +1959,7 @@ ENTRY(error_exit)
24156 jnz retint_careful
24157 jmp retint_swapgs
24158 CFI_ENDPROC
24159-END(error_exit)
24160+ENDPROC(error_exit)
24161
24162 /*
24163 * Test if a given stack is an NMI stack or not.
24164@@ -1494,9 +2017,11 @@ ENTRY(nmi)
24165 * If %cs was not the kernel segment, then the NMI triggered in user
24166 * space, which means it is definitely not nested.
24167 */
24168+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24169+ je 1f
24170 cmpl $__KERNEL_CS, 16(%rsp)
24171 jne first_nmi
24172-
24173+1:
24174 /*
24175 * Check the special variable on the stack to see if NMIs are
24176 * executing.
24177@@ -1530,8 +2055,7 @@ nested_nmi:
24178
24179 1:
24180 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24181- leaq -1*8(%rsp), %rdx
24182- movq %rdx, %rsp
24183+ subq $8, %rsp
24184 CFI_ADJUST_CFA_OFFSET 1*8
24185 leaq -10*8(%rsp), %rdx
24186 pushq_cfi $__KERNEL_DS
24187@@ -1549,6 +2073,7 @@ nested_nmi_out:
24188 CFI_RESTORE rdx
24189
24190 /* No need to check faults here */
24191+# pax_force_retaddr_bts
24192 INTERRUPT_RETURN
24193
24194 CFI_RESTORE_STATE
24195@@ -1645,13 +2170,13 @@ end_repeat_nmi:
24196 subq $ORIG_RAX-R15, %rsp
24197 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24198 /*
24199- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24200+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24201 * as we should not be calling schedule in NMI context.
24202 * Even with normal interrupts enabled. An NMI should not be
24203 * setting NEED_RESCHED or anything that normal interrupts and
24204 * exceptions might do.
24205 */
24206- call save_paranoid
24207+ call save_paranoid_nmi
24208 DEFAULT_FRAME 0
24209
24210 /*
24211@@ -1661,9 +2186,9 @@ end_repeat_nmi:
24212 * NMI itself takes a page fault, the page fault that was preempted
24213 * will read the information from the NMI page fault and not the
24214 * origin fault. Save it off and restore it if it changes.
24215- * Use the r12 callee-saved register.
24216+ * Use the r13 callee-saved register.
24217 */
24218- movq %cr2, %r12
24219+ movq %cr2, %r13
24220
24221 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24222 movq %rsp,%rdi
24223@@ -1672,29 +2197,34 @@ end_repeat_nmi:
24224
24225 /* Did the NMI take a page fault? Restore cr2 if it did */
24226 movq %cr2, %rcx
24227- cmpq %rcx, %r12
24228+ cmpq %rcx, %r13
24229 je 1f
24230- movq %r12, %cr2
24231+ movq %r13, %cr2
24232 1:
24233
24234- testl %ebx,%ebx /* swapgs needed? */
24235+ testl $1,%ebx /* swapgs needed? */
24236 jnz nmi_restore
24237 nmi_swapgs:
24238 SWAPGS_UNSAFE_STACK
24239 nmi_restore:
24240+ pax_exit_kernel_nmi
24241 /* Pop the extra iret frame at once */
24242 RESTORE_ALL 6*8
24243+ testb $3, 8(%rsp)
24244+ jnz 1f
24245+ pax_force_retaddr_bts
24246+1:
24247
24248 /* Clear the NMI executing stack variable */
24249 movq $0, 5*8(%rsp)
24250 jmp irq_return
24251 CFI_ENDPROC
24252-END(nmi)
24253+ENDPROC(nmi)
24254
24255 ENTRY(ignore_sysret)
24256 CFI_STARTPROC
24257 mov $-ENOSYS,%eax
24258 sysret
24259 CFI_ENDPROC
24260-END(ignore_sysret)
24261+ENDPROC(ignore_sysret)
24262
24263diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24264index f5d0730..5bce89c 100644
24265--- a/arch/x86/kernel/espfix_64.c
24266+++ b/arch/x86/kernel/espfix_64.c
24267@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24268 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24269 static void *espfix_pages[ESPFIX_MAX_PAGES];
24270
24271-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24272- __aligned(PAGE_SIZE);
24273+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24274
24275 static unsigned int page_random, slot_random;
24276
24277@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24278 void __init init_espfix_bsp(void)
24279 {
24280 pgd_t *pgd_p;
24281+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24282
24283 /* Install the espfix pud into the kernel page directory */
24284- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24285+ pgd_p = &init_level4_pgt[index];
24286 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24287
24288+#ifdef CONFIG_PAX_PER_CPU_PGD
24289+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24290+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24291+#endif
24292+
24293 /* Randomize the locations */
24294 init_espfix_random();
24295
24296@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24297 set_pte(&pte_p[n*PTE_STRIDE], pte);
24298
24299 /* Job is done for this CPU and any CPU which shares this page */
24300- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24301+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24302
24303 unlock_done:
24304 mutex_unlock(&espfix_init_mutex);
24305diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24306index 8b7b0a5..2395f29 100644
24307--- a/arch/x86/kernel/ftrace.c
24308+++ b/arch/x86/kernel/ftrace.c
24309@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24310 * kernel identity mapping to modify code.
24311 */
24312 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24313- ip = (unsigned long)__va(__pa_symbol(ip));
24314+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24315
24316 return ip;
24317 }
24318@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24319 {
24320 unsigned char replaced[MCOUNT_INSN_SIZE];
24321
24322+ ip = ktla_ktva(ip);
24323+
24324 /*
24325 * Note: Due to modules and __init, code can
24326 * disappear and change, we need to protect against faulting
24327@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24328 unsigned char old[MCOUNT_INSN_SIZE];
24329 int ret;
24330
24331- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24332+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24333
24334 ftrace_update_func = ip;
24335 /* Make sure the breakpoints see the ftrace_update_func update */
24336@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24337 unsigned char replaced[MCOUNT_INSN_SIZE];
24338 unsigned char brk = BREAKPOINT_INSTRUCTION;
24339
24340- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24341+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24342 return -EFAULT;
24343
24344 /* Make sure it is what we expect it to be */
24345diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24346index eda1a86..8f6df48 100644
24347--- a/arch/x86/kernel/head64.c
24348+++ b/arch/x86/kernel/head64.c
24349@@ -67,12 +67,12 @@ again:
24350 pgd = *pgd_p;
24351
24352 /*
24353- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24354- * critical -- __PAGE_OFFSET would point us back into the dynamic
24355+ * The use of __early_va rather than __va here is critical:
24356+ * __va would point us back into the dynamic
24357 * range and we might end up looping forever...
24358 */
24359 if (pgd)
24360- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24361+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24362 else {
24363 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24364 reset_early_page_tables();
24365@@ -82,13 +82,13 @@ again:
24366 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24367 for (i = 0; i < PTRS_PER_PUD; i++)
24368 pud_p[i] = 0;
24369- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24370+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24371 }
24372 pud_p += pud_index(address);
24373 pud = *pud_p;
24374
24375 if (pud)
24376- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24377+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24378 else {
24379 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24380 reset_early_page_tables();
24381@@ -98,7 +98,7 @@ again:
24382 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24383 for (i = 0; i < PTRS_PER_PMD; i++)
24384 pmd_p[i] = 0;
24385- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24386+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24387 }
24388 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24389 pmd_p[pmd_index(address)] = pmd;
24390@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24391 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24392 early_printk("Kernel alive\n");
24393
24394- clear_page(init_level4_pgt);
24395 /* set init_level4_pgt kernel high mapping*/
24396 init_level4_pgt[511] = early_level4_pgt[511];
24397
24398diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24399index f36bd42..0ab4474 100644
24400--- a/arch/x86/kernel/head_32.S
24401+++ b/arch/x86/kernel/head_32.S
24402@@ -26,6 +26,12 @@
24403 /* Physical address */
24404 #define pa(X) ((X) - __PAGE_OFFSET)
24405
24406+#ifdef CONFIG_PAX_KERNEXEC
24407+#define ta(X) (X)
24408+#else
24409+#define ta(X) ((X) - __PAGE_OFFSET)
24410+#endif
24411+
24412 /*
24413 * References to members of the new_cpu_data structure.
24414 */
24415@@ -55,11 +61,7 @@
24416 * and small than max_low_pfn, otherwise will waste some page table entries
24417 */
24418
24419-#if PTRS_PER_PMD > 1
24420-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24421-#else
24422-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24423-#endif
24424+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24425
24426 /* Number of possible pages in the lowmem region */
24427 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24428@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24429 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24430
24431 /*
24432+ * Real beginning of normal "text" segment
24433+ */
24434+ENTRY(stext)
24435+ENTRY(_stext)
24436+
24437+/*
24438 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24439 * %esi points to the real-mode code as a 32-bit pointer.
24440 * CS and DS must be 4 GB flat segments, but we don't depend on
24441@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24442 * can.
24443 */
24444 __HEAD
24445+
24446+#ifdef CONFIG_PAX_KERNEXEC
24447+ jmp startup_32
24448+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24449+.fill PAGE_SIZE-5,1,0xcc
24450+#endif
24451+
24452 ENTRY(startup_32)
24453 movl pa(stack_start),%ecx
24454
24455@@ -106,6 +121,59 @@ ENTRY(startup_32)
24456 2:
24457 leal -__PAGE_OFFSET(%ecx),%esp
24458
24459+#ifdef CONFIG_SMP
24460+ movl $pa(cpu_gdt_table),%edi
24461+ movl $__per_cpu_load,%eax
24462+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24463+ rorl $16,%eax
24464+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24465+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24466+ movl $__per_cpu_end - 1,%eax
24467+ subl $__per_cpu_start,%eax
24468+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24469+#endif
24470+
24471+#ifdef CONFIG_PAX_MEMORY_UDEREF
24472+ movl $NR_CPUS,%ecx
24473+ movl $pa(cpu_gdt_table),%edi
24474+1:
24475+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24476+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24477+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24478+ addl $PAGE_SIZE_asm,%edi
24479+ loop 1b
24480+#endif
24481+
24482+#ifdef CONFIG_PAX_KERNEXEC
24483+ movl $pa(boot_gdt),%edi
24484+ movl $__LOAD_PHYSICAL_ADDR,%eax
24485+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24486+ rorl $16,%eax
24487+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24488+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24489+ rorl $16,%eax
24490+
24491+ ljmp $(__BOOT_CS),$1f
24492+1:
24493+
24494+ movl $NR_CPUS,%ecx
24495+ movl $pa(cpu_gdt_table),%edi
24496+ addl $__PAGE_OFFSET,%eax
24497+1:
24498+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24499+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24500+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24501+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24502+ rorl $16,%eax
24503+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24504+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24505+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24506+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24507+ rorl $16,%eax
24508+ addl $PAGE_SIZE_asm,%edi
24509+ loop 1b
24510+#endif
24511+
24512 /*
24513 * Clear BSS first so that there are no surprises...
24514 */
24515@@ -201,8 +269,11 @@ ENTRY(startup_32)
24516 movl %eax, pa(max_pfn_mapped)
24517
24518 /* Do early initialization of the fixmap area */
24519- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24520- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24521+#ifdef CONFIG_COMPAT_VDSO
24522+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24523+#else
24524+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24525+#endif
24526 #else /* Not PAE */
24527
24528 page_pde_offset = (__PAGE_OFFSET >> 20);
24529@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24530 movl %eax, pa(max_pfn_mapped)
24531
24532 /* Do early initialization of the fixmap area */
24533- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24534- movl %eax,pa(initial_page_table+0xffc)
24535+#ifdef CONFIG_COMPAT_VDSO
24536+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24537+#else
24538+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24539+#endif
24540 #endif
24541
24542 #ifdef CONFIG_PARAVIRT
24543@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24544 cmpl $num_subarch_entries, %eax
24545 jae bad_subarch
24546
24547- movl pa(subarch_entries)(,%eax,4), %eax
24548- subl $__PAGE_OFFSET, %eax
24549- jmp *%eax
24550+ jmp *pa(subarch_entries)(,%eax,4)
24551
24552 bad_subarch:
24553 WEAK(lguest_entry)
24554@@ -261,10 +333,10 @@ WEAK(xen_entry)
24555 __INITDATA
24556
24557 subarch_entries:
24558- .long default_entry /* normal x86/PC */
24559- .long lguest_entry /* lguest hypervisor */
24560- .long xen_entry /* Xen hypervisor */
24561- .long default_entry /* Moorestown MID */
24562+ .long ta(default_entry) /* normal x86/PC */
24563+ .long ta(lguest_entry) /* lguest hypervisor */
24564+ .long ta(xen_entry) /* Xen hypervisor */
24565+ .long ta(default_entry) /* Moorestown MID */
24566 num_subarch_entries = (. - subarch_entries) / 4
24567 .previous
24568 #else
24569@@ -354,6 +426,7 @@ default_entry:
24570 movl pa(mmu_cr4_features),%eax
24571 movl %eax,%cr4
24572
24573+#ifdef CONFIG_X86_PAE
24574 testb $X86_CR4_PAE, %al # check if PAE is enabled
24575 jz enable_paging
24576
24577@@ -382,6 +455,9 @@ default_entry:
24578 /* Make changes effective */
24579 wrmsr
24580
24581+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24582+#endif
24583+
24584 enable_paging:
24585
24586 /*
24587@@ -449,14 +525,20 @@ is486:
24588 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24589 movl %eax,%ss # after changing gdt.
24590
24591- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24592+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24593 movl %eax,%ds
24594 movl %eax,%es
24595
24596 movl $(__KERNEL_PERCPU), %eax
24597 movl %eax,%fs # set this cpu's percpu
24598
24599+#ifdef CONFIG_CC_STACKPROTECTOR
24600 movl $(__KERNEL_STACK_CANARY),%eax
24601+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24602+ movl $(__USER_DS),%eax
24603+#else
24604+ xorl %eax,%eax
24605+#endif
24606 movl %eax,%gs
24607
24608 xorl %eax,%eax # Clear LDT
24609@@ -512,8 +594,11 @@ setup_once:
24610 * relocation. Manually set base address in stack canary
24611 * segment descriptor.
24612 */
24613- movl $gdt_page,%eax
24614+ movl $cpu_gdt_table,%eax
24615 movl $stack_canary,%ecx
24616+#ifdef CONFIG_SMP
24617+ addl $__per_cpu_load,%ecx
24618+#endif
24619 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24620 shrl $16, %ecx
24621 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24622@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24623 cmpl $2,(%esp) # X86_TRAP_NMI
24624 je is_nmi # Ignore NMI
24625
24626- cmpl $2,%ss:early_recursion_flag
24627+ cmpl $1,%ss:early_recursion_flag
24628 je hlt_loop
24629 incl %ss:early_recursion_flag
24630
24631@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24632 pushl (20+6*4)(%esp) /* trapno */
24633 pushl $fault_msg
24634 call printk
24635-#endif
24636 call dump_stack
24637+#endif
24638 hlt_loop:
24639 hlt
24640 jmp hlt_loop
24641@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24642 /* This is the default interrupt "handler" :-) */
24643 ALIGN
24644 ignore_int:
24645- cld
24646 #ifdef CONFIG_PRINTK
24647+ cmpl $2,%ss:early_recursion_flag
24648+ je hlt_loop
24649+ incl %ss:early_recursion_flag
24650+ cld
24651 pushl %eax
24652 pushl %ecx
24653 pushl %edx
24654@@ -617,9 +705,6 @@ ignore_int:
24655 movl $(__KERNEL_DS),%eax
24656 movl %eax,%ds
24657 movl %eax,%es
24658- cmpl $2,early_recursion_flag
24659- je hlt_loop
24660- incl early_recursion_flag
24661 pushl 16(%esp)
24662 pushl 24(%esp)
24663 pushl 32(%esp)
24664@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24665 /*
24666 * BSS section
24667 */
24668-__PAGE_ALIGNED_BSS
24669- .align PAGE_SIZE
24670 #ifdef CONFIG_X86_PAE
24671+.section .initial_pg_pmd,"a",@progbits
24672 initial_pg_pmd:
24673 .fill 1024*KPMDS,4,0
24674 #else
24675+.section .initial_page_table,"a",@progbits
24676 ENTRY(initial_page_table)
24677 .fill 1024,4,0
24678 #endif
24679+.section .initial_pg_fixmap,"a",@progbits
24680 initial_pg_fixmap:
24681 .fill 1024,4,0
24682+.section .empty_zero_page,"a",@progbits
24683 ENTRY(empty_zero_page)
24684 .fill 4096,1,0
24685+.section .swapper_pg_dir,"a",@progbits
24686 ENTRY(swapper_pg_dir)
24687+#ifdef CONFIG_X86_PAE
24688+ .fill 4,8,0
24689+#else
24690 .fill 1024,4,0
24691+#endif
24692
24693 /*
24694 * This starts the data section.
24695 */
24696 #ifdef CONFIG_X86_PAE
24697-__PAGE_ALIGNED_DATA
24698- /* Page-aligned for the benefit of paravirt? */
24699- .align PAGE_SIZE
24700+.section .initial_page_table,"a",@progbits
24701 ENTRY(initial_page_table)
24702 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24703 # if KPMDS == 3
24704@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24705 # error "Kernel PMDs should be 1, 2 or 3"
24706 # endif
24707 .align PAGE_SIZE /* needs to be page-sized too */
24708+
24709+#ifdef CONFIG_PAX_PER_CPU_PGD
24710+ENTRY(cpu_pgd)
24711+ .rept 2*NR_CPUS
24712+ .fill 4,8,0
24713+ .endr
24714+#endif
24715+
24716 #endif
24717
24718 .data
24719 .balign 4
24720 ENTRY(stack_start)
24721- .long init_thread_union+THREAD_SIZE
24722+ .long init_thread_union+THREAD_SIZE-8
24723
24724 __INITRODATA
24725 int_msg:
24726@@ -727,7 +825,7 @@ fault_msg:
24727 * segment size, and 32-bit linear address value:
24728 */
24729
24730- .data
24731+.section .rodata,"a",@progbits
24732 .globl boot_gdt_descr
24733 .globl idt_descr
24734
24735@@ -736,7 +834,7 @@ fault_msg:
24736 .word 0 # 32 bit align gdt_desc.address
24737 boot_gdt_descr:
24738 .word __BOOT_DS+7
24739- .long boot_gdt - __PAGE_OFFSET
24740+ .long pa(boot_gdt)
24741
24742 .word 0 # 32-bit align idt_desc.address
24743 idt_descr:
24744@@ -747,7 +845,7 @@ idt_descr:
24745 .word 0 # 32 bit align gdt_desc.address
24746 ENTRY(early_gdt_descr)
24747 .word GDT_ENTRIES*8-1
24748- .long gdt_page /* Overwritten for secondary CPUs */
24749+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24750
24751 /*
24752 * The boot_gdt must mirror the equivalent in setup.S and is
24753@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24754 .align L1_CACHE_BYTES
24755 ENTRY(boot_gdt)
24756 .fill GDT_ENTRY_BOOT_CS,8,0
24757- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24758- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24759+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24760+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24761+
24762+ .align PAGE_SIZE_asm
24763+ENTRY(cpu_gdt_table)
24764+ .rept NR_CPUS
24765+ .quad 0x0000000000000000 /* NULL descriptor */
24766+ .quad 0x0000000000000000 /* 0x0b reserved */
24767+ .quad 0x0000000000000000 /* 0x13 reserved */
24768+ .quad 0x0000000000000000 /* 0x1b reserved */
24769+
24770+#ifdef CONFIG_PAX_KERNEXEC
24771+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24772+#else
24773+ .quad 0x0000000000000000 /* 0x20 unused */
24774+#endif
24775+
24776+ .quad 0x0000000000000000 /* 0x28 unused */
24777+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24778+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24779+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24780+ .quad 0x0000000000000000 /* 0x4b reserved */
24781+ .quad 0x0000000000000000 /* 0x53 reserved */
24782+ .quad 0x0000000000000000 /* 0x5b reserved */
24783+
24784+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24785+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24786+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24787+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24788+
24789+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24790+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24791+
24792+ /*
24793+ * Segments used for calling PnP BIOS have byte granularity.
24794+ * The code segments and data segments have fixed 64k limits,
24795+ * the transfer segment sizes are set at run time.
24796+ */
24797+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24798+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24799+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24800+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24801+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24802+
24803+ /*
24804+ * The APM segments have byte granularity and their bases
24805+ * are set at run time. All have 64k limits.
24806+ */
24807+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24808+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24809+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24810+
24811+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24812+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24813+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24814+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24815+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24816+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24817+
24818+ /* Be sure this is zeroed to avoid false validations in Xen */
24819+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24820+ .endr
24821diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24822index a468c0a..8b5a879 100644
24823--- a/arch/x86/kernel/head_64.S
24824+++ b/arch/x86/kernel/head_64.S
24825@@ -20,6 +20,8 @@
24826 #include <asm/processor-flags.h>
24827 #include <asm/percpu.h>
24828 #include <asm/nops.h>
24829+#include <asm/cpufeature.h>
24830+#include <asm/alternative-asm.h>
24831
24832 #ifdef CONFIG_PARAVIRT
24833 #include <asm/asm-offsets.h>
24834@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24835 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24836 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24837 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24838+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24839+L3_VMALLOC_START = pud_index(VMALLOC_START)
24840+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24841+L3_VMALLOC_END = pud_index(VMALLOC_END)
24842+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24843+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24844
24845 .text
24846 __HEAD
24847@@ -89,11 +97,24 @@ startup_64:
24848 * Fixup the physical addresses in the page table
24849 */
24850 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24851+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24852+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24853+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24854+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24855+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24856
24857- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24858- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24859+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24860+#ifndef CONFIG_XEN
24861+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24862+#endif
24863+
24864+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24865+
24866+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24867+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24868
24869 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24870+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24871
24872 /*
24873 * Set up the identity mapping for the switchover. These
24874@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24875 * after the boot processor executes this code.
24876 */
24877
24878+ orq $-1, %rbp
24879 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24880 1:
24881
24882- /* Enable PAE mode and PGE */
24883- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24884+ /* Enable PAE mode and PSE/PGE */
24885+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24886 movq %rcx, %cr4
24887
24888 /* Setup early boot stage 4 level pagetables. */
24889@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24890 movl $MSR_EFER, %ecx
24891 rdmsr
24892 btsl $_EFER_SCE, %eax /* Enable System Call */
24893- btl $20,%edi /* No Execute supported? */
24894+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24895 jnc 1f
24896 btsl $_EFER_NX, %eax
24897+ cmpq $-1, %rbp
24898+ je 1f
24899 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24900+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24901+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24902+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24903+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24904+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24905+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24906+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24907 1: wrmsr /* Make changes effective */
24908
24909 /* Setup cr0 */
24910@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24911 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24912 * address given in m16:64.
24913 */
24914+ pax_set_fptr_mask
24915 movq initial_code(%rip),%rax
24916 pushq $0 # fake return address to stop unwinder
24917 pushq $__KERNEL_CS # set correct cs
24918@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24919 .quad INIT_PER_CPU_VAR(irq_stack_union)
24920
24921 GLOBAL(stack_start)
24922- .quad init_thread_union+THREAD_SIZE-8
24923+ .quad init_thread_union+THREAD_SIZE-16
24924 .word 0
24925 __FINITDATA
24926
24927@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24928 call dump_stack
24929 #ifdef CONFIG_KALLSYMS
24930 leaq early_idt_ripmsg(%rip),%rdi
24931- movq 40(%rsp),%rsi # %rip again
24932+ movq 88(%rsp),%rsi # %rip again
24933 call __print_symbol
24934 #endif
24935 #endif /* EARLY_PRINTK */
24936@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
24937 early_recursion_flag:
24938 .long 0
24939
24940+ .section .rodata,"a",@progbits
24941 #ifdef CONFIG_EARLY_PRINTK
24942 early_idt_msg:
24943 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24944@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
24945 NEXT_PAGE(early_dynamic_pgts)
24946 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24947
24948- .data
24949+ .section .rodata,"a",@progbits
24950
24951-#ifndef CONFIG_XEN
24952 NEXT_PAGE(init_level4_pgt)
24953- .fill 512,8,0
24954-#else
24955-NEXT_PAGE(init_level4_pgt)
24956- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24957 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24958 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24959+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24960+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24961+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24962+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24963+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24964+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24965 .org init_level4_pgt + L4_START_KERNEL*8, 0
24966 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24967 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24968
24969+#ifdef CONFIG_PAX_PER_CPU_PGD
24970+NEXT_PAGE(cpu_pgd)
24971+ .rept 2*NR_CPUS
24972+ .fill 512,8,0
24973+ .endr
24974+#endif
24975+
24976 NEXT_PAGE(level3_ident_pgt)
24977 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24978+#ifdef CONFIG_XEN
24979 .fill 511, 8, 0
24980+#else
24981+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24982+ .fill 510,8,0
24983+#endif
24984+
24985+NEXT_PAGE(level3_vmalloc_start_pgt)
24986+ .fill 512,8,0
24987+
24988+NEXT_PAGE(level3_vmalloc_end_pgt)
24989+ .fill 512,8,0
24990+
24991+NEXT_PAGE(level3_vmemmap_pgt)
24992+ .fill L3_VMEMMAP_START,8,0
24993+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24994+
24995 NEXT_PAGE(level2_ident_pgt)
24996- /* Since I easily can, map the first 1G.
24997+ /* Since I easily can, map the first 2G.
24998 * Don't set NX because code runs from these pages.
24999 */
25000- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25001-#endif
25002+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25003
25004 NEXT_PAGE(level3_kernel_pgt)
25005 .fill L3_START_KERNEL,8,0
25006@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25007 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25008 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25009
25010+NEXT_PAGE(level2_vmemmap_pgt)
25011+ .fill 512,8,0
25012+
25013 NEXT_PAGE(level2_kernel_pgt)
25014 /*
25015 * 512 MB kernel mapping. We spend a full page on this pagetable
25016@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25017 NEXT_PAGE(level2_fixmap_pgt)
25018 .fill 506,8,0
25019 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25020- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25021- .fill 5,8,0
25022+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25023+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25024+ .fill 4,8,0
25025
25026 NEXT_PAGE(level1_fixmap_pgt)
25027 .fill 512,8,0
25028
25029+NEXT_PAGE(level1_vsyscall_pgt)
25030+ .fill 512,8,0
25031+
25032 #undef PMDS
25033
25034- .data
25035+ .align PAGE_SIZE
25036+ENTRY(cpu_gdt_table)
25037+ .rept NR_CPUS
25038+ .quad 0x0000000000000000 /* NULL descriptor */
25039+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25040+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25041+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25042+ .quad 0x00cffb000000ffff /* __USER32_CS */
25043+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25044+ .quad 0x00affb000000ffff /* __USER_CS */
25045+
25046+#ifdef CONFIG_PAX_KERNEXEC
25047+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25048+#else
25049+ .quad 0x0 /* unused */
25050+#endif
25051+
25052+ .quad 0,0 /* TSS */
25053+ .quad 0,0 /* LDT */
25054+ .quad 0,0,0 /* three TLS descriptors */
25055+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25056+ /* asm/segment.h:GDT_ENTRIES must match this */
25057+
25058+#ifdef CONFIG_PAX_MEMORY_UDEREF
25059+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25060+#else
25061+ .quad 0x0 /* unused */
25062+#endif
25063+
25064+ /* zero the remaining page */
25065+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25066+ .endr
25067+
25068 .align 16
25069 .globl early_gdt_descr
25070 early_gdt_descr:
25071 .word GDT_ENTRIES*8-1
25072 early_gdt_descr_base:
25073- .quad INIT_PER_CPU_VAR(gdt_page)
25074+ .quad cpu_gdt_table
25075
25076 ENTRY(phys_base)
25077 /* This must match the first entry in level2_kernel_pgt */
25078 .quad 0x0000000000000000
25079
25080 #include "../../x86/xen/xen-head.S"
25081-
25082- __PAGE_ALIGNED_BSS
25083+
25084+ .section .rodata,"a",@progbits
25085 NEXT_PAGE(empty_zero_page)
25086 .skip PAGE_SIZE
25087diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25088index 05fd74f..c3548b1 100644
25089--- a/arch/x86/kernel/i386_ksyms_32.c
25090+++ b/arch/x86/kernel/i386_ksyms_32.c
25091@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25092 EXPORT_SYMBOL(cmpxchg8b_emu);
25093 #endif
25094
25095+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25096+
25097 /* Networking helper routines. */
25098 EXPORT_SYMBOL(csum_partial_copy_generic);
25099+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25100+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25101
25102 EXPORT_SYMBOL(__get_user_1);
25103 EXPORT_SYMBOL(__get_user_2);
25104@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25105 EXPORT_SYMBOL(___preempt_schedule_context);
25106 #endif
25107 #endif
25108+
25109+#ifdef CONFIG_PAX_KERNEXEC
25110+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25111+#endif
25112+
25113+#ifdef CONFIG_PAX_PER_CPU_PGD
25114+EXPORT_SYMBOL(cpu_pgd);
25115+#endif
25116diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25117index a9a4229..6f4d476 100644
25118--- a/arch/x86/kernel/i387.c
25119+++ b/arch/x86/kernel/i387.c
25120@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25121 static inline bool interrupted_user_mode(void)
25122 {
25123 struct pt_regs *regs = get_irq_regs();
25124- return regs && user_mode_vm(regs);
25125+ return regs && user_mode(regs);
25126 }
25127
25128 /*
25129diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25130index e7cc537..67d7372 100644
25131--- a/arch/x86/kernel/i8259.c
25132+++ b/arch/x86/kernel/i8259.c
25133@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25134 static void make_8259A_irq(unsigned int irq)
25135 {
25136 disable_irq_nosync(irq);
25137- io_apic_irqs &= ~(1<<irq);
25138+ io_apic_irqs &= ~(1UL<<irq);
25139 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25140 enable_irq(irq);
25141 }
25142@@ -208,7 +208,7 @@ spurious_8259A_irq:
25143 "spurious 8259A interrupt: IRQ%d.\n", irq);
25144 spurious_irq_mask |= irqmask;
25145 }
25146- atomic_inc(&irq_err_count);
25147+ atomic_inc_unchecked(&irq_err_count);
25148 /*
25149 * Theoretically we do not have to handle this IRQ,
25150 * but in Linux this does not cause problems and is
25151@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25152 /* (slave's support for AEOI in flat mode is to be investigated) */
25153 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25154
25155+ pax_open_kernel();
25156 if (auto_eoi)
25157 /*
25158 * In AEOI mode we just have to mask the interrupt
25159 * when acking.
25160 */
25161- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25162+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25163 else
25164- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25165+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25166+ pax_close_kernel();
25167
25168 udelay(100); /* wait for 8259A to initialize */
25169
25170diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25171index a979b5b..1d6db75 100644
25172--- a/arch/x86/kernel/io_delay.c
25173+++ b/arch/x86/kernel/io_delay.c
25174@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25175 * Quirk table for systems that misbehave (lock up, etc.) if port
25176 * 0x80 is used:
25177 */
25178-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25179+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25180 {
25181 .callback = dmi_io_delay_0xed_port,
25182 .ident = "Compaq Presario V6000",
25183diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25184index 4ddaf66..49d5c18 100644
25185--- a/arch/x86/kernel/ioport.c
25186+++ b/arch/x86/kernel/ioport.c
25187@@ -6,6 +6,7 @@
25188 #include <linux/sched.h>
25189 #include <linux/kernel.h>
25190 #include <linux/capability.h>
25191+#include <linux/security.h>
25192 #include <linux/errno.h>
25193 #include <linux/types.h>
25194 #include <linux/ioport.h>
25195@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25196 return -EINVAL;
25197 if (turn_on && !capable(CAP_SYS_RAWIO))
25198 return -EPERM;
25199+#ifdef CONFIG_GRKERNSEC_IO
25200+ if (turn_on && grsec_disable_privio) {
25201+ gr_handle_ioperm();
25202+ return -ENODEV;
25203+ }
25204+#endif
25205
25206 /*
25207 * If it's the first ioperm() call in this thread's lifetime, set the
25208@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25209 * because the ->io_bitmap_max value must match the bitmap
25210 * contents:
25211 */
25212- tss = &per_cpu(init_tss, get_cpu());
25213+ tss = init_tss + get_cpu();
25214
25215 if (turn_on)
25216 bitmap_clear(t->io_bitmap_ptr, from, num);
25217@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25218 if (level > old) {
25219 if (!capable(CAP_SYS_RAWIO))
25220 return -EPERM;
25221+#ifdef CONFIG_GRKERNSEC_IO
25222+ if (grsec_disable_privio) {
25223+ gr_handle_iopl();
25224+ return -ENODEV;
25225+ }
25226+#endif
25227 }
25228 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25229 t->iopl = level << 12;
25230diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25231index 705ef8d..8672c9d 100644
25232--- a/arch/x86/kernel/irq.c
25233+++ b/arch/x86/kernel/irq.c
25234@@ -22,7 +22,7 @@
25235 #define CREATE_TRACE_POINTS
25236 #include <asm/trace/irq_vectors.h>
25237
25238-atomic_t irq_err_count;
25239+atomic_unchecked_t irq_err_count;
25240
25241 /* Function pointer for generic interrupt vector handling */
25242 void (*x86_platform_ipi_callback)(void) = NULL;
25243@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25244 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25245 seq_puts(p, " Hypervisor callback interrupts\n");
25246 #endif
25247- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25248+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25249 #if defined(CONFIG_X86_IO_APIC)
25250- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25251+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25252 #endif
25253 return 0;
25254 }
25255@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25256
25257 u64 arch_irq_stat(void)
25258 {
25259- u64 sum = atomic_read(&irq_err_count);
25260+ u64 sum = atomic_read_unchecked(&irq_err_count);
25261 return sum;
25262 }
25263
25264diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25265index 63ce838..2ea3e06 100644
25266--- a/arch/x86/kernel/irq_32.c
25267+++ b/arch/x86/kernel/irq_32.c
25268@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25269
25270 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25271
25272+extern void gr_handle_kernel_exploit(void);
25273+
25274 int sysctl_panic_on_stackoverflow __read_mostly;
25275
25276 /* Debugging check for stack overflow: is there less than 1KB free? */
25277@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25278 __asm__ __volatile__("andl %%esp,%0" :
25279 "=r" (sp) : "0" (THREAD_SIZE - 1));
25280
25281- return sp < (sizeof(struct thread_info) + STACK_WARN);
25282+ return sp < STACK_WARN;
25283 }
25284
25285 static void print_stack_overflow(void)
25286 {
25287 printk(KERN_WARNING "low stack detected by irq handler\n");
25288 dump_stack();
25289+ gr_handle_kernel_exploit();
25290 if (sysctl_panic_on_stackoverflow)
25291 panic("low stack detected by irq handler - check messages\n");
25292 }
25293@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25294 static inline int
25295 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25296 {
25297- struct irq_stack *curstk, *irqstk;
25298+ struct irq_stack *irqstk;
25299 u32 *isp, *prev_esp, arg1, arg2;
25300
25301- curstk = (struct irq_stack *) current_stack();
25302 irqstk = __this_cpu_read(hardirq_stack);
25303
25304 /*
25305@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25306 * handler) we can't do that and just have to keep using the
25307 * current stack (which is the irq stack already after all)
25308 */
25309- if (unlikely(curstk == irqstk))
25310+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25311 return 0;
25312
25313- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25314+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25315
25316 /* Save the next esp at the bottom of the stack */
25317 prev_esp = (u32 *)irqstk;
25318 *prev_esp = current_stack_pointer;
25319
25320+#ifdef CONFIG_PAX_MEMORY_UDEREF
25321+ __set_fs(MAKE_MM_SEG(0));
25322+#endif
25323+
25324 if (unlikely(overflow))
25325 call_on_stack(print_stack_overflow, isp);
25326
25327@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25328 : "0" (irq), "1" (desc), "2" (isp),
25329 "D" (desc->handle_irq)
25330 : "memory", "cc", "ecx");
25331+
25332+#ifdef CONFIG_PAX_MEMORY_UDEREF
25333+ __set_fs(current_thread_info()->addr_limit);
25334+#endif
25335+
25336 return 1;
25337 }
25338
25339@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25340 */
25341 void irq_ctx_init(int cpu)
25342 {
25343- struct irq_stack *irqstk;
25344-
25345 if (per_cpu(hardirq_stack, cpu))
25346 return;
25347
25348- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25349- THREADINFO_GFP,
25350- THREAD_SIZE_ORDER));
25351- per_cpu(hardirq_stack, cpu) = irqstk;
25352-
25353- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25354- THREADINFO_GFP,
25355- THREAD_SIZE_ORDER));
25356- per_cpu(softirq_stack, cpu) = irqstk;
25357-
25358- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25359- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25360+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25361+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25362 }
25363
25364 void do_softirq_own_stack(void)
25365 {
25366- struct thread_info *curstk;
25367 struct irq_stack *irqstk;
25368 u32 *isp, *prev_esp;
25369
25370- curstk = current_stack();
25371 irqstk = __this_cpu_read(softirq_stack);
25372
25373 /* build the stack frame on the softirq stack */
25374@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25375 prev_esp = (u32 *)irqstk;
25376 *prev_esp = current_stack_pointer;
25377
25378+#ifdef CONFIG_PAX_MEMORY_UDEREF
25379+ __set_fs(MAKE_MM_SEG(0));
25380+#endif
25381+
25382 call_on_stack(__do_softirq, isp);
25383+
25384+#ifdef CONFIG_PAX_MEMORY_UDEREF
25385+ __set_fs(current_thread_info()->addr_limit);
25386+#endif
25387+
25388 }
25389
25390 bool handle_irq(unsigned irq, struct pt_regs *regs)
25391@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25392 if (unlikely(!desc))
25393 return false;
25394
25395- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25396+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25397 if (unlikely(overflow))
25398 print_stack_overflow();
25399 desc->handle_irq(irq, desc);
25400diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25401index e4b503d..824fce8 100644
25402--- a/arch/x86/kernel/irq_64.c
25403+++ b/arch/x86/kernel/irq_64.c
25404@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25405 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25406 EXPORT_PER_CPU_SYMBOL(irq_regs);
25407
25408+extern void gr_handle_kernel_exploit(void);
25409+
25410 int sysctl_panic_on_stackoverflow;
25411
25412 /*
25413@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25414 u64 estack_top, estack_bottom;
25415 u64 curbase = (u64)task_stack_page(current);
25416
25417- if (user_mode_vm(regs))
25418+ if (user_mode(regs))
25419 return;
25420
25421 if (regs->sp >= curbase + sizeof(struct thread_info) +
25422@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25423 irq_stack_top, irq_stack_bottom,
25424 estack_top, estack_bottom);
25425
25426+ gr_handle_kernel_exploit();
25427+
25428 if (sysctl_panic_on_stackoverflow)
25429 panic("low stack detected by irq handler - check messages\n");
25430 #endif
25431diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25432index 26d5a55..a01160a 100644
25433--- a/arch/x86/kernel/jump_label.c
25434+++ b/arch/x86/kernel/jump_label.c
25435@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25436 * Jump label is enabled for the first time.
25437 * So we expect a default_nop...
25438 */
25439- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25440+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25441 != 0))
25442 bug_at((void *)entry->code, __LINE__);
25443 } else {
25444@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25445 * ...otherwise expect an ideal_nop. Otherwise
25446 * something went horribly wrong.
25447 */
25448- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25449+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25450 != 0))
25451 bug_at((void *)entry->code, __LINE__);
25452 }
25453@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25454 * are converting the default nop to the ideal nop.
25455 */
25456 if (init) {
25457- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25458+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25459 bug_at((void *)entry->code, __LINE__);
25460 } else {
25461 code.jump = 0xe9;
25462 code.offset = entry->target -
25463 (entry->code + JUMP_LABEL_NOP_SIZE);
25464- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25465+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25466 bug_at((void *)entry->code, __LINE__);
25467 }
25468 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25469diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25470index 7ec1d5f..5a7d130 100644
25471--- a/arch/x86/kernel/kgdb.c
25472+++ b/arch/x86/kernel/kgdb.c
25473@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25474 #ifdef CONFIG_X86_32
25475 switch (regno) {
25476 case GDB_SS:
25477- if (!user_mode_vm(regs))
25478+ if (!user_mode(regs))
25479 *(unsigned long *)mem = __KERNEL_DS;
25480 break;
25481 case GDB_SP:
25482- if (!user_mode_vm(regs))
25483+ if (!user_mode(regs))
25484 *(unsigned long *)mem = kernel_stack_pointer(regs);
25485 break;
25486 case GDB_GS:
25487@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25488 bp->attr.bp_addr = breakinfo[breakno].addr;
25489 bp->attr.bp_len = breakinfo[breakno].len;
25490 bp->attr.bp_type = breakinfo[breakno].type;
25491- info->address = breakinfo[breakno].addr;
25492+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25493+ info->address = ktla_ktva(breakinfo[breakno].addr);
25494+ else
25495+ info->address = breakinfo[breakno].addr;
25496 info->len = breakinfo[breakno].len;
25497 info->type = breakinfo[breakno].type;
25498 val = arch_install_hw_breakpoint(bp);
25499@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25500 case 'k':
25501 /* clear the trace bit */
25502 linux_regs->flags &= ~X86_EFLAGS_TF;
25503- atomic_set(&kgdb_cpu_doing_single_step, -1);
25504+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25505
25506 /* set the trace bit if we're stepping */
25507 if (remcomInBuffer[0] == 's') {
25508 linux_regs->flags |= X86_EFLAGS_TF;
25509- atomic_set(&kgdb_cpu_doing_single_step,
25510+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25511 raw_smp_processor_id());
25512 }
25513
25514@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25515
25516 switch (cmd) {
25517 case DIE_DEBUG:
25518- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25519+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25520 if (user_mode(regs))
25521 return single_step_cont(regs, args);
25522 break;
25523@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25524 #endif /* CONFIG_DEBUG_RODATA */
25525
25526 bpt->type = BP_BREAKPOINT;
25527- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25528+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25529 BREAK_INSTR_SIZE);
25530 if (err)
25531 return err;
25532- err = probe_kernel_write((char *)bpt->bpt_addr,
25533+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25534 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25535 #ifdef CONFIG_DEBUG_RODATA
25536 if (!err)
25537@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25538 return -EBUSY;
25539 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25540 BREAK_INSTR_SIZE);
25541- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25542+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25543 if (err)
25544 return err;
25545 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25546@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25547 if (mutex_is_locked(&text_mutex))
25548 goto knl_write;
25549 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25550- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25551+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25552 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25553 goto knl_write;
25554 return err;
25555 knl_write:
25556 #endif /* CONFIG_DEBUG_RODATA */
25557- return probe_kernel_write((char *)bpt->bpt_addr,
25558+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25559 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25560 }
25561
25562diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25563index 98f654d..ac04352 100644
25564--- a/arch/x86/kernel/kprobes/core.c
25565+++ b/arch/x86/kernel/kprobes/core.c
25566@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25567 s32 raddr;
25568 } __packed *insn;
25569
25570- insn = (struct __arch_relative_insn *)from;
25571+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25572+
25573+ pax_open_kernel();
25574 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25575 insn->op = op;
25576+ pax_close_kernel();
25577 }
25578
25579 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25580@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25581 kprobe_opcode_t opcode;
25582 kprobe_opcode_t *orig_opcodes = opcodes;
25583
25584- if (search_exception_tables((unsigned long)opcodes))
25585+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25586 return 0; /* Page fault may occur on this address. */
25587
25588 retry:
25589@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25590 * for the first byte, we can recover the original instruction
25591 * from it and kp->opcode.
25592 */
25593- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25594+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25595 buf[0] = kp->opcode;
25596- return (unsigned long)buf;
25597+ return ktva_ktla((unsigned long)buf);
25598 }
25599
25600 /*
25601@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25602 /* Another subsystem puts a breakpoint, failed to recover */
25603 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25604 return 0;
25605+ pax_open_kernel();
25606 memcpy(dest, insn.kaddr, insn.length);
25607+ pax_close_kernel();
25608
25609 #ifdef CONFIG_X86_64
25610 if (insn_rip_relative(&insn)) {
25611@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25612 return 0;
25613 }
25614 disp = (u8 *) dest + insn_offset_displacement(&insn);
25615+ pax_open_kernel();
25616 *(s32 *) disp = (s32) newdisp;
25617+ pax_close_kernel();
25618 }
25619 #endif
25620 return insn.length;
25621@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25622 * nor set current_kprobe, because it doesn't use single
25623 * stepping.
25624 */
25625- regs->ip = (unsigned long)p->ainsn.insn;
25626+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25627 preempt_enable_no_resched();
25628 return;
25629 }
25630@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25631 regs->flags &= ~X86_EFLAGS_IF;
25632 /* single step inline if the instruction is an int3 */
25633 if (p->opcode == BREAKPOINT_INSTRUCTION)
25634- regs->ip = (unsigned long)p->addr;
25635+ regs->ip = ktla_ktva((unsigned long)p->addr);
25636 else
25637- regs->ip = (unsigned long)p->ainsn.insn;
25638+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25639 }
25640 NOKPROBE_SYMBOL(setup_singlestep);
25641
25642@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25643 struct kprobe *p;
25644 struct kprobe_ctlblk *kcb;
25645
25646- if (user_mode_vm(regs))
25647+ if (user_mode(regs))
25648 return 0;
25649
25650 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25651@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25652 setup_singlestep(p, regs, kcb, 0);
25653 return 1;
25654 }
25655- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25656+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25657 /*
25658 * The breakpoint instruction was removed right
25659 * after we hit it. Another cpu has removed
25660@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25661 " movq %rax, 152(%rsp)\n"
25662 RESTORE_REGS_STRING
25663 " popfq\n"
25664+#ifdef KERNEXEC_PLUGIN
25665+ " btsq $63,(%rsp)\n"
25666+#endif
25667 #else
25668 " pushf\n"
25669 SAVE_REGS_STRING
25670@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25671 struct kprobe_ctlblk *kcb)
25672 {
25673 unsigned long *tos = stack_addr(regs);
25674- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25675+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25676 unsigned long orig_ip = (unsigned long)p->addr;
25677 kprobe_opcode_t *insn = p->ainsn.insn;
25678
25679@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25680 struct die_args *args = data;
25681 int ret = NOTIFY_DONE;
25682
25683- if (args->regs && user_mode_vm(args->regs))
25684+ if (args->regs && user_mode(args->regs))
25685 return ret;
25686
25687 if (val == DIE_GPF) {
25688diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25689index 7c523bb..01b051b 100644
25690--- a/arch/x86/kernel/kprobes/opt.c
25691+++ b/arch/x86/kernel/kprobes/opt.c
25692@@ -79,6 +79,7 @@ found:
25693 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25694 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25695 {
25696+ pax_open_kernel();
25697 #ifdef CONFIG_X86_64
25698 *addr++ = 0x48;
25699 *addr++ = 0xbf;
25700@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25701 *addr++ = 0xb8;
25702 #endif
25703 *(unsigned long *)addr = val;
25704+ pax_close_kernel();
25705 }
25706
25707 asm (
25708@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25709 * Verify if the address gap is in 2GB range, because this uses
25710 * a relative jump.
25711 */
25712- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25713+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25714 if (abs(rel) > 0x7fffffff) {
25715 __arch_remove_optimized_kprobe(op, 0);
25716 return -ERANGE;
25717@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25718 op->optinsn.size = ret;
25719
25720 /* Copy arch-dep-instance from template */
25721- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25722+ pax_open_kernel();
25723+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25724+ pax_close_kernel();
25725
25726 /* Set probe information */
25727 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25728
25729 /* Set probe function call */
25730- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25731+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25732
25733 /* Set returning jmp instruction at the tail of out-of-line buffer */
25734- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25735+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25736 (u8 *)op->kp.addr + op->optinsn.size);
25737
25738 flush_icache_range((unsigned long) buf,
25739@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25740 WARN_ON(kprobe_disabled(&op->kp));
25741
25742 /* Backup instructions which will be replaced by jump address */
25743- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25744+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25745 RELATIVE_ADDR_SIZE);
25746
25747 insn_buf[0] = RELATIVEJUMP_OPCODE;
25748@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25749 /* This kprobe is really able to run optimized path. */
25750 op = container_of(p, struct optimized_kprobe, kp);
25751 /* Detour through copied instructions */
25752- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25753+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25754 if (!reenter)
25755 reset_current_kprobe();
25756 preempt_enable_no_resched();
25757diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25758index c2bedae..25e7ab60 100644
25759--- a/arch/x86/kernel/ksysfs.c
25760+++ b/arch/x86/kernel/ksysfs.c
25761@@ -184,7 +184,7 @@ out:
25762
25763 static struct kobj_attribute type_attr = __ATTR_RO(type);
25764
25765-static struct bin_attribute data_attr = {
25766+static bin_attribute_no_const data_attr __read_only = {
25767 .attr = {
25768 .name = "data",
25769 .mode = S_IRUGO,
25770diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25771index c37886d..d851d32 100644
25772--- a/arch/x86/kernel/ldt.c
25773+++ b/arch/x86/kernel/ldt.c
25774@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25775 if (reload) {
25776 #ifdef CONFIG_SMP
25777 preempt_disable();
25778- load_LDT(pc);
25779+ load_LDT_nolock(pc);
25780 if (!cpumask_equal(mm_cpumask(current->mm),
25781 cpumask_of(smp_processor_id())))
25782 smp_call_function(flush_ldt, current->mm, 1);
25783 preempt_enable();
25784 #else
25785- load_LDT(pc);
25786+ load_LDT_nolock(pc);
25787 #endif
25788 }
25789 if (oldsize) {
25790@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25791 return err;
25792
25793 for (i = 0; i < old->size; i++)
25794- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25795+ write_ldt_entry(new->ldt, i, old->ldt + i);
25796 return 0;
25797 }
25798
25799@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25800 retval = copy_ldt(&mm->context, &old_mm->context);
25801 mutex_unlock(&old_mm->context.lock);
25802 }
25803+
25804+ if (tsk == current) {
25805+ mm->context.vdso = 0;
25806+
25807+#ifdef CONFIG_X86_32
25808+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25809+ mm->context.user_cs_base = 0UL;
25810+ mm->context.user_cs_limit = ~0UL;
25811+
25812+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25813+ cpus_clear(mm->context.cpu_user_cs_mask);
25814+#endif
25815+
25816+#endif
25817+#endif
25818+
25819+ }
25820+
25821 return retval;
25822 }
25823
25824@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25825 }
25826 }
25827
25828+#ifdef CONFIG_PAX_SEGMEXEC
25829+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25830+ error = -EINVAL;
25831+ goto out_unlock;
25832+ }
25833+#endif
25834+
25835 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25836 error = -EINVAL;
25837 goto out_unlock;
25838diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25839index 469b23d..5449cfe 100644
25840--- a/arch/x86/kernel/machine_kexec_32.c
25841+++ b/arch/x86/kernel/machine_kexec_32.c
25842@@ -26,7 +26,7 @@
25843 #include <asm/cacheflush.h>
25844 #include <asm/debugreg.h>
25845
25846-static void set_idt(void *newidt, __u16 limit)
25847+static void set_idt(struct desc_struct *newidt, __u16 limit)
25848 {
25849 struct desc_ptr curidt;
25850
25851@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25852 }
25853
25854
25855-static void set_gdt(void *newgdt, __u16 limit)
25856+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25857 {
25858 struct desc_ptr curgdt;
25859
25860@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25861 }
25862
25863 control_page = page_address(image->control_code_page);
25864- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25865+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25866
25867 relocate_kernel_ptr = control_page;
25868 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25869diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25870index 94ea120..4154cea 100644
25871--- a/arch/x86/kernel/mcount_64.S
25872+++ b/arch/x86/kernel/mcount_64.S
25873@@ -7,7 +7,7 @@
25874 #include <linux/linkage.h>
25875 #include <asm/ptrace.h>
25876 #include <asm/ftrace.h>
25877-
25878+#include <asm/alternative-asm.h>
25879
25880 .code64
25881 .section .entry.text, "ax"
25882@@ -148,8 +148,9 @@
25883 #ifdef CONFIG_DYNAMIC_FTRACE
25884
25885 ENTRY(function_hook)
25886+ pax_force_retaddr
25887 retq
25888-END(function_hook)
25889+ENDPROC(function_hook)
25890
25891 ENTRY(ftrace_caller)
25892 /* save_mcount_regs fills in first two parameters */
25893@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25894 #endif
25895
25896 GLOBAL(ftrace_stub)
25897+ pax_force_retaddr
25898 retq
25899-END(ftrace_caller)
25900+ENDPROC(ftrace_caller)
25901
25902 ENTRY(ftrace_regs_caller)
25903 /* Save the current flags before any operations that can change them */
25904@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25905
25906 jmp ftrace_return
25907
25908-END(ftrace_regs_caller)
25909+ENDPROC(ftrace_regs_caller)
25910
25911
25912 #else /* ! CONFIG_DYNAMIC_FTRACE */
25913@@ -272,18 +274,20 @@ fgraph_trace:
25914 #endif
25915
25916 GLOBAL(ftrace_stub)
25917+ pax_force_retaddr
25918 retq
25919
25920 trace:
25921 /* save_mcount_regs fills in first two parameters */
25922 save_mcount_regs
25923
25924+ pax_force_fptr ftrace_trace_function
25925 call *ftrace_trace_function
25926
25927 restore_mcount_regs
25928
25929 jmp fgraph_trace
25930-END(function_hook)
25931+ENDPROC(function_hook)
25932 #endif /* CONFIG_DYNAMIC_FTRACE */
25933 #endif /* CONFIG_FUNCTION_TRACER */
25934
25935@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25936
25937 restore_mcount_regs
25938
25939+ pax_force_retaddr
25940 retq
25941-END(ftrace_graph_caller)
25942+ENDPROC(ftrace_graph_caller)
25943
25944 GLOBAL(return_to_handler)
25945 subq $24, %rsp
25946@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25947 movq 8(%rsp), %rdx
25948 movq (%rsp), %rax
25949 addq $24, %rsp
25950+ pax_force_fptr %rdi
25951 jmp *%rdi
25952+ENDPROC(return_to_handler)
25953 #endif
25954diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25955index e69f988..72902b7 100644
25956--- a/arch/x86/kernel/module.c
25957+++ b/arch/x86/kernel/module.c
25958@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
25959 }
25960 #endif
25961
25962-void *module_alloc(unsigned long size)
25963+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25964 {
25965- if (PAGE_ALIGN(size) > MODULES_LEN)
25966+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25967 return NULL;
25968 return __vmalloc_node_range(size, 1,
25969 MODULES_VADDR + get_module_load_offset(),
25970- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25971- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
25972+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25973+ prot, NUMA_NO_NODE,
25974 __builtin_return_address(0));
25975 }
25976
25977+void *module_alloc(unsigned long size)
25978+{
25979+
25980+#ifdef CONFIG_PAX_KERNEXEC
25981+ return __module_alloc(size, PAGE_KERNEL);
25982+#else
25983+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25984+#endif
25985+
25986+}
25987+
25988+#ifdef CONFIG_PAX_KERNEXEC
25989+#ifdef CONFIG_X86_32
25990+void *module_alloc_exec(unsigned long size)
25991+{
25992+ struct vm_struct *area;
25993+
25994+ if (size == 0)
25995+ return NULL;
25996+
25997+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25998+return area ? area->addr : NULL;
25999+}
26000+EXPORT_SYMBOL(module_alloc_exec);
26001+
26002+void module_memfree_exec(void *module_region)
26003+{
26004+ vunmap(module_region);
26005+}
26006+EXPORT_SYMBOL(module_memfree_exec);
26007+#else
26008+void module_memfree_exec(void *module_region)
26009+{
26010+ module_memfree(module_region);
26011+}
26012+EXPORT_SYMBOL(module_memfree_exec);
26013+
26014+void *module_alloc_exec(unsigned long size)
26015+{
26016+ return __module_alloc(size, PAGE_KERNEL_RX);
26017+}
26018+EXPORT_SYMBOL(module_alloc_exec);
26019+#endif
26020+#endif
26021+
26022 #ifdef CONFIG_X86_32
26023 int apply_relocate(Elf32_Shdr *sechdrs,
26024 const char *strtab,
26025@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26026 unsigned int i;
26027 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26028 Elf32_Sym *sym;
26029- uint32_t *location;
26030+ uint32_t *plocation, location;
26031
26032 DEBUGP("Applying relocate section %u to %u\n",
26033 relsec, sechdrs[relsec].sh_info);
26034 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26035 /* This is where to make the change */
26036- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26037- + rel[i].r_offset;
26038+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26039+ location = (uint32_t)plocation;
26040+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26041+ plocation = ktla_ktva((void *)plocation);
26042 /* This is the symbol it is referring to. Note that all
26043 undefined symbols have been resolved. */
26044 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26045@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26046 switch (ELF32_R_TYPE(rel[i].r_info)) {
26047 case R_386_32:
26048 /* We add the value into the location given */
26049- *location += sym->st_value;
26050+ pax_open_kernel();
26051+ *plocation += sym->st_value;
26052+ pax_close_kernel();
26053 break;
26054 case R_386_PC32:
26055 /* Add the value, subtract its position */
26056- *location += sym->st_value - (uint32_t)location;
26057+ pax_open_kernel();
26058+ *plocation += sym->st_value - location;
26059+ pax_close_kernel();
26060 break;
26061 default:
26062 pr_err("%s: Unknown relocation: %u\n",
26063@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26064 case R_X86_64_NONE:
26065 break;
26066 case R_X86_64_64:
26067+ pax_open_kernel();
26068 *(u64 *)loc = val;
26069+ pax_close_kernel();
26070 break;
26071 case R_X86_64_32:
26072+ pax_open_kernel();
26073 *(u32 *)loc = val;
26074+ pax_close_kernel();
26075 if (val != *(u32 *)loc)
26076 goto overflow;
26077 break;
26078 case R_X86_64_32S:
26079+ pax_open_kernel();
26080 *(s32 *)loc = val;
26081+ pax_close_kernel();
26082 if ((s64)val != *(s32 *)loc)
26083 goto overflow;
26084 break;
26085 case R_X86_64_PC32:
26086 val -= (u64)loc;
26087+ pax_open_kernel();
26088 *(u32 *)loc = val;
26089+ pax_close_kernel();
26090+
26091 #if 0
26092 if ((s64)val != *(s32 *)loc)
26093 goto overflow;
26094diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26095index 113e707..0a690e1 100644
26096--- a/arch/x86/kernel/msr.c
26097+++ b/arch/x86/kernel/msr.c
26098@@ -39,6 +39,7 @@
26099 #include <linux/notifier.h>
26100 #include <linux/uaccess.h>
26101 #include <linux/gfp.h>
26102+#include <linux/grsecurity.h>
26103
26104 #include <asm/processor.h>
26105 #include <asm/msr.h>
26106@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26107 int err = 0;
26108 ssize_t bytes = 0;
26109
26110+#ifdef CONFIG_GRKERNSEC_KMEM
26111+ gr_handle_msr_write();
26112+ return -EPERM;
26113+#endif
26114+
26115 if (count % 8)
26116 return -EINVAL; /* Invalid chunk size */
26117
26118@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26119 err = -EBADF;
26120 break;
26121 }
26122+#ifdef CONFIG_GRKERNSEC_KMEM
26123+ gr_handle_msr_write();
26124+ return -EPERM;
26125+#endif
26126 if (copy_from_user(&regs, uregs, sizeof regs)) {
26127 err = -EFAULT;
26128 break;
26129@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26130 return notifier_from_errno(err);
26131 }
26132
26133-static struct notifier_block __refdata msr_class_cpu_notifier = {
26134+static struct notifier_block msr_class_cpu_notifier = {
26135 .notifier_call = msr_class_cpu_callback,
26136 };
26137
26138diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26139index c3e985d..110a36a 100644
26140--- a/arch/x86/kernel/nmi.c
26141+++ b/arch/x86/kernel/nmi.c
26142@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26143
26144 static void nmi_max_handler(struct irq_work *w)
26145 {
26146- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26147+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26148 int remainder_ns, decimal_msecs;
26149- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26150+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26151
26152 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26153 decimal_msecs = remainder_ns / 1000;
26154
26155 printk_ratelimited(KERN_INFO
26156 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26157- a->handler, whole_msecs, decimal_msecs);
26158+ n->action->handler, whole_msecs, decimal_msecs);
26159 }
26160
26161 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26162@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26163 delta = sched_clock() - delta;
26164 trace_nmi_handler(a->handler, (int)delta, thishandled);
26165
26166- if (delta < nmi_longest_ns || delta < a->max_duration)
26167+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26168 continue;
26169
26170- a->max_duration = delta;
26171- irq_work_queue(&a->irq_work);
26172+ a->work->max_duration = delta;
26173+ irq_work_queue(&a->work->irq_work);
26174 }
26175
26176 rcu_read_unlock();
26177@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26178 }
26179 NOKPROBE_SYMBOL(nmi_handle);
26180
26181-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26182+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26183 {
26184 struct nmi_desc *desc = nmi_to_desc(type);
26185 unsigned long flags;
26186@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26187 if (!action->handler)
26188 return -EINVAL;
26189
26190- init_irq_work(&action->irq_work, nmi_max_handler);
26191+ action->work->action = action;
26192+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26193
26194 spin_lock_irqsave(&desc->lock, flags);
26195
26196@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26197 * event confuses some handlers (kdump uses this flag)
26198 */
26199 if (action->flags & NMI_FLAG_FIRST)
26200- list_add_rcu(&action->list, &desc->head);
26201+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26202 else
26203- list_add_tail_rcu(&action->list, &desc->head);
26204+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26205
26206 spin_unlock_irqrestore(&desc->lock, flags);
26207 return 0;
26208@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26209 if (!strcmp(n->name, name)) {
26210 WARN(in_nmi(),
26211 "Trying to free NMI (%s) from NMI context!\n", n->name);
26212- list_del_rcu(&n->list);
26213+ pax_list_del_rcu((struct list_head *)&n->list);
26214 break;
26215 }
26216 }
26217@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26218 dotraplinkage notrace void
26219 do_nmi(struct pt_regs *regs, long error_code)
26220 {
26221+
26222+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26223+ if (!user_mode(regs)) {
26224+ unsigned long cs = regs->cs & 0xFFFF;
26225+ unsigned long ip = ktva_ktla(regs->ip);
26226+
26227+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26228+ regs->ip = ip;
26229+ }
26230+#endif
26231+
26232 nmi_nesting_preprocess(regs);
26233
26234 nmi_enter();
26235diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26236index 6d9582e..f746287 100644
26237--- a/arch/x86/kernel/nmi_selftest.c
26238+++ b/arch/x86/kernel/nmi_selftest.c
26239@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26240 {
26241 /* trap all the unknown NMIs we may generate */
26242 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26243- __initdata);
26244+ __initconst);
26245 }
26246
26247 static void __init cleanup_nmi_testsuite(void)
26248@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26249 unsigned long timeout;
26250
26251 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26252- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26253+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26254 nmi_fail = FAILURE;
26255 return;
26256 }
26257diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26258index bbb6c73..24a58ef 100644
26259--- a/arch/x86/kernel/paravirt-spinlocks.c
26260+++ b/arch/x86/kernel/paravirt-spinlocks.c
26261@@ -8,7 +8,7 @@
26262
26263 #include <asm/paravirt.h>
26264
26265-struct pv_lock_ops pv_lock_ops = {
26266+struct pv_lock_ops pv_lock_ops __read_only = {
26267 #ifdef CONFIG_SMP
26268 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26269 .unlock_kick = paravirt_nop,
26270diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26271index 548d25f..f8fb99c 100644
26272--- a/arch/x86/kernel/paravirt.c
26273+++ b/arch/x86/kernel/paravirt.c
26274@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26275 {
26276 return x;
26277 }
26278+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26279+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26280+#endif
26281
26282 void __init default_banner(void)
26283 {
26284@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26285
26286 if (opfunc == NULL)
26287 /* If there's no function, patch it with a ud2a (BUG) */
26288- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26289- else if (opfunc == _paravirt_nop)
26290+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26291+ else if (opfunc == (void *)_paravirt_nop)
26292 /* If the operation is a nop, then nop the callsite */
26293 ret = paravirt_patch_nop();
26294
26295 /* identity functions just return their single argument */
26296- else if (opfunc == _paravirt_ident_32)
26297+ else if (opfunc == (void *)_paravirt_ident_32)
26298 ret = paravirt_patch_ident_32(insnbuf, len);
26299- else if (opfunc == _paravirt_ident_64)
26300+ else if (opfunc == (void *)_paravirt_ident_64)
26301 ret = paravirt_patch_ident_64(insnbuf, len);
26302+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26303+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26304+ ret = paravirt_patch_ident_64(insnbuf, len);
26305+#endif
26306
26307 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26308 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26309@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26310 if (insn_len > len || start == NULL)
26311 insn_len = len;
26312 else
26313- memcpy(insnbuf, start, insn_len);
26314+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26315
26316 return insn_len;
26317 }
26318@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26319 return this_cpu_read(paravirt_lazy_mode);
26320 }
26321
26322-struct pv_info pv_info = {
26323+struct pv_info pv_info __read_only = {
26324 .name = "bare hardware",
26325 .paravirt_enabled = 0,
26326 .kernel_rpl = 0,
26327@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26328 #endif
26329 };
26330
26331-struct pv_init_ops pv_init_ops = {
26332+struct pv_init_ops pv_init_ops __read_only = {
26333 .patch = native_patch,
26334 };
26335
26336-struct pv_time_ops pv_time_ops = {
26337+struct pv_time_ops pv_time_ops __read_only = {
26338 .sched_clock = native_sched_clock,
26339 .steal_clock = native_steal_clock,
26340 };
26341
26342-__visible struct pv_irq_ops pv_irq_ops = {
26343+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26344 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26345 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26346 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26347@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26348 #endif
26349 };
26350
26351-__visible struct pv_cpu_ops pv_cpu_ops = {
26352+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26353 .cpuid = native_cpuid,
26354 .get_debugreg = native_get_debugreg,
26355 .set_debugreg = native_set_debugreg,
26356@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26357 NOKPROBE_SYMBOL(native_set_debugreg);
26358 NOKPROBE_SYMBOL(native_load_idt);
26359
26360-struct pv_apic_ops pv_apic_ops = {
26361+struct pv_apic_ops pv_apic_ops __read_only= {
26362 #ifdef CONFIG_X86_LOCAL_APIC
26363 .startup_ipi_hook = paravirt_nop,
26364 #endif
26365 };
26366
26367-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26368+#ifdef CONFIG_X86_32
26369+#ifdef CONFIG_X86_PAE
26370+/* 64-bit pagetable entries */
26371+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26372+#else
26373 /* 32-bit pagetable entries */
26374 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26375+#endif
26376 #else
26377 /* 64-bit pagetable entries */
26378 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26379 #endif
26380
26381-struct pv_mmu_ops pv_mmu_ops = {
26382+struct pv_mmu_ops pv_mmu_ops __read_only = {
26383
26384 .read_cr2 = native_read_cr2,
26385 .write_cr2 = native_write_cr2,
26386@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26387 .make_pud = PTE_IDENT,
26388
26389 .set_pgd = native_set_pgd,
26390+ .set_pgd_batched = native_set_pgd_batched,
26391 #endif
26392 #endif /* PAGETABLE_LEVELS >= 3 */
26393
26394@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26395 },
26396
26397 .set_fixmap = native_set_fixmap,
26398+
26399+#ifdef CONFIG_PAX_KERNEXEC
26400+ .pax_open_kernel = native_pax_open_kernel,
26401+ .pax_close_kernel = native_pax_close_kernel,
26402+#endif
26403+
26404 };
26405
26406 EXPORT_SYMBOL_GPL(pv_time_ops);
26407diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26408index a1da673..b6f5831 100644
26409--- a/arch/x86/kernel/paravirt_patch_64.c
26410+++ b/arch/x86/kernel/paravirt_patch_64.c
26411@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26412 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26413 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26414 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26415+
26416+#ifndef CONFIG_PAX_MEMORY_UDEREF
26417 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26418+#endif
26419+
26420 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26421 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26422
26423@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26424 PATCH_SITE(pv_mmu_ops, read_cr3);
26425 PATCH_SITE(pv_mmu_ops, write_cr3);
26426 PATCH_SITE(pv_cpu_ops, clts);
26427+
26428+#ifndef CONFIG_PAX_MEMORY_UDEREF
26429 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26430+#endif
26431+
26432 PATCH_SITE(pv_cpu_ops, wbinvd);
26433
26434 patch_site:
26435diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26436index 0497f71..7186c0d 100644
26437--- a/arch/x86/kernel/pci-calgary_64.c
26438+++ b/arch/x86/kernel/pci-calgary_64.c
26439@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26440 tce_space = be64_to_cpu(readq(target));
26441 tce_space = tce_space & TAR_SW_BITS;
26442
26443- tce_space = tce_space & (~specified_table_size);
26444+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26445 info->tce_space = (u64 *)__va(tce_space);
26446 }
26447 }
26448diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26449index 35ccf75..7a15747 100644
26450--- a/arch/x86/kernel/pci-iommu_table.c
26451+++ b/arch/x86/kernel/pci-iommu_table.c
26452@@ -2,7 +2,7 @@
26453 #include <asm/iommu_table.h>
26454 #include <linux/string.h>
26455 #include <linux/kallsyms.h>
26456-
26457+#include <linux/sched.h>
26458
26459 #define DEBUG 1
26460
26461diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26462index 77dd0ad..9ec4723 100644
26463--- a/arch/x86/kernel/pci-swiotlb.c
26464+++ b/arch/x86/kernel/pci-swiotlb.c
26465@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26466 struct dma_attrs *attrs)
26467 {
26468 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26469- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26470+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26471 else
26472 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26473 }
26474diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26475index e127dda..94e384d 100644
26476--- a/arch/x86/kernel/process.c
26477+++ b/arch/x86/kernel/process.c
26478@@ -36,7 +36,8 @@
26479 * section. Since TSS's are completely CPU-local, we want them
26480 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26481 */
26482-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26483+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26484+EXPORT_SYMBOL(init_tss);
26485
26486 #ifdef CONFIG_X86_64
26487 static DEFINE_PER_CPU(unsigned char, is_idle);
26488@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26489 task_xstate_cachep =
26490 kmem_cache_create("task_xstate", xstate_size,
26491 __alignof__(union thread_xstate),
26492- SLAB_PANIC | SLAB_NOTRACK, NULL);
26493+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26494 setup_xstate_comp();
26495 }
26496
26497@@ -108,7 +109,7 @@ void exit_thread(void)
26498 unsigned long *bp = t->io_bitmap_ptr;
26499
26500 if (bp) {
26501- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26502+ struct tss_struct *tss = init_tss + get_cpu();
26503
26504 t->io_bitmap_ptr = NULL;
26505 clear_thread_flag(TIF_IO_BITMAP);
26506@@ -128,6 +129,9 @@ void flush_thread(void)
26507 {
26508 struct task_struct *tsk = current;
26509
26510+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26511+ loadsegment(gs, 0);
26512+#endif
26513 flush_ptrace_hw_breakpoint(tsk);
26514 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26515 drop_init_fpu(tsk);
26516@@ -274,7 +278,7 @@ static void __exit_idle(void)
26517 void exit_idle(void)
26518 {
26519 /* idle loop has pid 0 */
26520- if (current->pid)
26521+ if (task_pid_nr(current))
26522 return;
26523 __exit_idle();
26524 }
26525@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26526 return ret;
26527 }
26528 #endif
26529-void stop_this_cpu(void *dummy)
26530+__noreturn void stop_this_cpu(void *dummy)
26531 {
26532 local_irq_disable();
26533 /*
26534@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26535 }
26536 early_param("idle", idle_setup);
26537
26538-unsigned long arch_align_stack(unsigned long sp)
26539+#ifdef CONFIG_PAX_RANDKSTACK
26540+void pax_randomize_kstack(struct pt_regs *regs)
26541 {
26542- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26543- sp -= get_random_int() % 8192;
26544- return sp & ~0xf;
26545-}
26546+ struct thread_struct *thread = &current->thread;
26547+ unsigned long time;
26548
26549-unsigned long arch_randomize_brk(struct mm_struct *mm)
26550-{
26551- unsigned long range_end = mm->brk + 0x02000000;
26552- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26553-}
26554+ if (!randomize_va_space)
26555+ return;
26556+
26557+ if (v8086_mode(regs))
26558+ return;
26559
26560+ rdtscl(time);
26561+
26562+ /* P4 seems to return a 0 LSB, ignore it */
26563+#ifdef CONFIG_MPENTIUM4
26564+ time &= 0x3EUL;
26565+ time <<= 2;
26566+#elif defined(CONFIG_X86_64)
26567+ time &= 0xFUL;
26568+ time <<= 4;
26569+#else
26570+ time &= 0x1FUL;
26571+ time <<= 3;
26572+#endif
26573+
26574+ thread->sp0 ^= time;
26575+ load_sp0(init_tss + smp_processor_id(), thread);
26576+
26577+#ifdef CONFIG_X86_64
26578+ this_cpu_write(kernel_stack, thread->sp0);
26579+#endif
26580+}
26581+#endif
26582diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26583index 8f3ebfe..cbc731b 100644
26584--- a/arch/x86/kernel/process_32.c
26585+++ b/arch/x86/kernel/process_32.c
26586@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26587 unsigned long thread_saved_pc(struct task_struct *tsk)
26588 {
26589 return ((unsigned long *)tsk->thread.sp)[3];
26590+//XXX return tsk->thread.eip;
26591 }
26592
26593 void __show_regs(struct pt_regs *regs, int all)
26594@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26595 unsigned long sp;
26596 unsigned short ss, gs;
26597
26598- if (user_mode_vm(regs)) {
26599+ if (user_mode(regs)) {
26600 sp = regs->sp;
26601 ss = regs->ss & 0xffff;
26602- gs = get_user_gs(regs);
26603 } else {
26604 sp = kernel_stack_pointer(regs);
26605 savesegment(ss, ss);
26606- savesegment(gs, gs);
26607 }
26608+ gs = get_user_gs(regs);
26609
26610 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26611 (u16)regs->cs, regs->ip, regs->flags,
26612- smp_processor_id());
26613+ raw_smp_processor_id());
26614 print_symbol("EIP is at %s\n", regs->ip);
26615
26616 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26617@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26618 int copy_thread(unsigned long clone_flags, unsigned long sp,
26619 unsigned long arg, struct task_struct *p)
26620 {
26621- struct pt_regs *childregs = task_pt_regs(p);
26622+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26623 struct task_struct *tsk;
26624 int err;
26625
26626 p->thread.sp = (unsigned long) childregs;
26627 p->thread.sp0 = (unsigned long) (childregs+1);
26628+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26629 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26630
26631 if (unlikely(p->flags & PF_KTHREAD)) {
26632 /* kernel thread */
26633 memset(childregs, 0, sizeof(struct pt_regs));
26634 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26635- task_user_gs(p) = __KERNEL_STACK_CANARY;
26636- childregs->ds = __USER_DS;
26637- childregs->es = __USER_DS;
26638+ savesegment(gs, childregs->gs);
26639+ childregs->ds = __KERNEL_DS;
26640+ childregs->es = __KERNEL_DS;
26641 childregs->fs = __KERNEL_PERCPU;
26642 childregs->bx = sp; /* function */
26643 childregs->bp = arg;
26644@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26645 struct thread_struct *prev = &prev_p->thread,
26646 *next = &next_p->thread;
26647 int cpu = smp_processor_id();
26648- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26649+ struct tss_struct *tss = init_tss + cpu;
26650 fpu_switch_t fpu;
26651
26652 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26653@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26654 */
26655 lazy_save_gs(prev->gs);
26656
26657+#ifdef CONFIG_PAX_MEMORY_UDEREF
26658+ __set_fs(task_thread_info(next_p)->addr_limit);
26659+#endif
26660+
26661 /*
26662 * Load the per-thread Thread-Local Storage descriptor.
26663 */
26664@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26665 */
26666 arch_end_context_switch(next_p);
26667
26668- this_cpu_write(kernel_stack,
26669- (unsigned long)task_stack_page(next_p) +
26670- THREAD_SIZE - KERNEL_STACK_OFFSET);
26671+ this_cpu_write(current_task, next_p);
26672+ this_cpu_write(current_tinfo, &next_p->tinfo);
26673+ this_cpu_write(kernel_stack, next->sp0);
26674
26675 /*
26676 * Restore %gs if needed (which is common)
26677@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26678
26679 switch_fpu_finish(next_p, fpu);
26680
26681- this_cpu_write(current_task, next_p);
26682-
26683 return prev_p;
26684 }
26685
26686@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26687 } while (count++ < 16);
26688 return 0;
26689 }
26690-
26691diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26692index 5a2c029..ec8611d 100644
26693--- a/arch/x86/kernel/process_64.c
26694+++ b/arch/x86/kernel/process_64.c
26695@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26696 struct pt_regs *childregs;
26697 struct task_struct *me = current;
26698
26699- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26700+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26701 childregs = task_pt_regs(p);
26702 p->thread.sp = (unsigned long) childregs;
26703 p->thread.usersp = me->thread.usersp;
26704+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26705 set_tsk_thread_flag(p, TIF_FORK);
26706 p->thread.io_bitmap_ptr = NULL;
26707
26708@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26709 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26710 savesegment(es, p->thread.es);
26711 savesegment(ds, p->thread.ds);
26712+ savesegment(ss, p->thread.ss);
26713+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26714 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26715
26716 if (unlikely(p->flags & PF_KTHREAD)) {
26717@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26718 struct thread_struct *prev = &prev_p->thread;
26719 struct thread_struct *next = &next_p->thread;
26720 int cpu = smp_processor_id();
26721- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26722+ struct tss_struct *tss = init_tss + cpu;
26723 unsigned fsindex, gsindex;
26724 fpu_switch_t fpu;
26725
26726@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26727 if (unlikely(next->ds | prev->ds))
26728 loadsegment(ds, next->ds);
26729
26730+ savesegment(ss, prev->ss);
26731+ if (unlikely(next->ss != prev->ss))
26732+ loadsegment(ss, next->ss);
26733+
26734 /*
26735 * Switch FS and GS.
26736 *
26737@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26738 prev->usersp = this_cpu_read(old_rsp);
26739 this_cpu_write(old_rsp, next->usersp);
26740 this_cpu_write(current_task, next_p);
26741+ this_cpu_write(current_tinfo, &next_p->tinfo);
26742
26743 /*
26744 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26745@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26746 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26747 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26748
26749- this_cpu_write(kernel_stack,
26750- (unsigned long)task_stack_page(next_p) +
26751- THREAD_SIZE - KERNEL_STACK_OFFSET);
26752+ this_cpu_write(kernel_stack, next->sp0);
26753
26754 /*
26755 * Now maybe reload the debug registers and handle I/O bitmaps
26756@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26757 if (!p || p == current || p->state == TASK_RUNNING)
26758 return 0;
26759 stack = (unsigned long)task_stack_page(p);
26760- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26761+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26762 return 0;
26763 fp = *(u64 *)(p->thread.sp);
26764 do {
26765- if (fp < (unsigned long)stack ||
26766- fp >= (unsigned long)stack+THREAD_SIZE)
26767+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26768 return 0;
26769 ip = *(u64 *)(fp+8);
26770 if (!in_sched_functions(ip))
26771diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26772index e510618..5165ac0 100644
26773--- a/arch/x86/kernel/ptrace.c
26774+++ b/arch/x86/kernel/ptrace.c
26775@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26776 unsigned long sp = (unsigned long)&regs->sp;
26777 u32 *prev_esp;
26778
26779- if (context == (sp & ~(THREAD_SIZE - 1)))
26780+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26781 return sp;
26782
26783- prev_esp = (u32 *)(context);
26784+ prev_esp = *(u32 **)(context);
26785 if (prev_esp)
26786 return (unsigned long)prev_esp;
26787
26788@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26789 if (child->thread.gs != value)
26790 return do_arch_prctl(child, ARCH_SET_GS, value);
26791 return 0;
26792+
26793+ case offsetof(struct user_regs_struct,ip):
26794+ /*
26795+ * Protect against any attempt to set ip to an
26796+ * impossible address. There are dragons lurking if the
26797+ * address is noncanonical. (This explicitly allows
26798+ * setting ip to TASK_SIZE_MAX, because user code can do
26799+ * that all by itself by running off the end of its
26800+ * address space.
26801+ */
26802+ if (value > TASK_SIZE_MAX)
26803+ return -EIO;
26804+ break;
26805+
26806 #endif
26807 }
26808
26809@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26810 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26811 {
26812 int i;
26813- int dr7 = 0;
26814+ unsigned long dr7 = 0;
26815 struct arch_hw_breakpoint *info;
26816
26817 for (i = 0; i < HBP_NUM; i++) {
26818@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26819 unsigned long addr, unsigned long data)
26820 {
26821 int ret;
26822- unsigned long __user *datap = (unsigned long __user *)data;
26823+ unsigned long __user *datap = (__force unsigned long __user *)data;
26824
26825 switch (request) {
26826 /* read the word at location addr in the USER area. */
26827@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26828 if ((int) addr < 0)
26829 return -EIO;
26830 ret = do_get_thread_area(child, addr,
26831- (struct user_desc __user *)data);
26832+ (__force struct user_desc __user *) data);
26833 break;
26834
26835 case PTRACE_SET_THREAD_AREA:
26836 if ((int) addr < 0)
26837 return -EIO;
26838 ret = do_set_thread_area(child, addr,
26839- (struct user_desc __user *)data, 0);
26840+ (__force struct user_desc __user *) data, 0);
26841 break;
26842 #endif
26843
26844@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26845
26846 #ifdef CONFIG_X86_64
26847
26848-static struct user_regset x86_64_regsets[] __read_mostly = {
26849+static user_regset_no_const x86_64_regsets[] __read_only = {
26850 [REGSET_GENERAL] = {
26851 .core_note_type = NT_PRSTATUS,
26852 .n = sizeof(struct user_regs_struct) / sizeof(long),
26853@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26854 #endif /* CONFIG_X86_64 */
26855
26856 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26857-static struct user_regset x86_32_regsets[] __read_mostly = {
26858+static user_regset_no_const x86_32_regsets[] __read_only = {
26859 [REGSET_GENERAL] = {
26860 .core_note_type = NT_PRSTATUS,
26861 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26862@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26863 */
26864 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26865
26866-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26867+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26868 {
26869 #ifdef CONFIG_X86_64
26870 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26871@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26872 memset(info, 0, sizeof(*info));
26873 info->si_signo = SIGTRAP;
26874 info->si_code = si_code;
26875- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26876+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26877 }
26878
26879 void user_single_step_siginfo(struct task_struct *tsk,
26880@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26881 }
26882 }
26883
26884+#ifdef CONFIG_GRKERNSEC_SETXID
26885+extern void gr_delayed_cred_worker(void);
26886+#endif
26887+
26888 /*
26889 * We can return 0 to resume the syscall or anything else to go to phase
26890 * 2. If we resume the syscall, we need to put something appropriate in
26891@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26892
26893 BUG_ON(regs != task_pt_regs(current));
26894
26895+#ifdef CONFIG_GRKERNSEC_SETXID
26896+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26897+ gr_delayed_cred_worker();
26898+#endif
26899+
26900 /*
26901 * If we stepped into a sysenter/syscall insn, it trapped in
26902 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26903@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26904 */
26905 user_exit();
26906
26907+#ifdef CONFIG_GRKERNSEC_SETXID
26908+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26909+ gr_delayed_cred_worker();
26910+#endif
26911+
26912 audit_syscall_exit(regs);
26913
26914 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26915diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26916index 2f355d2..e75ed0a 100644
26917--- a/arch/x86/kernel/pvclock.c
26918+++ b/arch/x86/kernel/pvclock.c
26919@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26920 reset_hung_task_detector();
26921 }
26922
26923-static atomic64_t last_value = ATOMIC64_INIT(0);
26924+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26925
26926 void pvclock_resume(void)
26927 {
26928- atomic64_set(&last_value, 0);
26929+ atomic64_set_unchecked(&last_value, 0);
26930 }
26931
26932 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26933@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26934 * updating at the same time, and one of them could be slightly behind,
26935 * making the assumption that last_value always go forward fail to hold.
26936 */
26937- last = atomic64_read(&last_value);
26938+ last = atomic64_read_unchecked(&last_value);
26939 do {
26940 if (ret < last)
26941 return last;
26942- last = atomic64_cmpxchg(&last_value, last, ret);
26943+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26944 } while (unlikely(last != ret));
26945
26946 return ret;
26947diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26948index bae6c60..b438619 100644
26949--- a/arch/x86/kernel/reboot.c
26950+++ b/arch/x86/kernel/reboot.c
26951@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26952
26953 void __noreturn machine_real_restart(unsigned int type)
26954 {
26955+
26956+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26957+ struct desc_struct *gdt;
26958+#endif
26959+
26960 local_irq_disable();
26961
26962 /*
26963@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26964
26965 /* Jump to the identity-mapped low memory code */
26966 #ifdef CONFIG_X86_32
26967- asm volatile("jmpl *%0" : :
26968+
26969+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26970+ gdt = get_cpu_gdt_table(smp_processor_id());
26971+ pax_open_kernel();
26972+#ifdef CONFIG_PAX_MEMORY_UDEREF
26973+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26974+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26975+ loadsegment(ds, __KERNEL_DS);
26976+ loadsegment(es, __KERNEL_DS);
26977+ loadsegment(ss, __KERNEL_DS);
26978+#endif
26979+#ifdef CONFIG_PAX_KERNEXEC
26980+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26981+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26982+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26983+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26984+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26985+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26986+#endif
26987+ pax_close_kernel();
26988+#endif
26989+
26990+ asm volatile("ljmpl *%0" : :
26991 "rm" (real_mode_header->machine_real_restart_asm),
26992 "a" (type));
26993 #else
26994@@ -501,7 +528,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26995 * This means that this function can never return, it can misbehave
26996 * by not rebooting properly and hanging.
26997 */
26998-static void native_machine_emergency_restart(void)
26999+static void __noreturn native_machine_emergency_restart(void)
27000 {
27001 int i;
27002 int attempt = 0;
27003@@ -621,13 +648,13 @@ void native_machine_shutdown(void)
27004 #endif
27005 }
27006
27007-static void __machine_emergency_restart(int emergency)
27008+static void __noreturn __machine_emergency_restart(int emergency)
27009 {
27010 reboot_emergency = emergency;
27011 machine_ops.emergency_restart();
27012 }
27013
27014-static void native_machine_restart(char *__unused)
27015+static void __noreturn native_machine_restart(char *__unused)
27016 {
27017 pr_notice("machine restart\n");
27018
27019@@ -636,7 +663,7 @@ static void native_machine_restart(char *__unused)
27020 __machine_emergency_restart(0);
27021 }
27022
27023-static void native_machine_halt(void)
27024+static void __noreturn native_machine_halt(void)
27025 {
27026 /* Stop other cpus and apics */
27027 machine_shutdown();
27028@@ -646,7 +673,7 @@ static void native_machine_halt(void)
27029 stop_this_cpu(NULL);
27030 }
27031
27032-static void native_machine_power_off(void)
27033+static void __noreturn native_machine_power_off(void)
27034 {
27035 if (pm_power_off) {
27036 if (!reboot_force)
27037@@ -655,9 +682,10 @@ static void native_machine_power_off(void)
27038 }
27039 /* A fallback in case there is no PM info available */
27040 tboot_shutdown(TB_SHUTDOWN_HALT);
27041+ unreachable();
27042 }
27043
27044-struct machine_ops machine_ops = {
27045+struct machine_ops machine_ops __read_only = {
27046 .power_off = native_machine_power_off,
27047 .shutdown = native_machine_shutdown,
27048 .emergency_restart = native_machine_emergency_restart,
27049diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27050index c8e41e9..64049ef 100644
27051--- a/arch/x86/kernel/reboot_fixups_32.c
27052+++ b/arch/x86/kernel/reboot_fixups_32.c
27053@@ -57,7 +57,7 @@ struct device_fixup {
27054 unsigned int vendor;
27055 unsigned int device;
27056 void (*reboot_fixup)(struct pci_dev *);
27057-};
27058+} __do_const;
27059
27060 /*
27061 * PCI ids solely used for fixups_table go here
27062diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27063index 3fd2c69..a444264 100644
27064--- a/arch/x86/kernel/relocate_kernel_64.S
27065+++ b/arch/x86/kernel/relocate_kernel_64.S
27066@@ -96,8 +96,7 @@ relocate_kernel:
27067
27068 /* jump to identity mapped page */
27069 addq $(identity_mapped - relocate_kernel), %r8
27070- pushq %r8
27071- ret
27072+ jmp *%r8
27073
27074 identity_mapped:
27075 /* set return address to 0 if not preserving context */
27076diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27077index ab4734e..c4ca0eb 100644
27078--- a/arch/x86/kernel/setup.c
27079+++ b/arch/x86/kernel/setup.c
27080@@ -110,6 +110,7 @@
27081 #include <asm/mce.h>
27082 #include <asm/alternative.h>
27083 #include <asm/prom.h>
27084+#include <asm/boot.h>
27085
27086 /*
27087 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27088@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27089 #endif
27090
27091
27092-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27093-__visible unsigned long mmu_cr4_features;
27094+#ifdef CONFIG_X86_64
27095+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27096+#elif defined(CONFIG_X86_PAE)
27097+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27098 #else
27099-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27100+__visible unsigned long mmu_cr4_features __read_only;
27101 #endif
27102
27103+void set_in_cr4(unsigned long mask)
27104+{
27105+ unsigned long cr4 = read_cr4();
27106+
27107+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27108+ return;
27109+
27110+ pax_open_kernel();
27111+ mmu_cr4_features |= mask;
27112+ pax_close_kernel();
27113+
27114+ if (trampoline_cr4_features)
27115+ *trampoline_cr4_features = mmu_cr4_features;
27116+ cr4 |= mask;
27117+ write_cr4(cr4);
27118+}
27119+EXPORT_SYMBOL(set_in_cr4);
27120+
27121+void clear_in_cr4(unsigned long mask)
27122+{
27123+ unsigned long cr4 = read_cr4();
27124+
27125+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27126+ return;
27127+
27128+ pax_open_kernel();
27129+ mmu_cr4_features &= ~mask;
27130+ pax_close_kernel();
27131+
27132+ if (trampoline_cr4_features)
27133+ *trampoline_cr4_features = mmu_cr4_features;
27134+ cr4 &= ~mask;
27135+ write_cr4(cr4);
27136+}
27137+EXPORT_SYMBOL(clear_in_cr4);
27138+
27139 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27140 int bootloader_type, bootloader_version;
27141
27142@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27143 * area (640->1Mb) as ram even though it is not.
27144 * take them out.
27145 */
27146- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27147+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27148
27149 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27150 }
27151@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27152 /* called before trim_bios_range() to spare extra sanitize */
27153 static void __init e820_add_kernel_range(void)
27154 {
27155- u64 start = __pa_symbol(_text);
27156+ u64 start = __pa_symbol(ktla_ktva(_text));
27157 u64 size = __pa_symbol(_end) - start;
27158
27159 /*
27160@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27161
27162 void __init setup_arch(char **cmdline_p)
27163 {
27164+#ifdef CONFIG_X86_32
27165+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27166+#else
27167 memblock_reserve(__pa_symbol(_text),
27168 (unsigned long)__bss_stop - (unsigned long)_text);
27169+#endif
27170
27171 early_reserve_initrd();
27172
27173@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27174
27175 if (!boot_params.hdr.root_flags)
27176 root_mountflags &= ~MS_RDONLY;
27177- init_mm.start_code = (unsigned long) _text;
27178- init_mm.end_code = (unsigned long) _etext;
27179+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27180+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27181 init_mm.end_data = (unsigned long) _edata;
27182 init_mm.brk = _brk_end;
27183
27184 mpx_mm_init(&init_mm);
27185
27186- code_resource.start = __pa_symbol(_text);
27187- code_resource.end = __pa_symbol(_etext)-1;
27188- data_resource.start = __pa_symbol(_etext);
27189+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27190+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27191+ data_resource.start = __pa_symbol(_sdata);
27192 data_resource.end = __pa_symbol(_edata)-1;
27193 bss_resource.start = __pa_symbol(__bss_start);
27194 bss_resource.end = __pa_symbol(__bss_stop)-1;
27195diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27196index e4fcb87..9c06c55 100644
27197--- a/arch/x86/kernel/setup_percpu.c
27198+++ b/arch/x86/kernel/setup_percpu.c
27199@@ -21,19 +21,17 @@
27200 #include <asm/cpu.h>
27201 #include <asm/stackprotector.h>
27202
27203-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27204+#ifdef CONFIG_SMP
27205+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27206 EXPORT_PER_CPU_SYMBOL(cpu_number);
27207+#endif
27208
27209-#ifdef CONFIG_X86_64
27210 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27211-#else
27212-#define BOOT_PERCPU_OFFSET 0
27213-#endif
27214
27215 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27216 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27217
27218-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27219+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27220 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27221 };
27222 EXPORT_SYMBOL(__per_cpu_offset);
27223@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27224 {
27225 #ifdef CONFIG_NEED_MULTIPLE_NODES
27226 pg_data_t *last = NULL;
27227- unsigned int cpu;
27228+ int cpu;
27229
27230 for_each_possible_cpu(cpu) {
27231 int node = early_cpu_to_node(cpu);
27232@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27233 {
27234 #ifdef CONFIG_X86_32
27235 struct desc_struct gdt;
27236+ unsigned long base = per_cpu_offset(cpu);
27237
27238- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27239- 0x2 | DESCTYPE_S, 0x8);
27240- gdt.s = 1;
27241+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27242+ 0x83 | DESCTYPE_S, 0xC);
27243 write_gdt_entry(get_cpu_gdt_table(cpu),
27244 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27245 #endif
27246@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27247 /* alrighty, percpu areas up and running */
27248 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27249 for_each_possible_cpu(cpu) {
27250+#ifdef CONFIG_CC_STACKPROTECTOR
27251+#ifdef CONFIG_X86_32
27252+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27253+#endif
27254+#endif
27255 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27256 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27257 per_cpu(cpu_number, cpu) = cpu;
27258@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27259 */
27260 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27261 #endif
27262+#ifdef CONFIG_CC_STACKPROTECTOR
27263+#ifdef CONFIG_X86_32
27264+ if (!cpu)
27265+ per_cpu(stack_canary.canary, cpu) = canary;
27266+#endif
27267+#endif
27268 /*
27269 * Up to this point, the boot CPU has been using .init.data
27270 * area. Reload any changed state for the boot CPU.
27271diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27272index ed37a76..39f936e 100644
27273--- a/arch/x86/kernel/signal.c
27274+++ b/arch/x86/kernel/signal.c
27275@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27276 * Align the stack pointer according to the i386 ABI,
27277 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27278 */
27279- sp = ((sp + 4) & -16ul) - 4;
27280+ sp = ((sp - 12) & -16ul) - 4;
27281 #else /* !CONFIG_X86_32 */
27282 sp = round_down(sp, 16) - 8;
27283 #endif
27284@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27285 }
27286
27287 if (current->mm->context.vdso)
27288- restorer = current->mm->context.vdso +
27289- selected_vdso32->sym___kernel_sigreturn;
27290+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27291 else
27292- restorer = &frame->retcode;
27293+ restorer = (void __user *)&frame->retcode;
27294 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27295 restorer = ksig->ka.sa.sa_restorer;
27296
27297@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27298 * reasons and because gdb uses it as a signature to notice
27299 * signal handler stack frames.
27300 */
27301- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27302+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27303
27304 if (err)
27305 return -EFAULT;
27306@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27307 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27308
27309 /* Set up to return from userspace. */
27310- restorer = current->mm->context.vdso +
27311- selected_vdso32->sym___kernel_rt_sigreturn;
27312+ if (current->mm->context.vdso)
27313+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27314+ else
27315+ restorer = (void __user *)&frame->retcode;
27316 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27317 restorer = ksig->ka.sa.sa_restorer;
27318 put_user_ex(restorer, &frame->pretcode);
27319@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27320 * reasons and because gdb uses it as a signature to notice
27321 * signal handler stack frames.
27322 */
27323- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27324+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27325 } put_user_catch(err);
27326
27327 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27328@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27329 {
27330 int usig = signr_convert(ksig->sig);
27331 sigset_t *set = sigmask_to_save();
27332- compat_sigset_t *cset = (compat_sigset_t *) set;
27333+ sigset_t sigcopy;
27334+ compat_sigset_t *cset;
27335+
27336+ sigcopy = *set;
27337+
27338+ cset = (compat_sigset_t *) &sigcopy;
27339
27340 /* Set up the stack frame */
27341 if (is_ia32_frame()) {
27342@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27343 } else if (is_x32_frame()) {
27344 return x32_setup_rt_frame(ksig, cset, regs);
27345 } else {
27346- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27347+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27348 }
27349 }
27350
27351diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27352index be8e1bd..a3d93fa 100644
27353--- a/arch/x86/kernel/smp.c
27354+++ b/arch/x86/kernel/smp.c
27355@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27356
27357 __setup("nonmi_ipi", nonmi_ipi_setup);
27358
27359-struct smp_ops smp_ops = {
27360+struct smp_ops smp_ops __read_only = {
27361 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27362 .smp_prepare_cpus = native_smp_prepare_cpus,
27363 .smp_cpus_done = native_smp_cpus_done,
27364diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27365index 6d7022c..4feb6be 100644
27366--- a/arch/x86/kernel/smpboot.c
27367+++ b/arch/x86/kernel/smpboot.c
27368@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27369
27370 enable_start_cpu0 = 0;
27371
27372-#ifdef CONFIG_X86_32
27373+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27374+ barrier();
27375+
27376 /* switch away from the initial page table */
27377+#ifdef CONFIG_PAX_PER_CPU_PGD
27378+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27379+#else
27380 load_cr3(swapper_pg_dir);
27381+#endif
27382 __flush_tlb_all();
27383-#endif
27384
27385- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27386- barrier();
27387 /*
27388 * Check TSC synchronization with the BP:
27389 */
27390@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27391 alternatives_enable_smp();
27392
27393 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27394- (THREAD_SIZE + task_stack_page(idle))) - 1);
27395+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27396 per_cpu(current_task, cpu) = idle;
27397+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27398
27399 #ifdef CONFIG_X86_32
27400 /* Stack for startup_32 can be just as for start_secondary onwards */
27401@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27402 clear_tsk_thread_flag(idle, TIF_FORK);
27403 initial_gs = per_cpu_offset(cpu);
27404 #endif
27405- per_cpu(kernel_stack, cpu) =
27406- (unsigned long)task_stack_page(idle) -
27407- KERNEL_STACK_OFFSET + THREAD_SIZE;
27408+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27409+ pax_open_kernel();
27410 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27411+ pax_close_kernel();
27412 initial_code = (unsigned long)start_secondary;
27413 stack_start = idle->thread.sp;
27414
27415@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27416 /* the FPU context is blank, nobody can own it */
27417 __cpu_disable_lazy_restore(cpu);
27418
27419+#ifdef CONFIG_PAX_PER_CPU_PGD
27420+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27421+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27422+ KERNEL_PGD_PTRS);
27423+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27424+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27425+ KERNEL_PGD_PTRS);
27426+#endif
27427+
27428 err = do_boot_cpu(apicid, cpu, tidle);
27429 if (err) {
27430 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27431diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27432index 9b4d51d..5d28b58 100644
27433--- a/arch/x86/kernel/step.c
27434+++ b/arch/x86/kernel/step.c
27435@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27436 struct desc_struct *desc;
27437 unsigned long base;
27438
27439- seg &= ~7UL;
27440+ seg >>= 3;
27441
27442 mutex_lock(&child->mm->context.lock);
27443- if (unlikely((seg >> 3) >= child->mm->context.size))
27444+ if (unlikely(seg >= child->mm->context.size))
27445 addr = -1L; /* bogus selector, access would fault */
27446 else {
27447 desc = child->mm->context.ldt + seg;
27448@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27449 addr += base;
27450 }
27451 mutex_unlock(&child->mm->context.lock);
27452- }
27453+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27454+ addr = ktla_ktva(addr);
27455
27456 return addr;
27457 }
27458@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27459 unsigned char opcode[15];
27460 unsigned long addr = convert_ip_to_linear(child, regs);
27461
27462+ if (addr == -EINVAL)
27463+ return 0;
27464+
27465 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27466 for (i = 0; i < copied; i++) {
27467 switch (opcode[i]) {
27468diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27469new file mode 100644
27470index 0000000..5877189
27471--- /dev/null
27472+++ b/arch/x86/kernel/sys_i386_32.c
27473@@ -0,0 +1,189 @@
27474+/*
27475+ * This file contains various random system calls that
27476+ * have a non-standard calling sequence on the Linux/i386
27477+ * platform.
27478+ */
27479+
27480+#include <linux/errno.h>
27481+#include <linux/sched.h>
27482+#include <linux/mm.h>
27483+#include <linux/fs.h>
27484+#include <linux/smp.h>
27485+#include <linux/sem.h>
27486+#include <linux/msg.h>
27487+#include <linux/shm.h>
27488+#include <linux/stat.h>
27489+#include <linux/syscalls.h>
27490+#include <linux/mman.h>
27491+#include <linux/file.h>
27492+#include <linux/utsname.h>
27493+#include <linux/ipc.h>
27494+#include <linux/elf.h>
27495+
27496+#include <linux/uaccess.h>
27497+#include <linux/unistd.h>
27498+
27499+#include <asm/syscalls.h>
27500+
27501+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27502+{
27503+ unsigned long pax_task_size = TASK_SIZE;
27504+
27505+#ifdef CONFIG_PAX_SEGMEXEC
27506+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27507+ pax_task_size = SEGMEXEC_TASK_SIZE;
27508+#endif
27509+
27510+ if (flags & MAP_FIXED)
27511+ if (len > pax_task_size || addr > pax_task_size - len)
27512+ return -EINVAL;
27513+
27514+ return 0;
27515+}
27516+
27517+/*
27518+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27519+ */
27520+static unsigned long get_align_mask(void)
27521+{
27522+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27523+ return 0;
27524+
27525+ if (!(current->flags & PF_RANDOMIZE))
27526+ return 0;
27527+
27528+ return va_align.mask;
27529+}
27530+
27531+unsigned long
27532+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27533+ unsigned long len, unsigned long pgoff, unsigned long flags)
27534+{
27535+ struct mm_struct *mm = current->mm;
27536+ struct vm_area_struct *vma;
27537+ unsigned long pax_task_size = TASK_SIZE;
27538+ struct vm_unmapped_area_info info;
27539+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27540+
27541+#ifdef CONFIG_PAX_SEGMEXEC
27542+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27543+ pax_task_size = SEGMEXEC_TASK_SIZE;
27544+#endif
27545+
27546+ pax_task_size -= PAGE_SIZE;
27547+
27548+ if (len > pax_task_size)
27549+ return -ENOMEM;
27550+
27551+ if (flags & MAP_FIXED)
27552+ return addr;
27553+
27554+#ifdef CONFIG_PAX_RANDMMAP
27555+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27556+#endif
27557+
27558+ if (addr) {
27559+ addr = PAGE_ALIGN(addr);
27560+ if (pax_task_size - len >= addr) {
27561+ vma = find_vma(mm, addr);
27562+ if (check_heap_stack_gap(vma, addr, len, offset))
27563+ return addr;
27564+ }
27565+ }
27566+
27567+ info.flags = 0;
27568+ info.length = len;
27569+ info.align_mask = filp ? get_align_mask() : 0;
27570+ info.align_offset = pgoff << PAGE_SHIFT;
27571+ info.threadstack_offset = offset;
27572+
27573+#ifdef CONFIG_PAX_PAGEEXEC
27574+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27575+ info.low_limit = 0x00110000UL;
27576+ info.high_limit = mm->start_code;
27577+
27578+#ifdef CONFIG_PAX_RANDMMAP
27579+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27580+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27581+#endif
27582+
27583+ if (info.low_limit < info.high_limit) {
27584+ addr = vm_unmapped_area(&info);
27585+ if (!IS_ERR_VALUE(addr))
27586+ return addr;
27587+ }
27588+ } else
27589+#endif
27590+
27591+ info.low_limit = mm->mmap_base;
27592+ info.high_limit = pax_task_size;
27593+
27594+ return vm_unmapped_area(&info);
27595+}
27596+
27597+unsigned long
27598+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27599+ const unsigned long len, const unsigned long pgoff,
27600+ const unsigned long flags)
27601+{
27602+ struct vm_area_struct *vma;
27603+ struct mm_struct *mm = current->mm;
27604+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27605+ struct vm_unmapped_area_info info;
27606+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27607+
27608+#ifdef CONFIG_PAX_SEGMEXEC
27609+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27610+ pax_task_size = SEGMEXEC_TASK_SIZE;
27611+#endif
27612+
27613+ pax_task_size -= PAGE_SIZE;
27614+
27615+ /* requested length too big for entire address space */
27616+ if (len > pax_task_size)
27617+ return -ENOMEM;
27618+
27619+ if (flags & MAP_FIXED)
27620+ return addr;
27621+
27622+#ifdef CONFIG_PAX_PAGEEXEC
27623+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27624+ goto bottomup;
27625+#endif
27626+
27627+#ifdef CONFIG_PAX_RANDMMAP
27628+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27629+#endif
27630+
27631+ /* requesting a specific address */
27632+ if (addr) {
27633+ addr = PAGE_ALIGN(addr);
27634+ if (pax_task_size - len >= addr) {
27635+ vma = find_vma(mm, addr);
27636+ if (check_heap_stack_gap(vma, addr, len, offset))
27637+ return addr;
27638+ }
27639+ }
27640+
27641+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27642+ info.length = len;
27643+ info.low_limit = PAGE_SIZE;
27644+ info.high_limit = mm->mmap_base;
27645+ info.align_mask = filp ? get_align_mask() : 0;
27646+ info.align_offset = pgoff << PAGE_SHIFT;
27647+ info.threadstack_offset = offset;
27648+
27649+ addr = vm_unmapped_area(&info);
27650+ if (!(addr & ~PAGE_MASK))
27651+ return addr;
27652+ VM_BUG_ON(addr != -ENOMEM);
27653+
27654+bottomup:
27655+ /*
27656+ * A failed mmap() very likely causes application failure,
27657+ * so fall back to the bottom-up function here. This scenario
27658+ * can happen with large stack limits and large mmap()
27659+ * allocations.
27660+ */
27661+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27662+}
27663diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27664index 30277e2..5664a29 100644
27665--- a/arch/x86/kernel/sys_x86_64.c
27666+++ b/arch/x86/kernel/sys_x86_64.c
27667@@ -81,8 +81,8 @@ out:
27668 return error;
27669 }
27670
27671-static void find_start_end(unsigned long flags, unsigned long *begin,
27672- unsigned long *end)
27673+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27674+ unsigned long *begin, unsigned long *end)
27675 {
27676 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27677 unsigned long new_begin;
27678@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27679 *begin = new_begin;
27680 }
27681 } else {
27682- *begin = current->mm->mmap_legacy_base;
27683+ *begin = mm->mmap_legacy_base;
27684 *end = TASK_SIZE;
27685 }
27686 }
27687@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27688 struct vm_area_struct *vma;
27689 struct vm_unmapped_area_info info;
27690 unsigned long begin, end;
27691+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27692
27693 if (flags & MAP_FIXED)
27694 return addr;
27695
27696- find_start_end(flags, &begin, &end);
27697+ find_start_end(mm, flags, &begin, &end);
27698
27699 if (len > end)
27700 return -ENOMEM;
27701
27702+#ifdef CONFIG_PAX_RANDMMAP
27703+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27704+#endif
27705+
27706 if (addr) {
27707 addr = PAGE_ALIGN(addr);
27708 vma = find_vma(mm, addr);
27709- if (end - len >= addr &&
27710- (!vma || addr + len <= vma->vm_start))
27711+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27712 return addr;
27713 }
27714
27715@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27716 info.high_limit = end;
27717 info.align_mask = filp ? get_align_mask() : 0;
27718 info.align_offset = pgoff << PAGE_SHIFT;
27719+ info.threadstack_offset = offset;
27720 return vm_unmapped_area(&info);
27721 }
27722
27723@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27724 struct mm_struct *mm = current->mm;
27725 unsigned long addr = addr0;
27726 struct vm_unmapped_area_info info;
27727+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27728
27729 /* requested length too big for entire address space */
27730 if (len > TASK_SIZE)
27731@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27732 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27733 goto bottomup;
27734
27735+#ifdef CONFIG_PAX_RANDMMAP
27736+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27737+#endif
27738+
27739 /* requesting a specific address */
27740 if (addr) {
27741 addr = PAGE_ALIGN(addr);
27742 vma = find_vma(mm, addr);
27743- if (TASK_SIZE - len >= addr &&
27744- (!vma || addr + len <= vma->vm_start))
27745+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27746 return addr;
27747 }
27748
27749@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27750 info.high_limit = mm->mmap_base;
27751 info.align_mask = filp ? get_align_mask() : 0;
27752 info.align_offset = pgoff << PAGE_SHIFT;
27753+ info.threadstack_offset = offset;
27754 addr = vm_unmapped_area(&info);
27755 if (!(addr & ~PAGE_MASK))
27756 return addr;
27757diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27758index 91a4496..bb87552 100644
27759--- a/arch/x86/kernel/tboot.c
27760+++ b/arch/x86/kernel/tboot.c
27761@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27762
27763 void tboot_shutdown(u32 shutdown_type)
27764 {
27765- void (*shutdown)(void);
27766+ void (* __noreturn shutdown)(void);
27767
27768 if (!tboot_enabled())
27769 return;
27770@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27771
27772 switch_to_tboot_pt();
27773
27774- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27775+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27776 shutdown();
27777
27778 /* should not reach here */
27779@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27780 return -ENODEV;
27781 }
27782
27783-static atomic_t ap_wfs_count;
27784+static atomic_unchecked_t ap_wfs_count;
27785
27786 static int tboot_wait_for_aps(int num_aps)
27787 {
27788@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27789 {
27790 switch (action) {
27791 case CPU_DYING:
27792- atomic_inc(&ap_wfs_count);
27793+ atomic_inc_unchecked(&ap_wfs_count);
27794 if (num_online_cpus() == 1)
27795- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27796+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27797 return NOTIFY_BAD;
27798 break;
27799 }
27800@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27801
27802 tboot_create_trampoline();
27803
27804- atomic_set(&ap_wfs_count, 0);
27805+ atomic_set_unchecked(&ap_wfs_count, 0);
27806 register_hotcpu_notifier(&tboot_cpu_notifier);
27807
27808 #ifdef CONFIG_DEBUG_FS
27809diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27810index 25adc0e..1df4349 100644
27811--- a/arch/x86/kernel/time.c
27812+++ b/arch/x86/kernel/time.c
27813@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27814 {
27815 unsigned long pc = instruction_pointer(regs);
27816
27817- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27818+ if (!user_mode(regs) && in_lock_functions(pc)) {
27819 #ifdef CONFIG_FRAME_POINTER
27820- return *(unsigned long *)(regs->bp + sizeof(long));
27821+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27822 #else
27823 unsigned long *sp =
27824 (unsigned long *)kernel_stack_pointer(regs);
27825@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27826 * or above a saved flags. Eflags has bits 22-31 zero,
27827 * kernel addresses don't.
27828 */
27829+
27830+#ifdef CONFIG_PAX_KERNEXEC
27831+ return ktla_ktva(sp[0]);
27832+#else
27833 if (sp[0] >> 22)
27834 return sp[0];
27835 if (sp[1] >> 22)
27836 return sp[1];
27837 #endif
27838+
27839+#endif
27840 }
27841 return pc;
27842 }
27843diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27844index 7fc5e84..c6e445a 100644
27845--- a/arch/x86/kernel/tls.c
27846+++ b/arch/x86/kernel/tls.c
27847@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27848 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27849 return -EINVAL;
27850
27851+#ifdef CONFIG_PAX_SEGMEXEC
27852+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27853+ return -EINVAL;
27854+#endif
27855+
27856 set_tls_desc(p, idx, &info, 1);
27857
27858 return 0;
27859@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27860
27861 if (kbuf)
27862 info = kbuf;
27863- else if (__copy_from_user(infobuf, ubuf, count))
27864+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27865 return -EFAULT;
27866 else
27867 info = infobuf;
27868diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27869index 1c113db..287b42e 100644
27870--- a/arch/x86/kernel/tracepoint.c
27871+++ b/arch/x86/kernel/tracepoint.c
27872@@ -9,11 +9,11 @@
27873 #include <linux/atomic.h>
27874
27875 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27876-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27877+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27878 (unsigned long) trace_idt_table };
27879
27880 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27881-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27882+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27883
27884 static int trace_irq_vector_refcount;
27885 static DEFINE_MUTEX(irq_vector_mutex);
27886diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27887index 89f4e64..aa4149d 100644
27888--- a/arch/x86/kernel/traps.c
27889+++ b/arch/x86/kernel/traps.c
27890@@ -68,7 +68,7 @@
27891 #include <asm/proto.h>
27892
27893 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27894-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27895+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27896 #else
27897 #include <asm/processor-flags.h>
27898 #include <asm/setup.h>
27899@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27900 #endif
27901
27902 /* Must be page-aligned because the real IDT is used in a fixmap. */
27903-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27904+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27905
27906 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27907 EXPORT_SYMBOL_GPL(used_vectors);
27908@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27909 }
27910
27911 static nokprobe_inline int
27912-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27913+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27914 struct pt_regs *regs, long error_code)
27915 {
27916 #ifdef CONFIG_X86_32
27917- if (regs->flags & X86_VM_MASK) {
27918+ if (v8086_mode(regs)) {
27919 /*
27920 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27921 * On nmi (interrupt 2), do_trap should not be called.
27922@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27923 return -1;
27924 }
27925 #endif
27926- if (!user_mode(regs)) {
27927+ if (!user_mode_novm(regs)) {
27928 if (!fixup_exception(regs)) {
27929 tsk->thread.error_code = error_code;
27930 tsk->thread.trap_nr = trapnr;
27931+
27932+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27933+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27934+ str = "PAX: suspicious stack segment fault";
27935+#endif
27936+
27937 die(str, regs, error_code);
27938 }
27939+
27940+#ifdef CONFIG_PAX_REFCOUNT
27941+ if (trapnr == X86_TRAP_OF)
27942+ pax_report_refcount_overflow(regs);
27943+#endif
27944+
27945 return 0;
27946 }
27947
27948@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27949 }
27950
27951 static void
27952-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27953+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27954 long error_code, siginfo_t *info)
27955 {
27956 struct task_struct *tsk = current;
27957@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27958 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27959 printk_ratelimit()) {
27960 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27961- tsk->comm, tsk->pid, str,
27962+ tsk->comm, task_pid_nr(tsk), str,
27963 regs->ip, regs->sp, error_code);
27964 print_vma_addr(" in ", regs->ip);
27965 pr_cont("\n");
27966@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27967 tsk->thread.error_code = error_code;
27968 tsk->thread.trap_nr = X86_TRAP_DF;
27969
27970+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27971+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27972+ die("grsec: kernel stack overflow detected", regs, error_code);
27973+#endif
27974+
27975 #ifdef CONFIG_DOUBLEFAULT
27976 df_debug(regs, error_code);
27977 #endif
27978@@ -300,7 +317,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27979 goto exit;
27980 conditional_sti(regs);
27981
27982- if (!user_mode_vm(regs))
27983+ if (!user_mode(regs))
27984 die("bounds", regs, error_code);
27985
27986 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
27987@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27988 conditional_sti(regs);
27989
27990 #ifdef CONFIG_X86_32
27991- if (regs->flags & X86_VM_MASK) {
27992+ if (v8086_mode(regs)) {
27993 local_irq_enable();
27994 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27995 goto exit;
27996@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27997 #endif
27998
27999 tsk = current;
28000- if (!user_mode(regs)) {
28001+ if (!user_mode_novm(regs)) {
28002 if (fixup_exception(regs))
28003 goto exit;
28004
28005 tsk->thread.error_code = error_code;
28006 tsk->thread.trap_nr = X86_TRAP_GP;
28007 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28008- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28009+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28010+
28011+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28012+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28013+ die("PAX: suspicious general protection fault", regs, error_code);
28014+ else
28015+#endif
28016+
28017 die("general protection fault", regs, error_code);
28018+ }
28019 goto exit;
28020 }
28021
28022+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28023+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28024+ struct mm_struct *mm = tsk->mm;
28025+ unsigned long limit;
28026+
28027+ down_write(&mm->mmap_sem);
28028+ limit = mm->context.user_cs_limit;
28029+ if (limit < TASK_SIZE) {
28030+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28031+ up_write(&mm->mmap_sem);
28032+ return;
28033+ }
28034+ up_write(&mm->mmap_sem);
28035+ }
28036+#endif
28037+
28038 tsk->thread.error_code = error_code;
28039 tsk->thread.trap_nr = X86_TRAP_GP;
28040
28041@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28042 container_of(task_pt_regs(current),
28043 struct bad_iret_stack, regs);
28044
28045+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28046+ new_stack = s;
28047+
28048 /* Copy the IRET target to the new stack. */
28049 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28050
28051 /* Copy the remainder of the stack from the current stack. */
28052 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28053
28054- BUG_ON(!user_mode_vm(&new_stack->regs));
28055+ BUG_ON(!user_mode(&new_stack->regs));
28056 return new_stack;
28057 }
28058 NOKPROBE_SYMBOL(fixup_bad_iret);
28059@@ -566,7 +610,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28060 * then it's very likely the result of an icebp/int01 trap.
28061 * User wants a sigtrap for that.
28062 */
28063- if (!dr6 && user_mode_vm(regs))
28064+ if (!dr6 && user_mode(regs))
28065 user_icebp = 1;
28066
28067 /* Catch kmemcheck conditions first of all! */
28068@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28069 /* It's safe to allow irq's after DR6 has been saved */
28070 preempt_conditional_sti(regs);
28071
28072- if (regs->flags & X86_VM_MASK) {
28073+ if (v8086_mode(regs)) {
28074 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28075 X86_TRAP_DB);
28076 preempt_conditional_cli(regs);
28077@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28078 * We already checked v86 mode above, so we can check for kernel mode
28079 * by just checking the CPL of CS.
28080 */
28081- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28082+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28083 tsk->thread.debugreg6 &= ~DR_STEP;
28084 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28085 regs->flags &= ~X86_EFLAGS_TF;
28086@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28087 return;
28088 conditional_sti(regs);
28089
28090- if (!user_mode_vm(regs))
28091+ if (!user_mode(regs))
28092 {
28093 if (!fixup_exception(regs)) {
28094 task->thread.error_code = error_code;
28095diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28096index 5054497..139f8f8 100644
28097--- a/arch/x86/kernel/tsc.c
28098+++ b/arch/x86/kernel/tsc.c
28099@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28100 */
28101 smp_wmb();
28102
28103- ACCESS_ONCE(c2n->head) = data;
28104+ ACCESS_ONCE_RW(c2n->head) = data;
28105 }
28106
28107 /*
28108diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28109index 8b96a94..792b410 100644
28110--- a/arch/x86/kernel/uprobes.c
28111+++ b/arch/x86/kernel/uprobes.c
28112@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28113 int ret = NOTIFY_DONE;
28114
28115 /* We are only interested in userspace traps */
28116- if (regs && !user_mode_vm(regs))
28117+ if (regs && !user_mode(regs))
28118 return NOTIFY_DONE;
28119
28120 switch (val) {
28121@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28122
28123 if (nleft != rasize) {
28124 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28125- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28126+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28127
28128 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28129 }
28130diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28131index b9242ba..50c5edd 100644
28132--- a/arch/x86/kernel/verify_cpu.S
28133+++ b/arch/x86/kernel/verify_cpu.S
28134@@ -20,6 +20,7 @@
28135 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28136 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28137 * arch/x86/kernel/head_32.S: processor startup
28138+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28139 *
28140 * verify_cpu, returns the status of longmode and SSE in register %eax.
28141 * 0: Success 1: Failure
28142diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28143index e8edcf5..27f9344 100644
28144--- a/arch/x86/kernel/vm86_32.c
28145+++ b/arch/x86/kernel/vm86_32.c
28146@@ -44,6 +44,7 @@
28147 #include <linux/ptrace.h>
28148 #include <linux/audit.h>
28149 #include <linux/stddef.h>
28150+#include <linux/grsecurity.h>
28151
28152 #include <asm/uaccess.h>
28153 #include <asm/io.h>
28154@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28155 do_exit(SIGSEGV);
28156 }
28157
28158- tss = &per_cpu(init_tss, get_cpu());
28159+ tss = init_tss + get_cpu();
28160 current->thread.sp0 = current->thread.saved_sp0;
28161 current->thread.sysenter_cs = __KERNEL_CS;
28162 load_sp0(tss, &current->thread);
28163@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28164
28165 if (tsk->thread.saved_sp0)
28166 return -EPERM;
28167+
28168+#ifdef CONFIG_GRKERNSEC_VM86
28169+ if (!capable(CAP_SYS_RAWIO)) {
28170+ gr_handle_vm86();
28171+ return -EPERM;
28172+ }
28173+#endif
28174+
28175 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28176 offsetof(struct kernel_vm86_struct, vm86plus) -
28177 sizeof(info.regs));
28178@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28179 int tmp;
28180 struct vm86plus_struct __user *v86;
28181
28182+#ifdef CONFIG_GRKERNSEC_VM86
28183+ if (!capable(CAP_SYS_RAWIO)) {
28184+ gr_handle_vm86();
28185+ return -EPERM;
28186+ }
28187+#endif
28188+
28189 tsk = current;
28190 switch (cmd) {
28191 case VM86_REQUEST_IRQ:
28192@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28193 tsk->thread.saved_fs = info->regs32->fs;
28194 tsk->thread.saved_gs = get_user_gs(info->regs32);
28195
28196- tss = &per_cpu(init_tss, get_cpu());
28197+ tss = init_tss + get_cpu();
28198 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28199 if (cpu_has_sep)
28200 tsk->thread.sysenter_cs = 0;
28201@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28202 goto cannot_handle;
28203 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28204 goto cannot_handle;
28205- intr_ptr = (unsigned long __user *) (i << 2);
28206+ intr_ptr = (__force unsigned long __user *) (i << 2);
28207 if (get_user(segoffs, intr_ptr))
28208 goto cannot_handle;
28209 if ((segoffs >> 16) == BIOSSEG)
28210diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28211index 00bf300..129df8e 100644
28212--- a/arch/x86/kernel/vmlinux.lds.S
28213+++ b/arch/x86/kernel/vmlinux.lds.S
28214@@ -26,6 +26,13 @@
28215 #include <asm/page_types.h>
28216 #include <asm/cache.h>
28217 #include <asm/boot.h>
28218+#include <asm/segment.h>
28219+
28220+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28221+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28222+#else
28223+#define __KERNEL_TEXT_OFFSET 0
28224+#endif
28225
28226 #undef i386 /* in case the preprocessor is a 32bit one */
28227
28228@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28229
28230 PHDRS {
28231 text PT_LOAD FLAGS(5); /* R_E */
28232+#ifdef CONFIG_X86_32
28233+ module PT_LOAD FLAGS(5); /* R_E */
28234+#endif
28235+#ifdef CONFIG_XEN
28236+ rodata PT_LOAD FLAGS(5); /* R_E */
28237+#else
28238+ rodata PT_LOAD FLAGS(4); /* R__ */
28239+#endif
28240 data PT_LOAD FLAGS(6); /* RW_ */
28241-#ifdef CONFIG_X86_64
28242+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28243 #ifdef CONFIG_SMP
28244 percpu PT_LOAD FLAGS(6); /* RW_ */
28245 #endif
28246+ text.init PT_LOAD FLAGS(5); /* R_E */
28247+ text.exit PT_LOAD FLAGS(5); /* R_E */
28248 init PT_LOAD FLAGS(7); /* RWE */
28249-#endif
28250 note PT_NOTE FLAGS(0); /* ___ */
28251 }
28252
28253 SECTIONS
28254 {
28255 #ifdef CONFIG_X86_32
28256- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28257- phys_startup_32 = startup_32 - LOAD_OFFSET;
28258+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28259 #else
28260- . = __START_KERNEL;
28261- phys_startup_64 = startup_64 - LOAD_OFFSET;
28262+ . = __START_KERNEL;
28263 #endif
28264
28265 /* Text and read-only data */
28266- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28267- _text = .;
28268+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28269 /* bootstrapping code */
28270+#ifdef CONFIG_X86_32
28271+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28272+#else
28273+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28274+#endif
28275+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28276+ _text = .;
28277 HEAD_TEXT
28278 . = ALIGN(8);
28279 _stext = .;
28280@@ -104,13 +124,47 @@ SECTIONS
28281 IRQENTRY_TEXT
28282 *(.fixup)
28283 *(.gnu.warning)
28284- /* End of text section */
28285- _etext = .;
28286 } :text = 0x9090
28287
28288- NOTES :text :note
28289+ . += __KERNEL_TEXT_OFFSET;
28290
28291- EXCEPTION_TABLE(16) :text = 0x9090
28292+#ifdef CONFIG_X86_32
28293+ . = ALIGN(PAGE_SIZE);
28294+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28295+
28296+#ifdef CONFIG_PAX_KERNEXEC
28297+ MODULES_EXEC_VADDR = .;
28298+ BYTE(0)
28299+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28300+ . = ALIGN(HPAGE_SIZE) - 1;
28301+ MODULES_EXEC_END = .;
28302+#endif
28303+
28304+ } :module
28305+#endif
28306+
28307+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28308+ /* End of text section */
28309+ BYTE(0)
28310+ _etext = . - __KERNEL_TEXT_OFFSET;
28311+ }
28312+
28313+#ifdef CONFIG_X86_32
28314+ . = ALIGN(PAGE_SIZE);
28315+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28316+ . = ALIGN(PAGE_SIZE);
28317+ *(.empty_zero_page)
28318+ *(.initial_pg_fixmap)
28319+ *(.initial_pg_pmd)
28320+ *(.initial_page_table)
28321+ *(.swapper_pg_dir)
28322+ } :rodata
28323+#endif
28324+
28325+ . = ALIGN(PAGE_SIZE);
28326+ NOTES :rodata :note
28327+
28328+ EXCEPTION_TABLE(16) :rodata
28329
28330 #if defined(CONFIG_DEBUG_RODATA)
28331 /* .text should occupy whole number of pages */
28332@@ -122,16 +176,20 @@ SECTIONS
28333
28334 /* Data */
28335 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28336+
28337+#ifdef CONFIG_PAX_KERNEXEC
28338+ . = ALIGN(HPAGE_SIZE);
28339+#else
28340+ . = ALIGN(PAGE_SIZE);
28341+#endif
28342+
28343 /* Start of data section */
28344 _sdata = .;
28345
28346 /* init_task */
28347 INIT_TASK_DATA(THREAD_SIZE)
28348
28349-#ifdef CONFIG_X86_32
28350- /* 32 bit has nosave before _edata */
28351 NOSAVE_DATA
28352-#endif
28353
28354 PAGE_ALIGNED_DATA(PAGE_SIZE)
28355
28356@@ -174,12 +232,19 @@ SECTIONS
28357 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28358
28359 /* Init code and data - will be freed after init */
28360- . = ALIGN(PAGE_SIZE);
28361 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28362+ BYTE(0)
28363+
28364+#ifdef CONFIG_PAX_KERNEXEC
28365+ . = ALIGN(HPAGE_SIZE);
28366+#else
28367+ . = ALIGN(PAGE_SIZE);
28368+#endif
28369+
28370 __init_begin = .; /* paired with __init_end */
28371- }
28372+ } :init.begin
28373
28374-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28375+#ifdef CONFIG_SMP
28376 /*
28377 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28378 * output PHDR, so the next output section - .init.text - should
28379@@ -190,12 +255,27 @@ SECTIONS
28380 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28381 #endif
28382
28383- INIT_TEXT_SECTION(PAGE_SIZE)
28384-#ifdef CONFIG_X86_64
28385- :init
28386-#endif
28387+ . = ALIGN(PAGE_SIZE);
28388+ init_begin = .;
28389+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28390+ VMLINUX_SYMBOL(_sinittext) = .;
28391+ INIT_TEXT
28392+ . = ALIGN(PAGE_SIZE);
28393+ } :text.init
28394
28395- INIT_DATA_SECTION(16)
28396+ /*
28397+ * .exit.text is discard at runtime, not link time, to deal with
28398+ * references from .altinstructions and .eh_frame
28399+ */
28400+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28401+ EXIT_TEXT
28402+ VMLINUX_SYMBOL(_einittext) = .;
28403+ . = ALIGN(16);
28404+ } :text.exit
28405+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28406+
28407+ . = ALIGN(PAGE_SIZE);
28408+ INIT_DATA_SECTION(16) :init
28409
28410 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28411 __x86_cpu_dev_start = .;
28412@@ -266,19 +346,12 @@ SECTIONS
28413 }
28414
28415 . = ALIGN(8);
28416- /*
28417- * .exit.text is discard at runtime, not link time, to deal with
28418- * references from .altinstructions and .eh_frame
28419- */
28420- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28421- EXIT_TEXT
28422- }
28423
28424 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28425 EXIT_DATA
28426 }
28427
28428-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28429+#ifndef CONFIG_SMP
28430 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28431 #endif
28432
28433@@ -297,16 +370,10 @@ SECTIONS
28434 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28435 __smp_locks = .;
28436 *(.smp_locks)
28437- . = ALIGN(PAGE_SIZE);
28438 __smp_locks_end = .;
28439+ . = ALIGN(PAGE_SIZE);
28440 }
28441
28442-#ifdef CONFIG_X86_64
28443- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28444- NOSAVE_DATA
28445- }
28446-#endif
28447-
28448 /* BSS */
28449 . = ALIGN(PAGE_SIZE);
28450 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28451@@ -322,6 +389,7 @@ SECTIONS
28452 __brk_base = .;
28453 . += 64 * 1024; /* 64k alignment slop space */
28454 *(.brk_reservation) /* areas brk users have reserved */
28455+ . = ALIGN(HPAGE_SIZE);
28456 __brk_limit = .;
28457 }
28458
28459@@ -348,13 +416,12 @@ SECTIONS
28460 * for the boot processor.
28461 */
28462 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28463-INIT_PER_CPU(gdt_page);
28464 INIT_PER_CPU(irq_stack_union);
28465
28466 /*
28467 * Build-time check on the image size:
28468 */
28469-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28470+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28471 "kernel image bigger than KERNEL_IMAGE_SIZE");
28472
28473 #ifdef CONFIG_SMP
28474diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28475index 2dcc6ff..082dc7a 100644
28476--- a/arch/x86/kernel/vsyscall_64.c
28477+++ b/arch/x86/kernel/vsyscall_64.c
28478@@ -38,15 +38,13 @@
28479 #define CREATE_TRACE_POINTS
28480 #include "vsyscall_trace.h"
28481
28482-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28483+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28484
28485 static int __init vsyscall_setup(char *str)
28486 {
28487 if (str) {
28488 if (!strcmp("emulate", str))
28489 vsyscall_mode = EMULATE;
28490- else if (!strcmp("native", str))
28491- vsyscall_mode = NATIVE;
28492 else if (!strcmp("none", str))
28493 vsyscall_mode = NONE;
28494 else
28495@@ -264,8 +262,7 @@ do_ret:
28496 return true;
28497
28498 sigsegv:
28499- force_sig(SIGSEGV, current);
28500- return true;
28501+ do_group_exit(SIGKILL);
28502 }
28503
28504 /*
28505@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28506 static struct vm_area_struct gate_vma = {
28507 .vm_start = VSYSCALL_ADDR,
28508 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28509- .vm_page_prot = PAGE_READONLY_EXEC,
28510- .vm_flags = VM_READ | VM_EXEC,
28511+ .vm_page_prot = PAGE_READONLY,
28512+ .vm_flags = VM_READ,
28513 .vm_ops = &gate_vma_ops,
28514 };
28515
28516@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28517 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28518
28519 if (vsyscall_mode != NONE)
28520- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28521- vsyscall_mode == NATIVE
28522- ? PAGE_KERNEL_VSYSCALL
28523- : PAGE_KERNEL_VVAR);
28524+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28525
28526 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28527 (unsigned long)VSYSCALL_ADDR);
28528diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28529index 04068192..4d75aa6 100644
28530--- a/arch/x86/kernel/x8664_ksyms_64.c
28531+++ b/arch/x86/kernel/x8664_ksyms_64.c
28532@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28533 EXPORT_SYMBOL(copy_user_generic_unrolled);
28534 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28535 EXPORT_SYMBOL(__copy_user_nocache);
28536-EXPORT_SYMBOL(_copy_from_user);
28537-EXPORT_SYMBOL(_copy_to_user);
28538
28539 EXPORT_SYMBOL(copy_page);
28540 EXPORT_SYMBOL(clear_page);
28541@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28542 EXPORT_SYMBOL(___preempt_schedule_context);
28543 #endif
28544 #endif
28545+
28546+#ifdef CONFIG_PAX_PER_CPU_PGD
28547+EXPORT_SYMBOL(cpu_pgd);
28548+#endif
28549diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28550index 234b072..b7ab191 100644
28551--- a/arch/x86/kernel/x86_init.c
28552+++ b/arch/x86/kernel/x86_init.c
28553@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28554 static void default_nmi_init(void) { };
28555 static int default_i8042_detect(void) { return 1; };
28556
28557-struct x86_platform_ops x86_platform = {
28558+struct x86_platform_ops x86_platform __read_only = {
28559 .calibrate_tsc = native_calibrate_tsc,
28560 .get_wallclock = mach_get_cmos_time,
28561 .set_wallclock = mach_set_rtc_mmss,
28562@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28563 EXPORT_SYMBOL_GPL(x86_platform);
28564
28565 #if defined(CONFIG_PCI_MSI)
28566-struct x86_msi_ops x86_msi = {
28567+struct x86_msi_ops x86_msi __read_only = {
28568 .setup_msi_irqs = native_setup_msi_irqs,
28569 .compose_msi_msg = native_compose_msi_msg,
28570 .teardown_msi_irq = native_teardown_msi_irq,
28571@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28572 }
28573 #endif
28574
28575-struct x86_io_apic_ops x86_io_apic_ops = {
28576+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28577 .init = native_io_apic_init_mappings,
28578 .read = native_io_apic_read,
28579 .write = native_io_apic_write,
28580diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28581index 8be1e17..07dd990 100644
28582--- a/arch/x86/kernel/xsave.c
28583+++ b/arch/x86/kernel/xsave.c
28584@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28585
28586 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28587 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28588- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28589+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28590
28591 if (!use_xsave())
28592 return err;
28593
28594- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28595+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28596
28597 /*
28598 * Read the xstate_bv which we copied (directly from the cpu or
28599 * from the state in task struct) to the user buffers.
28600 */
28601- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28602+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28603
28604 /*
28605 * For legacy compatible, we always set FP/SSE bits in the bit
28606@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28607 */
28608 xstate_bv |= XSTATE_FPSSE;
28609
28610- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28611+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28612
28613 return err;
28614 }
28615@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28616 {
28617 int err;
28618
28619+ buf = (struct xsave_struct __user *)____m(buf);
28620 if (use_xsave())
28621 err = xsave_user(buf);
28622 else if (use_fxsr())
28623@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28624 */
28625 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28626 {
28627+ buf = (void __user *)____m(buf);
28628 if (use_xsave()) {
28629 if ((unsigned long)buf % 64 || fx_only) {
28630 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28631diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28632index 8a80737..bac4961 100644
28633--- a/arch/x86/kvm/cpuid.c
28634+++ b/arch/x86/kvm/cpuid.c
28635@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28636 struct kvm_cpuid2 *cpuid,
28637 struct kvm_cpuid_entry2 __user *entries)
28638 {
28639- int r;
28640+ int r, i;
28641
28642 r = -E2BIG;
28643 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28644 goto out;
28645 r = -EFAULT;
28646- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28647- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28648+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28649 goto out;
28650+ for (i = 0; i < cpuid->nent; ++i) {
28651+ struct kvm_cpuid_entry2 cpuid_entry;
28652+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28653+ goto out;
28654+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28655+ }
28656 vcpu->arch.cpuid_nent = cpuid->nent;
28657 kvm_apic_set_version(vcpu);
28658 kvm_x86_ops->cpuid_update(vcpu);
28659@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28660 struct kvm_cpuid2 *cpuid,
28661 struct kvm_cpuid_entry2 __user *entries)
28662 {
28663- int r;
28664+ int r, i;
28665
28666 r = -E2BIG;
28667 if (cpuid->nent < vcpu->arch.cpuid_nent)
28668 goto out;
28669 r = -EFAULT;
28670- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28671- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28672+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28673 goto out;
28674+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28675+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28676+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28677+ goto out;
28678+ }
28679 return 0;
28680
28681 out:
28682diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28683index b24c2d8..e1e4e259 100644
28684--- a/arch/x86/kvm/emulate.c
28685+++ b/arch/x86/kvm/emulate.c
28686@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28687 int cr = ctxt->modrm_reg;
28688 u64 efer = 0;
28689
28690- static u64 cr_reserved_bits[] = {
28691+ static const u64 cr_reserved_bits[] = {
28692 0xffffffff00000000ULL,
28693 0, 0, 0, /* CR3 checked later */
28694 CR4_RESERVED_BITS,
28695diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28696index d52dcf0..cec7e84 100644
28697--- a/arch/x86/kvm/lapic.c
28698+++ b/arch/x86/kvm/lapic.c
28699@@ -55,7 +55,7 @@
28700 #define APIC_BUS_CYCLE_NS 1
28701
28702 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28703-#define apic_debug(fmt, arg...)
28704+#define apic_debug(fmt, arg...) do {} while (0)
28705
28706 #define APIC_LVT_NUM 6
28707 /* 14 is the version for Xeon and Pentium 8.4.8*/
28708diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28709index fd49c86..77e1aa0 100644
28710--- a/arch/x86/kvm/paging_tmpl.h
28711+++ b/arch/x86/kvm/paging_tmpl.h
28712@@ -343,7 +343,7 @@ retry_walk:
28713 if (unlikely(kvm_is_error_hva(host_addr)))
28714 goto error;
28715
28716- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28717+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28718 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28719 goto error;
28720 walker->ptep_user[walker->level - 1] = ptep_user;
28721diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28722index 41dd038..de331cf 100644
28723--- a/arch/x86/kvm/svm.c
28724+++ b/arch/x86/kvm/svm.c
28725@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28726 int cpu = raw_smp_processor_id();
28727
28728 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28729+
28730+ pax_open_kernel();
28731 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28732+ pax_close_kernel();
28733+
28734 load_TR_desc();
28735 }
28736
28737@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28738 #endif
28739 #endif
28740
28741+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28742+ __set_fs(current_thread_info()->addr_limit);
28743+#endif
28744+
28745 reload_tss(vcpu);
28746
28747 local_irq_disable();
28748diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28749index d4c58d8..eaf2568 100644
28750--- a/arch/x86/kvm/vmx.c
28751+++ b/arch/x86/kvm/vmx.c
28752@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28753 #endif
28754 }
28755
28756-static void vmcs_clear_bits(unsigned long field, u32 mask)
28757+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28758 {
28759 vmcs_writel(field, vmcs_readl(field) & ~mask);
28760 }
28761
28762-static void vmcs_set_bits(unsigned long field, u32 mask)
28763+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28764 {
28765 vmcs_writel(field, vmcs_readl(field) | mask);
28766 }
28767@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28768 struct desc_struct *descs;
28769
28770 descs = (void *)gdt->address;
28771+
28772+ pax_open_kernel();
28773 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28774+ pax_close_kernel();
28775+
28776 load_TR_desc();
28777 }
28778
28779@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28780 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28781 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28782
28783+#ifdef CONFIG_PAX_PER_CPU_PGD
28784+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28785+#endif
28786+
28787 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28788 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28789 vmx->loaded_vmcs->cpu = cpu;
28790@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28791 * reads and returns guest's timestamp counter "register"
28792 * guest_tsc = host_tsc + tsc_offset -- 21.3
28793 */
28794-static u64 guest_read_tsc(void)
28795+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28796 {
28797 u64 host_tsc, tsc_offset;
28798
28799@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28800 unsigned long cr4;
28801
28802 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28803+
28804+#ifndef CONFIG_PAX_PER_CPU_PGD
28805 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28806+#endif
28807
28808 /* Save the most likely value for this task's CR4 in the VMCS. */
28809 cr4 = read_cr4();
28810@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28811 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28812 vmx->host_idt_base = dt.address;
28813
28814- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28815+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28816
28817 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28818 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28819@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28820 * page upon invalidation. No need to do anything if the
28821 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28822 */
28823- kvm_x86_ops->set_apic_access_page_addr = NULL;
28824+ pax_open_kernel();
28825+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28826+ pax_close_kernel();
28827 }
28828
28829- if (!cpu_has_vmx_tpr_shadow())
28830- kvm_x86_ops->update_cr8_intercept = NULL;
28831+ if (!cpu_has_vmx_tpr_shadow()) {
28832+ pax_open_kernel();
28833+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28834+ pax_close_kernel();
28835+ }
28836
28837 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28838 kvm_disable_largepages();
28839@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28840 if (!cpu_has_vmx_apicv())
28841 enable_apicv = 0;
28842
28843+ pax_open_kernel();
28844 if (enable_apicv)
28845- kvm_x86_ops->update_cr8_intercept = NULL;
28846+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28847 else {
28848- kvm_x86_ops->hwapic_irr_update = NULL;
28849- kvm_x86_ops->deliver_posted_interrupt = NULL;
28850- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28851+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28852+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28853+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28854 }
28855+ pax_close_kernel();
28856
28857 if (nested)
28858 nested_vmx_setup_ctls_msrs();
28859@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28860 "jmp 2f \n\t"
28861 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28862 "2: "
28863+
28864+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28865+ "ljmp %[cs],$3f\n\t"
28866+ "3: "
28867+#endif
28868+
28869 /* Save guest registers, load host registers, keep flags */
28870 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28871 "pop %0 \n\t"
28872@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28873 #endif
28874 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28875 [wordsize]"i"(sizeof(ulong))
28876+
28877+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28878+ ,[cs]"i"(__KERNEL_CS)
28879+#endif
28880+
28881 : "cc", "memory"
28882 #ifdef CONFIG_X86_64
28883 , "rax", "rbx", "rdi", "rsi"
28884@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28885 if (debugctlmsr)
28886 update_debugctlmsr(debugctlmsr);
28887
28888-#ifndef CONFIG_X86_64
28889+#ifdef CONFIG_X86_32
28890 /*
28891 * The sysexit path does not restore ds/es, so we must set them to
28892 * a reasonable value ourselves.
28893@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28894 * may be executed in interrupt context, which saves and restore segments
28895 * around it, nullifying its effect.
28896 */
28897- loadsegment(ds, __USER_DS);
28898- loadsegment(es, __USER_DS);
28899+ loadsegment(ds, __KERNEL_DS);
28900+ loadsegment(es, __KERNEL_DS);
28901+ loadsegment(ss, __KERNEL_DS);
28902+
28903+#ifdef CONFIG_PAX_KERNEXEC
28904+ loadsegment(fs, __KERNEL_PERCPU);
28905+#endif
28906+
28907+#ifdef CONFIG_PAX_MEMORY_UDEREF
28908+ __set_fs(current_thread_info()->addr_limit);
28909+#endif
28910+
28911 #endif
28912
28913 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28914diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28915index 64d76c1..e20a4c1 100644
28916--- a/arch/x86/kvm/x86.c
28917+++ b/arch/x86/kvm/x86.c
28918@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28919 {
28920 struct kvm *kvm = vcpu->kvm;
28921 int lm = is_long_mode(vcpu);
28922- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28923- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28924+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28925+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28926 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28927 : kvm->arch.xen_hvm_config.blob_size_32;
28928 u32 page_num = data & ~PAGE_MASK;
28929@@ -2809,6 +2809,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28930 if (n < msr_list.nmsrs)
28931 goto out;
28932 r = -EFAULT;
28933+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28934+ goto out;
28935 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28936 num_msrs_to_save * sizeof(u32)))
28937 goto out;
28938@@ -5745,7 +5747,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28939 };
28940 #endif
28941
28942-int kvm_arch_init(void *opaque)
28943+int kvm_arch_init(const void *opaque)
28944 {
28945 int r;
28946 struct kvm_x86_ops *ops = opaque;
28947diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28948index c1c1544..f90c9d5 100644
28949--- a/arch/x86/lguest/boot.c
28950+++ b/arch/x86/lguest/boot.c
28951@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28952 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28953 * Launcher to reboot us.
28954 */
28955-static void lguest_restart(char *reason)
28956+static __noreturn void lguest_restart(char *reason)
28957 {
28958 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28959+ BUG();
28960 }
28961
28962 /*G:050
28963diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28964index 00933d5..3a64af9 100644
28965--- a/arch/x86/lib/atomic64_386_32.S
28966+++ b/arch/x86/lib/atomic64_386_32.S
28967@@ -48,6 +48,10 @@ BEGIN(read)
28968 movl (v), %eax
28969 movl 4(v), %edx
28970 RET_ENDP
28971+BEGIN(read_unchecked)
28972+ movl (v), %eax
28973+ movl 4(v), %edx
28974+RET_ENDP
28975 #undef v
28976
28977 #define v %esi
28978@@ -55,6 +59,10 @@ BEGIN(set)
28979 movl %ebx, (v)
28980 movl %ecx, 4(v)
28981 RET_ENDP
28982+BEGIN(set_unchecked)
28983+ movl %ebx, (v)
28984+ movl %ecx, 4(v)
28985+RET_ENDP
28986 #undef v
28987
28988 #define v %esi
28989@@ -70,6 +78,20 @@ RET_ENDP
28990 BEGIN(add)
28991 addl %eax, (v)
28992 adcl %edx, 4(v)
28993+
28994+#ifdef CONFIG_PAX_REFCOUNT
28995+ jno 0f
28996+ subl %eax, (v)
28997+ sbbl %edx, 4(v)
28998+ int $4
28999+0:
29000+ _ASM_EXTABLE(0b, 0b)
29001+#endif
29002+
29003+RET_ENDP
29004+BEGIN(add_unchecked)
29005+ addl %eax, (v)
29006+ adcl %edx, 4(v)
29007 RET_ENDP
29008 #undef v
29009
29010@@ -77,6 +99,24 @@ RET_ENDP
29011 BEGIN(add_return)
29012 addl (v), %eax
29013 adcl 4(v), %edx
29014+
29015+#ifdef CONFIG_PAX_REFCOUNT
29016+ into
29017+1234:
29018+ _ASM_EXTABLE(1234b, 2f)
29019+#endif
29020+
29021+ movl %eax, (v)
29022+ movl %edx, 4(v)
29023+
29024+#ifdef CONFIG_PAX_REFCOUNT
29025+2:
29026+#endif
29027+
29028+RET_ENDP
29029+BEGIN(add_return_unchecked)
29030+ addl (v), %eax
29031+ adcl 4(v), %edx
29032 movl %eax, (v)
29033 movl %edx, 4(v)
29034 RET_ENDP
29035@@ -86,6 +126,20 @@ RET_ENDP
29036 BEGIN(sub)
29037 subl %eax, (v)
29038 sbbl %edx, 4(v)
29039+
29040+#ifdef CONFIG_PAX_REFCOUNT
29041+ jno 0f
29042+ addl %eax, (v)
29043+ adcl %edx, 4(v)
29044+ int $4
29045+0:
29046+ _ASM_EXTABLE(0b, 0b)
29047+#endif
29048+
29049+RET_ENDP
29050+BEGIN(sub_unchecked)
29051+ subl %eax, (v)
29052+ sbbl %edx, 4(v)
29053 RET_ENDP
29054 #undef v
29055
29056@@ -96,6 +150,27 @@ BEGIN(sub_return)
29057 sbbl $0, %edx
29058 addl (v), %eax
29059 adcl 4(v), %edx
29060+
29061+#ifdef CONFIG_PAX_REFCOUNT
29062+ into
29063+1234:
29064+ _ASM_EXTABLE(1234b, 2f)
29065+#endif
29066+
29067+ movl %eax, (v)
29068+ movl %edx, 4(v)
29069+
29070+#ifdef CONFIG_PAX_REFCOUNT
29071+2:
29072+#endif
29073+
29074+RET_ENDP
29075+BEGIN(sub_return_unchecked)
29076+ negl %edx
29077+ negl %eax
29078+ sbbl $0, %edx
29079+ addl (v), %eax
29080+ adcl 4(v), %edx
29081 movl %eax, (v)
29082 movl %edx, 4(v)
29083 RET_ENDP
29084@@ -105,6 +180,20 @@ RET_ENDP
29085 BEGIN(inc)
29086 addl $1, (v)
29087 adcl $0, 4(v)
29088+
29089+#ifdef CONFIG_PAX_REFCOUNT
29090+ jno 0f
29091+ subl $1, (v)
29092+ sbbl $0, 4(v)
29093+ int $4
29094+0:
29095+ _ASM_EXTABLE(0b, 0b)
29096+#endif
29097+
29098+RET_ENDP
29099+BEGIN(inc_unchecked)
29100+ addl $1, (v)
29101+ adcl $0, 4(v)
29102 RET_ENDP
29103 #undef v
29104
29105@@ -114,6 +203,26 @@ BEGIN(inc_return)
29106 movl 4(v), %edx
29107 addl $1, %eax
29108 adcl $0, %edx
29109+
29110+#ifdef CONFIG_PAX_REFCOUNT
29111+ into
29112+1234:
29113+ _ASM_EXTABLE(1234b, 2f)
29114+#endif
29115+
29116+ movl %eax, (v)
29117+ movl %edx, 4(v)
29118+
29119+#ifdef CONFIG_PAX_REFCOUNT
29120+2:
29121+#endif
29122+
29123+RET_ENDP
29124+BEGIN(inc_return_unchecked)
29125+ movl (v), %eax
29126+ movl 4(v), %edx
29127+ addl $1, %eax
29128+ adcl $0, %edx
29129 movl %eax, (v)
29130 movl %edx, 4(v)
29131 RET_ENDP
29132@@ -123,6 +232,20 @@ RET_ENDP
29133 BEGIN(dec)
29134 subl $1, (v)
29135 sbbl $0, 4(v)
29136+
29137+#ifdef CONFIG_PAX_REFCOUNT
29138+ jno 0f
29139+ addl $1, (v)
29140+ adcl $0, 4(v)
29141+ int $4
29142+0:
29143+ _ASM_EXTABLE(0b, 0b)
29144+#endif
29145+
29146+RET_ENDP
29147+BEGIN(dec_unchecked)
29148+ subl $1, (v)
29149+ sbbl $0, 4(v)
29150 RET_ENDP
29151 #undef v
29152
29153@@ -132,6 +255,26 @@ BEGIN(dec_return)
29154 movl 4(v), %edx
29155 subl $1, %eax
29156 sbbl $0, %edx
29157+
29158+#ifdef CONFIG_PAX_REFCOUNT
29159+ into
29160+1234:
29161+ _ASM_EXTABLE(1234b, 2f)
29162+#endif
29163+
29164+ movl %eax, (v)
29165+ movl %edx, 4(v)
29166+
29167+#ifdef CONFIG_PAX_REFCOUNT
29168+2:
29169+#endif
29170+
29171+RET_ENDP
29172+BEGIN(dec_return_unchecked)
29173+ movl (v), %eax
29174+ movl 4(v), %edx
29175+ subl $1, %eax
29176+ sbbl $0, %edx
29177 movl %eax, (v)
29178 movl %edx, 4(v)
29179 RET_ENDP
29180@@ -143,6 +286,13 @@ BEGIN(add_unless)
29181 adcl %edx, %edi
29182 addl (v), %eax
29183 adcl 4(v), %edx
29184+
29185+#ifdef CONFIG_PAX_REFCOUNT
29186+ into
29187+1234:
29188+ _ASM_EXTABLE(1234b, 2f)
29189+#endif
29190+
29191 cmpl %eax, %ecx
29192 je 3f
29193 1:
29194@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29195 1:
29196 addl $1, %eax
29197 adcl $0, %edx
29198+
29199+#ifdef CONFIG_PAX_REFCOUNT
29200+ into
29201+1234:
29202+ _ASM_EXTABLE(1234b, 2f)
29203+#endif
29204+
29205 movl %eax, (v)
29206 movl %edx, 4(v)
29207 movl $1, %eax
29208@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29209 movl 4(v), %edx
29210 subl $1, %eax
29211 sbbl $0, %edx
29212+
29213+#ifdef CONFIG_PAX_REFCOUNT
29214+ into
29215+1234:
29216+ _ASM_EXTABLE(1234b, 1f)
29217+#endif
29218+
29219 js 1f
29220 movl %eax, (v)
29221 movl %edx, 4(v)
29222diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29223index f5cc9eb..51fa319 100644
29224--- a/arch/x86/lib/atomic64_cx8_32.S
29225+++ b/arch/x86/lib/atomic64_cx8_32.S
29226@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29227 CFI_STARTPROC
29228
29229 read64 %ecx
29230+ pax_force_retaddr
29231 ret
29232 CFI_ENDPROC
29233 ENDPROC(atomic64_read_cx8)
29234
29235+ENTRY(atomic64_read_unchecked_cx8)
29236+ CFI_STARTPROC
29237+
29238+ read64 %ecx
29239+ pax_force_retaddr
29240+ ret
29241+ CFI_ENDPROC
29242+ENDPROC(atomic64_read_unchecked_cx8)
29243+
29244 ENTRY(atomic64_set_cx8)
29245 CFI_STARTPROC
29246
29247@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29248 cmpxchg8b (%esi)
29249 jne 1b
29250
29251+ pax_force_retaddr
29252 ret
29253 CFI_ENDPROC
29254 ENDPROC(atomic64_set_cx8)
29255
29256+ENTRY(atomic64_set_unchecked_cx8)
29257+ CFI_STARTPROC
29258+
29259+1:
29260+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29261+ * are atomic on 586 and newer */
29262+ cmpxchg8b (%esi)
29263+ jne 1b
29264+
29265+ pax_force_retaddr
29266+ ret
29267+ CFI_ENDPROC
29268+ENDPROC(atomic64_set_unchecked_cx8)
29269+
29270 ENTRY(atomic64_xchg_cx8)
29271 CFI_STARTPROC
29272
29273@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29274 cmpxchg8b (%esi)
29275 jne 1b
29276
29277+ pax_force_retaddr
29278 ret
29279 CFI_ENDPROC
29280 ENDPROC(atomic64_xchg_cx8)
29281
29282-.macro addsub_return func ins insc
29283-ENTRY(atomic64_\func\()_return_cx8)
29284+.macro addsub_return func ins insc unchecked=""
29285+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29286 CFI_STARTPROC
29287 SAVE ebp
29288 SAVE ebx
29289@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29290 movl %edx, %ecx
29291 \ins\()l %esi, %ebx
29292 \insc\()l %edi, %ecx
29293+
29294+.ifb \unchecked
29295+#ifdef CONFIG_PAX_REFCOUNT
29296+ into
29297+2:
29298+ _ASM_EXTABLE(2b, 3f)
29299+#endif
29300+.endif
29301+
29302 LOCK_PREFIX
29303 cmpxchg8b (%ebp)
29304 jne 1b
29305-
29306-10:
29307 movl %ebx, %eax
29308 movl %ecx, %edx
29309+
29310+.ifb \unchecked
29311+#ifdef CONFIG_PAX_REFCOUNT
29312+3:
29313+#endif
29314+.endif
29315+
29316 RESTORE edi
29317 RESTORE esi
29318 RESTORE ebx
29319 RESTORE ebp
29320+ pax_force_retaddr
29321 ret
29322 CFI_ENDPROC
29323-ENDPROC(atomic64_\func\()_return_cx8)
29324+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29325 .endm
29326
29327 addsub_return add add adc
29328 addsub_return sub sub sbb
29329+addsub_return add add adc _unchecked
29330+addsub_return sub sub sbb _unchecked
29331
29332-.macro incdec_return func ins insc
29333-ENTRY(atomic64_\func\()_return_cx8)
29334+.macro incdec_return func ins insc unchecked=""
29335+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29336 CFI_STARTPROC
29337 SAVE ebx
29338
29339@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29340 movl %edx, %ecx
29341 \ins\()l $1, %ebx
29342 \insc\()l $0, %ecx
29343+
29344+.ifb \unchecked
29345+#ifdef CONFIG_PAX_REFCOUNT
29346+ into
29347+2:
29348+ _ASM_EXTABLE(2b, 3f)
29349+#endif
29350+.endif
29351+
29352 LOCK_PREFIX
29353 cmpxchg8b (%esi)
29354 jne 1b
29355
29356-10:
29357 movl %ebx, %eax
29358 movl %ecx, %edx
29359+
29360+.ifb \unchecked
29361+#ifdef CONFIG_PAX_REFCOUNT
29362+3:
29363+#endif
29364+.endif
29365+
29366 RESTORE ebx
29367+ pax_force_retaddr
29368 ret
29369 CFI_ENDPROC
29370-ENDPROC(atomic64_\func\()_return_cx8)
29371+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29372 .endm
29373
29374 incdec_return inc add adc
29375 incdec_return dec sub sbb
29376+incdec_return inc add adc _unchecked
29377+incdec_return dec sub sbb _unchecked
29378
29379 ENTRY(atomic64_dec_if_positive_cx8)
29380 CFI_STARTPROC
29381@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29382 movl %edx, %ecx
29383 subl $1, %ebx
29384 sbb $0, %ecx
29385+
29386+#ifdef CONFIG_PAX_REFCOUNT
29387+ into
29388+1234:
29389+ _ASM_EXTABLE(1234b, 2f)
29390+#endif
29391+
29392 js 2f
29393 LOCK_PREFIX
29394 cmpxchg8b (%esi)
29395@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29396 movl %ebx, %eax
29397 movl %ecx, %edx
29398 RESTORE ebx
29399+ pax_force_retaddr
29400 ret
29401 CFI_ENDPROC
29402 ENDPROC(atomic64_dec_if_positive_cx8)
29403@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29404 movl %edx, %ecx
29405 addl %ebp, %ebx
29406 adcl %edi, %ecx
29407+
29408+#ifdef CONFIG_PAX_REFCOUNT
29409+ into
29410+1234:
29411+ _ASM_EXTABLE(1234b, 3f)
29412+#endif
29413+
29414 LOCK_PREFIX
29415 cmpxchg8b (%esi)
29416 jne 1b
29417@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29418 CFI_ADJUST_CFA_OFFSET -8
29419 RESTORE ebx
29420 RESTORE ebp
29421+ pax_force_retaddr
29422 ret
29423 4:
29424 cmpl %edx, 4(%esp)
29425@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29426 xorl %ecx, %ecx
29427 addl $1, %ebx
29428 adcl %edx, %ecx
29429+
29430+#ifdef CONFIG_PAX_REFCOUNT
29431+ into
29432+1234:
29433+ _ASM_EXTABLE(1234b, 3f)
29434+#endif
29435+
29436 LOCK_PREFIX
29437 cmpxchg8b (%esi)
29438 jne 1b
29439@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29440 movl $1, %eax
29441 3:
29442 RESTORE ebx
29443+ pax_force_retaddr
29444 ret
29445 CFI_ENDPROC
29446 ENDPROC(atomic64_inc_not_zero_cx8)
29447diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29448index e78b8eee..7e173a8 100644
29449--- a/arch/x86/lib/checksum_32.S
29450+++ b/arch/x86/lib/checksum_32.S
29451@@ -29,7 +29,8 @@
29452 #include <asm/dwarf2.h>
29453 #include <asm/errno.h>
29454 #include <asm/asm.h>
29455-
29456+#include <asm/segment.h>
29457+
29458 /*
29459 * computes a partial checksum, e.g. for TCP/UDP fragments
29460 */
29461@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29462
29463 #define ARGBASE 16
29464 #define FP 12
29465-
29466-ENTRY(csum_partial_copy_generic)
29467+
29468+ENTRY(csum_partial_copy_generic_to_user)
29469 CFI_STARTPROC
29470+
29471+#ifdef CONFIG_PAX_MEMORY_UDEREF
29472+ pushl_cfi %gs
29473+ popl_cfi %es
29474+ jmp csum_partial_copy_generic
29475+#endif
29476+
29477+ENTRY(csum_partial_copy_generic_from_user)
29478+
29479+#ifdef CONFIG_PAX_MEMORY_UDEREF
29480+ pushl_cfi %gs
29481+ popl_cfi %ds
29482+#endif
29483+
29484+ENTRY(csum_partial_copy_generic)
29485 subl $4,%esp
29486 CFI_ADJUST_CFA_OFFSET 4
29487 pushl_cfi %edi
29488@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29489 jmp 4f
29490 SRC(1: movw (%esi), %bx )
29491 addl $2, %esi
29492-DST( movw %bx, (%edi) )
29493+DST( movw %bx, %es:(%edi) )
29494 addl $2, %edi
29495 addw %bx, %ax
29496 adcl $0, %eax
29497@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29498 SRC(1: movl (%esi), %ebx )
29499 SRC( movl 4(%esi), %edx )
29500 adcl %ebx, %eax
29501-DST( movl %ebx, (%edi) )
29502+DST( movl %ebx, %es:(%edi) )
29503 adcl %edx, %eax
29504-DST( movl %edx, 4(%edi) )
29505+DST( movl %edx, %es:4(%edi) )
29506
29507 SRC( movl 8(%esi), %ebx )
29508 SRC( movl 12(%esi), %edx )
29509 adcl %ebx, %eax
29510-DST( movl %ebx, 8(%edi) )
29511+DST( movl %ebx, %es:8(%edi) )
29512 adcl %edx, %eax
29513-DST( movl %edx, 12(%edi) )
29514+DST( movl %edx, %es:12(%edi) )
29515
29516 SRC( movl 16(%esi), %ebx )
29517 SRC( movl 20(%esi), %edx )
29518 adcl %ebx, %eax
29519-DST( movl %ebx, 16(%edi) )
29520+DST( movl %ebx, %es:16(%edi) )
29521 adcl %edx, %eax
29522-DST( movl %edx, 20(%edi) )
29523+DST( movl %edx, %es:20(%edi) )
29524
29525 SRC( movl 24(%esi), %ebx )
29526 SRC( movl 28(%esi), %edx )
29527 adcl %ebx, %eax
29528-DST( movl %ebx, 24(%edi) )
29529+DST( movl %ebx, %es:24(%edi) )
29530 adcl %edx, %eax
29531-DST( movl %edx, 28(%edi) )
29532+DST( movl %edx, %es:28(%edi) )
29533
29534 lea 32(%esi), %esi
29535 lea 32(%edi), %edi
29536@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29537 shrl $2, %edx # This clears CF
29538 SRC(3: movl (%esi), %ebx )
29539 adcl %ebx, %eax
29540-DST( movl %ebx, (%edi) )
29541+DST( movl %ebx, %es:(%edi) )
29542 lea 4(%esi), %esi
29543 lea 4(%edi), %edi
29544 dec %edx
29545@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29546 jb 5f
29547 SRC( movw (%esi), %cx )
29548 leal 2(%esi), %esi
29549-DST( movw %cx, (%edi) )
29550+DST( movw %cx, %es:(%edi) )
29551 leal 2(%edi), %edi
29552 je 6f
29553 shll $16,%ecx
29554 SRC(5: movb (%esi), %cl )
29555-DST( movb %cl, (%edi) )
29556+DST( movb %cl, %es:(%edi) )
29557 6: addl %ecx, %eax
29558 adcl $0, %eax
29559 7:
29560@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29561
29562 6001:
29563 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29564- movl $-EFAULT, (%ebx)
29565+ movl $-EFAULT, %ss:(%ebx)
29566
29567 # zero the complete destination - computing the rest
29568 # is too much work
29569@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29570
29571 6002:
29572 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29573- movl $-EFAULT,(%ebx)
29574+ movl $-EFAULT,%ss:(%ebx)
29575 jmp 5000b
29576
29577 .previous
29578
29579+ pushl_cfi %ss
29580+ popl_cfi %ds
29581+ pushl_cfi %ss
29582+ popl_cfi %es
29583 popl_cfi %ebx
29584 CFI_RESTORE ebx
29585 popl_cfi %esi
29586@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29587 popl_cfi %ecx # equivalent to addl $4,%esp
29588 ret
29589 CFI_ENDPROC
29590-ENDPROC(csum_partial_copy_generic)
29591+ENDPROC(csum_partial_copy_generic_to_user)
29592
29593 #else
29594
29595 /* Version for PentiumII/PPro */
29596
29597 #define ROUND1(x) \
29598+ nop; nop; nop; \
29599 SRC(movl x(%esi), %ebx ) ; \
29600 addl %ebx, %eax ; \
29601- DST(movl %ebx, x(%edi) ) ;
29602+ DST(movl %ebx, %es:x(%edi)) ;
29603
29604 #define ROUND(x) \
29605+ nop; nop; nop; \
29606 SRC(movl x(%esi), %ebx ) ; \
29607 adcl %ebx, %eax ; \
29608- DST(movl %ebx, x(%edi) ) ;
29609+ DST(movl %ebx, %es:x(%edi)) ;
29610
29611 #define ARGBASE 12
29612-
29613-ENTRY(csum_partial_copy_generic)
29614+
29615+ENTRY(csum_partial_copy_generic_to_user)
29616 CFI_STARTPROC
29617+
29618+#ifdef CONFIG_PAX_MEMORY_UDEREF
29619+ pushl_cfi %gs
29620+ popl_cfi %es
29621+ jmp csum_partial_copy_generic
29622+#endif
29623+
29624+ENTRY(csum_partial_copy_generic_from_user)
29625+
29626+#ifdef CONFIG_PAX_MEMORY_UDEREF
29627+ pushl_cfi %gs
29628+ popl_cfi %ds
29629+#endif
29630+
29631+ENTRY(csum_partial_copy_generic)
29632 pushl_cfi %ebx
29633 CFI_REL_OFFSET ebx, 0
29634 pushl_cfi %edi
29635@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29636 subl %ebx, %edi
29637 lea -1(%esi),%edx
29638 andl $-32,%edx
29639- lea 3f(%ebx,%ebx), %ebx
29640+ lea 3f(%ebx,%ebx,2), %ebx
29641 testl %esi, %esi
29642 jmp *%ebx
29643 1: addl $64,%esi
29644@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29645 jb 5f
29646 SRC( movw (%esi), %dx )
29647 leal 2(%esi), %esi
29648-DST( movw %dx, (%edi) )
29649+DST( movw %dx, %es:(%edi) )
29650 leal 2(%edi), %edi
29651 je 6f
29652 shll $16,%edx
29653 5:
29654 SRC( movb (%esi), %dl )
29655-DST( movb %dl, (%edi) )
29656+DST( movb %dl, %es:(%edi) )
29657 6: addl %edx, %eax
29658 adcl $0, %eax
29659 7:
29660 .section .fixup, "ax"
29661 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29662- movl $-EFAULT, (%ebx)
29663+ movl $-EFAULT, %ss:(%ebx)
29664 # zero the complete destination (computing the rest is too much work)
29665 movl ARGBASE+8(%esp),%edi # dst
29666 movl ARGBASE+12(%esp),%ecx # len
29667@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29668 rep; stosb
29669 jmp 7b
29670 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29671- movl $-EFAULT, (%ebx)
29672+ movl $-EFAULT, %ss:(%ebx)
29673 jmp 7b
29674 .previous
29675
29676+#ifdef CONFIG_PAX_MEMORY_UDEREF
29677+ pushl_cfi %ss
29678+ popl_cfi %ds
29679+ pushl_cfi %ss
29680+ popl_cfi %es
29681+#endif
29682+
29683 popl_cfi %esi
29684 CFI_RESTORE esi
29685 popl_cfi %edi
29686@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29687 CFI_RESTORE ebx
29688 ret
29689 CFI_ENDPROC
29690-ENDPROC(csum_partial_copy_generic)
29691+ENDPROC(csum_partial_copy_generic_to_user)
29692
29693 #undef ROUND
29694 #undef ROUND1
29695diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29696index f2145cf..cea889d 100644
29697--- a/arch/x86/lib/clear_page_64.S
29698+++ b/arch/x86/lib/clear_page_64.S
29699@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29700 movl $4096/8,%ecx
29701 xorl %eax,%eax
29702 rep stosq
29703+ pax_force_retaddr
29704 ret
29705 CFI_ENDPROC
29706 ENDPROC(clear_page_c)
29707@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29708 movl $4096,%ecx
29709 xorl %eax,%eax
29710 rep stosb
29711+ pax_force_retaddr
29712 ret
29713 CFI_ENDPROC
29714 ENDPROC(clear_page_c_e)
29715@@ -43,6 +45,7 @@ ENTRY(clear_page)
29716 leaq 64(%rdi),%rdi
29717 jnz .Lloop
29718 nop
29719+ pax_force_retaddr
29720 ret
29721 CFI_ENDPROC
29722 .Lclear_page_end:
29723@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29724
29725 #include <asm/cpufeature.h>
29726
29727- .section .altinstr_replacement,"ax"
29728+ .section .altinstr_replacement,"a"
29729 1: .byte 0xeb /* jmp <disp8> */
29730 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29731 2: .byte 0xeb /* jmp <disp8> */
29732diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29733index 40a1725..5d12ac4 100644
29734--- a/arch/x86/lib/cmpxchg16b_emu.S
29735+++ b/arch/x86/lib/cmpxchg16b_emu.S
29736@@ -8,6 +8,7 @@
29737 #include <linux/linkage.h>
29738 #include <asm/dwarf2.h>
29739 #include <asm/percpu.h>
29740+#include <asm/alternative-asm.h>
29741
29742 .text
29743
29744@@ -46,12 +47,14 @@ CFI_STARTPROC
29745 CFI_REMEMBER_STATE
29746 popfq_cfi
29747 mov $1, %al
29748+ pax_force_retaddr
29749 ret
29750
29751 CFI_RESTORE_STATE
29752 .Lnot_same:
29753 popfq_cfi
29754 xor %al,%al
29755+ pax_force_retaddr
29756 ret
29757
29758 CFI_ENDPROC
29759diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29760index 176cca6..e0d658e 100644
29761--- a/arch/x86/lib/copy_page_64.S
29762+++ b/arch/x86/lib/copy_page_64.S
29763@@ -9,6 +9,7 @@ copy_page_rep:
29764 CFI_STARTPROC
29765 movl $4096/8, %ecx
29766 rep movsq
29767+ pax_force_retaddr
29768 ret
29769 CFI_ENDPROC
29770 ENDPROC(copy_page_rep)
29771@@ -24,8 +25,8 @@ ENTRY(copy_page)
29772 CFI_ADJUST_CFA_OFFSET 2*8
29773 movq %rbx, (%rsp)
29774 CFI_REL_OFFSET rbx, 0
29775- movq %r12, 1*8(%rsp)
29776- CFI_REL_OFFSET r12, 1*8
29777+ movq %r13, 1*8(%rsp)
29778+ CFI_REL_OFFSET r13, 1*8
29779
29780 movl $(4096/64)-5, %ecx
29781 .p2align 4
29782@@ -38,7 +39,7 @@ ENTRY(copy_page)
29783 movq 0x8*4(%rsi), %r9
29784 movq 0x8*5(%rsi), %r10
29785 movq 0x8*6(%rsi), %r11
29786- movq 0x8*7(%rsi), %r12
29787+ movq 0x8*7(%rsi), %r13
29788
29789 prefetcht0 5*64(%rsi)
29790
29791@@ -49,7 +50,7 @@ ENTRY(copy_page)
29792 movq %r9, 0x8*4(%rdi)
29793 movq %r10, 0x8*5(%rdi)
29794 movq %r11, 0x8*6(%rdi)
29795- movq %r12, 0x8*7(%rdi)
29796+ movq %r13, 0x8*7(%rdi)
29797
29798 leaq 64 (%rsi), %rsi
29799 leaq 64 (%rdi), %rdi
29800@@ -68,7 +69,7 @@ ENTRY(copy_page)
29801 movq 0x8*4(%rsi), %r9
29802 movq 0x8*5(%rsi), %r10
29803 movq 0x8*6(%rsi), %r11
29804- movq 0x8*7(%rsi), %r12
29805+ movq 0x8*7(%rsi), %r13
29806
29807 movq %rax, 0x8*0(%rdi)
29808 movq %rbx, 0x8*1(%rdi)
29809@@ -77,7 +78,7 @@ ENTRY(copy_page)
29810 movq %r9, 0x8*4(%rdi)
29811 movq %r10, 0x8*5(%rdi)
29812 movq %r11, 0x8*6(%rdi)
29813- movq %r12, 0x8*7(%rdi)
29814+ movq %r13, 0x8*7(%rdi)
29815
29816 leaq 64(%rdi), %rdi
29817 leaq 64(%rsi), %rsi
29818@@ -85,10 +86,11 @@ ENTRY(copy_page)
29819
29820 movq (%rsp), %rbx
29821 CFI_RESTORE rbx
29822- movq 1*8(%rsp), %r12
29823- CFI_RESTORE r12
29824+ movq 1*8(%rsp), %r13
29825+ CFI_RESTORE r13
29826 addq $2*8, %rsp
29827 CFI_ADJUST_CFA_OFFSET -2*8
29828+ pax_force_retaddr
29829 ret
29830 .Lcopy_page_end:
29831 CFI_ENDPROC
29832@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29833
29834 #include <asm/cpufeature.h>
29835
29836- .section .altinstr_replacement,"ax"
29837+ .section .altinstr_replacement,"a"
29838 1: .byte 0xeb /* jmp <disp8> */
29839 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29840 2:
29841diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29842index dee945d..a84067b 100644
29843--- a/arch/x86/lib/copy_user_64.S
29844+++ b/arch/x86/lib/copy_user_64.S
29845@@ -18,31 +18,7 @@
29846 #include <asm/alternative-asm.h>
29847 #include <asm/asm.h>
29848 #include <asm/smap.h>
29849-
29850-/*
29851- * By placing feature2 after feature1 in altinstructions section, we logically
29852- * implement:
29853- * If CPU has feature2, jmp to alt2 is used
29854- * else if CPU has feature1, jmp to alt1 is used
29855- * else jmp to orig is used.
29856- */
29857- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29858-0:
29859- .byte 0xe9 /* 32bit jump */
29860- .long \orig-1f /* by default jump to orig */
29861-1:
29862- .section .altinstr_replacement,"ax"
29863-2: .byte 0xe9 /* near jump with 32bit immediate */
29864- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29865-3: .byte 0xe9 /* near jump with 32bit immediate */
29866- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29867- .previous
29868-
29869- .section .altinstructions,"a"
29870- altinstruction_entry 0b,2b,\feature1,5,5
29871- altinstruction_entry 0b,3b,\feature2,5,5
29872- .previous
29873- .endm
29874+#include <asm/pgtable.h>
29875
29876 .macro ALIGN_DESTINATION
29877 #ifdef FIX_ALIGNMENT
29878@@ -70,52 +46,6 @@
29879 #endif
29880 .endm
29881
29882-/* Standard copy_to_user with segment limit checking */
29883-ENTRY(_copy_to_user)
29884- CFI_STARTPROC
29885- GET_THREAD_INFO(%rax)
29886- movq %rdi,%rcx
29887- addq %rdx,%rcx
29888- jc bad_to_user
29889- cmpq TI_addr_limit(%rax),%rcx
29890- ja bad_to_user
29891- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29892- copy_user_generic_unrolled,copy_user_generic_string, \
29893- copy_user_enhanced_fast_string
29894- CFI_ENDPROC
29895-ENDPROC(_copy_to_user)
29896-
29897-/* Standard copy_from_user with segment limit checking */
29898-ENTRY(_copy_from_user)
29899- CFI_STARTPROC
29900- GET_THREAD_INFO(%rax)
29901- movq %rsi,%rcx
29902- addq %rdx,%rcx
29903- jc bad_from_user
29904- cmpq TI_addr_limit(%rax),%rcx
29905- ja bad_from_user
29906- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29907- copy_user_generic_unrolled,copy_user_generic_string, \
29908- copy_user_enhanced_fast_string
29909- CFI_ENDPROC
29910-ENDPROC(_copy_from_user)
29911-
29912- .section .fixup,"ax"
29913- /* must zero dest */
29914-ENTRY(bad_from_user)
29915-bad_from_user:
29916- CFI_STARTPROC
29917- movl %edx,%ecx
29918- xorl %eax,%eax
29919- rep
29920- stosb
29921-bad_to_user:
29922- movl %edx,%eax
29923- ret
29924- CFI_ENDPROC
29925-ENDPROC(bad_from_user)
29926- .previous
29927-
29928 /*
29929 * copy_user_generic_unrolled - memory copy with exception handling.
29930 * This version is for CPUs like P4 that don't have efficient micro
29931@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29932 */
29933 ENTRY(copy_user_generic_unrolled)
29934 CFI_STARTPROC
29935+ ASM_PAX_OPEN_USERLAND
29936 ASM_STAC
29937 cmpl $8,%edx
29938 jb 20f /* less then 8 bytes, go to byte copy loop */
29939@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29940 jnz 21b
29941 23: xor %eax,%eax
29942 ASM_CLAC
29943+ ASM_PAX_CLOSE_USERLAND
29944+ pax_force_retaddr
29945 ret
29946
29947 .section .fixup,"ax"
29948@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29949 */
29950 ENTRY(copy_user_generic_string)
29951 CFI_STARTPROC
29952+ ASM_PAX_OPEN_USERLAND
29953 ASM_STAC
29954 cmpl $8,%edx
29955 jb 2f /* less than 8 bytes, go to byte copy loop */
29956@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29957 movsb
29958 xorl %eax,%eax
29959 ASM_CLAC
29960+ ASM_PAX_CLOSE_USERLAND
29961+ pax_force_retaddr
29962 ret
29963
29964 .section .fixup,"ax"
29965@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29966 */
29967 ENTRY(copy_user_enhanced_fast_string)
29968 CFI_STARTPROC
29969+ ASM_PAX_OPEN_USERLAND
29970 ASM_STAC
29971 movl %edx,%ecx
29972 1: rep
29973 movsb
29974 xorl %eax,%eax
29975 ASM_CLAC
29976+ ASM_PAX_CLOSE_USERLAND
29977+ pax_force_retaddr
29978 ret
29979
29980 .section .fixup,"ax"
29981diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29982index 6a4f43c..c70fb52 100644
29983--- a/arch/x86/lib/copy_user_nocache_64.S
29984+++ b/arch/x86/lib/copy_user_nocache_64.S
29985@@ -8,6 +8,7 @@
29986
29987 #include <linux/linkage.h>
29988 #include <asm/dwarf2.h>
29989+#include <asm/alternative-asm.h>
29990
29991 #define FIX_ALIGNMENT 1
29992
29993@@ -16,6 +17,7 @@
29994 #include <asm/thread_info.h>
29995 #include <asm/asm.h>
29996 #include <asm/smap.h>
29997+#include <asm/pgtable.h>
29998
29999 .macro ALIGN_DESTINATION
30000 #ifdef FIX_ALIGNMENT
30001@@ -49,6 +51,16 @@
30002 */
30003 ENTRY(__copy_user_nocache)
30004 CFI_STARTPROC
30005+
30006+#ifdef CONFIG_PAX_MEMORY_UDEREF
30007+ mov pax_user_shadow_base,%rcx
30008+ cmp %rcx,%rsi
30009+ jae 1f
30010+ add %rcx,%rsi
30011+1:
30012+#endif
30013+
30014+ ASM_PAX_OPEN_USERLAND
30015 ASM_STAC
30016 cmpl $8,%edx
30017 jb 20f /* less then 8 bytes, go to byte copy loop */
30018@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30019 jnz 21b
30020 23: xorl %eax,%eax
30021 ASM_CLAC
30022+ ASM_PAX_CLOSE_USERLAND
30023 sfence
30024+ pax_force_retaddr
30025 ret
30026
30027 .section .fixup,"ax"
30028diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30029index 2419d5f..fe52d0e 100644
30030--- a/arch/x86/lib/csum-copy_64.S
30031+++ b/arch/x86/lib/csum-copy_64.S
30032@@ -9,6 +9,7 @@
30033 #include <asm/dwarf2.h>
30034 #include <asm/errno.h>
30035 #include <asm/asm.h>
30036+#include <asm/alternative-asm.h>
30037
30038 /*
30039 * Checksum copy with exception handling.
30040@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30041 CFI_ADJUST_CFA_OFFSET 7*8
30042 movq %rbx, 2*8(%rsp)
30043 CFI_REL_OFFSET rbx, 2*8
30044- movq %r12, 3*8(%rsp)
30045- CFI_REL_OFFSET r12, 3*8
30046+ movq %r15, 3*8(%rsp)
30047+ CFI_REL_OFFSET r15, 3*8
30048 movq %r14, 4*8(%rsp)
30049 CFI_REL_OFFSET r14, 4*8
30050 movq %r13, 5*8(%rsp)
30051@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30052 movl %edx, %ecx
30053
30054 xorl %r9d, %r9d
30055- movq %rcx, %r12
30056+ movq %rcx, %r15
30057
30058- shrq $6, %r12
30059+ shrq $6, %r15
30060 jz .Lhandle_tail /* < 64 */
30061
30062 clc
30063
30064 /* main loop. clear in 64 byte blocks */
30065 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30066- /* r11: temp3, rdx: temp4, r12 loopcnt */
30067+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30068 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30069 .p2align 4
30070 .Lloop:
30071@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30072 adcq %r14, %rax
30073 adcq %r13, %rax
30074
30075- decl %r12d
30076+ decl %r15d
30077
30078 dest
30079 movq %rbx, (%rsi)
30080@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30081 .Lende:
30082 movq 2*8(%rsp), %rbx
30083 CFI_RESTORE rbx
30084- movq 3*8(%rsp), %r12
30085- CFI_RESTORE r12
30086+ movq 3*8(%rsp), %r15
30087+ CFI_RESTORE r15
30088 movq 4*8(%rsp), %r14
30089 CFI_RESTORE r14
30090 movq 5*8(%rsp), %r13
30091@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30092 CFI_RESTORE rbp
30093 addq $7*8, %rsp
30094 CFI_ADJUST_CFA_OFFSET -7*8
30095+ pax_force_retaddr
30096 ret
30097 CFI_RESTORE_STATE
30098
30099diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30100index 1318f75..44c30fd 100644
30101--- a/arch/x86/lib/csum-wrappers_64.c
30102+++ b/arch/x86/lib/csum-wrappers_64.c
30103@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30104 len -= 2;
30105 }
30106 }
30107+ pax_open_userland();
30108 stac();
30109- isum = csum_partial_copy_generic((__force const void *)src,
30110+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30111 dst, len, isum, errp, NULL);
30112 clac();
30113+ pax_close_userland();
30114 if (unlikely(*errp))
30115 goto out_err;
30116
30117@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30118 }
30119
30120 *errp = 0;
30121+ pax_open_userland();
30122 stac();
30123- ret = csum_partial_copy_generic(src, (void __force *)dst,
30124+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30125 len, isum, NULL, errp);
30126 clac();
30127+ pax_close_userland();
30128 return ret;
30129 }
30130 EXPORT_SYMBOL(csum_partial_copy_to_user);
30131diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30132index a451235..1daa956 100644
30133--- a/arch/x86/lib/getuser.S
30134+++ b/arch/x86/lib/getuser.S
30135@@ -33,17 +33,40 @@
30136 #include <asm/thread_info.h>
30137 #include <asm/asm.h>
30138 #include <asm/smap.h>
30139+#include <asm/segment.h>
30140+#include <asm/pgtable.h>
30141+#include <asm/alternative-asm.h>
30142+
30143+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30144+#define __copyuser_seg gs;
30145+#else
30146+#define __copyuser_seg
30147+#endif
30148
30149 .text
30150 ENTRY(__get_user_1)
30151 CFI_STARTPROC
30152+
30153+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30154 GET_THREAD_INFO(%_ASM_DX)
30155 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30156 jae bad_get_user
30157 ASM_STAC
30158-1: movzbl (%_ASM_AX),%edx
30159+
30160+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30161+ mov pax_user_shadow_base,%_ASM_DX
30162+ cmp %_ASM_DX,%_ASM_AX
30163+ jae 1234f
30164+ add %_ASM_DX,%_ASM_AX
30165+1234:
30166+#endif
30167+
30168+#endif
30169+
30170+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30171 xor %eax,%eax
30172 ASM_CLAC
30173+ pax_force_retaddr
30174 ret
30175 CFI_ENDPROC
30176 ENDPROC(__get_user_1)
30177@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30178 ENTRY(__get_user_2)
30179 CFI_STARTPROC
30180 add $1,%_ASM_AX
30181+
30182+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30183 jc bad_get_user
30184 GET_THREAD_INFO(%_ASM_DX)
30185 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30186 jae bad_get_user
30187 ASM_STAC
30188-2: movzwl -1(%_ASM_AX),%edx
30189+
30190+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30191+ mov pax_user_shadow_base,%_ASM_DX
30192+ cmp %_ASM_DX,%_ASM_AX
30193+ jae 1234f
30194+ add %_ASM_DX,%_ASM_AX
30195+1234:
30196+#endif
30197+
30198+#endif
30199+
30200+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30201 xor %eax,%eax
30202 ASM_CLAC
30203+ pax_force_retaddr
30204 ret
30205 CFI_ENDPROC
30206 ENDPROC(__get_user_2)
30207@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30208 ENTRY(__get_user_4)
30209 CFI_STARTPROC
30210 add $3,%_ASM_AX
30211+
30212+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30213 jc bad_get_user
30214 GET_THREAD_INFO(%_ASM_DX)
30215 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30216 jae bad_get_user
30217 ASM_STAC
30218-3: movl -3(%_ASM_AX),%edx
30219+
30220+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30221+ mov pax_user_shadow_base,%_ASM_DX
30222+ cmp %_ASM_DX,%_ASM_AX
30223+ jae 1234f
30224+ add %_ASM_DX,%_ASM_AX
30225+1234:
30226+#endif
30227+
30228+#endif
30229+
30230+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30231 xor %eax,%eax
30232 ASM_CLAC
30233+ pax_force_retaddr
30234 ret
30235 CFI_ENDPROC
30236 ENDPROC(__get_user_4)
30237@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30238 GET_THREAD_INFO(%_ASM_DX)
30239 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30240 jae bad_get_user
30241+
30242+#ifdef CONFIG_PAX_MEMORY_UDEREF
30243+ mov pax_user_shadow_base,%_ASM_DX
30244+ cmp %_ASM_DX,%_ASM_AX
30245+ jae 1234f
30246+ add %_ASM_DX,%_ASM_AX
30247+1234:
30248+#endif
30249+
30250 ASM_STAC
30251 4: movq -7(%_ASM_AX),%rdx
30252 xor %eax,%eax
30253 ASM_CLAC
30254+ pax_force_retaddr
30255 ret
30256 #else
30257 add $7,%_ASM_AX
30258@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30259 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30260 jae bad_get_user_8
30261 ASM_STAC
30262-4: movl -7(%_ASM_AX),%edx
30263-5: movl -3(%_ASM_AX),%ecx
30264+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30265+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30266 xor %eax,%eax
30267 ASM_CLAC
30268+ pax_force_retaddr
30269 ret
30270 #endif
30271 CFI_ENDPROC
30272@@ -113,6 +175,7 @@ bad_get_user:
30273 xor %edx,%edx
30274 mov $(-EFAULT),%_ASM_AX
30275 ASM_CLAC
30276+ pax_force_retaddr
30277 ret
30278 CFI_ENDPROC
30279 END(bad_get_user)
30280@@ -124,6 +187,7 @@ bad_get_user_8:
30281 xor %ecx,%ecx
30282 mov $(-EFAULT),%_ASM_AX
30283 ASM_CLAC
30284+ pax_force_retaddr
30285 ret
30286 CFI_ENDPROC
30287 END(bad_get_user_8)
30288diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30289index 1313ae6..84f25ea 100644
30290--- a/arch/x86/lib/insn.c
30291+++ b/arch/x86/lib/insn.c
30292@@ -20,8 +20,10 @@
30293
30294 #ifdef __KERNEL__
30295 #include <linux/string.h>
30296+#include <asm/pgtable_types.h>
30297 #else
30298 #include <string.h>
30299+#define ktla_ktva(addr) addr
30300 #endif
30301 #include <asm/inat.h>
30302 #include <asm/insn.h>
30303@@ -53,9 +55,9 @@
30304 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30305 {
30306 memset(insn, 0, sizeof(*insn));
30307- insn->kaddr = kaddr;
30308- insn->end_kaddr = kaddr + buf_len;
30309- insn->next_byte = kaddr;
30310+ insn->kaddr = ktla_ktva(kaddr);
30311+ insn->end_kaddr = insn->kaddr + buf_len;
30312+ insn->next_byte = insn->kaddr;
30313 insn->x86_64 = x86_64 ? 1 : 0;
30314 insn->opnd_bytes = 4;
30315 if (x86_64)
30316diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30317index 05a95e7..326f2fa 100644
30318--- a/arch/x86/lib/iomap_copy_64.S
30319+++ b/arch/x86/lib/iomap_copy_64.S
30320@@ -17,6 +17,7 @@
30321
30322 #include <linux/linkage.h>
30323 #include <asm/dwarf2.h>
30324+#include <asm/alternative-asm.h>
30325
30326 /*
30327 * override generic version in lib/iomap_copy.c
30328@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30329 CFI_STARTPROC
30330 movl %edx,%ecx
30331 rep movsd
30332+ pax_force_retaddr
30333 ret
30334 CFI_ENDPROC
30335 ENDPROC(__iowrite32_copy)
30336diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30337index 56313a3..0db417e 100644
30338--- a/arch/x86/lib/memcpy_64.S
30339+++ b/arch/x86/lib/memcpy_64.S
30340@@ -24,7 +24,7 @@
30341 * This gets patched over the unrolled variant (below) via the
30342 * alternative instructions framework:
30343 */
30344- .section .altinstr_replacement, "ax", @progbits
30345+ .section .altinstr_replacement, "a", @progbits
30346 .Lmemcpy_c:
30347 movq %rdi, %rax
30348 movq %rdx, %rcx
30349@@ -33,6 +33,7 @@
30350 rep movsq
30351 movl %edx, %ecx
30352 rep movsb
30353+ pax_force_retaddr
30354 ret
30355 .Lmemcpy_e:
30356 .previous
30357@@ -44,11 +45,12 @@
30358 * This gets patched over the unrolled variant (below) via the
30359 * alternative instructions framework:
30360 */
30361- .section .altinstr_replacement, "ax", @progbits
30362+ .section .altinstr_replacement, "a", @progbits
30363 .Lmemcpy_c_e:
30364 movq %rdi, %rax
30365 movq %rdx, %rcx
30366 rep movsb
30367+ pax_force_retaddr
30368 ret
30369 .Lmemcpy_e_e:
30370 .previous
30371@@ -136,6 +138,7 @@ ENTRY(memcpy)
30372 movq %r9, 1*8(%rdi)
30373 movq %r10, -2*8(%rdi, %rdx)
30374 movq %r11, -1*8(%rdi, %rdx)
30375+ pax_force_retaddr
30376 retq
30377 .p2align 4
30378 .Lless_16bytes:
30379@@ -148,6 +151,7 @@ ENTRY(memcpy)
30380 movq -1*8(%rsi, %rdx), %r9
30381 movq %r8, 0*8(%rdi)
30382 movq %r9, -1*8(%rdi, %rdx)
30383+ pax_force_retaddr
30384 retq
30385 .p2align 4
30386 .Lless_8bytes:
30387@@ -161,6 +165,7 @@ ENTRY(memcpy)
30388 movl -4(%rsi, %rdx), %r8d
30389 movl %ecx, (%rdi)
30390 movl %r8d, -4(%rdi, %rdx)
30391+ pax_force_retaddr
30392 retq
30393 .p2align 4
30394 .Lless_3bytes:
30395@@ -179,6 +184,7 @@ ENTRY(memcpy)
30396 movb %cl, (%rdi)
30397
30398 .Lend:
30399+ pax_force_retaddr
30400 retq
30401 CFI_ENDPROC
30402 ENDPROC(memcpy)
30403diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30404index 65268a6..dd1de11 100644
30405--- a/arch/x86/lib/memmove_64.S
30406+++ b/arch/x86/lib/memmove_64.S
30407@@ -202,14 +202,16 @@ ENTRY(memmove)
30408 movb (%rsi), %r11b
30409 movb %r11b, (%rdi)
30410 13:
30411+ pax_force_retaddr
30412 retq
30413 CFI_ENDPROC
30414
30415- .section .altinstr_replacement,"ax"
30416+ .section .altinstr_replacement,"a"
30417 .Lmemmove_begin_forward_efs:
30418 /* Forward moving data. */
30419 movq %rdx, %rcx
30420 rep movsb
30421+ pax_force_retaddr
30422 retq
30423 .Lmemmove_end_forward_efs:
30424 .previous
30425diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30426index 2dcb380..2eb79fe 100644
30427--- a/arch/x86/lib/memset_64.S
30428+++ b/arch/x86/lib/memset_64.S
30429@@ -16,7 +16,7 @@
30430 *
30431 * rax original destination
30432 */
30433- .section .altinstr_replacement, "ax", @progbits
30434+ .section .altinstr_replacement, "a", @progbits
30435 .Lmemset_c:
30436 movq %rdi,%r9
30437 movq %rdx,%rcx
30438@@ -30,6 +30,7 @@
30439 movl %edx,%ecx
30440 rep stosb
30441 movq %r9,%rax
30442+ pax_force_retaddr
30443 ret
30444 .Lmemset_e:
30445 .previous
30446@@ -45,13 +46,14 @@
30447 *
30448 * rax original destination
30449 */
30450- .section .altinstr_replacement, "ax", @progbits
30451+ .section .altinstr_replacement, "a", @progbits
30452 .Lmemset_c_e:
30453 movq %rdi,%r9
30454 movb %sil,%al
30455 movq %rdx,%rcx
30456 rep stosb
30457 movq %r9,%rax
30458+ pax_force_retaddr
30459 ret
30460 .Lmemset_e_e:
30461 .previous
30462@@ -118,6 +120,7 @@ ENTRY(__memset)
30463
30464 .Lende:
30465 movq %r10,%rax
30466+ pax_force_retaddr
30467 ret
30468
30469 CFI_RESTORE_STATE
30470diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30471index c9f2d9b..e7fd2c0 100644
30472--- a/arch/x86/lib/mmx_32.c
30473+++ b/arch/x86/lib/mmx_32.c
30474@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30475 {
30476 void *p;
30477 int i;
30478+ unsigned long cr0;
30479
30480 if (unlikely(in_interrupt()))
30481 return __memcpy(to, from, len);
30482@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30483 kernel_fpu_begin();
30484
30485 __asm__ __volatile__ (
30486- "1: prefetch (%0)\n" /* This set is 28 bytes */
30487- " prefetch 64(%0)\n"
30488- " prefetch 128(%0)\n"
30489- " prefetch 192(%0)\n"
30490- " prefetch 256(%0)\n"
30491+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30492+ " prefetch 64(%1)\n"
30493+ " prefetch 128(%1)\n"
30494+ " prefetch 192(%1)\n"
30495+ " prefetch 256(%1)\n"
30496 "2: \n"
30497 ".section .fixup, \"ax\"\n"
30498- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30499+ "3: \n"
30500+
30501+#ifdef CONFIG_PAX_KERNEXEC
30502+ " movl %%cr0, %0\n"
30503+ " movl %0, %%eax\n"
30504+ " andl $0xFFFEFFFF, %%eax\n"
30505+ " movl %%eax, %%cr0\n"
30506+#endif
30507+
30508+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30509+
30510+#ifdef CONFIG_PAX_KERNEXEC
30511+ " movl %0, %%cr0\n"
30512+#endif
30513+
30514 " jmp 2b\n"
30515 ".previous\n"
30516 _ASM_EXTABLE(1b, 3b)
30517- : : "r" (from));
30518+ : "=&r" (cr0) : "r" (from) : "ax");
30519
30520 for ( ; i > 5; i--) {
30521 __asm__ __volatile__ (
30522- "1: prefetch 320(%0)\n"
30523- "2: movq (%0), %%mm0\n"
30524- " movq 8(%0), %%mm1\n"
30525- " movq 16(%0), %%mm2\n"
30526- " movq 24(%0), %%mm3\n"
30527- " movq %%mm0, (%1)\n"
30528- " movq %%mm1, 8(%1)\n"
30529- " movq %%mm2, 16(%1)\n"
30530- " movq %%mm3, 24(%1)\n"
30531- " movq 32(%0), %%mm0\n"
30532- " movq 40(%0), %%mm1\n"
30533- " movq 48(%0), %%mm2\n"
30534- " movq 56(%0), %%mm3\n"
30535- " movq %%mm0, 32(%1)\n"
30536- " movq %%mm1, 40(%1)\n"
30537- " movq %%mm2, 48(%1)\n"
30538- " movq %%mm3, 56(%1)\n"
30539+ "1: prefetch 320(%1)\n"
30540+ "2: movq (%1), %%mm0\n"
30541+ " movq 8(%1), %%mm1\n"
30542+ " movq 16(%1), %%mm2\n"
30543+ " movq 24(%1), %%mm3\n"
30544+ " movq %%mm0, (%2)\n"
30545+ " movq %%mm1, 8(%2)\n"
30546+ " movq %%mm2, 16(%2)\n"
30547+ " movq %%mm3, 24(%2)\n"
30548+ " movq 32(%1), %%mm0\n"
30549+ " movq 40(%1), %%mm1\n"
30550+ " movq 48(%1), %%mm2\n"
30551+ " movq 56(%1), %%mm3\n"
30552+ " movq %%mm0, 32(%2)\n"
30553+ " movq %%mm1, 40(%2)\n"
30554+ " movq %%mm2, 48(%2)\n"
30555+ " movq %%mm3, 56(%2)\n"
30556 ".section .fixup, \"ax\"\n"
30557- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30558+ "3:\n"
30559+
30560+#ifdef CONFIG_PAX_KERNEXEC
30561+ " movl %%cr0, %0\n"
30562+ " movl %0, %%eax\n"
30563+ " andl $0xFFFEFFFF, %%eax\n"
30564+ " movl %%eax, %%cr0\n"
30565+#endif
30566+
30567+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30568+
30569+#ifdef CONFIG_PAX_KERNEXEC
30570+ " movl %0, %%cr0\n"
30571+#endif
30572+
30573 " jmp 2b\n"
30574 ".previous\n"
30575 _ASM_EXTABLE(1b, 3b)
30576- : : "r" (from), "r" (to) : "memory");
30577+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30578
30579 from += 64;
30580 to += 64;
30581@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30582 static void fast_copy_page(void *to, void *from)
30583 {
30584 int i;
30585+ unsigned long cr0;
30586
30587 kernel_fpu_begin();
30588
30589@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30590 * but that is for later. -AV
30591 */
30592 __asm__ __volatile__(
30593- "1: prefetch (%0)\n"
30594- " prefetch 64(%0)\n"
30595- " prefetch 128(%0)\n"
30596- " prefetch 192(%0)\n"
30597- " prefetch 256(%0)\n"
30598+ "1: prefetch (%1)\n"
30599+ " prefetch 64(%1)\n"
30600+ " prefetch 128(%1)\n"
30601+ " prefetch 192(%1)\n"
30602+ " prefetch 256(%1)\n"
30603 "2: \n"
30604 ".section .fixup, \"ax\"\n"
30605- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30606+ "3: \n"
30607+
30608+#ifdef CONFIG_PAX_KERNEXEC
30609+ " movl %%cr0, %0\n"
30610+ " movl %0, %%eax\n"
30611+ " andl $0xFFFEFFFF, %%eax\n"
30612+ " movl %%eax, %%cr0\n"
30613+#endif
30614+
30615+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30616+
30617+#ifdef CONFIG_PAX_KERNEXEC
30618+ " movl %0, %%cr0\n"
30619+#endif
30620+
30621 " jmp 2b\n"
30622 ".previous\n"
30623- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30624+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30625
30626 for (i = 0; i < (4096-320)/64; i++) {
30627 __asm__ __volatile__ (
30628- "1: prefetch 320(%0)\n"
30629- "2: movq (%0), %%mm0\n"
30630- " movntq %%mm0, (%1)\n"
30631- " movq 8(%0), %%mm1\n"
30632- " movntq %%mm1, 8(%1)\n"
30633- " movq 16(%0), %%mm2\n"
30634- " movntq %%mm2, 16(%1)\n"
30635- " movq 24(%0), %%mm3\n"
30636- " movntq %%mm3, 24(%1)\n"
30637- " movq 32(%0), %%mm4\n"
30638- " movntq %%mm4, 32(%1)\n"
30639- " movq 40(%0), %%mm5\n"
30640- " movntq %%mm5, 40(%1)\n"
30641- " movq 48(%0), %%mm6\n"
30642- " movntq %%mm6, 48(%1)\n"
30643- " movq 56(%0), %%mm7\n"
30644- " movntq %%mm7, 56(%1)\n"
30645+ "1: prefetch 320(%1)\n"
30646+ "2: movq (%1), %%mm0\n"
30647+ " movntq %%mm0, (%2)\n"
30648+ " movq 8(%1), %%mm1\n"
30649+ " movntq %%mm1, 8(%2)\n"
30650+ " movq 16(%1), %%mm2\n"
30651+ " movntq %%mm2, 16(%2)\n"
30652+ " movq 24(%1), %%mm3\n"
30653+ " movntq %%mm3, 24(%2)\n"
30654+ " movq 32(%1), %%mm4\n"
30655+ " movntq %%mm4, 32(%2)\n"
30656+ " movq 40(%1), %%mm5\n"
30657+ " movntq %%mm5, 40(%2)\n"
30658+ " movq 48(%1), %%mm6\n"
30659+ " movntq %%mm6, 48(%2)\n"
30660+ " movq 56(%1), %%mm7\n"
30661+ " movntq %%mm7, 56(%2)\n"
30662 ".section .fixup, \"ax\"\n"
30663- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30664+ "3:\n"
30665+
30666+#ifdef CONFIG_PAX_KERNEXEC
30667+ " movl %%cr0, %0\n"
30668+ " movl %0, %%eax\n"
30669+ " andl $0xFFFEFFFF, %%eax\n"
30670+ " movl %%eax, %%cr0\n"
30671+#endif
30672+
30673+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30674+
30675+#ifdef CONFIG_PAX_KERNEXEC
30676+ " movl %0, %%cr0\n"
30677+#endif
30678+
30679 " jmp 2b\n"
30680 ".previous\n"
30681- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30682+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30683
30684 from += 64;
30685 to += 64;
30686@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30687 static void fast_copy_page(void *to, void *from)
30688 {
30689 int i;
30690+ unsigned long cr0;
30691
30692 kernel_fpu_begin();
30693
30694 __asm__ __volatile__ (
30695- "1: prefetch (%0)\n"
30696- " prefetch 64(%0)\n"
30697- " prefetch 128(%0)\n"
30698- " prefetch 192(%0)\n"
30699- " prefetch 256(%0)\n"
30700+ "1: prefetch (%1)\n"
30701+ " prefetch 64(%1)\n"
30702+ " prefetch 128(%1)\n"
30703+ " prefetch 192(%1)\n"
30704+ " prefetch 256(%1)\n"
30705 "2: \n"
30706 ".section .fixup, \"ax\"\n"
30707- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30708+ "3: \n"
30709+
30710+#ifdef CONFIG_PAX_KERNEXEC
30711+ " movl %%cr0, %0\n"
30712+ " movl %0, %%eax\n"
30713+ " andl $0xFFFEFFFF, %%eax\n"
30714+ " movl %%eax, %%cr0\n"
30715+#endif
30716+
30717+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30718+
30719+#ifdef CONFIG_PAX_KERNEXEC
30720+ " movl %0, %%cr0\n"
30721+#endif
30722+
30723 " jmp 2b\n"
30724 ".previous\n"
30725- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30726+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30727
30728 for (i = 0; i < 4096/64; i++) {
30729 __asm__ __volatile__ (
30730- "1: prefetch 320(%0)\n"
30731- "2: movq (%0), %%mm0\n"
30732- " movq 8(%0), %%mm1\n"
30733- " movq 16(%0), %%mm2\n"
30734- " movq 24(%0), %%mm3\n"
30735- " movq %%mm0, (%1)\n"
30736- " movq %%mm1, 8(%1)\n"
30737- " movq %%mm2, 16(%1)\n"
30738- " movq %%mm3, 24(%1)\n"
30739- " movq 32(%0), %%mm0\n"
30740- " movq 40(%0), %%mm1\n"
30741- " movq 48(%0), %%mm2\n"
30742- " movq 56(%0), %%mm3\n"
30743- " movq %%mm0, 32(%1)\n"
30744- " movq %%mm1, 40(%1)\n"
30745- " movq %%mm2, 48(%1)\n"
30746- " movq %%mm3, 56(%1)\n"
30747+ "1: prefetch 320(%1)\n"
30748+ "2: movq (%1), %%mm0\n"
30749+ " movq 8(%1), %%mm1\n"
30750+ " movq 16(%1), %%mm2\n"
30751+ " movq 24(%1), %%mm3\n"
30752+ " movq %%mm0, (%2)\n"
30753+ " movq %%mm1, 8(%2)\n"
30754+ " movq %%mm2, 16(%2)\n"
30755+ " movq %%mm3, 24(%2)\n"
30756+ " movq 32(%1), %%mm0\n"
30757+ " movq 40(%1), %%mm1\n"
30758+ " movq 48(%1), %%mm2\n"
30759+ " movq 56(%1), %%mm3\n"
30760+ " movq %%mm0, 32(%2)\n"
30761+ " movq %%mm1, 40(%2)\n"
30762+ " movq %%mm2, 48(%2)\n"
30763+ " movq %%mm3, 56(%2)\n"
30764 ".section .fixup, \"ax\"\n"
30765- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30766+ "3:\n"
30767+
30768+#ifdef CONFIG_PAX_KERNEXEC
30769+ " movl %%cr0, %0\n"
30770+ " movl %0, %%eax\n"
30771+ " andl $0xFFFEFFFF, %%eax\n"
30772+ " movl %%eax, %%cr0\n"
30773+#endif
30774+
30775+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30776+
30777+#ifdef CONFIG_PAX_KERNEXEC
30778+ " movl %0, %%cr0\n"
30779+#endif
30780+
30781 " jmp 2b\n"
30782 ".previous\n"
30783 _ASM_EXTABLE(1b, 3b)
30784- : : "r" (from), "r" (to) : "memory");
30785+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30786
30787 from += 64;
30788 to += 64;
30789diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30790index f6d13ee..d789440 100644
30791--- a/arch/x86/lib/msr-reg.S
30792+++ b/arch/x86/lib/msr-reg.S
30793@@ -3,6 +3,7 @@
30794 #include <asm/dwarf2.h>
30795 #include <asm/asm.h>
30796 #include <asm/msr.h>
30797+#include <asm/alternative-asm.h>
30798
30799 #ifdef CONFIG_X86_64
30800 /*
30801@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30802 movl %edi, 28(%r10)
30803 popq_cfi %rbp
30804 popq_cfi %rbx
30805+ pax_force_retaddr
30806 ret
30807 3:
30808 CFI_RESTORE_STATE
30809diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30810index fc6ba17..d4d989d 100644
30811--- a/arch/x86/lib/putuser.S
30812+++ b/arch/x86/lib/putuser.S
30813@@ -16,7 +16,9 @@
30814 #include <asm/errno.h>
30815 #include <asm/asm.h>
30816 #include <asm/smap.h>
30817-
30818+#include <asm/segment.h>
30819+#include <asm/pgtable.h>
30820+#include <asm/alternative-asm.h>
30821
30822 /*
30823 * __put_user_X
30824@@ -30,57 +32,125 @@
30825 * as they get called from within inline assembly.
30826 */
30827
30828-#define ENTER CFI_STARTPROC ; \
30829- GET_THREAD_INFO(%_ASM_BX)
30830-#define EXIT ASM_CLAC ; \
30831- ret ; \
30832+#define ENTER CFI_STARTPROC
30833+#define EXIT ASM_CLAC ; \
30834+ pax_force_retaddr ; \
30835+ ret ; \
30836 CFI_ENDPROC
30837
30838+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30839+#define _DEST %_ASM_CX,%_ASM_BX
30840+#else
30841+#define _DEST %_ASM_CX
30842+#endif
30843+
30844+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30845+#define __copyuser_seg gs;
30846+#else
30847+#define __copyuser_seg
30848+#endif
30849+
30850 .text
30851 ENTRY(__put_user_1)
30852 ENTER
30853+
30854+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30855+ GET_THREAD_INFO(%_ASM_BX)
30856 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30857 jae bad_put_user
30858 ASM_STAC
30859-1: movb %al,(%_ASM_CX)
30860+
30861+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30862+ mov pax_user_shadow_base,%_ASM_BX
30863+ cmp %_ASM_BX,%_ASM_CX
30864+ jb 1234f
30865+ xor %ebx,%ebx
30866+1234:
30867+#endif
30868+
30869+#endif
30870+
30871+1: __copyuser_seg movb %al,(_DEST)
30872 xor %eax,%eax
30873 EXIT
30874 ENDPROC(__put_user_1)
30875
30876 ENTRY(__put_user_2)
30877 ENTER
30878+
30879+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30880+ GET_THREAD_INFO(%_ASM_BX)
30881 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30882 sub $1,%_ASM_BX
30883 cmp %_ASM_BX,%_ASM_CX
30884 jae bad_put_user
30885 ASM_STAC
30886-2: movw %ax,(%_ASM_CX)
30887+
30888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30889+ mov pax_user_shadow_base,%_ASM_BX
30890+ cmp %_ASM_BX,%_ASM_CX
30891+ jb 1234f
30892+ xor %ebx,%ebx
30893+1234:
30894+#endif
30895+
30896+#endif
30897+
30898+2: __copyuser_seg movw %ax,(_DEST)
30899 xor %eax,%eax
30900 EXIT
30901 ENDPROC(__put_user_2)
30902
30903 ENTRY(__put_user_4)
30904 ENTER
30905+
30906+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30907+ GET_THREAD_INFO(%_ASM_BX)
30908 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30909 sub $3,%_ASM_BX
30910 cmp %_ASM_BX,%_ASM_CX
30911 jae bad_put_user
30912 ASM_STAC
30913-3: movl %eax,(%_ASM_CX)
30914+
30915+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30916+ mov pax_user_shadow_base,%_ASM_BX
30917+ cmp %_ASM_BX,%_ASM_CX
30918+ jb 1234f
30919+ xor %ebx,%ebx
30920+1234:
30921+#endif
30922+
30923+#endif
30924+
30925+3: __copyuser_seg movl %eax,(_DEST)
30926 xor %eax,%eax
30927 EXIT
30928 ENDPROC(__put_user_4)
30929
30930 ENTRY(__put_user_8)
30931 ENTER
30932+
30933+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30934+ GET_THREAD_INFO(%_ASM_BX)
30935 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30936 sub $7,%_ASM_BX
30937 cmp %_ASM_BX,%_ASM_CX
30938 jae bad_put_user
30939 ASM_STAC
30940-4: mov %_ASM_AX,(%_ASM_CX)
30941+
30942+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30943+ mov pax_user_shadow_base,%_ASM_BX
30944+ cmp %_ASM_BX,%_ASM_CX
30945+ jb 1234f
30946+ xor %ebx,%ebx
30947+1234:
30948+#endif
30949+
30950+#endif
30951+
30952+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30953 #ifdef CONFIG_X86_32
30954-5: movl %edx,4(%_ASM_CX)
30955+5: __copyuser_seg movl %edx,4(_DEST)
30956 #endif
30957 xor %eax,%eax
30958 EXIT
30959diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30960index 5dff5f0..cadebf4 100644
30961--- a/arch/x86/lib/rwsem.S
30962+++ b/arch/x86/lib/rwsem.S
30963@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30964 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30965 CFI_RESTORE __ASM_REG(dx)
30966 restore_common_regs
30967+ pax_force_retaddr
30968 ret
30969 CFI_ENDPROC
30970 ENDPROC(call_rwsem_down_read_failed)
30971@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30972 movq %rax,%rdi
30973 call rwsem_down_write_failed
30974 restore_common_regs
30975+ pax_force_retaddr
30976 ret
30977 CFI_ENDPROC
30978 ENDPROC(call_rwsem_down_write_failed)
30979@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30980 movq %rax,%rdi
30981 call rwsem_wake
30982 restore_common_regs
30983-1: ret
30984+1: pax_force_retaddr
30985+ ret
30986 CFI_ENDPROC
30987 ENDPROC(call_rwsem_wake)
30988
30989@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30990 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30991 CFI_RESTORE __ASM_REG(dx)
30992 restore_common_regs
30993+ pax_force_retaddr
30994 ret
30995 CFI_ENDPROC
30996 ENDPROC(call_rwsem_downgrade_wake)
30997diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
30998index b30b5eb..2b57052 100644
30999--- a/arch/x86/lib/thunk_64.S
31000+++ b/arch/x86/lib/thunk_64.S
31001@@ -9,6 +9,7 @@
31002 #include <asm/dwarf2.h>
31003 #include <asm/calling.h>
31004 #include <asm/asm.h>
31005+#include <asm/alternative-asm.h>
31006
31007 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31008 .macro THUNK name, func, put_ret_addr_in_rdi=0
31009@@ -16,11 +17,11 @@
31010 \name:
31011 CFI_STARTPROC
31012
31013- /* this one pushes 9 elems, the next one would be %rIP */
31014- SAVE_ARGS
31015+ /* this one pushes 15+1 elems, the next one would be %rIP */
31016+ SAVE_ARGS 8
31017
31018 .if \put_ret_addr_in_rdi
31019- movq_cfi_restore 9*8, rdi
31020+ movq_cfi_restore RIP, rdi
31021 .endif
31022
31023 call \func
31024@@ -47,9 +48,10 @@
31025
31026 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31027 CFI_STARTPROC
31028- SAVE_ARGS
31029+ SAVE_ARGS 8
31030 restore:
31031- RESTORE_ARGS
31032+ RESTORE_ARGS 1,8
31033+ pax_force_retaddr
31034 ret
31035 CFI_ENDPROC
31036 _ASM_NOKPROBE(restore)
31037diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31038index e2f5e21..4b22130 100644
31039--- a/arch/x86/lib/usercopy_32.c
31040+++ b/arch/x86/lib/usercopy_32.c
31041@@ -42,11 +42,13 @@ do { \
31042 int __d0; \
31043 might_fault(); \
31044 __asm__ __volatile__( \
31045+ __COPYUSER_SET_ES \
31046 ASM_STAC "\n" \
31047 "0: rep; stosl\n" \
31048 " movl %2,%0\n" \
31049 "1: rep; stosb\n" \
31050 "2: " ASM_CLAC "\n" \
31051+ __COPYUSER_RESTORE_ES \
31052 ".section .fixup,\"ax\"\n" \
31053 "3: lea 0(%2,%0,4),%0\n" \
31054 " jmp 2b\n" \
31055@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31056
31057 #ifdef CONFIG_X86_INTEL_USERCOPY
31058 static unsigned long
31059-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31060+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31061 {
31062 int d0, d1;
31063 __asm__ __volatile__(
31064@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31065 " .align 2,0x90\n"
31066 "3: movl 0(%4), %%eax\n"
31067 "4: movl 4(%4), %%edx\n"
31068- "5: movl %%eax, 0(%3)\n"
31069- "6: movl %%edx, 4(%3)\n"
31070+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31071+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31072 "7: movl 8(%4), %%eax\n"
31073 "8: movl 12(%4),%%edx\n"
31074- "9: movl %%eax, 8(%3)\n"
31075- "10: movl %%edx, 12(%3)\n"
31076+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31077+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31078 "11: movl 16(%4), %%eax\n"
31079 "12: movl 20(%4), %%edx\n"
31080- "13: movl %%eax, 16(%3)\n"
31081- "14: movl %%edx, 20(%3)\n"
31082+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31083+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31084 "15: movl 24(%4), %%eax\n"
31085 "16: movl 28(%4), %%edx\n"
31086- "17: movl %%eax, 24(%3)\n"
31087- "18: movl %%edx, 28(%3)\n"
31088+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31089+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31090 "19: movl 32(%4), %%eax\n"
31091 "20: movl 36(%4), %%edx\n"
31092- "21: movl %%eax, 32(%3)\n"
31093- "22: movl %%edx, 36(%3)\n"
31094+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31095+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31096 "23: movl 40(%4), %%eax\n"
31097 "24: movl 44(%4), %%edx\n"
31098- "25: movl %%eax, 40(%3)\n"
31099- "26: movl %%edx, 44(%3)\n"
31100+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31101+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31102 "27: movl 48(%4), %%eax\n"
31103 "28: movl 52(%4), %%edx\n"
31104- "29: movl %%eax, 48(%3)\n"
31105- "30: movl %%edx, 52(%3)\n"
31106+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31107+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31108 "31: movl 56(%4), %%eax\n"
31109 "32: movl 60(%4), %%edx\n"
31110- "33: movl %%eax, 56(%3)\n"
31111- "34: movl %%edx, 60(%3)\n"
31112+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31113+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31114 " addl $-64, %0\n"
31115 " addl $64, %4\n"
31116 " addl $64, %3\n"
31117@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31118 " shrl $2, %0\n"
31119 " andl $3, %%eax\n"
31120 " cld\n"
31121+ __COPYUSER_SET_ES
31122 "99: rep; movsl\n"
31123 "36: movl %%eax, %0\n"
31124 "37: rep; movsb\n"
31125 "100:\n"
31126+ __COPYUSER_RESTORE_ES
31127+ ".section .fixup,\"ax\"\n"
31128+ "101: lea 0(%%eax,%0,4),%0\n"
31129+ " jmp 100b\n"
31130+ ".previous\n"
31131+ _ASM_EXTABLE(1b,100b)
31132+ _ASM_EXTABLE(2b,100b)
31133+ _ASM_EXTABLE(3b,100b)
31134+ _ASM_EXTABLE(4b,100b)
31135+ _ASM_EXTABLE(5b,100b)
31136+ _ASM_EXTABLE(6b,100b)
31137+ _ASM_EXTABLE(7b,100b)
31138+ _ASM_EXTABLE(8b,100b)
31139+ _ASM_EXTABLE(9b,100b)
31140+ _ASM_EXTABLE(10b,100b)
31141+ _ASM_EXTABLE(11b,100b)
31142+ _ASM_EXTABLE(12b,100b)
31143+ _ASM_EXTABLE(13b,100b)
31144+ _ASM_EXTABLE(14b,100b)
31145+ _ASM_EXTABLE(15b,100b)
31146+ _ASM_EXTABLE(16b,100b)
31147+ _ASM_EXTABLE(17b,100b)
31148+ _ASM_EXTABLE(18b,100b)
31149+ _ASM_EXTABLE(19b,100b)
31150+ _ASM_EXTABLE(20b,100b)
31151+ _ASM_EXTABLE(21b,100b)
31152+ _ASM_EXTABLE(22b,100b)
31153+ _ASM_EXTABLE(23b,100b)
31154+ _ASM_EXTABLE(24b,100b)
31155+ _ASM_EXTABLE(25b,100b)
31156+ _ASM_EXTABLE(26b,100b)
31157+ _ASM_EXTABLE(27b,100b)
31158+ _ASM_EXTABLE(28b,100b)
31159+ _ASM_EXTABLE(29b,100b)
31160+ _ASM_EXTABLE(30b,100b)
31161+ _ASM_EXTABLE(31b,100b)
31162+ _ASM_EXTABLE(32b,100b)
31163+ _ASM_EXTABLE(33b,100b)
31164+ _ASM_EXTABLE(34b,100b)
31165+ _ASM_EXTABLE(35b,100b)
31166+ _ASM_EXTABLE(36b,100b)
31167+ _ASM_EXTABLE(37b,100b)
31168+ _ASM_EXTABLE(99b,101b)
31169+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31170+ : "1"(to), "2"(from), "0"(size)
31171+ : "eax", "edx", "memory");
31172+ return size;
31173+}
31174+
31175+static unsigned long
31176+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31177+{
31178+ int d0, d1;
31179+ __asm__ __volatile__(
31180+ " .align 2,0x90\n"
31181+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31182+ " cmpl $67, %0\n"
31183+ " jbe 3f\n"
31184+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31185+ " .align 2,0x90\n"
31186+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31187+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31188+ "5: movl %%eax, 0(%3)\n"
31189+ "6: movl %%edx, 4(%3)\n"
31190+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31191+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31192+ "9: movl %%eax, 8(%3)\n"
31193+ "10: movl %%edx, 12(%3)\n"
31194+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31195+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31196+ "13: movl %%eax, 16(%3)\n"
31197+ "14: movl %%edx, 20(%3)\n"
31198+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31199+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31200+ "17: movl %%eax, 24(%3)\n"
31201+ "18: movl %%edx, 28(%3)\n"
31202+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31203+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31204+ "21: movl %%eax, 32(%3)\n"
31205+ "22: movl %%edx, 36(%3)\n"
31206+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31207+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31208+ "25: movl %%eax, 40(%3)\n"
31209+ "26: movl %%edx, 44(%3)\n"
31210+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31211+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31212+ "29: movl %%eax, 48(%3)\n"
31213+ "30: movl %%edx, 52(%3)\n"
31214+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31215+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31216+ "33: movl %%eax, 56(%3)\n"
31217+ "34: movl %%edx, 60(%3)\n"
31218+ " addl $-64, %0\n"
31219+ " addl $64, %4\n"
31220+ " addl $64, %3\n"
31221+ " cmpl $63, %0\n"
31222+ " ja 1b\n"
31223+ "35: movl %0, %%eax\n"
31224+ " shrl $2, %0\n"
31225+ " andl $3, %%eax\n"
31226+ " cld\n"
31227+ "99: rep; "__copyuser_seg" movsl\n"
31228+ "36: movl %%eax, %0\n"
31229+ "37: rep; "__copyuser_seg" movsb\n"
31230+ "100:\n"
31231 ".section .fixup,\"ax\"\n"
31232 "101: lea 0(%%eax,%0,4),%0\n"
31233 " jmp 100b\n"
31234@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31235 int d0, d1;
31236 __asm__ __volatile__(
31237 " .align 2,0x90\n"
31238- "0: movl 32(%4), %%eax\n"
31239+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31240 " cmpl $67, %0\n"
31241 " jbe 2f\n"
31242- "1: movl 64(%4), %%eax\n"
31243+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31244 " .align 2,0x90\n"
31245- "2: movl 0(%4), %%eax\n"
31246- "21: movl 4(%4), %%edx\n"
31247+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31248+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31249 " movl %%eax, 0(%3)\n"
31250 " movl %%edx, 4(%3)\n"
31251- "3: movl 8(%4), %%eax\n"
31252- "31: movl 12(%4),%%edx\n"
31253+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31254+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31255 " movl %%eax, 8(%3)\n"
31256 " movl %%edx, 12(%3)\n"
31257- "4: movl 16(%4), %%eax\n"
31258- "41: movl 20(%4), %%edx\n"
31259+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31260+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31261 " movl %%eax, 16(%3)\n"
31262 " movl %%edx, 20(%3)\n"
31263- "10: movl 24(%4), %%eax\n"
31264- "51: movl 28(%4), %%edx\n"
31265+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31266+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31267 " movl %%eax, 24(%3)\n"
31268 " movl %%edx, 28(%3)\n"
31269- "11: movl 32(%4), %%eax\n"
31270- "61: movl 36(%4), %%edx\n"
31271+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31272+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31273 " movl %%eax, 32(%3)\n"
31274 " movl %%edx, 36(%3)\n"
31275- "12: movl 40(%4), %%eax\n"
31276- "71: movl 44(%4), %%edx\n"
31277+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31278+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31279 " movl %%eax, 40(%3)\n"
31280 " movl %%edx, 44(%3)\n"
31281- "13: movl 48(%4), %%eax\n"
31282- "81: movl 52(%4), %%edx\n"
31283+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31284+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31285 " movl %%eax, 48(%3)\n"
31286 " movl %%edx, 52(%3)\n"
31287- "14: movl 56(%4), %%eax\n"
31288- "91: movl 60(%4), %%edx\n"
31289+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31290+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31291 " movl %%eax, 56(%3)\n"
31292 " movl %%edx, 60(%3)\n"
31293 " addl $-64, %0\n"
31294@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31295 " shrl $2, %0\n"
31296 " andl $3, %%eax\n"
31297 " cld\n"
31298- "6: rep; movsl\n"
31299+ "6: rep; "__copyuser_seg" movsl\n"
31300 " movl %%eax,%0\n"
31301- "7: rep; movsb\n"
31302+ "7: rep; "__copyuser_seg" movsb\n"
31303 "8:\n"
31304 ".section .fixup,\"ax\"\n"
31305 "9: lea 0(%%eax,%0,4),%0\n"
31306@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31307
31308 __asm__ __volatile__(
31309 " .align 2,0x90\n"
31310- "0: movl 32(%4), %%eax\n"
31311+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31312 " cmpl $67, %0\n"
31313 " jbe 2f\n"
31314- "1: movl 64(%4), %%eax\n"
31315+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31316 " .align 2,0x90\n"
31317- "2: movl 0(%4), %%eax\n"
31318- "21: movl 4(%4), %%edx\n"
31319+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31320+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31321 " movnti %%eax, 0(%3)\n"
31322 " movnti %%edx, 4(%3)\n"
31323- "3: movl 8(%4), %%eax\n"
31324- "31: movl 12(%4),%%edx\n"
31325+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31326+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31327 " movnti %%eax, 8(%3)\n"
31328 " movnti %%edx, 12(%3)\n"
31329- "4: movl 16(%4), %%eax\n"
31330- "41: movl 20(%4), %%edx\n"
31331+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31332+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31333 " movnti %%eax, 16(%3)\n"
31334 " movnti %%edx, 20(%3)\n"
31335- "10: movl 24(%4), %%eax\n"
31336- "51: movl 28(%4), %%edx\n"
31337+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31338+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31339 " movnti %%eax, 24(%3)\n"
31340 " movnti %%edx, 28(%3)\n"
31341- "11: movl 32(%4), %%eax\n"
31342- "61: movl 36(%4), %%edx\n"
31343+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31344+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31345 " movnti %%eax, 32(%3)\n"
31346 " movnti %%edx, 36(%3)\n"
31347- "12: movl 40(%4), %%eax\n"
31348- "71: movl 44(%4), %%edx\n"
31349+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31350+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31351 " movnti %%eax, 40(%3)\n"
31352 " movnti %%edx, 44(%3)\n"
31353- "13: movl 48(%4), %%eax\n"
31354- "81: movl 52(%4), %%edx\n"
31355+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31356+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31357 " movnti %%eax, 48(%3)\n"
31358 " movnti %%edx, 52(%3)\n"
31359- "14: movl 56(%4), %%eax\n"
31360- "91: movl 60(%4), %%edx\n"
31361+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31362+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31363 " movnti %%eax, 56(%3)\n"
31364 " movnti %%edx, 60(%3)\n"
31365 " addl $-64, %0\n"
31366@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31367 " shrl $2, %0\n"
31368 " andl $3, %%eax\n"
31369 " cld\n"
31370- "6: rep; movsl\n"
31371+ "6: rep; "__copyuser_seg" movsl\n"
31372 " movl %%eax,%0\n"
31373- "7: rep; movsb\n"
31374+ "7: rep; "__copyuser_seg" movsb\n"
31375 "8:\n"
31376 ".section .fixup,\"ax\"\n"
31377 "9: lea 0(%%eax,%0,4),%0\n"
31378@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31379
31380 __asm__ __volatile__(
31381 " .align 2,0x90\n"
31382- "0: movl 32(%4), %%eax\n"
31383+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31384 " cmpl $67, %0\n"
31385 " jbe 2f\n"
31386- "1: movl 64(%4), %%eax\n"
31387+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31388 " .align 2,0x90\n"
31389- "2: movl 0(%4), %%eax\n"
31390- "21: movl 4(%4), %%edx\n"
31391+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31392+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31393 " movnti %%eax, 0(%3)\n"
31394 " movnti %%edx, 4(%3)\n"
31395- "3: movl 8(%4), %%eax\n"
31396- "31: movl 12(%4),%%edx\n"
31397+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31398+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31399 " movnti %%eax, 8(%3)\n"
31400 " movnti %%edx, 12(%3)\n"
31401- "4: movl 16(%4), %%eax\n"
31402- "41: movl 20(%4), %%edx\n"
31403+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31404+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31405 " movnti %%eax, 16(%3)\n"
31406 " movnti %%edx, 20(%3)\n"
31407- "10: movl 24(%4), %%eax\n"
31408- "51: movl 28(%4), %%edx\n"
31409+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31410+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31411 " movnti %%eax, 24(%3)\n"
31412 " movnti %%edx, 28(%3)\n"
31413- "11: movl 32(%4), %%eax\n"
31414- "61: movl 36(%4), %%edx\n"
31415+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31416+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31417 " movnti %%eax, 32(%3)\n"
31418 " movnti %%edx, 36(%3)\n"
31419- "12: movl 40(%4), %%eax\n"
31420- "71: movl 44(%4), %%edx\n"
31421+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31422+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31423 " movnti %%eax, 40(%3)\n"
31424 " movnti %%edx, 44(%3)\n"
31425- "13: movl 48(%4), %%eax\n"
31426- "81: movl 52(%4), %%edx\n"
31427+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31428+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31429 " movnti %%eax, 48(%3)\n"
31430 " movnti %%edx, 52(%3)\n"
31431- "14: movl 56(%4), %%eax\n"
31432- "91: movl 60(%4), %%edx\n"
31433+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31434+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31435 " movnti %%eax, 56(%3)\n"
31436 " movnti %%edx, 60(%3)\n"
31437 " addl $-64, %0\n"
31438@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31439 " shrl $2, %0\n"
31440 " andl $3, %%eax\n"
31441 " cld\n"
31442- "6: rep; movsl\n"
31443+ "6: rep; "__copyuser_seg" movsl\n"
31444 " movl %%eax,%0\n"
31445- "7: rep; movsb\n"
31446+ "7: rep; "__copyuser_seg" movsb\n"
31447 "8:\n"
31448 ".section .fixup,\"ax\"\n"
31449 "9: lea 0(%%eax,%0,4),%0\n"
31450@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31451 */
31452 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31453 unsigned long size);
31454-unsigned long __copy_user_intel(void __user *to, const void *from,
31455+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31456+ unsigned long size);
31457+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31458 unsigned long size);
31459 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31460 const void __user *from, unsigned long size);
31461 #endif /* CONFIG_X86_INTEL_USERCOPY */
31462
31463 /* Generic arbitrary sized copy. */
31464-#define __copy_user(to, from, size) \
31465+#define __copy_user(to, from, size, prefix, set, restore) \
31466 do { \
31467 int __d0, __d1, __d2; \
31468 __asm__ __volatile__( \
31469+ set \
31470 " cmp $7,%0\n" \
31471 " jbe 1f\n" \
31472 " movl %1,%0\n" \
31473 " negl %0\n" \
31474 " andl $7,%0\n" \
31475 " subl %0,%3\n" \
31476- "4: rep; movsb\n" \
31477+ "4: rep; "prefix"movsb\n" \
31478 " movl %3,%0\n" \
31479 " shrl $2,%0\n" \
31480 " andl $3,%3\n" \
31481 " .align 2,0x90\n" \
31482- "0: rep; movsl\n" \
31483+ "0: rep; "prefix"movsl\n" \
31484 " movl %3,%0\n" \
31485- "1: rep; movsb\n" \
31486+ "1: rep; "prefix"movsb\n" \
31487 "2:\n" \
31488+ restore \
31489 ".section .fixup,\"ax\"\n" \
31490 "5: addl %3,%0\n" \
31491 " jmp 2b\n" \
31492@@ -538,14 +650,14 @@ do { \
31493 " negl %0\n" \
31494 " andl $7,%0\n" \
31495 " subl %0,%3\n" \
31496- "4: rep; movsb\n" \
31497+ "4: rep; "__copyuser_seg"movsb\n" \
31498 " movl %3,%0\n" \
31499 " shrl $2,%0\n" \
31500 " andl $3,%3\n" \
31501 " .align 2,0x90\n" \
31502- "0: rep; movsl\n" \
31503+ "0: rep; "__copyuser_seg"movsl\n" \
31504 " movl %3,%0\n" \
31505- "1: rep; movsb\n" \
31506+ "1: rep; "__copyuser_seg"movsb\n" \
31507 "2:\n" \
31508 ".section .fixup,\"ax\"\n" \
31509 "5: addl %3,%0\n" \
31510@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31511 {
31512 stac();
31513 if (movsl_is_ok(to, from, n))
31514- __copy_user(to, from, n);
31515+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31516 else
31517- n = __copy_user_intel(to, from, n);
31518+ n = __generic_copy_to_user_intel(to, from, n);
31519 clac();
31520 return n;
31521 }
31522@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31523 {
31524 stac();
31525 if (movsl_is_ok(to, from, n))
31526- __copy_user(to, from, n);
31527+ __copy_user(to, from, n, __copyuser_seg, "", "");
31528 else
31529- n = __copy_user_intel((void __user *)to,
31530- (const void *)from, n);
31531+ n = __generic_copy_from_user_intel(to, from, n);
31532 clac();
31533 return n;
31534 }
31535@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31536 if (n > 64 && cpu_has_xmm2)
31537 n = __copy_user_intel_nocache(to, from, n);
31538 else
31539- __copy_user(to, from, n);
31540+ __copy_user(to, from, n, __copyuser_seg, "", "");
31541 #else
31542- __copy_user(to, from, n);
31543+ __copy_user(to, from, n, __copyuser_seg, "", "");
31544 #endif
31545 clac();
31546 return n;
31547 }
31548 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31549
31550-/**
31551- * copy_to_user: - Copy a block of data into user space.
31552- * @to: Destination address, in user space.
31553- * @from: Source address, in kernel space.
31554- * @n: Number of bytes to copy.
31555- *
31556- * Context: User context only. This function may sleep.
31557- *
31558- * Copy data from kernel space to user space.
31559- *
31560- * Returns number of bytes that could not be copied.
31561- * On success, this will be zero.
31562- */
31563-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31564+#ifdef CONFIG_PAX_MEMORY_UDEREF
31565+void __set_fs(mm_segment_t x)
31566 {
31567- if (access_ok(VERIFY_WRITE, to, n))
31568- n = __copy_to_user(to, from, n);
31569- return n;
31570+ switch (x.seg) {
31571+ case 0:
31572+ loadsegment(gs, 0);
31573+ break;
31574+ case TASK_SIZE_MAX:
31575+ loadsegment(gs, __USER_DS);
31576+ break;
31577+ case -1UL:
31578+ loadsegment(gs, __KERNEL_DS);
31579+ break;
31580+ default:
31581+ BUG();
31582+ }
31583 }
31584-EXPORT_SYMBOL(_copy_to_user);
31585+EXPORT_SYMBOL(__set_fs);
31586
31587-/**
31588- * copy_from_user: - Copy a block of data from user space.
31589- * @to: Destination address, in kernel space.
31590- * @from: Source address, in user space.
31591- * @n: Number of bytes to copy.
31592- *
31593- * Context: User context only. This function may sleep.
31594- *
31595- * Copy data from user space to kernel space.
31596- *
31597- * Returns number of bytes that could not be copied.
31598- * On success, this will be zero.
31599- *
31600- * If some data could not be copied, this function will pad the copied
31601- * data to the requested size using zero bytes.
31602- */
31603-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31604+void set_fs(mm_segment_t x)
31605 {
31606- if (access_ok(VERIFY_READ, from, n))
31607- n = __copy_from_user(to, from, n);
31608- else
31609- memset(to, 0, n);
31610- return n;
31611+ current_thread_info()->addr_limit = x;
31612+ __set_fs(x);
31613 }
31614-EXPORT_SYMBOL(_copy_from_user);
31615+EXPORT_SYMBOL(set_fs);
31616+#endif
31617diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31618index c905e89..01ab928 100644
31619--- a/arch/x86/lib/usercopy_64.c
31620+++ b/arch/x86/lib/usercopy_64.c
31621@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31622 might_fault();
31623 /* no memory constraint because it doesn't change any memory gcc knows
31624 about */
31625+ pax_open_userland();
31626 stac();
31627 asm volatile(
31628 " testq %[size8],%[size8]\n"
31629@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31630 _ASM_EXTABLE(0b,3b)
31631 _ASM_EXTABLE(1b,2b)
31632 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31633- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31634+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31635 [zero] "r" (0UL), [eight] "r" (8UL));
31636 clac();
31637+ pax_close_userland();
31638 return size;
31639 }
31640 EXPORT_SYMBOL(__clear_user);
31641@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31642 }
31643 EXPORT_SYMBOL(clear_user);
31644
31645-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31646+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31647 {
31648- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31649- return copy_user_generic((__force void *)to, (__force void *)from, len);
31650- }
31651- return len;
31652+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31653+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31654+ return len;
31655 }
31656 EXPORT_SYMBOL(copy_in_user);
31657
31658@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31659 * it is not necessary to optimize tail handling.
31660 */
31661 __visible unsigned long
31662-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31663+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31664 {
31665 char c;
31666 unsigned zero_len;
31667
31668+ clac();
31669+ pax_close_userland();
31670 for (; len; --len, to++) {
31671 if (__get_user_nocheck(c, from++, sizeof(char)))
31672 break;
31673@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31674 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31675 if (__put_user_nocheck(c, to++, sizeof(char)))
31676 break;
31677- clac();
31678 return len;
31679 }
31680diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31681index ecfdc46..55b9309 100644
31682--- a/arch/x86/mm/Makefile
31683+++ b/arch/x86/mm/Makefile
31684@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31685 obj-$(CONFIG_MEMTEST) += memtest.o
31686
31687 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31688+
31689+quote:="
31690+obj-$(CONFIG_X86_64) += uderef_64.o
31691+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31692diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31693index 903ec1e..c4166b2 100644
31694--- a/arch/x86/mm/extable.c
31695+++ b/arch/x86/mm/extable.c
31696@@ -6,12 +6,24 @@
31697 static inline unsigned long
31698 ex_insn_addr(const struct exception_table_entry *x)
31699 {
31700- return (unsigned long)&x->insn + x->insn;
31701+ unsigned long reloc = 0;
31702+
31703+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31704+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31705+#endif
31706+
31707+ return (unsigned long)&x->insn + x->insn + reloc;
31708 }
31709 static inline unsigned long
31710 ex_fixup_addr(const struct exception_table_entry *x)
31711 {
31712- return (unsigned long)&x->fixup + x->fixup;
31713+ unsigned long reloc = 0;
31714+
31715+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31716+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31717+#endif
31718+
31719+ return (unsigned long)&x->fixup + x->fixup + reloc;
31720 }
31721
31722 int fixup_exception(struct pt_regs *regs)
31723@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31724 unsigned long new_ip;
31725
31726 #ifdef CONFIG_PNPBIOS
31727- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31728+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31729 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31730 extern u32 pnp_bios_is_utter_crap;
31731 pnp_bios_is_utter_crap = 1;
31732@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31733 i += 4;
31734 p->fixup -= i;
31735 i += 4;
31736+
31737+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31738+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31739+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31740+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31741+#endif
31742+
31743 }
31744 }
31745
31746diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31747index e3ff27a..f38f7c0 100644
31748--- a/arch/x86/mm/fault.c
31749+++ b/arch/x86/mm/fault.c
31750@@ -13,12 +13,19 @@
31751 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31752 #include <linux/prefetch.h> /* prefetchw */
31753 #include <linux/context_tracking.h> /* exception_enter(), ... */
31754+#include <linux/unistd.h>
31755+#include <linux/compiler.h>
31756
31757 #include <asm/traps.h> /* dotraplinkage, ... */
31758 #include <asm/pgalloc.h> /* pgd_*(), ... */
31759 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31760 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31761 #include <asm/vsyscall.h> /* emulate_vsyscall */
31762+#include <asm/tlbflush.h>
31763+
31764+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31765+#include <asm/stacktrace.h>
31766+#endif
31767
31768 #define CREATE_TRACE_POINTS
31769 #include <asm/trace/exceptions.h>
31770@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31771 int ret = 0;
31772
31773 /* kprobe_running() needs smp_processor_id() */
31774- if (kprobes_built_in() && !user_mode_vm(regs)) {
31775+ if (kprobes_built_in() && !user_mode(regs)) {
31776 preempt_disable();
31777 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31778 ret = 1;
31779@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31780 return !instr_lo || (instr_lo>>1) == 1;
31781 case 0x00:
31782 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31783- if (probe_kernel_address(instr, opcode))
31784+ if (user_mode(regs)) {
31785+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31786+ return 0;
31787+ } else if (probe_kernel_address(instr, opcode))
31788 return 0;
31789
31790 *prefetch = (instr_lo == 0xF) &&
31791@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31792 while (instr < max_instr) {
31793 unsigned char opcode;
31794
31795- if (probe_kernel_address(instr, opcode))
31796+ if (user_mode(regs)) {
31797+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31798+ break;
31799+ } else if (probe_kernel_address(instr, opcode))
31800 break;
31801
31802 instr++;
31803@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31804 force_sig_info(si_signo, &info, tsk);
31805 }
31806
31807+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31808+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31809+#endif
31810+
31811+#ifdef CONFIG_PAX_EMUTRAMP
31812+static int pax_handle_fetch_fault(struct pt_regs *regs);
31813+#endif
31814+
31815+#ifdef CONFIG_PAX_PAGEEXEC
31816+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31817+{
31818+ pgd_t *pgd;
31819+ pud_t *pud;
31820+ pmd_t *pmd;
31821+
31822+ pgd = pgd_offset(mm, address);
31823+ if (!pgd_present(*pgd))
31824+ return NULL;
31825+ pud = pud_offset(pgd, address);
31826+ if (!pud_present(*pud))
31827+ return NULL;
31828+ pmd = pmd_offset(pud, address);
31829+ if (!pmd_present(*pmd))
31830+ return NULL;
31831+ return pmd;
31832+}
31833+#endif
31834+
31835 DEFINE_SPINLOCK(pgd_lock);
31836 LIST_HEAD(pgd_list);
31837
31838@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31839 for (address = VMALLOC_START & PMD_MASK;
31840 address >= TASK_SIZE && address < FIXADDR_TOP;
31841 address += PMD_SIZE) {
31842+
31843+#ifdef CONFIG_PAX_PER_CPU_PGD
31844+ unsigned long cpu;
31845+#else
31846 struct page *page;
31847+#endif
31848
31849 spin_lock(&pgd_lock);
31850+
31851+#ifdef CONFIG_PAX_PER_CPU_PGD
31852+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31853+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31854+ pmd_t *ret;
31855+
31856+ ret = vmalloc_sync_one(pgd, address);
31857+ if (!ret)
31858+ break;
31859+ pgd = get_cpu_pgd(cpu, kernel);
31860+#else
31861 list_for_each_entry(page, &pgd_list, lru) {
31862+ pgd_t *pgd;
31863 spinlock_t *pgt_lock;
31864 pmd_t *ret;
31865
31866@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31867 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31868
31869 spin_lock(pgt_lock);
31870- ret = vmalloc_sync_one(page_address(page), address);
31871+ pgd = page_address(page);
31872+#endif
31873+
31874+ ret = vmalloc_sync_one(pgd, address);
31875+
31876+#ifndef CONFIG_PAX_PER_CPU_PGD
31877 spin_unlock(pgt_lock);
31878+#endif
31879
31880 if (!ret)
31881 break;
31882@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31883 * an interrupt in the middle of a task switch..
31884 */
31885 pgd_paddr = read_cr3();
31886+
31887+#ifdef CONFIG_PAX_PER_CPU_PGD
31888+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31889+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31890+#endif
31891+
31892 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31893 if (!pmd_k)
31894 return -1;
31895@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31896 * happen within a race in page table update. In the later
31897 * case just flush:
31898 */
31899- pgd = pgd_offset(current->active_mm, address);
31900+
31901 pgd_ref = pgd_offset_k(address);
31902 if (pgd_none(*pgd_ref))
31903 return -1;
31904
31905+#ifdef CONFIG_PAX_PER_CPU_PGD
31906+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31907+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31908+ if (pgd_none(*pgd)) {
31909+ set_pgd(pgd, *pgd_ref);
31910+ arch_flush_lazy_mmu_mode();
31911+ } else {
31912+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31913+ }
31914+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31915+#else
31916+ pgd = pgd_offset(current->active_mm, address);
31917+#endif
31918+
31919 if (pgd_none(*pgd)) {
31920 set_pgd(pgd, *pgd_ref);
31921 arch_flush_lazy_mmu_mode();
31922@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31923 static int is_errata100(struct pt_regs *regs, unsigned long address)
31924 {
31925 #ifdef CONFIG_X86_64
31926- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31927+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31928 return 1;
31929 #endif
31930 return 0;
31931@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31932 }
31933
31934 static const char nx_warning[] = KERN_CRIT
31935-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31936+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31937 static const char smep_warning[] = KERN_CRIT
31938-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31939+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31940
31941 static void
31942 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31943@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31944 if (!oops_may_print())
31945 return;
31946
31947- if (error_code & PF_INSTR) {
31948+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31949 unsigned int level;
31950 pgd_t *pgd;
31951 pte_t *pte;
31952@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31953 pte = lookup_address_in_pgd(pgd, address, &level);
31954
31955 if (pte && pte_present(*pte) && !pte_exec(*pte))
31956- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31957+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31958 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31959 (pgd_flags(*pgd) & _PAGE_USER) &&
31960 (read_cr4() & X86_CR4_SMEP))
31961- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31962+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31963 }
31964
31965+#ifdef CONFIG_PAX_KERNEXEC
31966+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31967+ if (current->signal->curr_ip)
31968+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31969+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31970+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31971+ else
31972+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31973+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31974+ }
31975+#endif
31976+
31977 printk(KERN_ALERT "BUG: unable to handle kernel ");
31978 if (address < PAGE_SIZE)
31979 printk(KERN_CONT "NULL pointer dereference");
31980@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31981 return;
31982 }
31983 #endif
31984+
31985+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31986+ if (pax_is_fetch_fault(regs, error_code, address)) {
31987+
31988+#ifdef CONFIG_PAX_EMUTRAMP
31989+ switch (pax_handle_fetch_fault(regs)) {
31990+ case 2:
31991+ return;
31992+ }
31993+#endif
31994+
31995+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31996+ do_group_exit(SIGKILL);
31997+ }
31998+#endif
31999+
32000 /* Kernel addresses are always protection faults: */
32001 if (address >= TASK_SIZE)
32002 error_code |= PF_PROT;
32003@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32004 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32005 printk(KERN_ERR
32006 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32007- tsk->comm, tsk->pid, address);
32008+ tsk->comm, task_pid_nr(tsk), address);
32009 code = BUS_MCEERR_AR;
32010 }
32011 #endif
32012@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32013 return 1;
32014 }
32015
32016+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32017+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32018+{
32019+ pte_t *pte;
32020+ pmd_t *pmd;
32021+ spinlock_t *ptl;
32022+ unsigned char pte_mask;
32023+
32024+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32025+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32026+ return 0;
32027+
32028+ /* PaX: it's our fault, let's handle it if we can */
32029+
32030+ /* PaX: take a look at read faults before acquiring any locks */
32031+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32032+ /* instruction fetch attempt from a protected page in user mode */
32033+ up_read(&mm->mmap_sem);
32034+
32035+#ifdef CONFIG_PAX_EMUTRAMP
32036+ switch (pax_handle_fetch_fault(regs)) {
32037+ case 2:
32038+ return 1;
32039+ }
32040+#endif
32041+
32042+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32043+ do_group_exit(SIGKILL);
32044+ }
32045+
32046+ pmd = pax_get_pmd(mm, address);
32047+ if (unlikely(!pmd))
32048+ return 0;
32049+
32050+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32051+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32052+ pte_unmap_unlock(pte, ptl);
32053+ return 0;
32054+ }
32055+
32056+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32057+ /* write attempt to a protected page in user mode */
32058+ pte_unmap_unlock(pte, ptl);
32059+ return 0;
32060+ }
32061+
32062+#ifdef CONFIG_SMP
32063+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32064+#else
32065+ if (likely(address > get_limit(regs->cs)))
32066+#endif
32067+ {
32068+ set_pte(pte, pte_mkread(*pte));
32069+ __flush_tlb_one(address);
32070+ pte_unmap_unlock(pte, ptl);
32071+ up_read(&mm->mmap_sem);
32072+ return 1;
32073+ }
32074+
32075+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32076+
32077+ /*
32078+ * PaX: fill DTLB with user rights and retry
32079+ */
32080+ __asm__ __volatile__ (
32081+ "orb %2,(%1)\n"
32082+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32083+/*
32084+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32085+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32086+ * page fault when examined during a TLB load attempt. this is true not only
32087+ * for PTEs holding a non-present entry but also present entries that will
32088+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32089+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32090+ * for our target pages since their PTEs are simply not in the TLBs at all.
32091+
32092+ * the best thing in omitting it is that we gain around 15-20% speed in the
32093+ * fast path of the page fault handler and can get rid of tracing since we
32094+ * can no longer flush unintended entries.
32095+ */
32096+ "invlpg (%0)\n"
32097+#endif
32098+ __copyuser_seg"testb $0,(%0)\n"
32099+ "xorb %3,(%1)\n"
32100+ :
32101+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32102+ : "memory", "cc");
32103+ pte_unmap_unlock(pte, ptl);
32104+ up_read(&mm->mmap_sem);
32105+ return 1;
32106+}
32107+#endif
32108+
32109 /*
32110 * Handle a spurious fault caused by a stale TLB entry.
32111 *
32112@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32113 static inline int
32114 access_error(unsigned long error_code, struct vm_area_struct *vma)
32115 {
32116+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32117+ return 1;
32118+
32119 if (error_code & PF_WRITE) {
32120 /* write, present and write, not present: */
32121 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32122@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32123 if (error_code & PF_USER)
32124 return false;
32125
32126- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32127+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32128 return false;
32129
32130 return true;
32131@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32132 tsk = current;
32133 mm = tsk->mm;
32134
32135+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32136+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32137+ if (!search_exception_tables(regs->ip)) {
32138+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32139+ bad_area_nosemaphore(regs, error_code, address);
32140+ return;
32141+ }
32142+ if (address < pax_user_shadow_base) {
32143+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32144+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32145+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32146+ } else
32147+ address -= pax_user_shadow_base;
32148+ }
32149+#endif
32150+
32151 /*
32152 * Detect and handle instructions that would cause a page fault for
32153 * both a tracked kernel page and a userspace page.
32154@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32155 * User-mode registers count as a user access even for any
32156 * potential system fault or CPU buglet:
32157 */
32158- if (user_mode_vm(regs)) {
32159+ if (user_mode(regs)) {
32160 local_irq_enable();
32161 error_code |= PF_USER;
32162 flags |= FAULT_FLAG_USER;
32163@@ -1187,6 +1411,11 @@ retry:
32164 might_sleep();
32165 }
32166
32167+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32168+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32169+ return;
32170+#endif
32171+
32172 vma = find_vma(mm, address);
32173 if (unlikely(!vma)) {
32174 bad_area(regs, error_code, address);
32175@@ -1198,18 +1427,24 @@ retry:
32176 bad_area(regs, error_code, address);
32177 return;
32178 }
32179- if (error_code & PF_USER) {
32180- /*
32181- * Accessing the stack below %sp is always a bug.
32182- * The large cushion allows instructions like enter
32183- * and pusha to work. ("enter $65535, $31" pushes
32184- * 32 pointers and then decrements %sp by 65535.)
32185- */
32186- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32187- bad_area(regs, error_code, address);
32188- return;
32189- }
32190+ /*
32191+ * Accessing the stack below %sp is always a bug.
32192+ * The large cushion allows instructions like enter
32193+ * and pusha to work. ("enter $65535, $31" pushes
32194+ * 32 pointers and then decrements %sp by 65535.)
32195+ */
32196+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32197+ bad_area(regs, error_code, address);
32198+ return;
32199 }
32200+
32201+#ifdef CONFIG_PAX_SEGMEXEC
32202+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32203+ bad_area(regs, error_code, address);
32204+ return;
32205+ }
32206+#endif
32207+
32208 if (unlikely(expand_stack(vma, address))) {
32209 bad_area(regs, error_code, address);
32210 return;
32211@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32212 }
32213 NOKPROBE_SYMBOL(trace_do_page_fault);
32214 #endif /* CONFIG_TRACING */
32215+
32216+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32217+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32218+{
32219+ struct mm_struct *mm = current->mm;
32220+ unsigned long ip = regs->ip;
32221+
32222+ if (v8086_mode(regs))
32223+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32224+
32225+#ifdef CONFIG_PAX_PAGEEXEC
32226+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32227+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32228+ return true;
32229+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32230+ return true;
32231+ return false;
32232+ }
32233+#endif
32234+
32235+#ifdef CONFIG_PAX_SEGMEXEC
32236+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32237+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32238+ return true;
32239+ return false;
32240+ }
32241+#endif
32242+
32243+ return false;
32244+}
32245+#endif
32246+
32247+#ifdef CONFIG_PAX_EMUTRAMP
32248+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32249+{
32250+ int err;
32251+
32252+ do { /* PaX: libffi trampoline emulation */
32253+ unsigned char mov, jmp;
32254+ unsigned int addr1, addr2;
32255+
32256+#ifdef CONFIG_X86_64
32257+ if ((regs->ip + 9) >> 32)
32258+ break;
32259+#endif
32260+
32261+ err = get_user(mov, (unsigned char __user *)regs->ip);
32262+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32263+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32264+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32265+
32266+ if (err)
32267+ break;
32268+
32269+ if (mov == 0xB8 && jmp == 0xE9) {
32270+ regs->ax = addr1;
32271+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32272+ return 2;
32273+ }
32274+ } while (0);
32275+
32276+ do { /* PaX: gcc trampoline emulation #1 */
32277+ unsigned char mov1, mov2;
32278+ unsigned short jmp;
32279+ unsigned int addr1, addr2;
32280+
32281+#ifdef CONFIG_X86_64
32282+ if ((regs->ip + 11) >> 32)
32283+ break;
32284+#endif
32285+
32286+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32287+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32288+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32289+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32290+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32291+
32292+ if (err)
32293+ break;
32294+
32295+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32296+ regs->cx = addr1;
32297+ regs->ax = addr2;
32298+ regs->ip = addr2;
32299+ return 2;
32300+ }
32301+ } while (0);
32302+
32303+ do { /* PaX: gcc trampoline emulation #2 */
32304+ unsigned char mov, jmp;
32305+ unsigned int addr1, addr2;
32306+
32307+#ifdef CONFIG_X86_64
32308+ if ((regs->ip + 9) >> 32)
32309+ break;
32310+#endif
32311+
32312+ err = get_user(mov, (unsigned char __user *)regs->ip);
32313+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32314+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32315+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32316+
32317+ if (err)
32318+ break;
32319+
32320+ if (mov == 0xB9 && jmp == 0xE9) {
32321+ regs->cx = addr1;
32322+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32323+ return 2;
32324+ }
32325+ } while (0);
32326+
32327+ return 1; /* PaX in action */
32328+}
32329+
32330+#ifdef CONFIG_X86_64
32331+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32332+{
32333+ int err;
32334+
32335+ do { /* PaX: libffi trampoline emulation */
32336+ unsigned short mov1, mov2, jmp1;
32337+ unsigned char stcclc, jmp2;
32338+ unsigned long addr1, addr2;
32339+
32340+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32341+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32342+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32343+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32344+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32345+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32346+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32347+
32348+ if (err)
32349+ break;
32350+
32351+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32352+ regs->r11 = addr1;
32353+ regs->r10 = addr2;
32354+ if (stcclc == 0xF8)
32355+ regs->flags &= ~X86_EFLAGS_CF;
32356+ else
32357+ regs->flags |= X86_EFLAGS_CF;
32358+ regs->ip = addr1;
32359+ return 2;
32360+ }
32361+ } while (0);
32362+
32363+ do { /* PaX: gcc trampoline emulation #1 */
32364+ unsigned short mov1, mov2, jmp1;
32365+ unsigned char jmp2;
32366+ unsigned int addr1;
32367+ unsigned long addr2;
32368+
32369+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32370+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32371+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32372+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32373+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32374+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32375+
32376+ if (err)
32377+ break;
32378+
32379+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32380+ regs->r11 = addr1;
32381+ regs->r10 = addr2;
32382+ regs->ip = addr1;
32383+ return 2;
32384+ }
32385+ } while (0);
32386+
32387+ do { /* PaX: gcc trampoline emulation #2 */
32388+ unsigned short mov1, mov2, jmp1;
32389+ unsigned char jmp2;
32390+ unsigned long addr1, addr2;
32391+
32392+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32393+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32394+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32395+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32396+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32397+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32398+
32399+ if (err)
32400+ break;
32401+
32402+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32403+ regs->r11 = addr1;
32404+ regs->r10 = addr2;
32405+ regs->ip = addr1;
32406+ return 2;
32407+ }
32408+ } while (0);
32409+
32410+ return 1; /* PaX in action */
32411+}
32412+#endif
32413+
32414+/*
32415+ * PaX: decide what to do with offenders (regs->ip = fault address)
32416+ *
32417+ * returns 1 when task should be killed
32418+ * 2 when gcc trampoline was detected
32419+ */
32420+static int pax_handle_fetch_fault(struct pt_regs *regs)
32421+{
32422+ if (v8086_mode(regs))
32423+ return 1;
32424+
32425+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32426+ return 1;
32427+
32428+#ifdef CONFIG_X86_32
32429+ return pax_handle_fetch_fault_32(regs);
32430+#else
32431+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32432+ return pax_handle_fetch_fault_32(regs);
32433+ else
32434+ return pax_handle_fetch_fault_64(regs);
32435+#endif
32436+}
32437+#endif
32438+
32439+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32440+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32441+{
32442+ long i;
32443+
32444+ printk(KERN_ERR "PAX: bytes at PC: ");
32445+ for (i = 0; i < 20; i++) {
32446+ unsigned char c;
32447+ if (get_user(c, (unsigned char __force_user *)pc+i))
32448+ printk(KERN_CONT "?? ");
32449+ else
32450+ printk(KERN_CONT "%02x ", c);
32451+ }
32452+ printk("\n");
32453+
32454+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32455+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32456+ unsigned long c;
32457+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32458+#ifdef CONFIG_X86_32
32459+ printk(KERN_CONT "???????? ");
32460+#else
32461+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32462+ printk(KERN_CONT "???????? ???????? ");
32463+ else
32464+ printk(KERN_CONT "???????????????? ");
32465+#endif
32466+ } else {
32467+#ifdef CONFIG_X86_64
32468+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32469+ printk(KERN_CONT "%08x ", (unsigned int)c);
32470+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32471+ } else
32472+#endif
32473+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32474+ }
32475+ }
32476+ printk("\n");
32477+}
32478+#endif
32479+
32480+/**
32481+ * probe_kernel_write(): safely attempt to write to a location
32482+ * @dst: address to write to
32483+ * @src: pointer to the data that shall be written
32484+ * @size: size of the data chunk
32485+ *
32486+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32487+ * happens, handle that and return -EFAULT.
32488+ */
32489+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32490+{
32491+ long ret;
32492+ mm_segment_t old_fs = get_fs();
32493+
32494+ set_fs(KERNEL_DS);
32495+ pagefault_disable();
32496+ pax_open_kernel();
32497+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32498+ pax_close_kernel();
32499+ pagefault_enable();
32500+ set_fs(old_fs);
32501+
32502+ return ret ? -EFAULT : 0;
32503+}
32504diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32505index 224b142..c2c9423 100644
32506--- a/arch/x86/mm/gup.c
32507+++ b/arch/x86/mm/gup.c
32508@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32509 addr = start;
32510 len = (unsigned long) nr_pages << PAGE_SHIFT;
32511 end = start + len;
32512- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32513+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32514 (void __user *)start, len)))
32515 return 0;
32516
32517@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32518 goto slow_irqon;
32519 #endif
32520
32521+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32522+ (void __user *)start, len)))
32523+ return 0;
32524+
32525 /*
32526 * XXX: batch / limit 'nr', to avoid large irq off latency
32527 * needs some instrumenting to determine the common sizes used by
32528diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32529index 4500142..53a363c 100644
32530--- a/arch/x86/mm/highmem_32.c
32531+++ b/arch/x86/mm/highmem_32.c
32532@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32533 idx = type + KM_TYPE_NR*smp_processor_id();
32534 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32535 BUG_ON(!pte_none(*(kmap_pte-idx)));
32536+
32537+ pax_open_kernel();
32538 set_pte(kmap_pte-idx, mk_pte(page, prot));
32539+ pax_close_kernel();
32540+
32541 arch_flush_lazy_mmu_mode();
32542
32543 return (void *)vaddr;
32544diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32545index 006cc91..bf05a83 100644
32546--- a/arch/x86/mm/hugetlbpage.c
32547+++ b/arch/x86/mm/hugetlbpage.c
32548@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32549 #ifdef CONFIG_HUGETLB_PAGE
32550 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32551 unsigned long addr, unsigned long len,
32552- unsigned long pgoff, unsigned long flags)
32553+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32554 {
32555 struct hstate *h = hstate_file(file);
32556 struct vm_unmapped_area_info info;
32557-
32558+
32559 info.flags = 0;
32560 info.length = len;
32561 info.low_limit = current->mm->mmap_legacy_base;
32562 info.high_limit = TASK_SIZE;
32563 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32564 info.align_offset = 0;
32565+ info.threadstack_offset = offset;
32566 return vm_unmapped_area(&info);
32567 }
32568
32569 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32570 unsigned long addr0, unsigned long len,
32571- unsigned long pgoff, unsigned long flags)
32572+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32573 {
32574 struct hstate *h = hstate_file(file);
32575 struct vm_unmapped_area_info info;
32576@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32577 info.high_limit = current->mm->mmap_base;
32578 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32579 info.align_offset = 0;
32580+ info.threadstack_offset = offset;
32581 addr = vm_unmapped_area(&info);
32582
32583 /*
32584@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32585 VM_BUG_ON(addr != -ENOMEM);
32586 info.flags = 0;
32587 info.low_limit = TASK_UNMAPPED_BASE;
32588+
32589+#ifdef CONFIG_PAX_RANDMMAP
32590+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32591+ info.low_limit += current->mm->delta_mmap;
32592+#endif
32593+
32594 info.high_limit = TASK_SIZE;
32595 addr = vm_unmapped_area(&info);
32596 }
32597@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32598 struct hstate *h = hstate_file(file);
32599 struct mm_struct *mm = current->mm;
32600 struct vm_area_struct *vma;
32601+ unsigned long pax_task_size = TASK_SIZE;
32602+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32603
32604 if (len & ~huge_page_mask(h))
32605 return -EINVAL;
32606- if (len > TASK_SIZE)
32607+
32608+#ifdef CONFIG_PAX_SEGMEXEC
32609+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32610+ pax_task_size = SEGMEXEC_TASK_SIZE;
32611+#endif
32612+
32613+ pax_task_size -= PAGE_SIZE;
32614+
32615+ if (len > pax_task_size)
32616 return -ENOMEM;
32617
32618 if (flags & MAP_FIXED) {
32619@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32620 return addr;
32621 }
32622
32623+#ifdef CONFIG_PAX_RANDMMAP
32624+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32625+#endif
32626+
32627 if (addr) {
32628 addr = ALIGN(addr, huge_page_size(h));
32629 vma = find_vma(mm, addr);
32630- if (TASK_SIZE - len >= addr &&
32631- (!vma || addr + len <= vma->vm_start))
32632+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32633 return addr;
32634 }
32635 if (mm->get_unmapped_area == arch_get_unmapped_area)
32636 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32637- pgoff, flags);
32638+ pgoff, flags, offset);
32639 else
32640 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32641- pgoff, flags);
32642+ pgoff, flags, offset);
32643 }
32644 #endif /* CONFIG_HUGETLB_PAGE */
32645
32646diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32647index 079c3b6..7069023 100644
32648--- a/arch/x86/mm/init.c
32649+++ b/arch/x86/mm/init.c
32650@@ -4,6 +4,7 @@
32651 #include <linux/swap.h>
32652 #include <linux/memblock.h>
32653 #include <linux/bootmem.h> /* for max_low_pfn */
32654+#include <linux/tboot.h>
32655
32656 #include <asm/cacheflush.h>
32657 #include <asm/e820.h>
32658@@ -17,6 +18,8 @@
32659 #include <asm/proto.h>
32660 #include <asm/dma.h> /* for MAX_DMA_PFN */
32661 #include <asm/microcode.h>
32662+#include <asm/desc.h>
32663+#include <asm/bios_ebda.h>
32664
32665 /*
32666 * We need to define the tracepoints somewhere, and tlb.c
32667@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32668 early_ioremap_page_table_range_init();
32669 #endif
32670
32671+#ifdef CONFIG_PAX_PER_CPU_PGD
32672+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32673+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32674+ KERNEL_PGD_PTRS);
32675+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32676+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32677+ KERNEL_PGD_PTRS);
32678+ load_cr3(get_cpu_pgd(0, kernel));
32679+#else
32680 load_cr3(swapper_pg_dir);
32681+#endif
32682+
32683 __flush_tlb_all();
32684
32685 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32686@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32687 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32688 * mmio resources as well as potential bios/acpi data regions.
32689 */
32690+
32691+#ifdef CONFIG_GRKERNSEC_KMEM
32692+static unsigned int ebda_start __read_only;
32693+static unsigned int ebda_end __read_only;
32694+#endif
32695+
32696 int devmem_is_allowed(unsigned long pagenr)
32697 {
32698- if (pagenr < 256)
32699+#ifdef CONFIG_GRKERNSEC_KMEM
32700+ /* allow BDA */
32701+ if (!pagenr)
32702 return 1;
32703+ /* allow EBDA */
32704+ if (pagenr >= ebda_start && pagenr < ebda_end)
32705+ return 1;
32706+ /* if tboot is in use, allow access to its hardcoded serial log range */
32707+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32708+ return 1;
32709+#else
32710+ if (!pagenr)
32711+ return 1;
32712+#ifdef CONFIG_VM86
32713+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32714+ return 1;
32715+#endif
32716+#endif
32717+
32718+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32719+ return 1;
32720+#ifdef CONFIG_GRKERNSEC_KMEM
32721+ /* throw out everything else below 1MB */
32722+ if (pagenr <= 256)
32723+ return 0;
32724+#endif
32725 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32726 return 0;
32727 if (!page_is_ram(pagenr))
32728@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32729 #endif
32730 }
32731
32732+#ifdef CONFIG_GRKERNSEC_KMEM
32733+static inline void gr_init_ebda(void)
32734+{
32735+ unsigned int ebda_addr;
32736+ unsigned int ebda_size = 0;
32737+
32738+ ebda_addr = get_bios_ebda();
32739+ if (ebda_addr) {
32740+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32741+ ebda_size <<= 10;
32742+ }
32743+ if (ebda_addr && ebda_size) {
32744+ ebda_start = ebda_addr >> PAGE_SHIFT;
32745+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32746+ } else {
32747+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32748+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32749+ }
32750+}
32751+#else
32752+static inline void gr_init_ebda(void) { }
32753+#endif
32754+
32755 void free_initmem(void)
32756 {
32757+#ifdef CONFIG_PAX_KERNEXEC
32758+#ifdef CONFIG_X86_32
32759+ /* PaX: limit KERNEL_CS to actual size */
32760+ unsigned long addr, limit;
32761+ struct desc_struct d;
32762+ int cpu;
32763+#else
32764+ pgd_t *pgd;
32765+ pud_t *pud;
32766+ pmd_t *pmd;
32767+ unsigned long addr, end;
32768+#endif
32769+#endif
32770+
32771+ gr_init_ebda();
32772+
32773+#ifdef CONFIG_PAX_KERNEXEC
32774+#ifdef CONFIG_X86_32
32775+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32776+ limit = (limit - 1UL) >> PAGE_SHIFT;
32777+
32778+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32779+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32780+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32781+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32782+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32783+ }
32784+
32785+ /* PaX: make KERNEL_CS read-only */
32786+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32787+ if (!paravirt_enabled())
32788+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32789+/*
32790+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32791+ pgd = pgd_offset_k(addr);
32792+ pud = pud_offset(pgd, addr);
32793+ pmd = pmd_offset(pud, addr);
32794+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32795+ }
32796+*/
32797+#ifdef CONFIG_X86_PAE
32798+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32799+/*
32800+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32801+ pgd = pgd_offset_k(addr);
32802+ pud = pud_offset(pgd, addr);
32803+ pmd = pmd_offset(pud, addr);
32804+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32805+ }
32806+*/
32807+#endif
32808+
32809+#ifdef CONFIG_MODULES
32810+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32811+#endif
32812+
32813+#else
32814+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32815+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32816+ pgd = pgd_offset_k(addr);
32817+ pud = pud_offset(pgd, addr);
32818+ pmd = pmd_offset(pud, addr);
32819+ if (!pmd_present(*pmd))
32820+ continue;
32821+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32822+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32823+ else
32824+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32825+ }
32826+
32827+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32828+ end = addr + KERNEL_IMAGE_SIZE;
32829+ for (; addr < end; addr += PMD_SIZE) {
32830+ pgd = pgd_offset_k(addr);
32831+ pud = pud_offset(pgd, addr);
32832+ pmd = pmd_offset(pud, addr);
32833+ if (!pmd_present(*pmd))
32834+ continue;
32835+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32836+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32837+ }
32838+#endif
32839+
32840+ flush_tlb_all();
32841+#endif
32842+
32843 free_init_pages("unused kernel",
32844 (unsigned long)(&__init_begin),
32845 (unsigned long)(&__init_end));
32846diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32847index c8140e1..59257fc 100644
32848--- a/arch/x86/mm/init_32.c
32849+++ b/arch/x86/mm/init_32.c
32850@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32851 bool __read_mostly __vmalloc_start_set = false;
32852
32853 /*
32854- * Creates a middle page table and puts a pointer to it in the
32855- * given global directory entry. This only returns the gd entry
32856- * in non-PAE compilation mode, since the middle layer is folded.
32857- */
32858-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32859-{
32860- pud_t *pud;
32861- pmd_t *pmd_table;
32862-
32863-#ifdef CONFIG_X86_PAE
32864- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32865- pmd_table = (pmd_t *)alloc_low_page();
32866- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32867- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32868- pud = pud_offset(pgd, 0);
32869- BUG_ON(pmd_table != pmd_offset(pud, 0));
32870-
32871- return pmd_table;
32872- }
32873-#endif
32874- pud = pud_offset(pgd, 0);
32875- pmd_table = pmd_offset(pud, 0);
32876-
32877- return pmd_table;
32878-}
32879-
32880-/*
32881 * Create a page table and place a pointer to it in a middle page
32882 * directory entry:
32883 */
32884@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32885 pte_t *page_table = (pte_t *)alloc_low_page();
32886
32887 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32888+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32889+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32890+#else
32891 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32892+#endif
32893 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32894 }
32895
32896 return pte_offset_kernel(pmd, 0);
32897 }
32898
32899+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32900+{
32901+ pud_t *pud;
32902+ pmd_t *pmd_table;
32903+
32904+ pud = pud_offset(pgd, 0);
32905+ pmd_table = pmd_offset(pud, 0);
32906+
32907+ return pmd_table;
32908+}
32909+
32910 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32911 {
32912 int pgd_idx = pgd_index(vaddr);
32913@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32914 int pgd_idx, pmd_idx;
32915 unsigned long vaddr;
32916 pgd_t *pgd;
32917+ pud_t *pud;
32918 pmd_t *pmd;
32919 pte_t *pte = NULL;
32920 unsigned long count = page_table_range_init_count(start, end);
32921@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32922 pgd = pgd_base + pgd_idx;
32923
32924 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32925- pmd = one_md_table_init(pgd);
32926- pmd = pmd + pmd_index(vaddr);
32927+ pud = pud_offset(pgd, vaddr);
32928+ pmd = pmd_offset(pud, vaddr);
32929+
32930+#ifdef CONFIG_X86_PAE
32931+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32932+#endif
32933+
32934 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32935 pmd++, pmd_idx++) {
32936 pte = page_table_kmap_check(one_page_table_init(pmd),
32937@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32938 }
32939 }
32940
32941-static inline int is_kernel_text(unsigned long addr)
32942+static inline int is_kernel_text(unsigned long start, unsigned long end)
32943 {
32944- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32945- return 1;
32946- return 0;
32947+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32948+ end <= ktla_ktva((unsigned long)_stext)) &&
32949+ (start >= ktla_ktva((unsigned long)_einittext) ||
32950+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32951+
32952+#ifdef CONFIG_ACPI_SLEEP
32953+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32954+#endif
32955+
32956+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32957+ return 0;
32958+ return 1;
32959 }
32960
32961 /*
32962@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32963 unsigned long last_map_addr = end;
32964 unsigned long start_pfn, end_pfn;
32965 pgd_t *pgd_base = swapper_pg_dir;
32966- int pgd_idx, pmd_idx, pte_ofs;
32967+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32968 unsigned long pfn;
32969 pgd_t *pgd;
32970+ pud_t *pud;
32971 pmd_t *pmd;
32972 pte_t *pte;
32973 unsigned pages_2m, pages_4k;
32974@@ -291,8 +295,13 @@ repeat:
32975 pfn = start_pfn;
32976 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32977 pgd = pgd_base + pgd_idx;
32978- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32979- pmd = one_md_table_init(pgd);
32980+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32981+ pud = pud_offset(pgd, 0);
32982+ pmd = pmd_offset(pud, 0);
32983+
32984+#ifdef CONFIG_X86_PAE
32985+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32986+#endif
32987
32988 if (pfn >= end_pfn)
32989 continue;
32990@@ -304,14 +313,13 @@ repeat:
32991 #endif
32992 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32993 pmd++, pmd_idx++) {
32994- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32995+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32996
32997 /*
32998 * Map with big pages if possible, otherwise
32999 * create normal page tables:
33000 */
33001 if (use_pse) {
33002- unsigned int addr2;
33003 pgprot_t prot = PAGE_KERNEL_LARGE;
33004 /*
33005 * first pass will use the same initial
33006@@ -322,11 +330,7 @@ repeat:
33007 _PAGE_PSE);
33008
33009 pfn &= PMD_MASK >> PAGE_SHIFT;
33010- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33011- PAGE_OFFSET + PAGE_SIZE-1;
33012-
33013- if (is_kernel_text(addr) ||
33014- is_kernel_text(addr2))
33015+ if (is_kernel_text(address, address + PMD_SIZE))
33016 prot = PAGE_KERNEL_LARGE_EXEC;
33017
33018 pages_2m++;
33019@@ -343,7 +347,7 @@ repeat:
33020 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33021 pte += pte_ofs;
33022 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33023- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33024+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33025 pgprot_t prot = PAGE_KERNEL;
33026 /*
33027 * first pass will use the same initial
33028@@ -351,7 +355,7 @@ repeat:
33029 */
33030 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33031
33032- if (is_kernel_text(addr))
33033+ if (is_kernel_text(address, address + PAGE_SIZE))
33034 prot = PAGE_KERNEL_EXEC;
33035
33036 pages_4k++;
33037@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33038
33039 pud = pud_offset(pgd, va);
33040 pmd = pmd_offset(pud, va);
33041- if (!pmd_present(*pmd))
33042+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33043 break;
33044
33045 /* should not be large page here */
33046@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33047
33048 static void __init pagetable_init(void)
33049 {
33050- pgd_t *pgd_base = swapper_pg_dir;
33051-
33052- permanent_kmaps_init(pgd_base);
33053+ permanent_kmaps_init(swapper_pg_dir);
33054 }
33055
33056-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33057+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33058 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33059
33060 /* user-defined highmem size */
33061@@ -787,10 +789,10 @@ void __init mem_init(void)
33062 ((unsigned long)&__init_end -
33063 (unsigned long)&__init_begin) >> 10,
33064
33065- (unsigned long)&_etext, (unsigned long)&_edata,
33066- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33067+ (unsigned long)&_sdata, (unsigned long)&_edata,
33068+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33069
33070- (unsigned long)&_text, (unsigned long)&_etext,
33071+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33072 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33073
33074 /*
33075@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33076 if (!kernel_set_to_readonly)
33077 return;
33078
33079+ start = ktla_ktva(start);
33080 pr_debug("Set kernel text: %lx - %lx for read write\n",
33081 start, start+size);
33082
33083@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33084 if (!kernel_set_to_readonly)
33085 return;
33086
33087+ start = ktla_ktva(start);
33088 pr_debug("Set kernel text: %lx - %lx for read only\n",
33089 start, start+size);
33090
33091@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33092 unsigned long start = PFN_ALIGN(_text);
33093 unsigned long size = PFN_ALIGN(_etext) - start;
33094
33095+ start = ktla_ktva(start);
33096 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33097 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33098 size >> 10);
33099diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33100index 30eb05a..ae671ac 100644
33101--- a/arch/x86/mm/init_64.c
33102+++ b/arch/x86/mm/init_64.c
33103@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33104 * around without checking the pgd every time.
33105 */
33106
33107-pteval_t __supported_pte_mask __read_mostly = ~0;
33108+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33109 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33110
33111 int force_personality32;
33112@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33113
33114 for (address = start; address <= end; address += PGDIR_SIZE) {
33115 const pgd_t *pgd_ref = pgd_offset_k(address);
33116+
33117+#ifdef CONFIG_PAX_PER_CPU_PGD
33118+ unsigned long cpu;
33119+#else
33120 struct page *page;
33121+#endif
33122
33123 /*
33124 * When it is called after memory hot remove, pgd_none()
33125@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33126 continue;
33127
33128 spin_lock(&pgd_lock);
33129+
33130+#ifdef CONFIG_PAX_PER_CPU_PGD
33131+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33132+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33133+
33134+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33135+ BUG_ON(pgd_page_vaddr(*pgd)
33136+ != pgd_page_vaddr(*pgd_ref));
33137+
33138+ if (removed) {
33139+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33140+ pgd_clear(pgd);
33141+ } else {
33142+ if (pgd_none(*pgd))
33143+ set_pgd(pgd, *pgd_ref);
33144+ }
33145+
33146+ pgd = pgd_offset_cpu(cpu, kernel, address);
33147+#else
33148 list_for_each_entry(page, &pgd_list, lru) {
33149 pgd_t *pgd;
33150 spinlock_t *pgt_lock;
33151@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33152 /* the pgt_lock only for Xen */
33153 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33154 spin_lock(pgt_lock);
33155+#endif
33156
33157 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33158 BUG_ON(pgd_page_vaddr(*pgd)
33159@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33160 set_pgd(pgd, *pgd_ref);
33161 }
33162
33163+#ifndef CONFIG_PAX_PER_CPU_PGD
33164 spin_unlock(pgt_lock);
33165+#endif
33166+
33167 }
33168 spin_unlock(&pgd_lock);
33169 }
33170@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33171 {
33172 if (pgd_none(*pgd)) {
33173 pud_t *pud = (pud_t *)spp_getpage();
33174- pgd_populate(&init_mm, pgd, pud);
33175+ pgd_populate_kernel(&init_mm, pgd, pud);
33176 if (pud != pud_offset(pgd, 0))
33177 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33178 pud, pud_offset(pgd, 0));
33179@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33180 {
33181 if (pud_none(*pud)) {
33182 pmd_t *pmd = (pmd_t *) spp_getpage();
33183- pud_populate(&init_mm, pud, pmd);
33184+ pud_populate_kernel(&init_mm, pud, pmd);
33185 if (pmd != pmd_offset(pud, 0))
33186 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33187 pmd, pmd_offset(pud, 0));
33188@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33189 pmd = fill_pmd(pud, vaddr);
33190 pte = fill_pte(pmd, vaddr);
33191
33192+ pax_open_kernel();
33193 set_pte(pte, new_pte);
33194+ pax_close_kernel();
33195
33196 /*
33197 * It's enough to flush this one mapping.
33198@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33199 pgd = pgd_offset_k((unsigned long)__va(phys));
33200 if (pgd_none(*pgd)) {
33201 pud = (pud_t *) spp_getpage();
33202- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33203- _PAGE_USER));
33204+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33205 }
33206 pud = pud_offset(pgd, (unsigned long)__va(phys));
33207 if (pud_none(*pud)) {
33208 pmd = (pmd_t *) spp_getpage();
33209- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33210- _PAGE_USER));
33211+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33212 }
33213 pmd = pmd_offset(pud, phys);
33214 BUG_ON(!pmd_none(*pmd));
33215@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33216 prot);
33217
33218 spin_lock(&init_mm.page_table_lock);
33219- pud_populate(&init_mm, pud, pmd);
33220+ pud_populate_kernel(&init_mm, pud, pmd);
33221 spin_unlock(&init_mm.page_table_lock);
33222 }
33223 __flush_tlb_all();
33224@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33225 page_size_mask);
33226
33227 spin_lock(&init_mm.page_table_lock);
33228- pgd_populate(&init_mm, pgd, pud);
33229+ pgd_populate_kernel(&init_mm, pgd, pud);
33230 spin_unlock(&init_mm.page_table_lock);
33231 pgd_changed = true;
33232 }
33233diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33234index 9ca35fc..4b2b7b7 100644
33235--- a/arch/x86/mm/iomap_32.c
33236+++ b/arch/x86/mm/iomap_32.c
33237@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33238 type = kmap_atomic_idx_push();
33239 idx = type + KM_TYPE_NR * smp_processor_id();
33240 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33241+
33242+ pax_open_kernel();
33243 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33244+ pax_close_kernel();
33245+
33246 arch_flush_lazy_mmu_mode();
33247
33248 return (void *)vaddr;
33249diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33250index fdf617c..b9e85bc 100644
33251--- a/arch/x86/mm/ioremap.c
33252+++ b/arch/x86/mm/ioremap.c
33253@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33254 unsigned long i;
33255
33256 for (i = 0; i < nr_pages; ++i)
33257- if (pfn_valid(start_pfn + i) &&
33258- !PageReserved(pfn_to_page(start_pfn + i)))
33259+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33260+ !PageReserved(pfn_to_page(start_pfn + i))))
33261 return 1;
33262
33263 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33264@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33265 *
33266 * Caller must ensure there is only one unmapping for the same pointer.
33267 */
33268-void iounmap(volatile void __iomem *addr)
33269+void iounmap(const volatile void __iomem *addr)
33270 {
33271 struct vm_struct *p, *o;
33272
33273@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33274 */
33275 void *xlate_dev_mem_ptr(phys_addr_t phys)
33276 {
33277- void *addr;
33278- unsigned long start = phys & PAGE_MASK;
33279-
33280 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33281- if (page_is_ram(start >> PAGE_SHIFT))
33282+ if (page_is_ram(phys >> PAGE_SHIFT))
33283+#ifdef CONFIG_HIGHMEM
33284+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33285+#endif
33286 return __va(phys);
33287
33288- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33289- if (addr)
33290- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33291-
33292- return addr;
33293+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33294 }
33295
33296 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33297 {
33298 if (page_is_ram(phys >> PAGE_SHIFT))
33299+#ifdef CONFIG_HIGHMEM
33300+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33301+#endif
33302 return;
33303
33304 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33305 return;
33306 }
33307
33308-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33309+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33310
33311 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33312 {
33313@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33314 early_ioremap_setup();
33315
33316 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33317- memset(bm_pte, 0, sizeof(bm_pte));
33318- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33319+ pmd_populate_user(&init_mm, pmd, bm_pte);
33320
33321 /*
33322 * The boot-ioremap range spans multiple pmds, for which
33323diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33324index b4f2e7e..96c9c3e 100644
33325--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33326+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33327@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33328 * memory (e.g. tracked pages)? For now, we need this to avoid
33329 * invoking kmemcheck for PnP BIOS calls.
33330 */
33331- if (regs->flags & X86_VM_MASK)
33332+ if (v8086_mode(regs))
33333 return false;
33334- if (regs->cs != __KERNEL_CS)
33335+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33336 return false;
33337
33338 pte = kmemcheck_pte_lookup(address);
33339diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33340index df4552b..12c129c 100644
33341--- a/arch/x86/mm/mmap.c
33342+++ b/arch/x86/mm/mmap.c
33343@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33344 * Leave an at least ~128 MB hole with possible stack randomization.
33345 */
33346 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33347-#define MAX_GAP (TASK_SIZE/6*5)
33348+#define MAX_GAP (pax_task_size/6*5)
33349
33350 static int mmap_is_legacy(void)
33351 {
33352@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33353 return rnd << PAGE_SHIFT;
33354 }
33355
33356-static unsigned long mmap_base(void)
33357+static unsigned long mmap_base(struct mm_struct *mm)
33358 {
33359 unsigned long gap = rlimit(RLIMIT_STACK);
33360+ unsigned long pax_task_size = TASK_SIZE;
33361+
33362+#ifdef CONFIG_PAX_SEGMEXEC
33363+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33364+ pax_task_size = SEGMEXEC_TASK_SIZE;
33365+#endif
33366
33367 if (gap < MIN_GAP)
33368 gap = MIN_GAP;
33369 else if (gap > MAX_GAP)
33370 gap = MAX_GAP;
33371
33372- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33373+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33374 }
33375
33376 /*
33377 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33378 * does, but not when emulating X86_32
33379 */
33380-static unsigned long mmap_legacy_base(void)
33381+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33382 {
33383- if (mmap_is_ia32())
33384+ if (mmap_is_ia32()) {
33385+
33386+#ifdef CONFIG_PAX_SEGMEXEC
33387+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33388+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33389+ else
33390+#endif
33391+
33392 return TASK_UNMAPPED_BASE;
33393- else
33394+ } else
33395 return TASK_UNMAPPED_BASE + mmap_rnd();
33396 }
33397
33398@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33399 */
33400 void arch_pick_mmap_layout(struct mm_struct *mm)
33401 {
33402- mm->mmap_legacy_base = mmap_legacy_base();
33403- mm->mmap_base = mmap_base();
33404+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33405+ mm->mmap_base = mmap_base(mm);
33406+
33407+#ifdef CONFIG_PAX_RANDMMAP
33408+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33409+ mm->mmap_legacy_base += mm->delta_mmap;
33410+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33411+ }
33412+#endif
33413
33414 if (mmap_is_legacy()) {
33415 mm->mmap_base = mm->mmap_legacy_base;
33416diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33417index 0057a7a..95c7edd 100644
33418--- a/arch/x86/mm/mmio-mod.c
33419+++ b/arch/x86/mm/mmio-mod.c
33420@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33421 break;
33422 default:
33423 {
33424- unsigned char *ip = (unsigned char *)instptr;
33425+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33426 my_trace->opcode = MMIO_UNKNOWN_OP;
33427 my_trace->width = 0;
33428 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33429@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33430 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33431 void __iomem *addr)
33432 {
33433- static atomic_t next_id;
33434+ static atomic_unchecked_t next_id;
33435 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33436 /* These are page-unaligned. */
33437 struct mmiotrace_map map = {
33438@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33439 .private = trace
33440 },
33441 .phys = offset,
33442- .id = atomic_inc_return(&next_id)
33443+ .id = atomic_inc_return_unchecked(&next_id)
33444 };
33445 map.map_id = trace->id;
33446
33447@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33448 ioremap_trace_core(offset, size, addr);
33449 }
33450
33451-static void iounmap_trace_core(volatile void __iomem *addr)
33452+static void iounmap_trace_core(const volatile void __iomem *addr)
33453 {
33454 struct mmiotrace_map map = {
33455 .phys = 0,
33456@@ -328,7 +328,7 @@ not_enabled:
33457 }
33458 }
33459
33460-void mmiotrace_iounmap(volatile void __iomem *addr)
33461+void mmiotrace_iounmap(const volatile void __iomem *addr)
33462 {
33463 might_sleep();
33464 if (is_enabled()) /* recheck and proper locking in *_core() */
33465diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33466index 1a88370..3f598b5 100644
33467--- a/arch/x86/mm/numa.c
33468+++ b/arch/x86/mm/numa.c
33469@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33470 }
33471 }
33472
33473-static int __init numa_register_memblks(struct numa_meminfo *mi)
33474+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33475 {
33476 unsigned long uninitialized_var(pfn_align);
33477 int i, nid;
33478diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33479index 536ea2f..f42c293 100644
33480--- a/arch/x86/mm/pageattr.c
33481+++ b/arch/x86/mm/pageattr.c
33482@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33483 */
33484 #ifdef CONFIG_PCI_BIOS
33485 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33486- pgprot_val(forbidden) |= _PAGE_NX;
33487+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33488 #endif
33489
33490 /*
33491@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33492 * Does not cover __inittext since that is gone later on. On
33493 * 64bit we do not enforce !NX on the low mapping
33494 */
33495- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33496- pgprot_val(forbidden) |= _PAGE_NX;
33497+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33498+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33499
33500+#ifdef CONFIG_DEBUG_RODATA
33501 /*
33502 * The .rodata section needs to be read-only. Using the pfn
33503 * catches all aliases.
33504@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33505 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33506 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33507 pgprot_val(forbidden) |= _PAGE_RW;
33508+#endif
33509
33510 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33511 /*
33512@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33513 }
33514 #endif
33515
33516+#ifdef CONFIG_PAX_KERNEXEC
33517+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33518+ pgprot_val(forbidden) |= _PAGE_RW;
33519+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33520+ }
33521+#endif
33522+
33523 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33524
33525 return prot;
33526@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33527 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33528 {
33529 /* change init_mm */
33530+ pax_open_kernel();
33531 set_pte_atomic(kpte, pte);
33532+
33533 #ifdef CONFIG_X86_32
33534 if (!SHARED_KERNEL_PMD) {
33535+
33536+#ifdef CONFIG_PAX_PER_CPU_PGD
33537+ unsigned long cpu;
33538+#else
33539 struct page *page;
33540+#endif
33541
33542+#ifdef CONFIG_PAX_PER_CPU_PGD
33543+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33544+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33545+#else
33546 list_for_each_entry(page, &pgd_list, lru) {
33547- pgd_t *pgd;
33548+ pgd_t *pgd = (pgd_t *)page_address(page);
33549+#endif
33550+
33551 pud_t *pud;
33552 pmd_t *pmd;
33553
33554- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33555+ pgd += pgd_index(address);
33556 pud = pud_offset(pgd, address);
33557 pmd = pmd_offset(pud, address);
33558 set_pte_atomic((pte_t *)pmd, pte);
33559 }
33560 }
33561 #endif
33562+ pax_close_kernel();
33563 }
33564
33565 static int
33566diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33567index 7ac6869..c0ba541 100644
33568--- a/arch/x86/mm/pat.c
33569+++ b/arch/x86/mm/pat.c
33570@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33571 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33572
33573 if (pg_flags == _PGMT_DEFAULT)
33574- return -1;
33575+ return _PAGE_CACHE_MODE_NUM;
33576 else if (pg_flags == _PGMT_WC)
33577 return _PAGE_CACHE_MODE_WC;
33578 else if (pg_flags == _PGMT_UC_MINUS)
33579@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33580
33581 page = pfn_to_page(pfn);
33582 type = get_page_memtype(page);
33583- if (type != -1) {
33584+ if (type != _PAGE_CACHE_MODE_NUM) {
33585 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33586 start, end - 1, type, req_type);
33587 if (new_type)
33588@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33589
33590 if (!entry) {
33591 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33592- current->comm, current->pid, start, end - 1);
33593+ current->comm, task_pid_nr(current), start, end - 1);
33594 return -EINVAL;
33595 }
33596
33597@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33598 page = pfn_to_page(paddr >> PAGE_SHIFT);
33599 rettype = get_page_memtype(page);
33600 /*
33601- * -1 from get_page_memtype() implies RAM page is in its
33602+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33603 * default state and not reserved, and hence of type WB
33604 */
33605- if (rettype == -1)
33606+ if (rettype == _PAGE_CACHE_MODE_NUM)
33607 rettype = _PAGE_CACHE_MODE_WB;
33608
33609 return rettype;
33610@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33611
33612 while (cursor < to) {
33613 if (!devmem_is_allowed(pfn)) {
33614- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33615- current->comm, from, to - 1);
33616+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33617+ current->comm, from, to - 1, cursor);
33618 return 0;
33619 }
33620 cursor += PAGE_SIZE;
33621@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33622 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33623 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33624 "for [mem %#010Lx-%#010Lx]\n",
33625- current->comm, current->pid,
33626+ current->comm, task_pid_nr(current),
33627 cattr_name(pcm),
33628 base, (unsigned long long)(base + size-1));
33629 return -EINVAL;
33630@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33631 pcm = lookup_memtype(paddr);
33632 if (want_pcm != pcm) {
33633 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33634- current->comm, current->pid,
33635+ current->comm, task_pid_nr(current),
33636 cattr_name(want_pcm),
33637 (unsigned long long)paddr,
33638 (unsigned long long)(paddr + size - 1),
33639@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33640 free_memtype(paddr, paddr + size);
33641 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33642 " for [mem %#010Lx-%#010Lx], got %s\n",
33643- current->comm, current->pid,
33644+ current->comm, task_pid_nr(current),
33645 cattr_name(want_pcm),
33646 (unsigned long long)paddr,
33647 (unsigned long long)(paddr + size - 1),
33648diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33649index 6582adc..fcc5d0b 100644
33650--- a/arch/x86/mm/pat_rbtree.c
33651+++ b/arch/x86/mm/pat_rbtree.c
33652@@ -161,7 +161,7 @@ success:
33653
33654 failure:
33655 printk(KERN_INFO "%s:%d conflicting memory types "
33656- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33657+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33658 end, cattr_name(found_type), cattr_name(match->type));
33659 return -EBUSY;
33660 }
33661diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33662index 9f0614d..92ae64a 100644
33663--- a/arch/x86/mm/pf_in.c
33664+++ b/arch/x86/mm/pf_in.c
33665@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33666 int i;
33667 enum reason_type rv = OTHERS;
33668
33669- p = (unsigned char *)ins_addr;
33670+ p = (unsigned char *)ktla_ktva(ins_addr);
33671 p += skip_prefix(p, &prf);
33672 p += get_opcode(p, &opcode);
33673
33674@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33675 struct prefix_bits prf;
33676 int i;
33677
33678- p = (unsigned char *)ins_addr;
33679+ p = (unsigned char *)ktla_ktva(ins_addr);
33680 p += skip_prefix(p, &prf);
33681 p += get_opcode(p, &opcode);
33682
33683@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33684 struct prefix_bits prf;
33685 int i;
33686
33687- p = (unsigned char *)ins_addr;
33688+ p = (unsigned char *)ktla_ktva(ins_addr);
33689 p += skip_prefix(p, &prf);
33690 p += get_opcode(p, &opcode);
33691
33692@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33693 struct prefix_bits prf;
33694 int i;
33695
33696- p = (unsigned char *)ins_addr;
33697+ p = (unsigned char *)ktla_ktva(ins_addr);
33698 p += skip_prefix(p, &prf);
33699 p += get_opcode(p, &opcode);
33700 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33701@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33702 struct prefix_bits prf;
33703 int i;
33704
33705- p = (unsigned char *)ins_addr;
33706+ p = (unsigned char *)ktla_ktva(ins_addr);
33707 p += skip_prefix(p, &prf);
33708 p += get_opcode(p, &opcode);
33709 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33710diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33711index 6fb6927..4fc13c0 100644
33712--- a/arch/x86/mm/pgtable.c
33713+++ b/arch/x86/mm/pgtable.c
33714@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33715 list_del(&page->lru);
33716 }
33717
33718-#define UNSHARED_PTRS_PER_PGD \
33719- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33720+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33721+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33722
33723+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33724+{
33725+ unsigned int count = USER_PGD_PTRS;
33726
33727+ if (!pax_user_shadow_base)
33728+ return;
33729+
33730+ while (count--)
33731+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33732+}
33733+#endif
33734+
33735+#ifdef CONFIG_PAX_PER_CPU_PGD
33736+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33737+{
33738+ unsigned int count = USER_PGD_PTRS;
33739+
33740+ while (count--) {
33741+ pgd_t pgd;
33742+
33743+#ifdef CONFIG_X86_64
33744+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33745+#else
33746+ pgd = *src++;
33747+#endif
33748+
33749+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33750+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33751+#endif
33752+
33753+ *dst++ = pgd;
33754+ }
33755+
33756+}
33757+#endif
33758+
33759+#ifdef CONFIG_X86_64
33760+#define pxd_t pud_t
33761+#define pyd_t pgd_t
33762+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33763+#define pgtable_pxd_page_ctor(page) true
33764+#define pgtable_pxd_page_dtor(page)
33765+#define pxd_free(mm, pud) pud_free((mm), (pud))
33766+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33767+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33768+#define PYD_SIZE PGDIR_SIZE
33769+#else
33770+#define pxd_t pmd_t
33771+#define pyd_t pud_t
33772+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33773+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33774+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33775+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33776+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33777+#define pyd_offset(mm, address) pud_offset((mm), (address))
33778+#define PYD_SIZE PUD_SIZE
33779+#endif
33780+
33781+#ifdef CONFIG_PAX_PER_CPU_PGD
33782+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33783+static inline void pgd_dtor(pgd_t *pgd) {}
33784+#else
33785 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33786 {
33787 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33788@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33789 pgd_list_del(pgd);
33790 spin_unlock(&pgd_lock);
33791 }
33792+#endif
33793
33794 /*
33795 * List of all pgd's needed for non-PAE so it can invalidate entries
33796@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33797 * -- nyc
33798 */
33799
33800-#ifdef CONFIG_X86_PAE
33801+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33802 /*
33803 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33804 * updating the top-level pagetable entries to guarantee the
33805@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33806 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33807 * and initialize the kernel pmds here.
33808 */
33809-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33810+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33811
33812 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33813 {
33814@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33815 */
33816 flush_tlb_mm(mm);
33817 }
33818+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33819+#define PREALLOCATED_PXDS USER_PGD_PTRS
33820 #else /* !CONFIG_X86_PAE */
33821
33822 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33823-#define PREALLOCATED_PMDS 0
33824+#define PREALLOCATED_PXDS 0
33825
33826 #endif /* CONFIG_X86_PAE */
33827
33828-static void free_pmds(pmd_t *pmds[])
33829+static void free_pxds(pxd_t *pxds[])
33830 {
33831 int i;
33832
33833- for(i = 0; i < PREALLOCATED_PMDS; i++)
33834- if (pmds[i]) {
33835- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33836- free_page((unsigned long)pmds[i]);
33837+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33838+ if (pxds[i]) {
33839+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33840+ free_page((unsigned long)pxds[i]);
33841 }
33842 }
33843
33844-static int preallocate_pmds(pmd_t *pmds[])
33845+static int preallocate_pxds(pxd_t *pxds[])
33846 {
33847 int i;
33848 bool failed = false;
33849
33850- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33851- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33852- if (!pmd)
33853+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33854+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33855+ if (!pxd)
33856 failed = true;
33857- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33858- free_page((unsigned long)pmd);
33859- pmd = NULL;
33860+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33861+ free_page((unsigned long)pxd);
33862+ pxd = NULL;
33863 failed = true;
33864 }
33865- pmds[i] = pmd;
33866+ pxds[i] = pxd;
33867 }
33868
33869 if (failed) {
33870- free_pmds(pmds);
33871+ free_pxds(pxds);
33872 return -ENOMEM;
33873 }
33874
33875@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33876 * preallocate which never got a corresponding vma will need to be
33877 * freed manually.
33878 */
33879-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33880+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33881 {
33882 int i;
33883
33884- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33885+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33886 pgd_t pgd = pgdp[i];
33887
33888 if (pgd_val(pgd) != 0) {
33889- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33890+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33891
33892- pgdp[i] = native_make_pgd(0);
33893+ set_pgd(pgdp + i, native_make_pgd(0));
33894
33895- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33896- pmd_free(mm, pmd);
33897+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33898+ pxd_free(mm, pxd);
33899 }
33900 }
33901 }
33902
33903-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33904+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33905 {
33906- pud_t *pud;
33907+ pyd_t *pyd;
33908 int i;
33909
33910- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33911+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33912 return;
33913
33914- pud = pud_offset(pgd, 0);
33915-
33916- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33917- pmd_t *pmd = pmds[i];
33918+#ifdef CONFIG_X86_64
33919+ pyd = pyd_offset(mm, 0L);
33920+#else
33921+ pyd = pyd_offset(pgd, 0L);
33922+#endif
33923
33924+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33925+ pxd_t *pxd = pxds[i];
33926 if (i >= KERNEL_PGD_BOUNDARY)
33927- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33928- sizeof(pmd_t) * PTRS_PER_PMD);
33929+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33930+ sizeof(pxd_t) * PTRS_PER_PMD);
33931
33932- pud_populate(mm, pud, pmd);
33933+ pyd_populate(mm, pyd, pxd);
33934 }
33935 }
33936
33937 pgd_t *pgd_alloc(struct mm_struct *mm)
33938 {
33939 pgd_t *pgd;
33940- pmd_t *pmds[PREALLOCATED_PMDS];
33941+ pxd_t *pxds[PREALLOCATED_PXDS];
33942
33943 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33944
33945@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33946
33947 mm->pgd = pgd;
33948
33949- if (preallocate_pmds(pmds) != 0)
33950+ if (preallocate_pxds(pxds) != 0)
33951 goto out_free_pgd;
33952
33953 if (paravirt_pgd_alloc(mm) != 0)
33954- goto out_free_pmds;
33955+ goto out_free_pxds;
33956
33957 /*
33958 * Make sure that pre-populating the pmds is atomic with
33959@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33960 spin_lock(&pgd_lock);
33961
33962 pgd_ctor(mm, pgd);
33963- pgd_prepopulate_pmd(mm, pgd, pmds);
33964+ pgd_prepopulate_pxd(mm, pgd, pxds);
33965
33966 spin_unlock(&pgd_lock);
33967
33968 return pgd;
33969
33970-out_free_pmds:
33971- free_pmds(pmds);
33972+out_free_pxds:
33973+ free_pxds(pxds);
33974 out_free_pgd:
33975 free_page((unsigned long)pgd);
33976 out:
33977@@ -313,7 +380,7 @@ out:
33978
33979 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33980 {
33981- pgd_mop_up_pmds(mm, pgd);
33982+ pgd_mop_up_pxds(mm, pgd);
33983 pgd_dtor(pgd);
33984 paravirt_pgd_free(mm, pgd);
33985 free_page((unsigned long)pgd);
33986diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33987index 75cc097..79a097f 100644
33988--- a/arch/x86/mm/pgtable_32.c
33989+++ b/arch/x86/mm/pgtable_32.c
33990@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33991 return;
33992 }
33993 pte = pte_offset_kernel(pmd, vaddr);
33994+
33995+ pax_open_kernel();
33996 if (pte_val(pteval))
33997 set_pte_at(&init_mm, vaddr, pte, pteval);
33998 else
33999 pte_clear(&init_mm, vaddr, pte);
34000+ pax_close_kernel();
34001
34002 /*
34003 * It's enough to flush this one mapping.
34004diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34005index e666cbb..61788c45 100644
34006--- a/arch/x86/mm/physaddr.c
34007+++ b/arch/x86/mm/physaddr.c
34008@@ -10,7 +10,7 @@
34009 #ifdef CONFIG_X86_64
34010
34011 #ifdef CONFIG_DEBUG_VIRTUAL
34012-unsigned long __phys_addr(unsigned long x)
34013+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34014 {
34015 unsigned long y = x - __START_KERNEL_map;
34016
34017@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34018 #else
34019
34020 #ifdef CONFIG_DEBUG_VIRTUAL
34021-unsigned long __phys_addr(unsigned long x)
34022+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34023 {
34024 unsigned long phys_addr = x - PAGE_OFFSET;
34025 /* VMALLOC_* aren't constants */
34026diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34027index 90555bf..f5f1828 100644
34028--- a/arch/x86/mm/setup_nx.c
34029+++ b/arch/x86/mm/setup_nx.c
34030@@ -5,8 +5,10 @@
34031 #include <asm/pgtable.h>
34032 #include <asm/proto.h>
34033
34034+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34035 static int disable_nx;
34036
34037+#ifndef CONFIG_PAX_PAGEEXEC
34038 /*
34039 * noexec = on|off
34040 *
34041@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34042 return 0;
34043 }
34044 early_param("noexec", noexec_setup);
34045+#endif
34046+
34047+#endif
34048
34049 void x86_configure_nx(void)
34050 {
34051+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34052 if (cpu_has_nx && !disable_nx)
34053 __supported_pte_mask |= _PAGE_NX;
34054 else
34055+#endif
34056 __supported_pte_mask &= ~_PAGE_NX;
34057 }
34058
34059diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34060index ee61c36..e6fedeb 100644
34061--- a/arch/x86/mm/tlb.c
34062+++ b/arch/x86/mm/tlb.c
34063@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34064 BUG();
34065 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34066 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34067+
34068+#ifndef CONFIG_PAX_PER_CPU_PGD
34069 load_cr3(swapper_pg_dir);
34070+#endif
34071+
34072 /*
34073 * This gets called in the idle path where RCU
34074 * functions differently. Tracing normally
34075diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34076new file mode 100644
34077index 0000000..dace51c
34078--- /dev/null
34079+++ b/arch/x86/mm/uderef_64.c
34080@@ -0,0 +1,37 @@
34081+#include <linux/mm.h>
34082+#include <asm/pgtable.h>
34083+#include <asm/uaccess.h>
34084+
34085+#ifdef CONFIG_PAX_MEMORY_UDEREF
34086+/* PaX: due to the special call convention these functions must
34087+ * - remain leaf functions under all configurations,
34088+ * - never be called directly, only dereferenced from the wrappers.
34089+ */
34090+void __pax_open_userland(void)
34091+{
34092+ unsigned int cpu;
34093+
34094+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34095+ return;
34096+
34097+ cpu = raw_get_cpu();
34098+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34099+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34100+ raw_put_cpu_no_resched();
34101+}
34102+EXPORT_SYMBOL(__pax_open_userland);
34103+
34104+void __pax_close_userland(void)
34105+{
34106+ unsigned int cpu;
34107+
34108+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34109+ return;
34110+
34111+ cpu = raw_get_cpu();
34112+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34113+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34114+ raw_put_cpu_no_resched();
34115+}
34116+EXPORT_SYMBOL(__pax_close_userland);
34117+#endif
34118diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34119index 6440221..f84b5c7 100644
34120--- a/arch/x86/net/bpf_jit.S
34121+++ b/arch/x86/net/bpf_jit.S
34122@@ -9,6 +9,7 @@
34123 */
34124 #include <linux/linkage.h>
34125 #include <asm/dwarf2.h>
34126+#include <asm/alternative-asm.h>
34127
34128 /*
34129 * Calling convention :
34130@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34131 jle bpf_slow_path_word
34132 mov (SKBDATA,%rsi),%eax
34133 bswap %eax /* ntohl() */
34134+ pax_force_retaddr
34135 ret
34136
34137 sk_load_half:
34138@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34139 jle bpf_slow_path_half
34140 movzwl (SKBDATA,%rsi),%eax
34141 rol $8,%ax # ntohs()
34142+ pax_force_retaddr
34143 ret
34144
34145 sk_load_byte:
34146@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34147 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34148 jle bpf_slow_path_byte
34149 movzbl (SKBDATA,%rsi),%eax
34150+ pax_force_retaddr
34151 ret
34152
34153 /* rsi contains offset and can be scratched */
34154@@ -90,6 +94,7 @@ bpf_slow_path_word:
34155 js bpf_error
34156 mov - MAX_BPF_STACK + 32(%rbp),%eax
34157 bswap %eax
34158+ pax_force_retaddr
34159 ret
34160
34161 bpf_slow_path_half:
34162@@ -98,12 +103,14 @@ bpf_slow_path_half:
34163 mov - MAX_BPF_STACK + 32(%rbp),%ax
34164 rol $8,%ax
34165 movzwl %ax,%eax
34166+ pax_force_retaddr
34167 ret
34168
34169 bpf_slow_path_byte:
34170 bpf_slow_path_common(1)
34171 js bpf_error
34172 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34173+ pax_force_retaddr
34174 ret
34175
34176 #define sk_negative_common(SIZE) \
34177@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34178 sk_negative_common(4)
34179 mov (%rax), %eax
34180 bswap %eax
34181+ pax_force_retaddr
34182 ret
34183
34184 bpf_slow_path_half_neg:
34185@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34186 mov (%rax),%ax
34187 rol $8,%ax
34188 movzwl %ax,%eax
34189+ pax_force_retaddr
34190 ret
34191
34192 bpf_slow_path_byte_neg:
34193@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34194 .globl sk_load_byte_negative_offset
34195 sk_negative_common(1)
34196 movzbl (%rax), %eax
34197+ pax_force_retaddr
34198 ret
34199
34200 bpf_error:
34201@@ -156,4 +166,5 @@ bpf_error:
34202 mov - MAX_BPF_STACK + 16(%rbp),%r14
34203 mov - MAX_BPF_STACK + 24(%rbp),%r15
34204 leaveq
34205+ pax_force_retaddr
34206 ret
34207diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34208index 9875143..00f6656 100644
34209--- a/arch/x86/net/bpf_jit_comp.c
34210+++ b/arch/x86/net/bpf_jit_comp.c
34211@@ -13,7 +13,11 @@
34212 #include <linux/if_vlan.h>
34213 #include <asm/cacheflush.h>
34214
34215+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34216+int bpf_jit_enable __read_only;
34217+#else
34218 int bpf_jit_enable __read_mostly;
34219+#endif
34220
34221 /*
34222 * assembly code in arch/x86/net/bpf_jit.S
34223@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34224 static void jit_fill_hole(void *area, unsigned int size)
34225 {
34226 /* fill whole space with int3 instructions */
34227+ pax_open_kernel();
34228 memset(area, 0xcc, size);
34229+ pax_close_kernel();
34230 }
34231
34232 struct jit_context {
34233@@ -896,7 +902,9 @@ common_load:
34234 pr_err("bpf_jit_compile fatal error\n");
34235 return -EFAULT;
34236 }
34237+ pax_open_kernel();
34238 memcpy(image + proglen, temp, ilen);
34239+ pax_close_kernel();
34240 }
34241 proglen += ilen;
34242 addrs[i] = proglen;
34243@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34244
34245 if (image) {
34246 bpf_flush_icache(header, image + proglen);
34247- set_memory_ro((unsigned long)header, header->pages);
34248 prog->bpf_func = (void *)image;
34249 prog->jited = true;
34250 }
34251@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34252 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34253 struct bpf_binary_header *header = (void *)addr;
34254
34255- if (!fp->jited)
34256- goto free_filter;
34257+ if (fp->jited)
34258+ bpf_jit_binary_free(header);
34259
34260- set_memory_rw(addr, header->pages);
34261- bpf_jit_binary_free(header);
34262-
34263-free_filter:
34264 bpf_prog_unlock_free(fp);
34265 }
34266diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34267index 5d04be5..2beeaa2 100644
34268--- a/arch/x86/oprofile/backtrace.c
34269+++ b/arch/x86/oprofile/backtrace.c
34270@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34271 struct stack_frame_ia32 *fp;
34272 unsigned long bytes;
34273
34274- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34275+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34276 if (bytes != 0)
34277 return NULL;
34278
34279- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34280+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34281
34282 oprofile_add_trace(bufhead[0].return_address);
34283
34284@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34285 struct stack_frame bufhead[2];
34286 unsigned long bytes;
34287
34288- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34289+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34290 if (bytes != 0)
34291 return NULL;
34292
34293@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34294 {
34295 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34296
34297- if (!user_mode_vm(regs)) {
34298+ if (!user_mode(regs)) {
34299 unsigned long stack = kernel_stack_pointer(regs);
34300 if (depth)
34301 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34302diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34303index 1d2e639..f6ef82a 100644
34304--- a/arch/x86/oprofile/nmi_int.c
34305+++ b/arch/x86/oprofile/nmi_int.c
34306@@ -23,6 +23,7 @@
34307 #include <asm/nmi.h>
34308 #include <asm/msr.h>
34309 #include <asm/apic.h>
34310+#include <asm/pgtable.h>
34311
34312 #include "op_counter.h"
34313 #include "op_x86_model.h"
34314@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34315 if (ret)
34316 return ret;
34317
34318- if (!model->num_virt_counters)
34319- model->num_virt_counters = model->num_counters;
34320+ if (!model->num_virt_counters) {
34321+ pax_open_kernel();
34322+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34323+ pax_close_kernel();
34324+ }
34325
34326 mux_init(ops);
34327
34328diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34329index 50d86c0..7985318 100644
34330--- a/arch/x86/oprofile/op_model_amd.c
34331+++ b/arch/x86/oprofile/op_model_amd.c
34332@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34333 num_counters = AMD64_NUM_COUNTERS;
34334 }
34335
34336- op_amd_spec.num_counters = num_counters;
34337- op_amd_spec.num_controls = num_counters;
34338- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34339+ pax_open_kernel();
34340+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34341+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34342+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34343+ pax_close_kernel();
34344
34345 return 0;
34346 }
34347diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34348index d90528e..0127e2b 100644
34349--- a/arch/x86/oprofile/op_model_ppro.c
34350+++ b/arch/x86/oprofile/op_model_ppro.c
34351@@ -19,6 +19,7 @@
34352 #include <asm/msr.h>
34353 #include <asm/apic.h>
34354 #include <asm/nmi.h>
34355+#include <asm/pgtable.h>
34356
34357 #include "op_x86_model.h"
34358 #include "op_counter.h"
34359@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34360
34361 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34362
34363- op_arch_perfmon_spec.num_counters = num_counters;
34364- op_arch_perfmon_spec.num_controls = num_counters;
34365+ pax_open_kernel();
34366+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34367+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34368+ pax_close_kernel();
34369 }
34370
34371 static int arch_perfmon_init(struct oprofile_operations *ignore)
34372diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34373index 71e8a67..6a313bb 100644
34374--- a/arch/x86/oprofile/op_x86_model.h
34375+++ b/arch/x86/oprofile/op_x86_model.h
34376@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34377 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34378 struct op_msrs const * const msrs);
34379 #endif
34380-};
34381+} __do_const;
34382
34383 struct op_counter_config;
34384
34385diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34386index 44b9271..4c5a988 100644
34387--- a/arch/x86/pci/intel_mid_pci.c
34388+++ b/arch/x86/pci/intel_mid_pci.c
34389@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34390 pci_mmcfg_late_init();
34391 pcibios_enable_irq = intel_mid_pci_irq_enable;
34392 pcibios_disable_irq = intel_mid_pci_irq_disable;
34393- pci_root_ops = intel_mid_pci_ops;
34394+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34395 pci_soc_mode = 1;
34396 /* Continue with standard init */
34397 return 1;
34398diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34399index 5dc6ca5..25c03f5 100644
34400--- a/arch/x86/pci/irq.c
34401+++ b/arch/x86/pci/irq.c
34402@@ -51,7 +51,7 @@ struct irq_router {
34403 struct irq_router_handler {
34404 u16 vendor;
34405 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34406-};
34407+} __do_const;
34408
34409 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34410 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34411@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34412 return 0;
34413 }
34414
34415-static __initdata struct irq_router_handler pirq_routers[] = {
34416+static __initconst const struct irq_router_handler pirq_routers[] = {
34417 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34418 { PCI_VENDOR_ID_AL, ali_router_probe },
34419 { PCI_VENDOR_ID_ITE, ite_router_probe },
34420@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34421 static void __init pirq_find_router(struct irq_router *r)
34422 {
34423 struct irq_routing_table *rt = pirq_table;
34424- struct irq_router_handler *h;
34425+ const struct irq_router_handler *h;
34426
34427 #ifdef CONFIG_PCI_BIOS
34428 if (!rt->signature) {
34429@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34430 return 0;
34431 }
34432
34433-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34434+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34435 {
34436 .callback = fix_broken_hp_bios_irq9,
34437 .ident = "HP Pavilion N5400 Series Laptop",
34438diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34439index 9b83b90..4112152 100644
34440--- a/arch/x86/pci/pcbios.c
34441+++ b/arch/x86/pci/pcbios.c
34442@@ -79,7 +79,7 @@ union bios32 {
34443 static struct {
34444 unsigned long address;
34445 unsigned short segment;
34446-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34447+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34448
34449 /*
34450 * Returns the entry point for the given service, NULL on error
34451@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34452 unsigned long length; /* %ecx */
34453 unsigned long entry; /* %edx */
34454 unsigned long flags;
34455+ struct desc_struct d, *gdt;
34456
34457 local_irq_save(flags);
34458- __asm__("lcall *(%%edi); cld"
34459+
34460+ gdt = get_cpu_gdt_table(smp_processor_id());
34461+
34462+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34463+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34464+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34465+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34466+
34467+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34468 : "=a" (return_code),
34469 "=b" (address),
34470 "=c" (length),
34471 "=d" (entry)
34472 : "0" (service),
34473 "1" (0),
34474- "D" (&bios32_indirect));
34475+ "D" (&bios32_indirect),
34476+ "r"(__PCIBIOS_DS)
34477+ : "memory");
34478+
34479+ pax_open_kernel();
34480+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34481+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34482+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34483+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34484+ pax_close_kernel();
34485+
34486 local_irq_restore(flags);
34487
34488 switch (return_code) {
34489- case 0:
34490- return address + entry;
34491- case 0x80: /* Not present */
34492- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34493- return 0;
34494- default: /* Shouldn't happen */
34495- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34496- service, return_code);
34497+ case 0: {
34498+ int cpu;
34499+ unsigned char flags;
34500+
34501+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34502+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34503+ printk(KERN_WARNING "bios32_service: not valid\n");
34504 return 0;
34505+ }
34506+ address = address + PAGE_OFFSET;
34507+ length += 16UL; /* some BIOSs underreport this... */
34508+ flags = 4;
34509+ if (length >= 64*1024*1024) {
34510+ length >>= PAGE_SHIFT;
34511+ flags |= 8;
34512+ }
34513+
34514+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34515+ gdt = get_cpu_gdt_table(cpu);
34516+ pack_descriptor(&d, address, length, 0x9b, flags);
34517+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34518+ pack_descriptor(&d, address, length, 0x93, flags);
34519+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34520+ }
34521+ return entry;
34522+ }
34523+ case 0x80: /* Not present */
34524+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34525+ return 0;
34526+ default: /* Shouldn't happen */
34527+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34528+ service, return_code);
34529+ return 0;
34530 }
34531 }
34532
34533 static struct {
34534 unsigned long address;
34535 unsigned short segment;
34536-} pci_indirect = { 0, __KERNEL_CS };
34537+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34538
34539-static int pci_bios_present;
34540+static int pci_bios_present __read_only;
34541
34542 static int __init check_pcibios(void)
34543 {
34544@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34545 unsigned long flags, pcibios_entry;
34546
34547 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34548- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34549+ pci_indirect.address = pcibios_entry;
34550
34551 local_irq_save(flags);
34552- __asm__(
34553- "lcall *(%%edi); cld\n\t"
34554+ __asm__("movw %w6, %%ds\n\t"
34555+ "lcall *%%ss:(%%edi); cld\n\t"
34556+ "push %%ss\n\t"
34557+ "pop %%ds\n\t"
34558 "jc 1f\n\t"
34559 "xor %%ah, %%ah\n"
34560 "1:"
34561@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34562 "=b" (ebx),
34563 "=c" (ecx)
34564 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34565- "D" (&pci_indirect)
34566+ "D" (&pci_indirect),
34567+ "r" (__PCIBIOS_DS)
34568 : "memory");
34569 local_irq_restore(flags);
34570
34571@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34572
34573 switch (len) {
34574 case 1:
34575- __asm__("lcall *(%%esi); cld\n\t"
34576+ __asm__("movw %w6, %%ds\n\t"
34577+ "lcall *%%ss:(%%esi); cld\n\t"
34578+ "push %%ss\n\t"
34579+ "pop %%ds\n\t"
34580 "jc 1f\n\t"
34581 "xor %%ah, %%ah\n"
34582 "1:"
34583@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34584 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34585 "b" (bx),
34586 "D" ((long)reg),
34587- "S" (&pci_indirect));
34588+ "S" (&pci_indirect),
34589+ "r" (__PCIBIOS_DS));
34590 /*
34591 * Zero-extend the result beyond 8 bits, do not trust the
34592 * BIOS having done it:
34593@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34594 *value &= 0xff;
34595 break;
34596 case 2:
34597- __asm__("lcall *(%%esi); cld\n\t"
34598+ __asm__("movw %w6, %%ds\n\t"
34599+ "lcall *%%ss:(%%esi); cld\n\t"
34600+ "push %%ss\n\t"
34601+ "pop %%ds\n\t"
34602 "jc 1f\n\t"
34603 "xor %%ah, %%ah\n"
34604 "1:"
34605@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34606 : "1" (PCIBIOS_READ_CONFIG_WORD),
34607 "b" (bx),
34608 "D" ((long)reg),
34609- "S" (&pci_indirect));
34610+ "S" (&pci_indirect),
34611+ "r" (__PCIBIOS_DS));
34612 /*
34613 * Zero-extend the result beyond 16 bits, do not trust the
34614 * BIOS having done it:
34615@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34616 *value &= 0xffff;
34617 break;
34618 case 4:
34619- __asm__("lcall *(%%esi); cld\n\t"
34620+ __asm__("movw %w6, %%ds\n\t"
34621+ "lcall *%%ss:(%%esi); cld\n\t"
34622+ "push %%ss\n\t"
34623+ "pop %%ds\n\t"
34624 "jc 1f\n\t"
34625 "xor %%ah, %%ah\n"
34626 "1:"
34627@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34628 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34629 "b" (bx),
34630 "D" ((long)reg),
34631- "S" (&pci_indirect));
34632+ "S" (&pci_indirect),
34633+ "r" (__PCIBIOS_DS));
34634 break;
34635 }
34636
34637@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34638
34639 switch (len) {
34640 case 1:
34641- __asm__("lcall *(%%esi); cld\n\t"
34642+ __asm__("movw %w6, %%ds\n\t"
34643+ "lcall *%%ss:(%%esi); cld\n\t"
34644+ "push %%ss\n\t"
34645+ "pop %%ds\n\t"
34646 "jc 1f\n\t"
34647 "xor %%ah, %%ah\n"
34648 "1:"
34649@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34650 "c" (value),
34651 "b" (bx),
34652 "D" ((long)reg),
34653- "S" (&pci_indirect));
34654+ "S" (&pci_indirect),
34655+ "r" (__PCIBIOS_DS));
34656 break;
34657 case 2:
34658- __asm__("lcall *(%%esi); cld\n\t"
34659+ __asm__("movw %w6, %%ds\n\t"
34660+ "lcall *%%ss:(%%esi); cld\n\t"
34661+ "push %%ss\n\t"
34662+ "pop %%ds\n\t"
34663 "jc 1f\n\t"
34664 "xor %%ah, %%ah\n"
34665 "1:"
34666@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34667 "c" (value),
34668 "b" (bx),
34669 "D" ((long)reg),
34670- "S" (&pci_indirect));
34671+ "S" (&pci_indirect),
34672+ "r" (__PCIBIOS_DS));
34673 break;
34674 case 4:
34675- __asm__("lcall *(%%esi); cld\n\t"
34676+ __asm__("movw %w6, %%ds\n\t"
34677+ "lcall *%%ss:(%%esi); cld\n\t"
34678+ "push %%ss\n\t"
34679+ "pop %%ds\n\t"
34680 "jc 1f\n\t"
34681 "xor %%ah, %%ah\n"
34682 "1:"
34683@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34684 "c" (value),
34685 "b" (bx),
34686 "D" ((long)reg),
34687- "S" (&pci_indirect));
34688+ "S" (&pci_indirect),
34689+ "r" (__PCIBIOS_DS));
34690 break;
34691 }
34692
34693@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34694
34695 DBG("PCI: Fetching IRQ routing table... ");
34696 __asm__("push %%es\n\t"
34697+ "movw %w8, %%ds\n\t"
34698 "push %%ds\n\t"
34699 "pop %%es\n\t"
34700- "lcall *(%%esi); cld\n\t"
34701+ "lcall *%%ss:(%%esi); cld\n\t"
34702 "pop %%es\n\t"
34703+ "push %%ss\n\t"
34704+ "pop %%ds\n"
34705 "jc 1f\n\t"
34706 "xor %%ah, %%ah\n"
34707 "1:"
34708@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34709 "1" (0),
34710 "D" ((long) &opt),
34711 "S" (&pci_indirect),
34712- "m" (opt)
34713+ "m" (opt),
34714+ "r" (__PCIBIOS_DS)
34715 : "memory");
34716 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34717 if (ret & 0xff00)
34718@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34719 {
34720 int ret;
34721
34722- __asm__("lcall *(%%esi); cld\n\t"
34723+ __asm__("movw %w5, %%ds\n\t"
34724+ "lcall *%%ss:(%%esi); cld\n\t"
34725+ "push %%ss\n\t"
34726+ "pop %%ds\n"
34727 "jc 1f\n\t"
34728 "xor %%ah, %%ah\n"
34729 "1:"
34730@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34731 : "0" (PCIBIOS_SET_PCI_HW_INT),
34732 "b" ((dev->bus->number << 8) | dev->devfn),
34733 "c" ((irq << 8) | (pin + 10)),
34734- "S" (&pci_indirect));
34735+ "S" (&pci_indirect),
34736+ "r" (__PCIBIOS_DS));
34737 return !(ret & 0xff00);
34738 }
34739 EXPORT_SYMBOL(pcibios_set_irq_routing);
34740diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34741index 40e7cda..c7e6672 100644
34742--- a/arch/x86/platform/efi/efi_32.c
34743+++ b/arch/x86/platform/efi/efi_32.c
34744@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34745 {
34746 struct desc_ptr gdt_descr;
34747
34748+#ifdef CONFIG_PAX_KERNEXEC
34749+ struct desc_struct d;
34750+#endif
34751+
34752 local_irq_save(efi_rt_eflags);
34753
34754 load_cr3(initial_page_table);
34755 __flush_tlb_all();
34756
34757+#ifdef CONFIG_PAX_KERNEXEC
34758+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34759+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34760+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34761+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34762+#endif
34763+
34764 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34765 gdt_descr.size = GDT_SIZE - 1;
34766 load_gdt(&gdt_descr);
34767@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34768 {
34769 struct desc_ptr gdt_descr;
34770
34771+#ifdef CONFIG_PAX_KERNEXEC
34772+ struct desc_struct d;
34773+
34774+ memset(&d, 0, sizeof d);
34775+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34776+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34777+#endif
34778+
34779 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34780 gdt_descr.size = GDT_SIZE - 1;
34781 load_gdt(&gdt_descr);
34782
34783+#ifdef CONFIG_PAX_PER_CPU_PGD
34784+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34785+#else
34786 load_cr3(swapper_pg_dir);
34787+#endif
34788+
34789 __flush_tlb_all();
34790
34791 local_irq_restore(efi_rt_eflags);
34792diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34793index 17e80d8..9fa6e41 100644
34794--- a/arch/x86/platform/efi/efi_64.c
34795+++ b/arch/x86/platform/efi/efi_64.c
34796@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34797 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34798 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34799 }
34800+
34801+#ifdef CONFIG_PAX_PER_CPU_PGD
34802+ load_cr3(swapper_pg_dir);
34803+#endif
34804+
34805 __flush_tlb_all();
34806 }
34807
34808@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34809 for (pgd = 0; pgd < n_pgds; pgd++)
34810 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34811 kfree(save_pgd);
34812+
34813+#ifdef CONFIG_PAX_PER_CPU_PGD
34814+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34815+#endif
34816+
34817 __flush_tlb_all();
34818 local_irq_restore(efi_flags);
34819 early_code_mapping_set_exec(0);
34820@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34821 unsigned npages;
34822 pgd_t *pgd;
34823
34824- if (efi_enabled(EFI_OLD_MEMMAP))
34825+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34826+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34827+ * able to execute the EFI services.
34828+ */
34829+ if (__supported_pte_mask & _PAGE_NX) {
34830+ unsigned long addr = (unsigned long) __va(0);
34831+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34832+
34833+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34834+#ifdef CONFIG_PAX_PER_CPU_PGD
34835+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34836+#endif
34837+ set_pgd(pgd_offset_k(addr), pe);
34838+ }
34839+
34840 return 0;
34841+ }
34842
34843 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34844 pgd = __va(efi_scratch.efi_pgt);
34845diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34846index 040192b..7d3300f 100644
34847--- a/arch/x86/platform/efi/efi_stub_32.S
34848+++ b/arch/x86/platform/efi/efi_stub_32.S
34849@@ -6,7 +6,9 @@
34850 */
34851
34852 #include <linux/linkage.h>
34853+#include <linux/init.h>
34854 #include <asm/page_types.h>
34855+#include <asm/segment.h>
34856
34857 /*
34858 * efi_call_phys(void *, ...) is a function with variable parameters.
34859@@ -20,7 +22,7 @@
34860 * service functions will comply with gcc calling convention, too.
34861 */
34862
34863-.text
34864+__INIT
34865 ENTRY(efi_call_phys)
34866 /*
34867 * 0. The function can only be called in Linux kernel. So CS has been
34868@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34869 * The mapping of lower virtual memory has been created in prolog and
34870 * epilog.
34871 */
34872- movl $1f, %edx
34873- subl $__PAGE_OFFSET, %edx
34874- jmp *%edx
34875+#ifdef CONFIG_PAX_KERNEXEC
34876+ movl $(__KERNEXEC_EFI_DS), %edx
34877+ mov %edx, %ds
34878+ mov %edx, %es
34879+ mov %edx, %ss
34880+ addl $2f,(1f)
34881+ ljmp *(1f)
34882+
34883+__INITDATA
34884+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34885+.previous
34886+
34887+2:
34888+ subl $2b,(1b)
34889+#else
34890+ jmp 1f-__PAGE_OFFSET
34891 1:
34892+#endif
34893
34894 /*
34895 * 2. Now on the top of stack is the return
34896@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34897 * parameter 2, ..., param n. To make things easy, we save the return
34898 * address of efi_call_phys in a global variable.
34899 */
34900- popl %edx
34901- movl %edx, saved_return_addr
34902- /* get the function pointer into ECX*/
34903- popl %ecx
34904- movl %ecx, efi_rt_function_ptr
34905- movl $2f, %edx
34906- subl $__PAGE_OFFSET, %edx
34907- pushl %edx
34908+ popl (saved_return_addr)
34909+ popl (efi_rt_function_ptr)
34910
34911 /*
34912 * 3. Clear PG bit in %CR0.
34913@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34914 /*
34915 * 5. Call the physical function.
34916 */
34917- jmp *%ecx
34918+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34919
34920-2:
34921 /*
34922 * 6. After EFI runtime service returns, control will return to
34923 * following instruction. We'd better readjust stack pointer first.
34924@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34925 movl %cr0, %edx
34926 orl $0x80000000, %edx
34927 movl %edx, %cr0
34928- jmp 1f
34929-1:
34930+
34931 /*
34932 * 8. Now restore the virtual mode from flat mode by
34933 * adding EIP with PAGE_OFFSET.
34934 */
34935- movl $1f, %edx
34936- jmp *%edx
34937+#ifdef CONFIG_PAX_KERNEXEC
34938+ movl $(__KERNEL_DS), %edx
34939+ mov %edx, %ds
34940+ mov %edx, %es
34941+ mov %edx, %ss
34942+ ljmp $(__KERNEL_CS),$1f
34943+#else
34944+ jmp 1f+__PAGE_OFFSET
34945+#endif
34946 1:
34947
34948 /*
34949 * 9. Balance the stack. And because EAX contain the return value,
34950 * we'd better not clobber it.
34951 */
34952- leal efi_rt_function_ptr, %edx
34953- movl (%edx), %ecx
34954- pushl %ecx
34955+ pushl (efi_rt_function_ptr)
34956
34957 /*
34958- * 10. Push the saved return address onto the stack and return.
34959+ * 10. Return to the saved return address.
34960 */
34961- leal saved_return_addr, %edx
34962- movl (%edx), %ecx
34963- pushl %ecx
34964- ret
34965+ jmpl *(saved_return_addr)
34966 ENDPROC(efi_call_phys)
34967 .previous
34968
34969-.data
34970+__INITDATA
34971 saved_return_addr:
34972 .long 0
34973 efi_rt_function_ptr:
34974diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34975index 86d0f9e..6d499f4 100644
34976--- a/arch/x86/platform/efi/efi_stub_64.S
34977+++ b/arch/x86/platform/efi/efi_stub_64.S
34978@@ -11,6 +11,7 @@
34979 #include <asm/msr.h>
34980 #include <asm/processor-flags.h>
34981 #include <asm/page_types.h>
34982+#include <asm/alternative-asm.h>
34983
34984 #define SAVE_XMM \
34985 mov %rsp, %rax; \
34986@@ -88,6 +89,7 @@ ENTRY(efi_call)
34987 RESTORE_PGT
34988 addq $48, %rsp
34989 RESTORE_XMM
34990+ pax_force_retaddr 0, 1
34991 ret
34992 ENDPROC(efi_call)
34993
34994diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34995index 1bbedc4..eb795b5 100644
34996--- a/arch/x86/platform/intel-mid/intel-mid.c
34997+++ b/arch/x86/platform/intel-mid/intel-mid.c
34998@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
34999 {
35000 };
35001
35002-static void intel_mid_reboot(void)
35003+static void __noreturn intel_mid_reboot(void)
35004 {
35005 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35006+ BUG();
35007 }
35008
35009 static unsigned long __init intel_mid_calibrate_tsc(void)
35010diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35011index 3c1c386..59a68ed 100644
35012--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35013+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35014@@ -13,6 +13,6 @@
35015 /* For every CPU addition a new get_<cpuname>_ops interface needs
35016 * to be added.
35017 */
35018-extern void *get_penwell_ops(void);
35019-extern void *get_cloverview_ops(void);
35020-extern void *get_tangier_ops(void);
35021+extern const void *get_penwell_ops(void);
35022+extern const void *get_cloverview_ops(void);
35023+extern const void *get_tangier_ops(void);
35024diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35025index 23381d2..8ddc10e 100644
35026--- a/arch/x86/platform/intel-mid/mfld.c
35027+++ b/arch/x86/platform/intel-mid/mfld.c
35028@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35029 pm_power_off = mfld_power_off;
35030 }
35031
35032-void *get_penwell_ops(void)
35033+const void *get_penwell_ops(void)
35034 {
35035 return &penwell_ops;
35036 }
35037
35038-void *get_cloverview_ops(void)
35039+const void *get_cloverview_ops(void)
35040 {
35041 return &penwell_ops;
35042 }
35043diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35044index aaca917..66eadbc 100644
35045--- a/arch/x86/platform/intel-mid/mrfl.c
35046+++ b/arch/x86/platform/intel-mid/mrfl.c
35047@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35048 .arch_setup = tangier_arch_setup,
35049 };
35050
35051-void *get_tangier_ops(void)
35052+const void *get_tangier_ops(void)
35053 {
35054 return &tangier_ops;
35055 }
35056diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35057index d6ee929..3637cb5 100644
35058--- a/arch/x86/platform/olpc/olpc_dt.c
35059+++ b/arch/x86/platform/olpc/olpc_dt.c
35060@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35061 return res;
35062 }
35063
35064-static struct of_pdt_ops prom_olpc_ops __initdata = {
35065+static struct of_pdt_ops prom_olpc_ops __initconst = {
35066 .nextprop = olpc_dt_nextprop,
35067 .getproplen = olpc_dt_getproplen,
35068 .getproperty = olpc_dt_getproperty,
35069diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35070index 6ec7910..ecdbb11 100644
35071--- a/arch/x86/power/cpu.c
35072+++ b/arch/x86/power/cpu.c
35073@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35074 static void fix_processor_context(void)
35075 {
35076 int cpu = smp_processor_id();
35077- struct tss_struct *t = &per_cpu(init_tss, cpu);
35078-#ifdef CONFIG_X86_64
35079- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35080- tss_desc tss;
35081-#endif
35082+ struct tss_struct *t = init_tss + cpu;
35083+
35084 set_tss_desc(cpu, t); /*
35085 * This just modifies memory; should not be
35086 * necessary. But... This is necessary, because
35087@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35088 */
35089
35090 #ifdef CONFIG_X86_64
35091- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35092- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35093- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35094-
35095 syscall_init(); /* This sets MSR_*STAR and related */
35096 #endif
35097 load_TR_desc(); /* This does ltr */
35098diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35099index bad628a..a102610 100644
35100--- a/arch/x86/realmode/init.c
35101+++ b/arch/x86/realmode/init.c
35102@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35103 __va(real_mode_header->trampoline_header);
35104
35105 #ifdef CONFIG_X86_32
35106- trampoline_header->start = __pa_symbol(startup_32_smp);
35107+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35108+
35109+#ifdef CONFIG_PAX_KERNEXEC
35110+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35111+#endif
35112+
35113+ trampoline_header->boot_cs = __BOOT_CS;
35114 trampoline_header->gdt_limit = __BOOT_DS + 7;
35115 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35116 #else
35117@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35118 *trampoline_cr4_features = read_cr4();
35119
35120 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35121- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35122+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35123 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35124 #endif
35125 }
35126diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35127index 7c0d7be..d24dc88 100644
35128--- a/arch/x86/realmode/rm/Makefile
35129+++ b/arch/x86/realmode/rm/Makefile
35130@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35131
35132 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35133 -I$(srctree)/arch/x86/boot
35134+ifdef CONSTIFY_PLUGIN
35135+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35136+endif
35137 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35138 GCOV_PROFILE := n
35139diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35140index a28221d..93c40f1 100644
35141--- a/arch/x86/realmode/rm/header.S
35142+++ b/arch/x86/realmode/rm/header.S
35143@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35144 #endif
35145 /* APM/BIOS reboot */
35146 .long pa_machine_real_restart_asm
35147-#ifdef CONFIG_X86_64
35148+#ifdef CONFIG_X86_32
35149+ .long __KERNEL_CS
35150+#else
35151 .long __KERNEL32_CS
35152 #endif
35153 END(real_mode_header)
35154diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35155index 48ddd76..c26749f 100644
35156--- a/arch/x86/realmode/rm/trampoline_32.S
35157+++ b/arch/x86/realmode/rm/trampoline_32.S
35158@@ -24,6 +24,12 @@
35159 #include <asm/page_types.h>
35160 #include "realmode.h"
35161
35162+#ifdef CONFIG_PAX_KERNEXEC
35163+#define ta(X) (X)
35164+#else
35165+#define ta(X) (pa_ ## X)
35166+#endif
35167+
35168 .text
35169 .code16
35170
35171@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35172
35173 cli # We should be safe anyway
35174
35175- movl tr_start, %eax # where we need to go
35176-
35177 movl $0xA5A5A5A5, trampoline_status
35178 # write marker for master knows we're running
35179
35180@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35181 movw $1, %dx # protected mode (PE) bit
35182 lmsw %dx # into protected mode
35183
35184- ljmpl $__BOOT_CS, $pa_startup_32
35185+ ljmpl *(trampoline_header)
35186
35187 .section ".text32","ax"
35188 .code32
35189@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35190 .balign 8
35191 GLOBAL(trampoline_header)
35192 tr_start: .space 4
35193- tr_gdt_pad: .space 2
35194+ tr_boot_cs: .space 2
35195 tr_gdt: .space 6
35196 END(trampoline_header)
35197
35198diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35199index dac7b20..72dbaca 100644
35200--- a/arch/x86/realmode/rm/trampoline_64.S
35201+++ b/arch/x86/realmode/rm/trampoline_64.S
35202@@ -93,6 +93,7 @@ ENTRY(startup_32)
35203 movl %edx, %gs
35204
35205 movl pa_tr_cr4, %eax
35206+ andl $~X86_CR4_PCIDE, %eax
35207 movl %eax, %cr4 # Enable PAE mode
35208
35209 # Setup trampoline 4 level pagetables
35210@@ -106,7 +107,7 @@ ENTRY(startup_32)
35211 wrmsr
35212
35213 # Enable paging and in turn activate Long Mode
35214- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35215+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35216 movl %eax, %cr0
35217
35218 /*
35219diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35220index 9e7e147..25a4158 100644
35221--- a/arch/x86/realmode/rm/wakeup_asm.S
35222+++ b/arch/x86/realmode/rm/wakeup_asm.S
35223@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35224 lgdtl pmode_gdt
35225
35226 /* This really couldn't... */
35227- movl pmode_entry, %eax
35228 movl pmode_cr0, %ecx
35229 movl %ecx, %cr0
35230- ljmpl $__KERNEL_CS, $pa_startup_32
35231- /* -> jmp *%eax in trampoline_32.S */
35232+
35233+ ljmpl *pmode_entry
35234 #else
35235 jmp trampoline_start
35236 #endif
35237diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35238index 604a37e..e49702a 100644
35239--- a/arch/x86/tools/Makefile
35240+++ b/arch/x86/tools/Makefile
35241@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35242
35243 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35244
35245-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35246+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35247 hostprogs-y += relocs
35248 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35249 PHONY += relocs
35250diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35251index 0c2fae8..88036b7 100644
35252--- a/arch/x86/tools/relocs.c
35253+++ b/arch/x86/tools/relocs.c
35254@@ -1,5 +1,7 @@
35255 /* This is included from relocs_32/64.c */
35256
35257+#include "../../../include/generated/autoconf.h"
35258+
35259 #define ElfW(type) _ElfW(ELF_BITS, type)
35260 #define _ElfW(bits, type) __ElfW(bits, type)
35261 #define __ElfW(bits, type) Elf##bits##_##type
35262@@ -11,6 +13,7 @@
35263 #define Elf_Sym ElfW(Sym)
35264
35265 static Elf_Ehdr ehdr;
35266+static Elf_Phdr *phdr;
35267
35268 struct relocs {
35269 uint32_t *offset;
35270@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35271 }
35272 }
35273
35274+static void read_phdrs(FILE *fp)
35275+{
35276+ unsigned int i;
35277+
35278+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35279+ if (!phdr) {
35280+ die("Unable to allocate %d program headers\n",
35281+ ehdr.e_phnum);
35282+ }
35283+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35284+ die("Seek to %d failed: %s\n",
35285+ ehdr.e_phoff, strerror(errno));
35286+ }
35287+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35288+ die("Cannot read ELF program headers: %s\n",
35289+ strerror(errno));
35290+ }
35291+ for(i = 0; i < ehdr.e_phnum; i++) {
35292+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35293+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35294+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35295+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35296+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35297+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35298+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35299+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35300+ }
35301+
35302+}
35303+
35304 static void read_shdrs(FILE *fp)
35305 {
35306- int i;
35307+ unsigned int i;
35308 Elf_Shdr shdr;
35309
35310 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35311@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35312
35313 static void read_strtabs(FILE *fp)
35314 {
35315- int i;
35316+ unsigned int i;
35317 for (i = 0; i < ehdr.e_shnum; i++) {
35318 struct section *sec = &secs[i];
35319 if (sec->shdr.sh_type != SHT_STRTAB) {
35320@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35321
35322 static void read_symtabs(FILE *fp)
35323 {
35324- int i,j;
35325+ unsigned int i,j;
35326 for (i = 0; i < ehdr.e_shnum; i++) {
35327 struct section *sec = &secs[i];
35328 if (sec->shdr.sh_type != SHT_SYMTAB) {
35329@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35330 }
35331
35332
35333-static void read_relocs(FILE *fp)
35334+static void read_relocs(FILE *fp, int use_real_mode)
35335 {
35336- int i,j;
35337+ unsigned int i,j;
35338+ uint32_t base;
35339+
35340 for (i = 0; i < ehdr.e_shnum; i++) {
35341 struct section *sec = &secs[i];
35342 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35343@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35344 die("Cannot read symbol table: %s\n",
35345 strerror(errno));
35346 }
35347+ base = 0;
35348+
35349+#ifdef CONFIG_X86_32
35350+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35351+ if (phdr[j].p_type != PT_LOAD )
35352+ continue;
35353+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35354+ continue;
35355+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35356+ break;
35357+ }
35358+#endif
35359+
35360 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35361 Elf_Rel *rel = &sec->reltab[j];
35362- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35363+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35364 rel->r_info = elf_xword_to_cpu(rel->r_info);
35365 #if (SHT_REL_TYPE == SHT_RELA)
35366 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35367@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35368
35369 static void print_absolute_symbols(void)
35370 {
35371- int i;
35372+ unsigned int i;
35373 const char *format;
35374
35375 if (ELF_BITS == 64)
35376@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35377 for (i = 0; i < ehdr.e_shnum; i++) {
35378 struct section *sec = &secs[i];
35379 char *sym_strtab;
35380- int j;
35381+ unsigned int j;
35382
35383 if (sec->shdr.sh_type != SHT_SYMTAB) {
35384 continue;
35385@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35386
35387 static void print_absolute_relocs(void)
35388 {
35389- int i, printed = 0;
35390+ unsigned int i, printed = 0;
35391 const char *format;
35392
35393 if (ELF_BITS == 64)
35394@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35395 struct section *sec_applies, *sec_symtab;
35396 char *sym_strtab;
35397 Elf_Sym *sh_symtab;
35398- int j;
35399+ unsigned int j;
35400 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35401 continue;
35402 }
35403@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35404 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35405 Elf_Sym *sym, const char *symname))
35406 {
35407- int i;
35408+ unsigned int i;
35409 /* Walk through the relocations */
35410 for (i = 0; i < ehdr.e_shnum; i++) {
35411 char *sym_strtab;
35412 Elf_Sym *sh_symtab;
35413 struct section *sec_applies, *sec_symtab;
35414- int j;
35415+ unsigned int j;
35416 struct section *sec = &secs[i];
35417
35418 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35419@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35420 {
35421 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35422 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35423+ char *sym_strtab = sec->link->link->strtab;
35424+
35425+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35426+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35427+ return 0;
35428+
35429+#ifdef CONFIG_PAX_KERNEXEC
35430+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35431+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35432+ return 0;
35433+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35434+ return 0;
35435+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35436+ return 0;
35437+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35438+ return 0;
35439+#endif
35440
35441 switch (r_type) {
35442 case R_386_NONE:
35443@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35444
35445 static void emit_relocs(int as_text, int use_real_mode)
35446 {
35447- int i;
35448+ unsigned int i;
35449 int (*write_reloc)(uint32_t, FILE *) = write32;
35450 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35451 const char *symname);
35452@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35453 {
35454 regex_init(use_real_mode);
35455 read_ehdr(fp);
35456+ read_phdrs(fp);
35457 read_shdrs(fp);
35458 read_strtabs(fp);
35459 read_symtabs(fp);
35460- read_relocs(fp);
35461+ read_relocs(fp, use_real_mode);
35462 if (ELF_BITS == 64)
35463 percpu_init();
35464 if (show_absolute_syms) {
35465diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35466index f40281e..92728c9 100644
35467--- a/arch/x86/um/mem_32.c
35468+++ b/arch/x86/um/mem_32.c
35469@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35470 gate_vma.vm_start = FIXADDR_USER_START;
35471 gate_vma.vm_end = FIXADDR_USER_END;
35472 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35473- gate_vma.vm_page_prot = __P101;
35474+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35475
35476 return 0;
35477 }
35478diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35479index 80ffa5b..a33bd15 100644
35480--- a/arch/x86/um/tls_32.c
35481+++ b/arch/x86/um/tls_32.c
35482@@ -260,7 +260,7 @@ out:
35483 if (unlikely(task == current &&
35484 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35485 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35486- "without flushed TLS.", current->pid);
35487+ "without flushed TLS.", task_pid_nr(current));
35488 }
35489
35490 return 0;
35491diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35492index 5a4affe..9e2d522 100644
35493--- a/arch/x86/vdso/Makefile
35494+++ b/arch/x86/vdso/Makefile
35495@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35496 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35497 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35498
35499-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35500+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35501 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35502 GCOV_PROFILE := n
35503
35504diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35505index 0224987..c7d65a5 100644
35506--- a/arch/x86/vdso/vdso2c.h
35507+++ b/arch/x86/vdso/vdso2c.h
35508@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35509 unsigned long load_size = -1; /* Work around bogus warning */
35510 unsigned long mapping_size;
35511 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35512- int i;
35513+ unsigned int i;
35514 unsigned long j;
35515 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35516 *alt_sec = NULL;
35517diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35518index e904c27..b9eaa03 100644
35519--- a/arch/x86/vdso/vdso32-setup.c
35520+++ b/arch/x86/vdso/vdso32-setup.c
35521@@ -14,6 +14,7 @@
35522 #include <asm/cpufeature.h>
35523 #include <asm/processor.h>
35524 #include <asm/vdso.h>
35525+#include <asm/mman.h>
35526
35527 #ifdef CONFIG_COMPAT_VDSO
35528 #define VDSO_DEFAULT 0
35529diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35530index 1c9f750..cfddb1a 100644
35531--- a/arch/x86/vdso/vma.c
35532+++ b/arch/x86/vdso/vma.c
35533@@ -19,10 +19,7 @@
35534 #include <asm/page.h>
35535 #include <asm/hpet.h>
35536 #include <asm/desc.h>
35537-
35538-#if defined(CONFIG_X86_64)
35539-unsigned int __read_mostly vdso64_enabled = 1;
35540-#endif
35541+#include <asm/mman.h>
35542
35543 void __init init_vdso_image(const struct vdso_image *image)
35544 {
35545@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35546 .pages = no_pages,
35547 };
35548
35549+#ifdef CONFIG_PAX_RANDMMAP
35550+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35551+ calculate_addr = false;
35552+#endif
35553+
35554 if (calculate_addr) {
35555 addr = vdso_addr(current->mm->start_stack,
35556 image->size - image->sym_vvar_start);
35557@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35558 down_write(&mm->mmap_sem);
35559
35560 addr = get_unmapped_area(NULL, addr,
35561- image->size - image->sym_vvar_start, 0, 0);
35562+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35563 if (IS_ERR_VALUE(addr)) {
35564 ret = addr;
35565 goto up_fail;
35566 }
35567
35568 text_start = addr - image->sym_vvar_start;
35569- current->mm->context.vdso = (void __user *)text_start;
35570+ mm->context.vdso = text_start;
35571
35572 /*
35573 * MAYWRITE to allow gdb to COW and set breakpoints
35574@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35575 hpet_address >> PAGE_SHIFT,
35576 PAGE_SIZE,
35577 pgprot_noncached(PAGE_READONLY));
35578-
35579- if (ret)
35580- goto up_fail;
35581 }
35582 #endif
35583
35584 up_fail:
35585 if (ret)
35586- current->mm->context.vdso = NULL;
35587+ current->mm->context.vdso = 0;
35588
35589 up_write(&mm->mmap_sem);
35590 return ret;
35591@@ -191,8 +190,8 @@ static int load_vdso32(void)
35592
35593 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35594 current_thread_info()->sysenter_return =
35595- current->mm->context.vdso +
35596- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35597+ (void __force_user *)(current->mm->context.vdso +
35598+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35599
35600 return 0;
35601 }
35602@@ -201,9 +200,6 @@ static int load_vdso32(void)
35603 #ifdef CONFIG_X86_64
35604 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35605 {
35606- if (!vdso64_enabled)
35607- return 0;
35608-
35609 return map_vdso(&vdso_image_64, true);
35610 }
35611
35612@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35613 int uses_interp)
35614 {
35615 #ifdef CONFIG_X86_X32_ABI
35616- if (test_thread_flag(TIF_X32)) {
35617- if (!vdso64_enabled)
35618- return 0;
35619-
35620+ if (test_thread_flag(TIF_X32))
35621 return map_vdso(&vdso_image_x32, true);
35622- }
35623 #endif
35624
35625 return load_vdso32();
35626@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35627 #endif
35628
35629 #ifdef CONFIG_X86_64
35630-static __init int vdso_setup(char *s)
35631-{
35632- vdso64_enabled = simple_strtoul(s, NULL, 0);
35633- return 0;
35634-}
35635-__setup("vdso=", vdso_setup);
35636-#endif
35637-
35638-#ifdef CONFIG_X86_64
35639 static void vgetcpu_cpu_init(void *arg)
35640 {
35641 int cpu = smp_processor_id();
35642diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35643index e88fda8..76ce7ce 100644
35644--- a/arch/x86/xen/Kconfig
35645+++ b/arch/x86/xen/Kconfig
35646@@ -9,6 +9,7 @@ config XEN
35647 select XEN_HAVE_PVMMU
35648 depends on X86_64 || (X86_32 && X86_PAE)
35649 depends on X86_TSC
35650+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35651 help
35652 This is the Linux Xen port. Enabling this will allow the
35653 kernel to boot in a paravirtualized environment under the
35654diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35655index 78a881b..9994bbb 100644
35656--- a/arch/x86/xen/enlighten.c
35657+++ b/arch/x86/xen/enlighten.c
35658@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35659
35660 struct shared_info xen_dummy_shared_info;
35661
35662-void *xen_initial_gdt;
35663-
35664 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35665 __read_mostly int xen_have_vector_callback;
35666 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35667@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35668 {
35669 unsigned long va = dtr->address;
35670 unsigned int size = dtr->size + 1;
35671- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35672- unsigned long frames[pages];
35673+ unsigned long frames[65536 / PAGE_SIZE];
35674 int f;
35675
35676 /*
35677@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35678 {
35679 unsigned long va = dtr->address;
35680 unsigned int size = dtr->size + 1;
35681- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35682- unsigned long frames[pages];
35683+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35684 int f;
35685
35686 /*
35687@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35688 * 8-byte entries, or 16 4k pages..
35689 */
35690
35691- BUG_ON(size > 65536);
35692+ BUG_ON(size > GDT_SIZE);
35693 BUG_ON(va & ~PAGE_MASK);
35694
35695 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35696@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35697 return 0;
35698 }
35699
35700-static void set_xen_basic_apic_ops(void)
35701+static void __init set_xen_basic_apic_ops(void)
35702 {
35703 apic->read = xen_apic_read;
35704 apic->write = xen_apic_write;
35705@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35706 #endif
35707 };
35708
35709-static void xen_reboot(int reason)
35710+static __noreturn void xen_reboot(int reason)
35711 {
35712 struct sched_shutdown r = { .reason = reason };
35713
35714- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35715- BUG();
35716+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35717+ BUG();
35718 }
35719
35720-static void xen_restart(char *msg)
35721+static __noreturn void xen_restart(char *msg)
35722 {
35723 xen_reboot(SHUTDOWN_reboot);
35724 }
35725
35726-static void xen_emergency_restart(void)
35727+static __noreturn void xen_emergency_restart(void)
35728 {
35729 xen_reboot(SHUTDOWN_reboot);
35730 }
35731
35732-static void xen_machine_halt(void)
35733+static __noreturn void xen_machine_halt(void)
35734 {
35735 xen_reboot(SHUTDOWN_poweroff);
35736 }
35737
35738-static void xen_machine_power_off(void)
35739+static __noreturn void xen_machine_power_off(void)
35740 {
35741 if (pm_power_off)
35742 pm_power_off();
35743@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35744 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35745 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35746
35747- setup_stack_canary_segment(0);
35748- switch_to_new_gdt(0);
35749+ setup_stack_canary_segment(cpu);
35750+#ifdef CONFIG_X86_64
35751+ load_percpu_segment(cpu);
35752+#endif
35753+ switch_to_new_gdt(cpu);
35754
35755 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35756 pv_cpu_ops.load_gdt = xen_load_gdt;
35757@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35758 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35759
35760 /* Work out if we support NX */
35761- x86_configure_nx();
35762+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35763+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35764+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35765+ unsigned l, h;
35766+
35767+ __supported_pte_mask |= _PAGE_NX;
35768+ rdmsr(MSR_EFER, l, h);
35769+ l |= EFER_NX;
35770+ wrmsr(MSR_EFER, l, h);
35771+ }
35772+#endif
35773
35774 /* Get mfn list */
35775 xen_build_dynamic_phys_to_machine();
35776@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35777
35778 machine_ops = xen_machine_ops;
35779
35780- /*
35781- * The only reliable way to retain the initial address of the
35782- * percpu gdt_page is to remember it here, so we can go and
35783- * mark it RW later, when the initial percpu area is freed.
35784- */
35785- xen_initial_gdt = &per_cpu(gdt_page, 0);
35786-
35787 xen_smp_init();
35788
35789 #ifdef CONFIG_ACPI_NUMA
35790diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35791index 5c1f9ac..0e15f5c 100644
35792--- a/arch/x86/xen/mmu.c
35793+++ b/arch/x86/xen/mmu.c
35794@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35795 return val;
35796 }
35797
35798-static pteval_t pte_pfn_to_mfn(pteval_t val)
35799+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35800 {
35801 if (val & _PAGE_PRESENT) {
35802 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35803@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35804 * L3_k[511] -> level2_fixmap_pgt */
35805 convert_pfn_mfn(level3_kernel_pgt);
35806
35807+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35808+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35809+ convert_pfn_mfn(level3_vmemmap_pgt);
35810 /* L3_k[511][506] -> level1_fixmap_pgt */
35811+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35812 convert_pfn_mfn(level2_fixmap_pgt);
35813 }
35814 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35815@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35816 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35817 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35818 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35819+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35820+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35821+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35822 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35823 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35824+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35825 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35826 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35827 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35828+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35829
35830 /* Pin down new L4 */
35831 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35832@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35833 pv_mmu_ops.set_pud = xen_set_pud;
35834 #if PAGETABLE_LEVELS == 4
35835 pv_mmu_ops.set_pgd = xen_set_pgd;
35836+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35837 #endif
35838
35839 /* This will work as long as patching hasn't happened yet
35840@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35841 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35842 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35843 .set_pgd = xen_set_pgd_hyper,
35844+ .set_pgd_batched = xen_set_pgd_hyper,
35845
35846 .alloc_pud = xen_alloc_pmd_init,
35847 .release_pud = xen_release_pmd_init,
35848diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35849index 4c071ae..00e7049 100644
35850--- a/arch/x86/xen/smp.c
35851+++ b/arch/x86/xen/smp.c
35852@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35853
35854 if (xen_pv_domain()) {
35855 if (!xen_feature(XENFEAT_writable_page_tables))
35856- /* We've switched to the "real" per-cpu gdt, so make
35857- * sure the old memory can be recycled. */
35858- make_lowmem_page_readwrite(xen_initial_gdt);
35859-
35860 #ifdef CONFIG_X86_32
35861 /*
35862 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35863 * expects __USER_DS
35864 */
35865- loadsegment(ds, __USER_DS);
35866- loadsegment(es, __USER_DS);
35867+ loadsegment(ds, __KERNEL_DS);
35868+ loadsegment(es, __KERNEL_DS);
35869 #endif
35870
35871 xen_filter_cpu_maps();
35872@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35873 #ifdef CONFIG_X86_32
35874 /* Note: PVH is not yet supported on x86_32. */
35875 ctxt->user_regs.fs = __KERNEL_PERCPU;
35876- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35877+ savesegment(gs, ctxt->user_regs.gs);
35878 #endif
35879 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35880
35881@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35882 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35883 ctxt->flags = VGCF_IN_KERNEL;
35884 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35885- ctxt->user_regs.ds = __USER_DS;
35886- ctxt->user_regs.es = __USER_DS;
35887+ ctxt->user_regs.ds = __KERNEL_DS;
35888+ ctxt->user_regs.es = __KERNEL_DS;
35889 ctxt->user_regs.ss = __KERNEL_DS;
35890
35891 xen_copy_trap_info(ctxt->trap_ctxt);
35892@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35893 int rc;
35894
35895 per_cpu(current_task, cpu) = idle;
35896+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35897 #ifdef CONFIG_X86_32
35898 irq_ctx_init(cpu);
35899 #else
35900 clear_tsk_thread_flag(idle, TIF_FORK);
35901 #endif
35902- per_cpu(kernel_stack, cpu) =
35903- (unsigned long)task_stack_page(idle) -
35904- KERNEL_STACK_OFFSET + THREAD_SIZE;
35905+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35906
35907 xen_setup_runstate_info(cpu);
35908 xen_setup_timer(cpu);
35909@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35910
35911 void __init xen_smp_init(void)
35912 {
35913- smp_ops = xen_smp_ops;
35914+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35915 xen_fill_possible_map();
35916 }
35917
35918diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35919index fd92a64..1f72641 100644
35920--- a/arch/x86/xen/xen-asm_32.S
35921+++ b/arch/x86/xen/xen-asm_32.S
35922@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35923 pushw %fs
35924 movl $(__KERNEL_PERCPU), %eax
35925 movl %eax, %fs
35926- movl %fs:xen_vcpu, %eax
35927+ mov PER_CPU_VAR(xen_vcpu), %eax
35928 POP_FS
35929 #else
35930 movl %ss:xen_vcpu, %eax
35931diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35932index 674b2225..f1f5dc1 100644
35933--- a/arch/x86/xen/xen-head.S
35934+++ b/arch/x86/xen/xen-head.S
35935@@ -39,6 +39,17 @@ ENTRY(startup_xen)
35936 #ifdef CONFIG_X86_32
35937 mov %esi,xen_start_info
35938 mov $init_thread_union+THREAD_SIZE,%esp
35939+#ifdef CONFIG_SMP
35940+ movl $cpu_gdt_table,%edi
35941+ movl $__per_cpu_load,%eax
35942+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35943+ rorl $16,%eax
35944+ movb %al,__KERNEL_PERCPU + 4(%edi)
35945+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35946+ movl $__per_cpu_end - 1,%eax
35947+ subl $__per_cpu_start,%eax
35948+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35949+#endif
35950 #else
35951 mov %rsi,xen_start_info
35952 mov $init_thread_union+THREAD_SIZE,%rsp
35953diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35954index 5686bd9..0c8b6ee 100644
35955--- a/arch/x86/xen/xen-ops.h
35956+++ b/arch/x86/xen/xen-ops.h
35957@@ -10,8 +10,6 @@
35958 extern const char xen_hypervisor_callback[];
35959 extern const char xen_failsafe_callback[];
35960
35961-extern void *xen_initial_gdt;
35962-
35963 struct trap_info;
35964 void xen_copy_trap_info(struct trap_info *traps);
35965
35966diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35967index 525bd3d..ef888b1 100644
35968--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35969+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35970@@ -119,9 +119,9 @@
35971 ----------------------------------------------------------------------*/
35972
35973 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35974-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35975 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35976 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35977+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35978
35979 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35980 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35981diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35982index 2f33760..835e50a 100644
35983--- a/arch/xtensa/variants/fsf/include/variant/core.h
35984+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35985@@ -11,6 +11,7 @@
35986 #ifndef _XTENSA_CORE_H
35987 #define _XTENSA_CORE_H
35988
35989+#include <linux/const.h>
35990
35991 /****************************************************************************
35992 Parameters Useful for Any Code, USER or PRIVILEGED
35993@@ -112,9 +113,9 @@
35994 ----------------------------------------------------------------------*/
35995
35996 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35997-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35998 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35999 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36000+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36001
36002 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36003 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36004diff --git a/block/bio.c b/block/bio.c
36005index 471d738..bd3da0d 100644
36006--- a/block/bio.c
36007+++ b/block/bio.c
36008@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36009 /*
36010 * Overflow, abort
36011 */
36012- if (end < start)
36013+ if (end < start || end - start > INT_MAX - nr_pages)
36014 return ERR_PTR(-EINVAL);
36015
36016 nr_pages += end - start;
36017@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36018 /*
36019 * Overflow, abort
36020 */
36021- if (end < start)
36022+ if (end < start || end - start > INT_MAX - nr_pages)
36023 return ERR_PTR(-EINVAL);
36024
36025 nr_pages += end - start;
36026@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36027 const int read = bio_data_dir(bio) == READ;
36028 struct bio_map_data *bmd = bio->bi_private;
36029 int i;
36030- char *p = bmd->sgvecs[0].iov_base;
36031+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36032
36033 bio_for_each_segment_all(bvec, bio, i) {
36034 char *addr = page_address(bvec->bv_page);
36035diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36036index 0736729..2ec3b48 100644
36037--- a/block/blk-iopoll.c
36038+++ b/block/blk-iopoll.c
36039@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36040 }
36041 EXPORT_SYMBOL(blk_iopoll_complete);
36042
36043-static void blk_iopoll_softirq(struct softirq_action *h)
36044+static __latent_entropy void blk_iopoll_softirq(void)
36045 {
36046 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36047 int rearm = 0, budget = blk_iopoll_budget;
36048diff --git a/block/blk-map.c b/block/blk-map.c
36049index f890d43..97b0482 100644
36050--- a/block/blk-map.c
36051+++ b/block/blk-map.c
36052@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36053 if (!len || !kbuf)
36054 return -EINVAL;
36055
36056- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36057+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36058 if (do_copy)
36059 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36060 else
36061diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36062index 53b1737..08177d2e 100644
36063--- a/block/blk-softirq.c
36064+++ b/block/blk-softirq.c
36065@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36066 * Softirq action handler - move entries to local list and loop over them
36067 * while passing them to the queue registered handler.
36068 */
36069-static void blk_done_softirq(struct softirq_action *h)
36070+static __latent_entropy void blk_done_softirq(void)
36071 {
36072 struct list_head *cpu_list, local_list;
36073
36074diff --git a/block/bsg.c b/block/bsg.c
36075index 276e869..6fe4c61 100644
36076--- a/block/bsg.c
36077+++ b/block/bsg.c
36078@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36079 struct sg_io_v4 *hdr, struct bsg_device *bd,
36080 fmode_t has_write_perm)
36081 {
36082+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36083+ unsigned char *cmdptr;
36084+
36085 if (hdr->request_len > BLK_MAX_CDB) {
36086 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36087 if (!rq->cmd)
36088 return -ENOMEM;
36089- }
36090+ cmdptr = rq->cmd;
36091+ } else
36092+ cmdptr = tmpcmd;
36093
36094- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36095+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36096 hdr->request_len))
36097 return -EFAULT;
36098
36099+ if (cmdptr != rq->cmd)
36100+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36101+
36102 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36103 if (blk_verify_command(rq->cmd, has_write_perm))
36104 return -EPERM;
36105diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36106index f678c73..f35aa18 100644
36107--- a/block/compat_ioctl.c
36108+++ b/block/compat_ioctl.c
36109@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36110 cgc = compat_alloc_user_space(sizeof(*cgc));
36111 cgc32 = compat_ptr(arg);
36112
36113- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36114+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36115 get_user(data, &cgc32->buffer) ||
36116 put_user(compat_ptr(data), &cgc->buffer) ||
36117 copy_in_user(&cgc->buflen, &cgc32->buflen,
36118@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36119 err |= __get_user(f->spec1, &uf->spec1);
36120 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36121 err |= __get_user(name, &uf->name);
36122- f->name = compat_ptr(name);
36123+ f->name = (void __force_kernel *)compat_ptr(name);
36124 if (err) {
36125 err = -EFAULT;
36126 goto out;
36127diff --git a/block/genhd.c b/block/genhd.c
36128index 0a536dc..b8f7aca 100644
36129--- a/block/genhd.c
36130+++ b/block/genhd.c
36131@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36132
36133 /*
36134 * Register device numbers dev..(dev+range-1)
36135- * range must be nonzero
36136+ * Noop if @range is zero.
36137 * The hash chain is sorted on range, so that subranges can override.
36138 */
36139 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36140 struct kobject *(*probe)(dev_t, int *, void *),
36141 int (*lock)(dev_t, void *), void *data)
36142 {
36143- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36144+ if (range)
36145+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36146 }
36147
36148 EXPORT_SYMBOL(blk_register_region);
36149
36150+/* undo blk_register_region(), noop if @range is zero */
36151 void blk_unregister_region(dev_t devt, unsigned long range)
36152 {
36153- kobj_unmap(bdev_map, devt, range);
36154+ if (range)
36155+ kobj_unmap(bdev_map, devt, range);
36156 }
36157
36158 EXPORT_SYMBOL(blk_unregister_region);
36159diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36160index 56d08fd..2e07090 100644
36161--- a/block/partitions/efi.c
36162+++ b/block/partitions/efi.c
36163@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36164 if (!gpt)
36165 return NULL;
36166
36167+ if (!le32_to_cpu(gpt->num_partition_entries))
36168+ return NULL;
36169+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36170+ if (!pte)
36171+ return NULL;
36172+
36173 count = le32_to_cpu(gpt->num_partition_entries) *
36174 le32_to_cpu(gpt->sizeof_partition_entry);
36175- if (!count)
36176- return NULL;
36177- pte = kmalloc(count, GFP_KERNEL);
36178- if (!pte)
36179- return NULL;
36180-
36181 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36182 (u8 *) pte, count) < count) {
36183 kfree(pte);
36184diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36185index 28163fa..07190a06 100644
36186--- a/block/scsi_ioctl.c
36187+++ b/block/scsi_ioctl.c
36188@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36189 return put_user(0, p);
36190 }
36191
36192-static int sg_get_timeout(struct request_queue *q)
36193+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36194 {
36195 return jiffies_to_clock_t(q->sg_timeout);
36196 }
36197@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36198 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36199 struct sg_io_hdr *hdr, fmode_t mode)
36200 {
36201- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36202+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36203+ unsigned char *cmdptr;
36204+
36205+ if (rq->cmd != rq->__cmd)
36206+ cmdptr = rq->cmd;
36207+ else
36208+ cmdptr = tmpcmd;
36209+
36210+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36211 return -EFAULT;
36212+
36213+ if (cmdptr != rq->cmd)
36214+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36215+
36216 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36217 return -EPERM;
36218
36219@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36220 int err;
36221 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36222 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36223+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36224+ unsigned char *cmdptr;
36225
36226 if (!sic)
36227 return -EINVAL;
36228@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36229 */
36230 err = -EFAULT;
36231 rq->cmd_len = cmdlen;
36232- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36233+
36234+ if (rq->cmd != rq->__cmd)
36235+ cmdptr = rq->cmd;
36236+ else
36237+ cmdptr = tmpcmd;
36238+
36239+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36240 goto error;
36241
36242+ if (rq->cmd != cmdptr)
36243+ memcpy(rq->cmd, cmdptr, cmdlen);
36244+
36245 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36246 goto error;
36247
36248diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36249index 650afac1..f3307de 100644
36250--- a/crypto/cryptd.c
36251+++ b/crypto/cryptd.c
36252@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36253
36254 struct cryptd_blkcipher_request_ctx {
36255 crypto_completion_t complete;
36256-};
36257+} __no_const;
36258
36259 struct cryptd_hash_ctx {
36260 struct crypto_shash *child;
36261@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36262
36263 struct cryptd_aead_request_ctx {
36264 crypto_completion_t complete;
36265-};
36266+} __no_const;
36267
36268 static void cryptd_queue_worker(struct work_struct *work);
36269
36270diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36271index c305d41..a96de79 100644
36272--- a/crypto/pcrypt.c
36273+++ b/crypto/pcrypt.c
36274@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36275 int ret;
36276
36277 pinst->kobj.kset = pcrypt_kset;
36278- ret = kobject_add(&pinst->kobj, NULL, name);
36279+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36280 if (!ret)
36281 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36282
36283diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36284index 6921c7f..78e1af7 100644
36285--- a/drivers/acpi/acpica/hwxfsleep.c
36286+++ b/drivers/acpi/acpica/hwxfsleep.c
36287@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36288 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36289
36290 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36291- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36292- acpi_hw_extended_sleep},
36293- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36294- acpi_hw_extended_wake_prep},
36295- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36296+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36297+ .extended_function = acpi_hw_extended_sleep},
36298+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36299+ .extended_function = acpi_hw_extended_wake_prep},
36300+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36301+ .extended_function = acpi_hw_extended_wake}
36302 };
36303
36304 /*
36305diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36306index 16129c7..8b675cd 100644
36307--- a/drivers/acpi/apei/apei-internal.h
36308+++ b/drivers/acpi/apei/apei-internal.h
36309@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36310 struct apei_exec_ins_type {
36311 u32 flags;
36312 apei_exec_ins_func_t run;
36313-};
36314+} __do_const;
36315
36316 struct apei_exec_context {
36317 u32 ip;
36318diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36319index e82d097..0c855c1 100644
36320--- a/drivers/acpi/apei/ghes.c
36321+++ b/drivers/acpi/apei/ghes.c
36322@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36323 const struct acpi_hest_generic *generic,
36324 const struct acpi_hest_generic_status *estatus)
36325 {
36326- static atomic_t seqno;
36327+ static atomic_unchecked_t seqno;
36328 unsigned int curr_seqno;
36329 char pfx_seq[64];
36330
36331@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36332 else
36333 pfx = KERN_ERR;
36334 }
36335- curr_seqno = atomic_inc_return(&seqno);
36336+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36337 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36338 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36339 pfx_seq, generic->header.source_id);
36340diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36341index a83e3c6..c3d617f 100644
36342--- a/drivers/acpi/bgrt.c
36343+++ b/drivers/acpi/bgrt.c
36344@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36345 if (!bgrt_image)
36346 return -ENODEV;
36347
36348- bin_attr_image.private = bgrt_image;
36349- bin_attr_image.size = bgrt_image_size;
36350+ pax_open_kernel();
36351+ *(void **)&bin_attr_image.private = bgrt_image;
36352+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36353+ pax_close_kernel();
36354
36355 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36356 if (!bgrt_kobj)
36357diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36358index 9b693d5..8953d54 100644
36359--- a/drivers/acpi/blacklist.c
36360+++ b/drivers/acpi/blacklist.c
36361@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36362 u32 is_critical_error;
36363 };
36364
36365-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36366+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36367
36368 /*
36369 * POLICY: If *anything* doesn't work, put it on the blacklist.
36370@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36371 return 0;
36372 }
36373
36374-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36375+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36376 {
36377 .callback = dmi_disable_osi_vista,
36378 .ident = "Fujitsu Siemens",
36379diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36380index c68e724..e863008 100644
36381--- a/drivers/acpi/custom_method.c
36382+++ b/drivers/acpi/custom_method.c
36383@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36384 struct acpi_table_header table;
36385 acpi_status status;
36386
36387+#ifdef CONFIG_GRKERNSEC_KMEM
36388+ return -EPERM;
36389+#endif
36390+
36391 if (!(*ppos)) {
36392 /* parse the table header to get the table length */
36393 if (count <= sizeof(struct acpi_table_header))
36394diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36395index c0d44d3..5ad8f9a 100644
36396--- a/drivers/acpi/device_pm.c
36397+++ b/drivers/acpi/device_pm.c
36398@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36399
36400 #endif /* CONFIG_PM_SLEEP */
36401
36402+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36403+
36404 static struct dev_pm_domain acpi_general_pm_domain = {
36405 .ops = {
36406 #ifdef CONFIG_PM
36407@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36408 #endif
36409 #endif
36410 },
36411+ .detach = acpi_dev_pm_detach
36412 };
36413
36414 /**
36415@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36416 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36417 }
36418
36419- dev->pm_domain->detach = acpi_dev_pm_detach;
36420 return 0;
36421 }
36422 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36423diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36424index 87b704e..2d1d0c1 100644
36425--- a/drivers/acpi/processor_idle.c
36426+++ b/drivers/acpi/processor_idle.c
36427@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36428 {
36429 int i, count = CPUIDLE_DRIVER_STATE_START;
36430 struct acpi_processor_cx *cx;
36431- struct cpuidle_state *state;
36432+ cpuidle_state_no_const *state;
36433 struct cpuidle_driver *drv = &acpi_idle_driver;
36434
36435 if (!pr->flags.power_setup_done)
36436diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36437index 13e577c..cef11ee 100644
36438--- a/drivers/acpi/sysfs.c
36439+++ b/drivers/acpi/sysfs.c
36440@@ -423,11 +423,11 @@ static u32 num_counters;
36441 static struct attribute **all_attrs;
36442 static u32 acpi_gpe_count;
36443
36444-static struct attribute_group interrupt_stats_attr_group = {
36445+static attribute_group_no_const interrupt_stats_attr_group = {
36446 .name = "interrupts",
36447 };
36448
36449-static struct kobj_attribute *counter_attrs;
36450+static kobj_attribute_no_const *counter_attrs;
36451
36452 static void delete_gpe_attr_array(void)
36453 {
36454diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36455index 61a9c07..ea98fa1 100644
36456--- a/drivers/ata/libahci.c
36457+++ b/drivers/ata/libahci.c
36458@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36459 }
36460 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36461
36462-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36463+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36464 struct ata_taskfile *tf, int is_cmd, u16 flags,
36465 unsigned long timeout_msec)
36466 {
36467diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36468index d1a05f9..eb70e10 100644
36469--- a/drivers/ata/libata-core.c
36470+++ b/drivers/ata/libata-core.c
36471@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36472 static void ata_dev_xfermask(struct ata_device *dev);
36473 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36474
36475-atomic_t ata_print_id = ATOMIC_INIT(0);
36476+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36477
36478 struct ata_force_param {
36479 const char *name;
36480@@ -4831,7 +4831,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36481 struct ata_port *ap;
36482 unsigned int tag;
36483
36484- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36485+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36486 ap = qc->ap;
36487
36488 qc->flags = 0;
36489@@ -4847,7 +4847,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36490 struct ata_port *ap;
36491 struct ata_link *link;
36492
36493- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36494+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36495 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36496 ap = qc->ap;
36497 link = qc->dev->link;
36498@@ -5951,6 +5951,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36499 return;
36500
36501 spin_lock(&lock);
36502+ pax_open_kernel();
36503
36504 for (cur = ops->inherits; cur; cur = cur->inherits) {
36505 void **inherit = (void **)cur;
36506@@ -5964,8 +5965,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36507 if (IS_ERR(*pp))
36508 *pp = NULL;
36509
36510- ops->inherits = NULL;
36511+ *(struct ata_port_operations **)&ops->inherits = NULL;
36512
36513+ pax_close_kernel();
36514 spin_unlock(&lock);
36515 }
36516
36517@@ -6161,7 +6163,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36518
36519 /* give ports names and add SCSI hosts */
36520 for (i = 0; i < host->n_ports; i++) {
36521- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36522+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36523 host->ports[i]->local_port_no = i + 1;
36524 }
36525
36526diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36527index 6abd17a..9961bf7 100644
36528--- a/drivers/ata/libata-scsi.c
36529+++ b/drivers/ata/libata-scsi.c
36530@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36531
36532 if (rc)
36533 return rc;
36534- ap->print_id = atomic_inc_return(&ata_print_id);
36535+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36536 return 0;
36537 }
36538 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36539diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36540index 5f4e0cc..ff2c347 100644
36541--- a/drivers/ata/libata.h
36542+++ b/drivers/ata/libata.h
36543@@ -53,7 +53,7 @@ enum {
36544 ATA_DNXFER_QUIET = (1 << 31),
36545 };
36546
36547-extern atomic_t ata_print_id;
36548+extern atomic_unchecked_t ata_print_id;
36549 extern int atapi_passthru16;
36550 extern int libata_fua;
36551 extern int libata_noacpi;
36552diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36553index a9b0c82..207d97d 100644
36554--- a/drivers/ata/pata_arasan_cf.c
36555+++ b/drivers/ata/pata_arasan_cf.c
36556@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36557 /* Handle platform specific quirks */
36558 if (quirk) {
36559 if (quirk & CF_BROKEN_PIO) {
36560- ap->ops->set_piomode = NULL;
36561+ pax_open_kernel();
36562+ *(void **)&ap->ops->set_piomode = NULL;
36563+ pax_close_kernel();
36564 ap->pio_mask = 0;
36565 }
36566 if (quirk & CF_BROKEN_MWDMA)
36567diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36568index f9b983a..887b9d8 100644
36569--- a/drivers/atm/adummy.c
36570+++ b/drivers/atm/adummy.c
36571@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36572 vcc->pop(vcc, skb);
36573 else
36574 dev_kfree_skb_any(skb);
36575- atomic_inc(&vcc->stats->tx);
36576+ atomic_inc_unchecked(&vcc->stats->tx);
36577
36578 return 0;
36579 }
36580diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36581index f1a9198..f466a4a 100644
36582--- a/drivers/atm/ambassador.c
36583+++ b/drivers/atm/ambassador.c
36584@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36585 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36586
36587 // VC layer stats
36588- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36589+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36590
36591 // free the descriptor
36592 kfree (tx_descr);
36593@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36594 dump_skb ("<<<", vc, skb);
36595
36596 // VC layer stats
36597- atomic_inc(&atm_vcc->stats->rx);
36598+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36599 __net_timestamp(skb);
36600 // end of our responsibility
36601 atm_vcc->push (atm_vcc, skb);
36602@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36603 } else {
36604 PRINTK (KERN_INFO, "dropped over-size frame");
36605 // should we count this?
36606- atomic_inc(&atm_vcc->stats->rx_drop);
36607+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36608 }
36609
36610 } else {
36611@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36612 }
36613
36614 if (check_area (skb->data, skb->len)) {
36615- atomic_inc(&atm_vcc->stats->tx_err);
36616+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36617 return -ENOMEM; // ?
36618 }
36619
36620diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36621index 480fa6f..947067c 100644
36622--- a/drivers/atm/atmtcp.c
36623+++ b/drivers/atm/atmtcp.c
36624@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36625 if (vcc->pop) vcc->pop(vcc,skb);
36626 else dev_kfree_skb(skb);
36627 if (dev_data) return 0;
36628- atomic_inc(&vcc->stats->tx_err);
36629+ atomic_inc_unchecked(&vcc->stats->tx_err);
36630 return -ENOLINK;
36631 }
36632 size = skb->len+sizeof(struct atmtcp_hdr);
36633@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36634 if (!new_skb) {
36635 if (vcc->pop) vcc->pop(vcc,skb);
36636 else dev_kfree_skb(skb);
36637- atomic_inc(&vcc->stats->tx_err);
36638+ atomic_inc_unchecked(&vcc->stats->tx_err);
36639 return -ENOBUFS;
36640 }
36641 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36642@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36643 if (vcc->pop) vcc->pop(vcc,skb);
36644 else dev_kfree_skb(skb);
36645 out_vcc->push(out_vcc,new_skb);
36646- atomic_inc(&vcc->stats->tx);
36647- atomic_inc(&out_vcc->stats->rx);
36648+ atomic_inc_unchecked(&vcc->stats->tx);
36649+ atomic_inc_unchecked(&out_vcc->stats->rx);
36650 return 0;
36651 }
36652
36653@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36654 read_unlock(&vcc_sklist_lock);
36655 if (!out_vcc) {
36656 result = -EUNATCH;
36657- atomic_inc(&vcc->stats->tx_err);
36658+ atomic_inc_unchecked(&vcc->stats->tx_err);
36659 goto done;
36660 }
36661 skb_pull(skb,sizeof(struct atmtcp_hdr));
36662@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36663 __net_timestamp(new_skb);
36664 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36665 out_vcc->push(out_vcc,new_skb);
36666- atomic_inc(&vcc->stats->tx);
36667- atomic_inc(&out_vcc->stats->rx);
36668+ atomic_inc_unchecked(&vcc->stats->tx);
36669+ atomic_inc_unchecked(&out_vcc->stats->rx);
36670 done:
36671 if (vcc->pop) vcc->pop(vcc,skb);
36672 else dev_kfree_skb(skb);
36673diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36674index c7fab3e..68d0965 100644
36675--- a/drivers/atm/eni.c
36676+++ b/drivers/atm/eni.c
36677@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36678 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36679 vcc->dev->number);
36680 length = 0;
36681- atomic_inc(&vcc->stats->rx_err);
36682+ atomic_inc_unchecked(&vcc->stats->rx_err);
36683 }
36684 else {
36685 length = ATM_CELL_SIZE-1; /* no HEC */
36686@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36687 size);
36688 }
36689 eff = length = 0;
36690- atomic_inc(&vcc->stats->rx_err);
36691+ atomic_inc_unchecked(&vcc->stats->rx_err);
36692 }
36693 else {
36694 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36695@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36696 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36697 vcc->dev->number,vcc->vci,length,size << 2,descr);
36698 length = eff = 0;
36699- atomic_inc(&vcc->stats->rx_err);
36700+ atomic_inc_unchecked(&vcc->stats->rx_err);
36701 }
36702 }
36703 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36704@@ -770,7 +770,7 @@ rx_dequeued++;
36705 vcc->push(vcc,skb);
36706 pushed++;
36707 }
36708- atomic_inc(&vcc->stats->rx);
36709+ atomic_inc_unchecked(&vcc->stats->rx);
36710 }
36711 wake_up(&eni_dev->rx_wait);
36712 }
36713@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36714 PCI_DMA_TODEVICE);
36715 if (vcc->pop) vcc->pop(vcc,skb);
36716 else dev_kfree_skb_irq(skb);
36717- atomic_inc(&vcc->stats->tx);
36718+ atomic_inc_unchecked(&vcc->stats->tx);
36719 wake_up(&eni_dev->tx_wait);
36720 dma_complete++;
36721 }
36722diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36723index 82f2ae0..f205c02 100644
36724--- a/drivers/atm/firestream.c
36725+++ b/drivers/atm/firestream.c
36726@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36727 }
36728 }
36729
36730- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36731+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36732
36733 fs_dprintk (FS_DEBUG_TXMEM, "i");
36734 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36735@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36736 #endif
36737 skb_put (skb, qe->p1 & 0xffff);
36738 ATM_SKB(skb)->vcc = atm_vcc;
36739- atomic_inc(&atm_vcc->stats->rx);
36740+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36741 __net_timestamp(skb);
36742 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36743 atm_vcc->push (atm_vcc, skb);
36744@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36745 kfree (pe);
36746 }
36747 if (atm_vcc)
36748- atomic_inc(&atm_vcc->stats->rx_drop);
36749+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36750 break;
36751 case 0x1f: /* Reassembly abort: no buffers. */
36752 /* Silently increment error counter. */
36753 if (atm_vcc)
36754- atomic_inc(&atm_vcc->stats->rx_drop);
36755+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36756 break;
36757 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36758 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36759diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36760index d5d9eaf..65c0d53 100644
36761--- a/drivers/atm/fore200e.c
36762+++ b/drivers/atm/fore200e.c
36763@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36764 #endif
36765 /* check error condition */
36766 if (*entry->status & STATUS_ERROR)
36767- atomic_inc(&vcc->stats->tx_err);
36768+ atomic_inc_unchecked(&vcc->stats->tx_err);
36769 else
36770- atomic_inc(&vcc->stats->tx);
36771+ atomic_inc_unchecked(&vcc->stats->tx);
36772 }
36773 }
36774
36775@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36776 if (skb == NULL) {
36777 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36778
36779- atomic_inc(&vcc->stats->rx_drop);
36780+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36781 return -ENOMEM;
36782 }
36783
36784@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36785
36786 dev_kfree_skb_any(skb);
36787
36788- atomic_inc(&vcc->stats->rx_drop);
36789+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36790 return -ENOMEM;
36791 }
36792
36793 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36794
36795 vcc->push(vcc, skb);
36796- atomic_inc(&vcc->stats->rx);
36797+ atomic_inc_unchecked(&vcc->stats->rx);
36798
36799 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36800
36801@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36802 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36803 fore200e->atm_dev->number,
36804 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36805- atomic_inc(&vcc->stats->rx_err);
36806+ atomic_inc_unchecked(&vcc->stats->rx_err);
36807 }
36808 }
36809
36810@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36811 goto retry_here;
36812 }
36813
36814- atomic_inc(&vcc->stats->tx_err);
36815+ atomic_inc_unchecked(&vcc->stats->tx_err);
36816
36817 fore200e->tx_sat++;
36818 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36819diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36820index c39702b..785b73b 100644
36821--- a/drivers/atm/he.c
36822+++ b/drivers/atm/he.c
36823@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36824
36825 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36826 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36827- atomic_inc(&vcc->stats->rx_drop);
36828+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36829 goto return_host_buffers;
36830 }
36831
36832@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36833 RBRQ_LEN_ERR(he_dev->rbrq_head)
36834 ? "LEN_ERR" : "",
36835 vcc->vpi, vcc->vci);
36836- atomic_inc(&vcc->stats->rx_err);
36837+ atomic_inc_unchecked(&vcc->stats->rx_err);
36838 goto return_host_buffers;
36839 }
36840
36841@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36842 vcc->push(vcc, skb);
36843 spin_lock(&he_dev->global_lock);
36844
36845- atomic_inc(&vcc->stats->rx);
36846+ atomic_inc_unchecked(&vcc->stats->rx);
36847
36848 return_host_buffers:
36849 ++pdus_assembled;
36850@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36851 tpd->vcc->pop(tpd->vcc, tpd->skb);
36852 else
36853 dev_kfree_skb_any(tpd->skb);
36854- atomic_inc(&tpd->vcc->stats->tx_err);
36855+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36856 }
36857 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36858 return;
36859@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36860 vcc->pop(vcc, skb);
36861 else
36862 dev_kfree_skb_any(skb);
36863- atomic_inc(&vcc->stats->tx_err);
36864+ atomic_inc_unchecked(&vcc->stats->tx_err);
36865 return -EINVAL;
36866 }
36867
36868@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36869 vcc->pop(vcc, skb);
36870 else
36871 dev_kfree_skb_any(skb);
36872- atomic_inc(&vcc->stats->tx_err);
36873+ atomic_inc_unchecked(&vcc->stats->tx_err);
36874 return -EINVAL;
36875 }
36876 #endif
36877@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36878 vcc->pop(vcc, skb);
36879 else
36880 dev_kfree_skb_any(skb);
36881- atomic_inc(&vcc->stats->tx_err);
36882+ atomic_inc_unchecked(&vcc->stats->tx_err);
36883 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36884 return -ENOMEM;
36885 }
36886@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36887 vcc->pop(vcc, skb);
36888 else
36889 dev_kfree_skb_any(skb);
36890- atomic_inc(&vcc->stats->tx_err);
36891+ atomic_inc_unchecked(&vcc->stats->tx_err);
36892 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36893 return -ENOMEM;
36894 }
36895@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36896 __enqueue_tpd(he_dev, tpd, cid);
36897 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36898
36899- atomic_inc(&vcc->stats->tx);
36900+ atomic_inc_unchecked(&vcc->stats->tx);
36901
36902 return 0;
36903 }
36904diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36905index 1dc0519..1aadaf7 100644
36906--- a/drivers/atm/horizon.c
36907+++ b/drivers/atm/horizon.c
36908@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36909 {
36910 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36911 // VC layer stats
36912- atomic_inc(&vcc->stats->rx);
36913+ atomic_inc_unchecked(&vcc->stats->rx);
36914 __net_timestamp(skb);
36915 // end of our responsibility
36916 vcc->push (vcc, skb);
36917@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36918 dev->tx_iovec = NULL;
36919
36920 // VC layer stats
36921- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36922+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36923
36924 // free the skb
36925 hrz_kfree_skb (skb);
36926diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36927index 2b24ed0..b3d6acc 100644
36928--- a/drivers/atm/idt77252.c
36929+++ b/drivers/atm/idt77252.c
36930@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36931 else
36932 dev_kfree_skb(skb);
36933
36934- atomic_inc(&vcc->stats->tx);
36935+ atomic_inc_unchecked(&vcc->stats->tx);
36936 }
36937
36938 atomic_dec(&scq->used);
36939@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36940 if ((sb = dev_alloc_skb(64)) == NULL) {
36941 printk("%s: Can't allocate buffers for aal0.\n",
36942 card->name);
36943- atomic_add(i, &vcc->stats->rx_drop);
36944+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36945 break;
36946 }
36947 if (!atm_charge(vcc, sb->truesize)) {
36948 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36949 card->name);
36950- atomic_add(i - 1, &vcc->stats->rx_drop);
36951+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36952 dev_kfree_skb(sb);
36953 break;
36954 }
36955@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36956 ATM_SKB(sb)->vcc = vcc;
36957 __net_timestamp(sb);
36958 vcc->push(vcc, sb);
36959- atomic_inc(&vcc->stats->rx);
36960+ atomic_inc_unchecked(&vcc->stats->rx);
36961
36962 cell += ATM_CELL_PAYLOAD;
36963 }
36964@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36965 "(CDC: %08x)\n",
36966 card->name, len, rpp->len, readl(SAR_REG_CDC));
36967 recycle_rx_pool_skb(card, rpp);
36968- atomic_inc(&vcc->stats->rx_err);
36969+ atomic_inc_unchecked(&vcc->stats->rx_err);
36970 return;
36971 }
36972 if (stat & SAR_RSQE_CRC) {
36973 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36974 recycle_rx_pool_skb(card, rpp);
36975- atomic_inc(&vcc->stats->rx_err);
36976+ atomic_inc_unchecked(&vcc->stats->rx_err);
36977 return;
36978 }
36979 if (skb_queue_len(&rpp->queue) > 1) {
36980@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36981 RXPRINTK("%s: Can't alloc RX skb.\n",
36982 card->name);
36983 recycle_rx_pool_skb(card, rpp);
36984- atomic_inc(&vcc->stats->rx_err);
36985+ atomic_inc_unchecked(&vcc->stats->rx_err);
36986 return;
36987 }
36988 if (!atm_charge(vcc, skb->truesize)) {
36989@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36990 __net_timestamp(skb);
36991
36992 vcc->push(vcc, skb);
36993- atomic_inc(&vcc->stats->rx);
36994+ atomic_inc_unchecked(&vcc->stats->rx);
36995
36996 return;
36997 }
36998@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36999 __net_timestamp(skb);
37000
37001 vcc->push(vcc, skb);
37002- atomic_inc(&vcc->stats->rx);
37003+ atomic_inc_unchecked(&vcc->stats->rx);
37004
37005 if (skb->truesize > SAR_FB_SIZE_3)
37006 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37007@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37008 if (vcc->qos.aal != ATM_AAL0) {
37009 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37010 card->name, vpi, vci);
37011- atomic_inc(&vcc->stats->rx_drop);
37012+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37013 goto drop;
37014 }
37015
37016 if ((sb = dev_alloc_skb(64)) == NULL) {
37017 printk("%s: Can't allocate buffers for AAL0.\n",
37018 card->name);
37019- atomic_inc(&vcc->stats->rx_err);
37020+ atomic_inc_unchecked(&vcc->stats->rx_err);
37021 goto drop;
37022 }
37023
37024@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37025 ATM_SKB(sb)->vcc = vcc;
37026 __net_timestamp(sb);
37027 vcc->push(vcc, sb);
37028- atomic_inc(&vcc->stats->rx);
37029+ atomic_inc_unchecked(&vcc->stats->rx);
37030
37031 drop:
37032 skb_pull(queue, 64);
37033@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37034
37035 if (vc == NULL) {
37036 printk("%s: NULL connection in send().\n", card->name);
37037- atomic_inc(&vcc->stats->tx_err);
37038+ atomic_inc_unchecked(&vcc->stats->tx_err);
37039 dev_kfree_skb(skb);
37040 return -EINVAL;
37041 }
37042 if (!test_bit(VCF_TX, &vc->flags)) {
37043 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37044- atomic_inc(&vcc->stats->tx_err);
37045+ atomic_inc_unchecked(&vcc->stats->tx_err);
37046 dev_kfree_skb(skb);
37047 return -EINVAL;
37048 }
37049@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37050 break;
37051 default:
37052 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37053- atomic_inc(&vcc->stats->tx_err);
37054+ atomic_inc_unchecked(&vcc->stats->tx_err);
37055 dev_kfree_skb(skb);
37056 return -EINVAL;
37057 }
37058
37059 if (skb_shinfo(skb)->nr_frags != 0) {
37060 printk("%s: No scatter-gather yet.\n", card->name);
37061- atomic_inc(&vcc->stats->tx_err);
37062+ atomic_inc_unchecked(&vcc->stats->tx_err);
37063 dev_kfree_skb(skb);
37064 return -EINVAL;
37065 }
37066@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37067
37068 err = queue_skb(card, vc, skb, oam);
37069 if (err) {
37070- atomic_inc(&vcc->stats->tx_err);
37071+ atomic_inc_unchecked(&vcc->stats->tx_err);
37072 dev_kfree_skb(skb);
37073 return err;
37074 }
37075@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37076 skb = dev_alloc_skb(64);
37077 if (!skb) {
37078 printk("%s: Out of memory in send_oam().\n", card->name);
37079- atomic_inc(&vcc->stats->tx_err);
37080+ atomic_inc_unchecked(&vcc->stats->tx_err);
37081 return -ENOMEM;
37082 }
37083 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37084diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37085index 4217f29..88f547a 100644
37086--- a/drivers/atm/iphase.c
37087+++ b/drivers/atm/iphase.c
37088@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37089 status = (u_short) (buf_desc_ptr->desc_mode);
37090 if (status & (RX_CER | RX_PTE | RX_OFL))
37091 {
37092- atomic_inc(&vcc->stats->rx_err);
37093+ atomic_inc_unchecked(&vcc->stats->rx_err);
37094 IF_ERR(printk("IA: bad packet, dropping it");)
37095 if (status & RX_CER) {
37096 IF_ERR(printk(" cause: packet CRC error\n");)
37097@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37098 len = dma_addr - buf_addr;
37099 if (len > iadev->rx_buf_sz) {
37100 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37101- atomic_inc(&vcc->stats->rx_err);
37102+ atomic_inc_unchecked(&vcc->stats->rx_err);
37103 goto out_free_desc;
37104 }
37105
37106@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37107 ia_vcc = INPH_IA_VCC(vcc);
37108 if (ia_vcc == NULL)
37109 {
37110- atomic_inc(&vcc->stats->rx_err);
37111+ atomic_inc_unchecked(&vcc->stats->rx_err);
37112 atm_return(vcc, skb->truesize);
37113 dev_kfree_skb_any(skb);
37114 goto INCR_DLE;
37115@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37116 if ((length > iadev->rx_buf_sz) || (length >
37117 (skb->len - sizeof(struct cpcs_trailer))))
37118 {
37119- atomic_inc(&vcc->stats->rx_err);
37120+ atomic_inc_unchecked(&vcc->stats->rx_err);
37121 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37122 length, skb->len);)
37123 atm_return(vcc, skb->truesize);
37124@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37125
37126 IF_RX(printk("rx_dle_intr: skb push");)
37127 vcc->push(vcc,skb);
37128- atomic_inc(&vcc->stats->rx);
37129+ atomic_inc_unchecked(&vcc->stats->rx);
37130 iadev->rx_pkt_cnt++;
37131 }
37132 INCR_DLE:
37133@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37134 {
37135 struct k_sonet_stats *stats;
37136 stats = &PRIV(_ia_dev[board])->sonet_stats;
37137- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37138- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37139- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37140- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37141- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37142- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37143- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37144- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37145- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37146+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37147+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37148+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37149+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37150+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37151+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37152+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37153+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37154+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37155 }
37156 ia_cmds.status = 0;
37157 break;
37158@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37159 if ((desc == 0) || (desc > iadev->num_tx_desc))
37160 {
37161 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37162- atomic_inc(&vcc->stats->tx);
37163+ atomic_inc_unchecked(&vcc->stats->tx);
37164 if (vcc->pop)
37165 vcc->pop(vcc, skb);
37166 else
37167@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37168 ATM_DESC(skb) = vcc->vci;
37169 skb_queue_tail(&iadev->tx_dma_q, skb);
37170
37171- atomic_inc(&vcc->stats->tx);
37172+ atomic_inc_unchecked(&vcc->stats->tx);
37173 iadev->tx_pkt_cnt++;
37174 /* Increment transaction counter */
37175 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37176
37177 #if 0
37178 /* add flow control logic */
37179- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37180+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37181 if (iavcc->vc_desc_cnt > 10) {
37182 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37183 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37184diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37185index 93eaf8d..b4ca7da 100644
37186--- a/drivers/atm/lanai.c
37187+++ b/drivers/atm/lanai.c
37188@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37189 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37190 lanai_endtx(lanai, lvcc);
37191 lanai_free_skb(lvcc->tx.atmvcc, skb);
37192- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37193+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37194 }
37195
37196 /* Try to fill the buffer - don't call unless there is backlog */
37197@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37198 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37199 __net_timestamp(skb);
37200 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37201- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37202+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37203 out:
37204 lvcc->rx.buf.ptr = end;
37205 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37206@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37207 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37208 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37209 lanai->stats.service_rxnotaal5++;
37210- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37211+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37212 return 0;
37213 }
37214 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37215@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37216 int bytes;
37217 read_unlock(&vcc_sklist_lock);
37218 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37219- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37220+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37221 lvcc->stats.x.aal5.service_trash++;
37222 bytes = (SERVICE_GET_END(s) * 16) -
37223 (((unsigned long) lvcc->rx.buf.ptr) -
37224@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37225 }
37226 if (s & SERVICE_STREAM) {
37227 read_unlock(&vcc_sklist_lock);
37228- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37229+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37230 lvcc->stats.x.aal5.service_stream++;
37231 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37232 "PDU on VCI %d!\n", lanai->number, vci);
37233@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37234 return 0;
37235 }
37236 DPRINTK("got rx crc error on vci %d\n", vci);
37237- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37238+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37239 lvcc->stats.x.aal5.service_rxcrc++;
37240 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37241 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37242diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37243index 9988ac9..7c52585 100644
37244--- a/drivers/atm/nicstar.c
37245+++ b/drivers/atm/nicstar.c
37246@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37247 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37248 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37249 card->index);
37250- atomic_inc(&vcc->stats->tx_err);
37251+ atomic_inc_unchecked(&vcc->stats->tx_err);
37252 dev_kfree_skb_any(skb);
37253 return -EINVAL;
37254 }
37255@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37256 if (!vc->tx) {
37257 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37258 card->index);
37259- atomic_inc(&vcc->stats->tx_err);
37260+ atomic_inc_unchecked(&vcc->stats->tx_err);
37261 dev_kfree_skb_any(skb);
37262 return -EINVAL;
37263 }
37264@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37265 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37266 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37267 card->index);
37268- atomic_inc(&vcc->stats->tx_err);
37269+ atomic_inc_unchecked(&vcc->stats->tx_err);
37270 dev_kfree_skb_any(skb);
37271 return -EINVAL;
37272 }
37273
37274 if (skb_shinfo(skb)->nr_frags != 0) {
37275 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37276- atomic_inc(&vcc->stats->tx_err);
37277+ atomic_inc_unchecked(&vcc->stats->tx_err);
37278 dev_kfree_skb_any(skb);
37279 return -EINVAL;
37280 }
37281@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37282 }
37283
37284 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37285- atomic_inc(&vcc->stats->tx_err);
37286+ atomic_inc_unchecked(&vcc->stats->tx_err);
37287 dev_kfree_skb_any(skb);
37288 return -EIO;
37289 }
37290- atomic_inc(&vcc->stats->tx);
37291+ atomic_inc_unchecked(&vcc->stats->tx);
37292
37293 return 0;
37294 }
37295@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37296 printk
37297 ("nicstar%d: Can't allocate buffers for aal0.\n",
37298 card->index);
37299- atomic_add(i, &vcc->stats->rx_drop);
37300+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37301 break;
37302 }
37303 if (!atm_charge(vcc, sb->truesize)) {
37304 RXPRINTK
37305 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37306 card->index);
37307- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37308+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37309 dev_kfree_skb_any(sb);
37310 break;
37311 }
37312@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37313 ATM_SKB(sb)->vcc = vcc;
37314 __net_timestamp(sb);
37315 vcc->push(vcc, sb);
37316- atomic_inc(&vcc->stats->rx);
37317+ atomic_inc_unchecked(&vcc->stats->rx);
37318 cell += ATM_CELL_PAYLOAD;
37319 }
37320
37321@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37322 if (iovb == NULL) {
37323 printk("nicstar%d: Out of iovec buffers.\n",
37324 card->index);
37325- atomic_inc(&vcc->stats->rx_drop);
37326+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37327 recycle_rx_buf(card, skb);
37328 return;
37329 }
37330@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37331 small or large buffer itself. */
37332 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37333 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37334- atomic_inc(&vcc->stats->rx_err);
37335+ atomic_inc_unchecked(&vcc->stats->rx_err);
37336 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37337 NS_MAX_IOVECS);
37338 NS_PRV_IOVCNT(iovb) = 0;
37339@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37340 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37341 card->index);
37342 which_list(card, skb);
37343- atomic_inc(&vcc->stats->rx_err);
37344+ atomic_inc_unchecked(&vcc->stats->rx_err);
37345 recycle_rx_buf(card, skb);
37346 vc->rx_iov = NULL;
37347 recycle_iov_buf(card, iovb);
37348@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37349 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37350 card->index);
37351 which_list(card, skb);
37352- atomic_inc(&vcc->stats->rx_err);
37353+ atomic_inc_unchecked(&vcc->stats->rx_err);
37354 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37355 NS_PRV_IOVCNT(iovb));
37356 vc->rx_iov = NULL;
37357@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37358 printk(" - PDU size mismatch.\n");
37359 else
37360 printk(".\n");
37361- atomic_inc(&vcc->stats->rx_err);
37362+ atomic_inc_unchecked(&vcc->stats->rx_err);
37363 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37364 NS_PRV_IOVCNT(iovb));
37365 vc->rx_iov = NULL;
37366@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37367 /* skb points to a small buffer */
37368 if (!atm_charge(vcc, skb->truesize)) {
37369 push_rxbufs(card, skb);
37370- atomic_inc(&vcc->stats->rx_drop);
37371+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37372 } else {
37373 skb_put(skb, len);
37374 dequeue_sm_buf(card, skb);
37375@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37376 ATM_SKB(skb)->vcc = vcc;
37377 __net_timestamp(skb);
37378 vcc->push(vcc, skb);
37379- atomic_inc(&vcc->stats->rx);
37380+ atomic_inc_unchecked(&vcc->stats->rx);
37381 }
37382 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37383 struct sk_buff *sb;
37384@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37385 if (len <= NS_SMBUFSIZE) {
37386 if (!atm_charge(vcc, sb->truesize)) {
37387 push_rxbufs(card, sb);
37388- atomic_inc(&vcc->stats->rx_drop);
37389+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37390 } else {
37391 skb_put(sb, len);
37392 dequeue_sm_buf(card, sb);
37393@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37394 ATM_SKB(sb)->vcc = vcc;
37395 __net_timestamp(sb);
37396 vcc->push(vcc, sb);
37397- atomic_inc(&vcc->stats->rx);
37398+ atomic_inc_unchecked(&vcc->stats->rx);
37399 }
37400
37401 push_rxbufs(card, skb);
37402@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37403
37404 if (!atm_charge(vcc, skb->truesize)) {
37405 push_rxbufs(card, skb);
37406- atomic_inc(&vcc->stats->rx_drop);
37407+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37408 } else {
37409 dequeue_lg_buf(card, skb);
37410 #ifdef NS_USE_DESTRUCTORS
37411@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37412 ATM_SKB(skb)->vcc = vcc;
37413 __net_timestamp(skb);
37414 vcc->push(vcc, skb);
37415- atomic_inc(&vcc->stats->rx);
37416+ atomic_inc_unchecked(&vcc->stats->rx);
37417 }
37418
37419 push_rxbufs(card, sb);
37420@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37421 printk
37422 ("nicstar%d: Out of huge buffers.\n",
37423 card->index);
37424- atomic_inc(&vcc->stats->rx_drop);
37425+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37426 recycle_iovec_rx_bufs(card,
37427 (struct iovec *)
37428 iovb->data,
37429@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37430 card->hbpool.count++;
37431 } else
37432 dev_kfree_skb_any(hb);
37433- atomic_inc(&vcc->stats->rx_drop);
37434+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37435 } else {
37436 /* Copy the small buffer to the huge buffer */
37437 sb = (struct sk_buff *)iov->iov_base;
37438@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37439 #endif /* NS_USE_DESTRUCTORS */
37440 __net_timestamp(hb);
37441 vcc->push(vcc, hb);
37442- atomic_inc(&vcc->stats->rx);
37443+ atomic_inc_unchecked(&vcc->stats->rx);
37444 }
37445 }
37446
37447diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37448index 21b0bc6..b5f40ba 100644
37449--- a/drivers/atm/solos-pci.c
37450+++ b/drivers/atm/solos-pci.c
37451@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37452 }
37453 atm_charge(vcc, skb->truesize);
37454 vcc->push(vcc, skb);
37455- atomic_inc(&vcc->stats->rx);
37456+ atomic_inc_unchecked(&vcc->stats->rx);
37457 break;
37458
37459 case PKT_STATUS:
37460@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37461 vcc = SKB_CB(oldskb)->vcc;
37462
37463 if (vcc) {
37464- atomic_inc(&vcc->stats->tx);
37465+ atomic_inc_unchecked(&vcc->stats->tx);
37466 solos_pop(vcc, oldskb);
37467 } else {
37468 dev_kfree_skb_irq(oldskb);
37469diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37470index 0215934..ce9f5b1 100644
37471--- a/drivers/atm/suni.c
37472+++ b/drivers/atm/suni.c
37473@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37474
37475
37476 #define ADD_LIMITED(s,v) \
37477- atomic_add((v),&stats->s); \
37478- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37479+ atomic_add_unchecked((v),&stats->s); \
37480+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37481
37482
37483 static void suni_hz(unsigned long from_timer)
37484diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37485index 5120a96..e2572bd 100644
37486--- a/drivers/atm/uPD98402.c
37487+++ b/drivers/atm/uPD98402.c
37488@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37489 struct sonet_stats tmp;
37490 int error = 0;
37491
37492- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37493+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37494 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37495 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37496 if (zero && !error) {
37497@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37498
37499
37500 #define ADD_LIMITED(s,v) \
37501- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37502- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37503- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37504+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37505+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37506+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37507
37508
37509 static void stat_event(struct atm_dev *dev)
37510@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37511 if (reason & uPD98402_INT_PFM) stat_event(dev);
37512 if (reason & uPD98402_INT_PCO) {
37513 (void) GET(PCOCR); /* clear interrupt cause */
37514- atomic_add(GET(HECCT),
37515+ atomic_add_unchecked(GET(HECCT),
37516 &PRIV(dev)->sonet_stats.uncorr_hcs);
37517 }
37518 if ((reason & uPD98402_INT_RFO) &&
37519@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37520 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37521 uPD98402_INT_LOS),PIMR); /* enable them */
37522 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37523- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37524- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37525- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37526+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37527+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37528+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37529 return 0;
37530 }
37531
37532diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37533index 969c3c2..9b72956 100644
37534--- a/drivers/atm/zatm.c
37535+++ b/drivers/atm/zatm.c
37536@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37537 }
37538 if (!size) {
37539 dev_kfree_skb_irq(skb);
37540- if (vcc) atomic_inc(&vcc->stats->rx_err);
37541+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37542 continue;
37543 }
37544 if (!atm_charge(vcc,skb->truesize)) {
37545@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37546 skb->len = size;
37547 ATM_SKB(skb)->vcc = vcc;
37548 vcc->push(vcc,skb);
37549- atomic_inc(&vcc->stats->rx);
37550+ atomic_inc_unchecked(&vcc->stats->rx);
37551 }
37552 zout(pos & 0xffff,MTA(mbx));
37553 #if 0 /* probably a stupid idea */
37554@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37555 skb_queue_head(&zatm_vcc->backlog,skb);
37556 break;
37557 }
37558- atomic_inc(&vcc->stats->tx);
37559+ atomic_inc_unchecked(&vcc->stats->tx);
37560 wake_up(&zatm_vcc->tx_wait);
37561 }
37562
37563diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37564index 876bae5..8978785 100644
37565--- a/drivers/base/bus.c
37566+++ b/drivers/base/bus.c
37567@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37568 return -EINVAL;
37569
37570 mutex_lock(&subsys->p->mutex);
37571- list_add_tail(&sif->node, &subsys->p->interfaces);
37572+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37573 if (sif->add_dev) {
37574 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37575 while ((dev = subsys_dev_iter_next(&iter)))
37576@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37577 subsys = sif->subsys;
37578
37579 mutex_lock(&subsys->p->mutex);
37580- list_del_init(&sif->node);
37581+ pax_list_del_init((struct list_head *)&sif->node);
37582 if (sif->remove_dev) {
37583 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37584 while ((dev = subsys_dev_iter_next(&iter)))
37585diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37586index 25798db..15f130e 100644
37587--- a/drivers/base/devtmpfs.c
37588+++ b/drivers/base/devtmpfs.c
37589@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37590 if (!thread)
37591 return 0;
37592
37593- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37594+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37595 if (err)
37596 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37597 else
37598@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37599 *err = sys_unshare(CLONE_NEWNS);
37600 if (*err)
37601 goto out;
37602- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37603+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37604 if (*err)
37605 goto out;
37606- sys_chdir("/.."); /* will traverse into overmounted root */
37607- sys_chroot(".");
37608+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37609+ sys_chroot((char __force_user *)".");
37610 complete(&setup_done);
37611 while (1) {
37612 spin_lock(&req_lock);
37613diff --git a/drivers/base/node.c b/drivers/base/node.c
37614index a3b82e9..f90a8ce 100644
37615--- a/drivers/base/node.c
37616+++ b/drivers/base/node.c
37617@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37618 struct node_attr {
37619 struct device_attribute attr;
37620 enum node_states state;
37621-};
37622+} __do_const;
37623
37624 static ssize_t show_node_state(struct device *dev,
37625 struct device_attribute *attr, char *buf)
37626diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37627index 0d8780c..0b5df3f 100644
37628--- a/drivers/base/power/domain.c
37629+++ b/drivers/base/power/domain.c
37630@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37631 {
37632 struct cpuidle_driver *cpuidle_drv;
37633 struct gpd_cpuidle_data *cpuidle_data;
37634- struct cpuidle_state *idle_state;
37635+ cpuidle_state_no_const *idle_state;
37636 int ret = 0;
37637
37638 if (IS_ERR_OR_NULL(genpd) || state < 0)
37639@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37640 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37641 {
37642 struct gpd_cpuidle_data *cpuidle_data;
37643- struct cpuidle_state *idle_state;
37644+ cpuidle_state_no_const *idle_state;
37645 int ret = 0;
37646
37647 if (IS_ERR_OR_NULL(genpd))
37648@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37649 return ret;
37650 }
37651
37652- dev->pm_domain->detach = genpd_dev_pm_detach;
37653+ pax_open_kernel();
37654+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37655+ pax_close_kernel();
37656+
37657 pm_genpd_poweron(pd);
37658
37659 return 0;
37660diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37661index d2be3f9..0a3167a 100644
37662--- a/drivers/base/power/sysfs.c
37663+++ b/drivers/base/power/sysfs.c
37664@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37665 return -EIO;
37666 }
37667 }
37668- return sprintf(buf, p);
37669+ return sprintf(buf, "%s", p);
37670 }
37671
37672 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
37673diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37674index c2744b3..08fac19 100644
37675--- a/drivers/base/power/wakeup.c
37676+++ b/drivers/base/power/wakeup.c
37677@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37678 * They need to be modified together atomically, so it's better to use one
37679 * atomic variable to hold them both.
37680 */
37681-static atomic_t combined_event_count = ATOMIC_INIT(0);
37682+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37683
37684 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37685 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37686
37687 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37688 {
37689- unsigned int comb = atomic_read(&combined_event_count);
37690+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37691
37692 *cnt = (comb >> IN_PROGRESS_BITS);
37693 *inpr = comb & MAX_IN_PROGRESS;
37694@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37695 ws->start_prevent_time = ws->last_time;
37696
37697 /* Increment the counter of events in progress. */
37698- cec = atomic_inc_return(&combined_event_count);
37699+ cec = atomic_inc_return_unchecked(&combined_event_count);
37700
37701 trace_wakeup_source_activate(ws->name, cec);
37702 }
37703@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37704 * Increment the counter of registered wakeup events and decrement the
37705 * couter of wakeup events in progress simultaneously.
37706 */
37707- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37708+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37709 trace_wakeup_source_deactivate(ws->name, cec);
37710
37711 split_counters(&cnt, &inpr);
37712diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37713index 8d98a32..61d3165 100644
37714--- a/drivers/base/syscore.c
37715+++ b/drivers/base/syscore.c
37716@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37717 void register_syscore_ops(struct syscore_ops *ops)
37718 {
37719 mutex_lock(&syscore_ops_lock);
37720- list_add_tail(&ops->node, &syscore_ops_list);
37721+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37722 mutex_unlock(&syscore_ops_lock);
37723 }
37724 EXPORT_SYMBOL_GPL(register_syscore_ops);
37725@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37726 void unregister_syscore_ops(struct syscore_ops *ops)
37727 {
37728 mutex_lock(&syscore_ops_lock);
37729- list_del(&ops->node);
37730+ pax_list_del((struct list_head *)&ops->node);
37731 mutex_unlock(&syscore_ops_lock);
37732 }
37733 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37734diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37735index ff20f19..018f1da 100644
37736--- a/drivers/block/cciss.c
37737+++ b/drivers/block/cciss.c
37738@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37739 while (!list_empty(&h->reqQ)) {
37740 c = list_entry(h->reqQ.next, CommandList_struct, list);
37741 /* can't do anything if fifo is full */
37742- if ((h->access.fifo_full(h))) {
37743+ if ((h->access->fifo_full(h))) {
37744 dev_warn(&h->pdev->dev, "fifo full\n");
37745 break;
37746 }
37747@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37748 h->Qdepth--;
37749
37750 /* Tell the controller execute command */
37751- h->access.submit_command(h, c);
37752+ h->access->submit_command(h, c);
37753
37754 /* Put job onto the completed Q */
37755 addQ(&h->cmpQ, c);
37756@@ -3444,17 +3444,17 @@ startio:
37757
37758 static inline unsigned long get_next_completion(ctlr_info_t *h)
37759 {
37760- return h->access.command_completed(h);
37761+ return h->access->command_completed(h);
37762 }
37763
37764 static inline int interrupt_pending(ctlr_info_t *h)
37765 {
37766- return h->access.intr_pending(h);
37767+ return h->access->intr_pending(h);
37768 }
37769
37770 static inline long interrupt_not_for_us(ctlr_info_t *h)
37771 {
37772- return ((h->access.intr_pending(h) == 0) ||
37773+ return ((h->access->intr_pending(h) == 0) ||
37774 (h->interrupts_enabled == 0));
37775 }
37776
37777@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37778 u32 a;
37779
37780 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37781- return h->access.command_completed(h);
37782+ return h->access->command_completed(h);
37783
37784 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37785 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37786@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37787 trans_support & CFGTBL_Trans_use_short_tags);
37788
37789 /* Change the access methods to the performant access methods */
37790- h->access = SA5_performant_access;
37791+ h->access = &SA5_performant_access;
37792 h->transMethod = CFGTBL_Trans_Performant;
37793
37794 return;
37795@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37796 if (prod_index < 0)
37797 return -ENODEV;
37798 h->product_name = products[prod_index].product_name;
37799- h->access = *(products[prod_index].access);
37800+ h->access = products[prod_index].access;
37801
37802 if (cciss_board_disabled(h)) {
37803 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37804@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37805 }
37806
37807 /* make sure the board interrupts are off */
37808- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37809+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37810 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37811 if (rc)
37812 goto clean2;
37813@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37814 * fake ones to scoop up any residual completions.
37815 */
37816 spin_lock_irqsave(&h->lock, flags);
37817- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37818+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37819 spin_unlock_irqrestore(&h->lock, flags);
37820 free_irq(h->intr[h->intr_mode], h);
37821 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37822@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37823 dev_info(&h->pdev->dev, "Board READY.\n");
37824 dev_info(&h->pdev->dev,
37825 "Waiting for stale completions to drain.\n");
37826- h->access.set_intr_mask(h, CCISS_INTR_ON);
37827+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37828 msleep(10000);
37829- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37830+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37831
37832 rc = controller_reset_failed(h->cfgtable);
37833 if (rc)
37834@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37835 cciss_scsi_setup(h);
37836
37837 /* Turn the interrupts on so we can service requests */
37838- h->access.set_intr_mask(h, CCISS_INTR_ON);
37839+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37840
37841 /* Get the firmware version */
37842 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37843@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37844 kfree(flush_buf);
37845 if (return_code != IO_OK)
37846 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37847- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37848+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37849 free_irq(h->intr[h->intr_mode], h);
37850 }
37851
37852diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37853index 7fda30e..2f27946 100644
37854--- a/drivers/block/cciss.h
37855+++ b/drivers/block/cciss.h
37856@@ -101,7 +101,7 @@ struct ctlr_info
37857 /* information about each logical volume */
37858 drive_info_struct *drv[CISS_MAX_LUN];
37859
37860- struct access_method access;
37861+ struct access_method *access;
37862
37863 /* queue and queue Info */
37864 struct list_head reqQ;
37865@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37866 }
37867
37868 static struct access_method SA5_access = {
37869- SA5_submit_command,
37870- SA5_intr_mask,
37871- SA5_fifo_full,
37872- SA5_intr_pending,
37873- SA5_completed,
37874+ .submit_command = SA5_submit_command,
37875+ .set_intr_mask = SA5_intr_mask,
37876+ .fifo_full = SA5_fifo_full,
37877+ .intr_pending = SA5_intr_pending,
37878+ .command_completed = SA5_completed,
37879 };
37880
37881 static struct access_method SA5B_access = {
37882- SA5_submit_command,
37883- SA5B_intr_mask,
37884- SA5_fifo_full,
37885- SA5B_intr_pending,
37886- SA5_completed,
37887+ .submit_command = SA5_submit_command,
37888+ .set_intr_mask = SA5B_intr_mask,
37889+ .fifo_full = SA5_fifo_full,
37890+ .intr_pending = SA5B_intr_pending,
37891+ .command_completed = SA5_completed,
37892 };
37893
37894 static struct access_method SA5_performant_access = {
37895- SA5_submit_command,
37896- SA5_performant_intr_mask,
37897- SA5_fifo_full,
37898- SA5_performant_intr_pending,
37899- SA5_performant_completed,
37900+ .submit_command = SA5_submit_command,
37901+ .set_intr_mask = SA5_performant_intr_mask,
37902+ .fifo_full = SA5_fifo_full,
37903+ .intr_pending = SA5_performant_intr_pending,
37904+ .command_completed = SA5_performant_completed,
37905 };
37906
37907 struct board_type {
37908diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37909index 2b94403..fd6ad1f 100644
37910--- a/drivers/block/cpqarray.c
37911+++ b/drivers/block/cpqarray.c
37912@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37913 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37914 goto Enomem4;
37915 }
37916- hba[i]->access.set_intr_mask(hba[i], 0);
37917+ hba[i]->access->set_intr_mask(hba[i], 0);
37918 if (request_irq(hba[i]->intr, do_ida_intr,
37919 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37920 {
37921@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37922 add_timer(&hba[i]->timer);
37923
37924 /* Enable IRQ now that spinlock and rate limit timer are set up */
37925- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37926+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37927
37928 for(j=0; j<NWD; j++) {
37929 struct gendisk *disk = ida_gendisk[i][j];
37930@@ -694,7 +694,7 @@ DBGINFO(
37931 for(i=0; i<NR_PRODUCTS; i++) {
37932 if (board_id == products[i].board_id) {
37933 c->product_name = products[i].product_name;
37934- c->access = *(products[i].access);
37935+ c->access = products[i].access;
37936 break;
37937 }
37938 }
37939@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37940 hba[ctlr]->intr = intr;
37941 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37942 hba[ctlr]->product_name = products[j].product_name;
37943- hba[ctlr]->access = *(products[j].access);
37944+ hba[ctlr]->access = products[j].access;
37945 hba[ctlr]->ctlr = ctlr;
37946 hba[ctlr]->board_id = board_id;
37947 hba[ctlr]->pci_dev = NULL; /* not PCI */
37948@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37949
37950 while((c = h->reqQ) != NULL) {
37951 /* Can't do anything if we're busy */
37952- if (h->access.fifo_full(h) == 0)
37953+ if (h->access->fifo_full(h) == 0)
37954 return;
37955
37956 /* Get the first entry from the request Q */
37957@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37958 h->Qdepth--;
37959
37960 /* Tell the controller to do our bidding */
37961- h->access.submit_command(h, c);
37962+ h->access->submit_command(h, c);
37963
37964 /* Get onto the completion Q */
37965 addQ(&h->cmpQ, c);
37966@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37967 unsigned long flags;
37968 __u32 a,a1;
37969
37970- istat = h->access.intr_pending(h);
37971+ istat = h->access->intr_pending(h);
37972 /* Is this interrupt for us? */
37973 if (istat == 0)
37974 return IRQ_NONE;
37975@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37976 */
37977 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37978 if (istat & FIFO_NOT_EMPTY) {
37979- while((a = h->access.command_completed(h))) {
37980+ while((a = h->access->command_completed(h))) {
37981 a1 = a; a &= ~3;
37982 if ((c = h->cmpQ) == NULL)
37983 {
37984@@ -1448,11 +1448,11 @@ static int sendcmd(
37985 /*
37986 * Disable interrupt
37987 */
37988- info_p->access.set_intr_mask(info_p, 0);
37989+ info_p->access->set_intr_mask(info_p, 0);
37990 /* Make sure there is room in the command FIFO */
37991 /* Actually it should be completely empty at this time. */
37992 for (i = 200000; i > 0; i--) {
37993- temp = info_p->access.fifo_full(info_p);
37994+ temp = info_p->access->fifo_full(info_p);
37995 if (temp != 0) {
37996 break;
37997 }
37998@@ -1465,7 +1465,7 @@ DBG(
37999 /*
38000 * Send the cmd
38001 */
38002- info_p->access.submit_command(info_p, c);
38003+ info_p->access->submit_command(info_p, c);
38004 complete = pollcomplete(ctlr);
38005
38006 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38007@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38008 * we check the new geometry. Then turn interrupts back on when
38009 * we're done.
38010 */
38011- host->access.set_intr_mask(host, 0);
38012+ host->access->set_intr_mask(host, 0);
38013 getgeometry(ctlr);
38014- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38015+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38016
38017 for(i=0; i<NWD; i++) {
38018 struct gendisk *disk = ida_gendisk[ctlr][i];
38019@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38020 /* Wait (up to 2 seconds) for a command to complete */
38021
38022 for (i = 200000; i > 0; i--) {
38023- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38024+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38025 if (done == 0) {
38026 udelay(10); /* a short fixed delay */
38027 } else
38028diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38029index be73e9d..7fbf140 100644
38030--- a/drivers/block/cpqarray.h
38031+++ b/drivers/block/cpqarray.h
38032@@ -99,7 +99,7 @@ struct ctlr_info {
38033 drv_info_t drv[NWD];
38034 struct proc_dir_entry *proc;
38035
38036- struct access_method access;
38037+ struct access_method *access;
38038
38039 cmdlist_t *reqQ;
38040 cmdlist_t *cmpQ;
38041diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38042index 434c77d..6d3219a 100644
38043--- a/drivers/block/drbd/drbd_bitmap.c
38044+++ b/drivers/block/drbd/drbd_bitmap.c
38045@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38046 submit_bio(rw, bio);
38047 /* this should not count as user activity and cause the
38048 * resync to throttle -- see drbd_rs_should_slow_down(). */
38049- atomic_add(len >> 9, &device->rs_sect_ev);
38050+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38051 }
38052 }
38053
38054diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38055index b905e98..0812ed8 100644
38056--- a/drivers/block/drbd/drbd_int.h
38057+++ b/drivers/block/drbd/drbd_int.h
38058@@ -385,7 +385,7 @@ struct drbd_epoch {
38059 struct drbd_connection *connection;
38060 struct list_head list;
38061 unsigned int barrier_nr;
38062- atomic_t epoch_size; /* increased on every request added. */
38063+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38064 atomic_t active; /* increased on every req. added, and dec on every finished. */
38065 unsigned long flags;
38066 };
38067@@ -946,7 +946,7 @@ struct drbd_device {
38068 unsigned int al_tr_number;
38069 int al_tr_cycle;
38070 wait_queue_head_t seq_wait;
38071- atomic_t packet_seq;
38072+ atomic_unchecked_t packet_seq;
38073 unsigned int peer_seq;
38074 spinlock_t peer_seq_lock;
38075 unsigned long comm_bm_set; /* communicated number of set bits. */
38076@@ -955,8 +955,8 @@ struct drbd_device {
38077 struct mutex own_state_mutex;
38078 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38079 char congestion_reason; /* Why we where congested... */
38080- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38081- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38082+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38083+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38084 int rs_last_sect_ev; /* counter to compare with */
38085 int rs_last_events; /* counter of read or write "events" (unit sectors)
38086 * on the lower level device when we last looked. */
38087diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38088index 1fc8342..7e7742b 100644
38089--- a/drivers/block/drbd/drbd_main.c
38090+++ b/drivers/block/drbd/drbd_main.c
38091@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38092 p->sector = sector;
38093 p->block_id = block_id;
38094 p->blksize = blksize;
38095- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38096+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38097 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38098 }
38099
38100@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38101 return -EIO;
38102 p->sector = cpu_to_be64(req->i.sector);
38103 p->block_id = (unsigned long)req;
38104- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38105+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38106 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38107 if (device->state.conn >= C_SYNC_SOURCE &&
38108 device->state.conn <= C_PAUSED_SYNC_T)
38109@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38110 atomic_set(&device->unacked_cnt, 0);
38111 atomic_set(&device->local_cnt, 0);
38112 atomic_set(&device->pp_in_use_by_net, 0);
38113- atomic_set(&device->rs_sect_in, 0);
38114- atomic_set(&device->rs_sect_ev, 0);
38115+ atomic_set_unchecked(&device->rs_sect_in, 0);
38116+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38117 atomic_set(&device->ap_in_flight, 0);
38118 atomic_set(&device->md_io.in_use, 0);
38119
38120@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38121 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38122 struct drbd_resource *resource = connection->resource;
38123
38124- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38125- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38126+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38127+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38128 kfree(connection->current_epoch);
38129
38130 idr_destroy(&connection->peer_devices);
38131diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38132index 74df8cf..e41fc24 100644
38133--- a/drivers/block/drbd/drbd_nl.c
38134+++ b/drivers/block/drbd/drbd_nl.c
38135@@ -3637,13 +3637,13 @@ finish:
38136
38137 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38138 {
38139- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38140+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38141 struct sk_buff *msg;
38142 struct drbd_genlmsghdr *d_out;
38143 unsigned seq;
38144 int err = -ENOMEM;
38145
38146- seq = atomic_inc_return(&drbd_genl_seq);
38147+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38148 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38149 if (!msg)
38150 goto failed;
38151diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38152index d169b4a..481463f 100644
38153--- a/drivers/block/drbd/drbd_receiver.c
38154+++ b/drivers/block/drbd/drbd_receiver.c
38155@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38156 struct drbd_device *device = peer_device->device;
38157 int err;
38158
38159- atomic_set(&device->packet_seq, 0);
38160+ atomic_set_unchecked(&device->packet_seq, 0);
38161 device->peer_seq = 0;
38162
38163 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38164@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38165 do {
38166 next_epoch = NULL;
38167
38168- epoch_size = atomic_read(&epoch->epoch_size);
38169+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38170
38171 switch (ev & ~EV_CLEANUP) {
38172 case EV_PUT:
38173@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38174 rv = FE_DESTROYED;
38175 } else {
38176 epoch->flags = 0;
38177- atomic_set(&epoch->epoch_size, 0);
38178+ atomic_set_unchecked(&epoch->epoch_size, 0);
38179 /* atomic_set(&epoch->active, 0); is already zero */
38180 if (rv == FE_STILL_LIVE)
38181 rv = FE_RECYCLED;
38182@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38183 conn_wait_active_ee_empty(connection);
38184 drbd_flush(connection);
38185
38186- if (atomic_read(&connection->current_epoch->epoch_size)) {
38187+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38188 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38189 if (epoch)
38190 break;
38191@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38192 }
38193
38194 epoch->flags = 0;
38195- atomic_set(&epoch->epoch_size, 0);
38196+ atomic_set_unchecked(&epoch->epoch_size, 0);
38197 atomic_set(&epoch->active, 0);
38198
38199 spin_lock(&connection->epoch_lock);
38200- if (atomic_read(&connection->current_epoch->epoch_size)) {
38201+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38202 list_add(&epoch->list, &connection->current_epoch->list);
38203 connection->current_epoch = epoch;
38204 connection->epochs++;
38205@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38206 list_add_tail(&peer_req->w.list, &device->sync_ee);
38207 spin_unlock_irq(&device->resource->req_lock);
38208
38209- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38210+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38211 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38212 return 0;
38213
38214@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38215 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38216 }
38217
38218- atomic_add(pi->size >> 9, &device->rs_sect_in);
38219+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38220
38221 return err;
38222 }
38223@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38224
38225 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38226 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38227- atomic_inc(&connection->current_epoch->epoch_size);
38228+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38229 err2 = drbd_drain_block(peer_device, pi->size);
38230 if (!err)
38231 err = err2;
38232@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38233
38234 spin_lock(&connection->epoch_lock);
38235 peer_req->epoch = connection->current_epoch;
38236- atomic_inc(&peer_req->epoch->epoch_size);
38237+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38238 atomic_inc(&peer_req->epoch->active);
38239 spin_unlock(&connection->epoch_lock);
38240
38241@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38242
38243 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38244 (int)part_stat_read(&disk->part0, sectors[1]) -
38245- atomic_read(&device->rs_sect_ev);
38246+ atomic_read_unchecked(&device->rs_sect_ev);
38247
38248 if (atomic_read(&device->ap_actlog_cnt)
38249 || curr_events - device->rs_last_events > 64) {
38250@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38251 device->use_csums = true;
38252 } else if (pi->cmd == P_OV_REPLY) {
38253 /* track progress, we may need to throttle */
38254- atomic_add(size >> 9, &device->rs_sect_in);
38255+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38256 peer_req->w.cb = w_e_end_ov_reply;
38257 dec_rs_pending(device);
38258 /* drbd_rs_begin_io done when we sent this request,
38259@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38260 goto out_free_e;
38261
38262 submit_for_resync:
38263- atomic_add(size >> 9, &device->rs_sect_ev);
38264+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38265
38266 submit:
38267 update_receiver_timing_details(connection, drbd_submit_peer_request);
38268@@ -4564,7 +4564,7 @@ struct data_cmd {
38269 int expect_payload;
38270 size_t pkt_size;
38271 int (*fn)(struct drbd_connection *, struct packet_info *);
38272-};
38273+} __do_const;
38274
38275 static struct data_cmd drbd_cmd_handler[] = {
38276 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38277@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38278 if (!list_empty(&connection->current_epoch->list))
38279 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38280 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38281- atomic_set(&connection->current_epoch->epoch_size, 0);
38282+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38283 connection->send.seen_any_write_yet = false;
38284
38285 drbd_info(connection, "Connection closed\n");
38286@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38287 put_ldev(device);
38288 }
38289 dec_rs_pending(device);
38290- atomic_add(blksize >> 9, &device->rs_sect_in);
38291+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38292
38293 return 0;
38294 }
38295@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38296 struct asender_cmd {
38297 size_t pkt_size;
38298 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38299-};
38300+} __do_const;
38301
38302 static struct asender_cmd asender_tbl[] = {
38303 [P_PING] = { 0, got_Ping },
38304diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38305index d0fae55..4469096 100644
38306--- a/drivers/block/drbd/drbd_worker.c
38307+++ b/drivers/block/drbd/drbd_worker.c
38308@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38309 list_add_tail(&peer_req->w.list, &device->read_ee);
38310 spin_unlock_irq(&device->resource->req_lock);
38311
38312- atomic_add(size >> 9, &device->rs_sect_ev);
38313+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38314 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38315 return 0;
38316
38317@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38318 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38319 int number, mxb;
38320
38321- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38322+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38323 device->rs_in_flight -= sect_in;
38324
38325 rcu_read_lock();
38326@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38327 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38328 struct fifo_buffer *plan;
38329
38330- atomic_set(&device->rs_sect_in, 0);
38331- atomic_set(&device->rs_sect_ev, 0);
38332+ atomic_set_unchecked(&device->rs_sect_in, 0);
38333+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38334 device->rs_in_flight = 0;
38335 device->rs_last_events =
38336 (int)part_stat_read(&disk->part0, sectors[0]) +
38337diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38338index 6cb1beb..bf490f7 100644
38339--- a/drivers/block/loop.c
38340+++ b/drivers/block/loop.c
38341@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38342
38343 file_start_write(file);
38344 set_fs(get_ds());
38345- bw = file->f_op->write(file, buf, len, &pos);
38346+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38347 set_fs(old_fs);
38348 file_end_write(file);
38349 if (likely(bw == len))
38350diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38351index d826bf3..8eb406c 100644
38352--- a/drivers/block/nvme-core.c
38353+++ b/drivers/block/nvme-core.c
38354@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38355 static struct task_struct *nvme_thread;
38356 static struct workqueue_struct *nvme_workq;
38357 static wait_queue_head_t nvme_kthread_wait;
38358-static struct notifier_block nvme_nb;
38359
38360 static void nvme_reset_failed_dev(struct work_struct *ws);
38361 static int nvme_process_cq(struct nvme_queue *nvmeq);
38362@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38363 static void __exit nvme_exit(void)
38364 {
38365 pci_unregister_driver(&nvme_driver);
38366- unregister_hotcpu_notifier(&nvme_nb);
38367 unregister_blkdev(nvme_major, "nvme");
38368 destroy_workqueue(nvme_workq);
38369 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38370diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38371index 09e628da..7607aaa 100644
38372--- a/drivers/block/pktcdvd.c
38373+++ b/drivers/block/pktcdvd.c
38374@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38375
38376 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38377 {
38378- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38379+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38380 }
38381
38382 /*
38383@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38384 return -EROFS;
38385 }
38386 pd->settings.fp = ti.fp;
38387- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38388+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38389
38390 if (ti.nwa_v) {
38391 pd->nwa = be32_to_cpu(ti.next_writable);
38392diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38393index 8a86b62..f54c87e 100644
38394--- a/drivers/block/rbd.c
38395+++ b/drivers/block/rbd.c
38396@@ -63,7 +63,7 @@
38397 * If the counter is already at its maximum value returns
38398 * -EINVAL without updating it.
38399 */
38400-static int atomic_inc_return_safe(atomic_t *v)
38401+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38402 {
38403 unsigned int counter;
38404
38405diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38406index e5565fb..71be10b4 100644
38407--- a/drivers/block/smart1,2.h
38408+++ b/drivers/block/smart1,2.h
38409@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38410 }
38411
38412 static struct access_method smart4_access = {
38413- smart4_submit_command,
38414- smart4_intr_mask,
38415- smart4_fifo_full,
38416- smart4_intr_pending,
38417- smart4_completed,
38418+ .submit_command = smart4_submit_command,
38419+ .set_intr_mask = smart4_intr_mask,
38420+ .fifo_full = smart4_fifo_full,
38421+ .intr_pending = smart4_intr_pending,
38422+ .command_completed = smart4_completed,
38423 };
38424
38425 /*
38426@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38427 }
38428
38429 static struct access_method smart2_access = {
38430- smart2_submit_command,
38431- smart2_intr_mask,
38432- smart2_fifo_full,
38433- smart2_intr_pending,
38434- smart2_completed,
38435+ .submit_command = smart2_submit_command,
38436+ .set_intr_mask = smart2_intr_mask,
38437+ .fifo_full = smart2_fifo_full,
38438+ .intr_pending = smart2_intr_pending,
38439+ .command_completed = smart2_completed,
38440 };
38441
38442 /*
38443@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38444 }
38445
38446 static struct access_method smart2e_access = {
38447- smart2e_submit_command,
38448- smart2e_intr_mask,
38449- smart2e_fifo_full,
38450- smart2e_intr_pending,
38451- smart2e_completed,
38452+ .submit_command = smart2e_submit_command,
38453+ .set_intr_mask = smart2e_intr_mask,
38454+ .fifo_full = smart2e_fifo_full,
38455+ .intr_pending = smart2e_intr_pending,
38456+ .command_completed = smart2e_completed,
38457 };
38458
38459 /*
38460@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38461 }
38462
38463 static struct access_method smart1_access = {
38464- smart1_submit_command,
38465- smart1_intr_mask,
38466- smart1_fifo_full,
38467- smart1_intr_pending,
38468- smart1_completed,
38469+ .submit_command = smart1_submit_command,
38470+ .set_intr_mask = smart1_intr_mask,
38471+ .fifo_full = smart1_fifo_full,
38472+ .intr_pending = smart1_intr_pending,
38473+ .command_completed = smart1_completed,
38474 };
38475diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38476index 55c135b..9f8d60c 100644
38477--- a/drivers/bluetooth/btwilink.c
38478+++ b/drivers/bluetooth/btwilink.c
38479@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38480
38481 static int bt_ti_probe(struct platform_device *pdev)
38482 {
38483- static struct ti_st *hst;
38484+ struct ti_st *hst;
38485 struct hci_dev *hdev;
38486 int err;
38487
38488diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38489index 5d28a45..a538f90 100644
38490--- a/drivers/cdrom/cdrom.c
38491+++ b/drivers/cdrom/cdrom.c
38492@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38493 ENSURE(reset, CDC_RESET);
38494 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38495 cdi->mc_flags = 0;
38496- cdo->n_minors = 0;
38497 cdi->options = CDO_USE_FFLAGS;
38498
38499 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38500@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38501 else
38502 cdi->cdda_method = CDDA_OLD;
38503
38504- if (!cdo->generic_packet)
38505- cdo->generic_packet = cdrom_dummy_generic_packet;
38506+ if (!cdo->generic_packet) {
38507+ pax_open_kernel();
38508+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38509+ pax_close_kernel();
38510+ }
38511
38512 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38513 mutex_lock(&cdrom_mutex);
38514@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38515 if (cdi->exit)
38516 cdi->exit(cdi);
38517
38518- cdi->ops->n_minors--;
38519 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38520 }
38521
38522@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38523 */
38524 nr = nframes;
38525 do {
38526- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38527+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38528 if (cgc.buffer)
38529 break;
38530
38531@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38532 struct cdrom_device_info *cdi;
38533 int ret;
38534
38535- ret = scnprintf(info + *pos, max_size - *pos, header);
38536+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38537 if (!ret)
38538 return 1;
38539
38540diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38541index 584bc31..e64a12c 100644
38542--- a/drivers/cdrom/gdrom.c
38543+++ b/drivers/cdrom/gdrom.c
38544@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38545 .audio_ioctl = gdrom_audio_ioctl,
38546 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38547 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38548- .n_minors = 1,
38549 };
38550
38551 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38552diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38553index efefd12..4f1d494 100644
38554--- a/drivers/char/Kconfig
38555+++ b/drivers/char/Kconfig
38556@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38557
38558 config DEVKMEM
38559 bool "/dev/kmem virtual device support"
38560- default y
38561+ default n
38562+ depends on !GRKERNSEC_KMEM
38563 help
38564 Say Y here if you want to support the /dev/kmem device. The
38565 /dev/kmem device is rarely used, but can be used for certain
38566@@ -577,6 +578,7 @@ config DEVPORT
38567 bool
38568 depends on !M68K
38569 depends on ISA || PCI
38570+ depends on !GRKERNSEC_KMEM
38571 default y
38572
38573 source "drivers/s390/char/Kconfig"
38574diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38575index a48e05b..6bac831 100644
38576--- a/drivers/char/agp/compat_ioctl.c
38577+++ b/drivers/char/agp/compat_ioctl.c
38578@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38579 return -ENOMEM;
38580 }
38581
38582- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38583+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38584 sizeof(*usegment) * ureserve.seg_count)) {
38585 kfree(usegment);
38586 kfree(ksegment);
38587diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38588index 09f17eb..8531d2f 100644
38589--- a/drivers/char/agp/frontend.c
38590+++ b/drivers/char/agp/frontend.c
38591@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38592 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38593 return -EFAULT;
38594
38595- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38596+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38597 return -EFAULT;
38598
38599 client = agp_find_client_by_pid(reserve.pid);
38600@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38601 if (segment == NULL)
38602 return -ENOMEM;
38603
38604- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38605+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38606 sizeof(struct agp_segment) * reserve.seg_count)) {
38607 kfree(segment);
38608 return -EFAULT;
38609diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38610index 4f94375..413694e 100644
38611--- a/drivers/char/genrtc.c
38612+++ b/drivers/char/genrtc.c
38613@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38614 switch (cmd) {
38615
38616 case RTC_PLL_GET:
38617+ memset(&pll, 0, sizeof(pll));
38618 if (get_rtc_pll(&pll))
38619 return -EINVAL;
38620 else
38621diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38622index d5d4cd8..22d561d 100644
38623--- a/drivers/char/hpet.c
38624+++ b/drivers/char/hpet.c
38625@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38626 }
38627
38628 static int
38629-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38630+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38631 struct hpet_info *info)
38632 {
38633 struct hpet_timer __iomem *timer;
38634diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38635index 6b65fa4..8ebbc99 100644
38636--- a/drivers/char/ipmi/ipmi_msghandler.c
38637+++ b/drivers/char/ipmi/ipmi_msghandler.c
38638@@ -436,7 +436,7 @@ struct ipmi_smi {
38639 struct proc_dir_entry *proc_dir;
38640 char proc_dir_name[10];
38641
38642- atomic_t stats[IPMI_NUM_STATS];
38643+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38644
38645 /*
38646 * run_to_completion duplicate of smb_info, smi_info
38647@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38648 static DEFINE_MUTEX(smi_watchers_mutex);
38649
38650 #define ipmi_inc_stat(intf, stat) \
38651- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38652+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38653 #define ipmi_get_stat(intf, stat) \
38654- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38655+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38656
38657 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38658 "ACPI", "SMBIOS", "PCI",
38659@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38660 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38661 init_waitqueue_head(&intf->waitq);
38662 for (i = 0; i < IPMI_NUM_STATS; i++)
38663- atomic_set(&intf->stats[i], 0);
38664+ atomic_set_unchecked(&intf->stats[i], 0);
38665
38666 intf->proc_dir = NULL;
38667
38668diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38669index 967b73a..946e94c 100644
38670--- a/drivers/char/ipmi/ipmi_si_intf.c
38671+++ b/drivers/char/ipmi/ipmi_si_intf.c
38672@@ -284,7 +284,7 @@ struct smi_info {
38673 unsigned char slave_addr;
38674
38675 /* Counters and things for the proc filesystem. */
38676- atomic_t stats[SI_NUM_STATS];
38677+ atomic_unchecked_t stats[SI_NUM_STATS];
38678
38679 struct task_struct *thread;
38680
38681@@ -293,9 +293,9 @@ struct smi_info {
38682 };
38683
38684 #define smi_inc_stat(smi, stat) \
38685- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38686+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38687 #define smi_get_stat(smi, stat) \
38688- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38689+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38690
38691 #define SI_MAX_PARMS 4
38692
38693@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38694 atomic_set(&new_smi->req_events, 0);
38695 new_smi->run_to_completion = false;
38696 for (i = 0; i < SI_NUM_STATS; i++)
38697- atomic_set(&new_smi->stats[i], 0);
38698+ atomic_set_unchecked(&new_smi->stats[i], 0);
38699
38700 new_smi->interrupt_disabled = true;
38701 atomic_set(&new_smi->need_watch, 0);
38702diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38703index 4c58333..d5cca27 100644
38704--- a/drivers/char/mem.c
38705+++ b/drivers/char/mem.c
38706@@ -18,6 +18,7 @@
38707 #include <linux/raw.h>
38708 #include <linux/tty.h>
38709 #include <linux/capability.h>
38710+#include <linux/security.h>
38711 #include <linux/ptrace.h>
38712 #include <linux/device.h>
38713 #include <linux/highmem.h>
38714@@ -36,6 +37,10 @@
38715
38716 #define DEVPORT_MINOR 4
38717
38718+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38719+extern const struct file_operations grsec_fops;
38720+#endif
38721+
38722 static inline unsigned long size_inside_page(unsigned long start,
38723 unsigned long size)
38724 {
38725@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38726
38727 while (cursor < to) {
38728 if (!devmem_is_allowed(pfn)) {
38729+#ifdef CONFIG_GRKERNSEC_KMEM
38730+ gr_handle_mem_readwrite(from, to);
38731+#else
38732 printk(KERN_INFO
38733 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38734 current->comm, from, to);
38735+#endif
38736 return 0;
38737 }
38738 cursor += PAGE_SIZE;
38739@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38740 }
38741 return 1;
38742 }
38743+#elif defined(CONFIG_GRKERNSEC_KMEM)
38744+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38745+{
38746+ return 0;
38747+}
38748 #else
38749 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38750 {
38751@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38752 #endif
38753
38754 while (count > 0) {
38755- unsigned long remaining;
38756+ unsigned long remaining = 0;
38757+ char *temp;
38758
38759 sz = size_inside_page(p, count);
38760
38761@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38762 if (!ptr)
38763 return -EFAULT;
38764
38765- remaining = copy_to_user(buf, ptr, sz);
38766+#ifdef CONFIG_PAX_USERCOPY
38767+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38768+ if (!temp) {
38769+ unxlate_dev_mem_ptr(p, ptr);
38770+ return -ENOMEM;
38771+ }
38772+ remaining = probe_kernel_read(temp, ptr, sz);
38773+#else
38774+ temp = ptr;
38775+#endif
38776+
38777+ if (!remaining)
38778+ remaining = copy_to_user(buf, temp, sz);
38779+
38780+#ifdef CONFIG_PAX_USERCOPY
38781+ kfree(temp);
38782+#endif
38783+
38784 unxlate_dev_mem_ptr(p, ptr);
38785 if (remaining)
38786 return -EFAULT;
38787@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38788 size_t count, loff_t *ppos)
38789 {
38790 unsigned long p = *ppos;
38791- ssize_t low_count, read, sz;
38792+ ssize_t low_count, read, sz, err = 0;
38793 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38794- int err = 0;
38795
38796 read = 0;
38797 if (p < (unsigned long) high_memory) {
38798@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38799 }
38800 #endif
38801 while (low_count > 0) {
38802+ char *temp;
38803+
38804 sz = size_inside_page(p, low_count);
38805
38806 /*
38807@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38808 */
38809 kbuf = xlate_dev_kmem_ptr((void *)p);
38810
38811- if (copy_to_user(buf, kbuf, sz))
38812+#ifdef CONFIG_PAX_USERCOPY
38813+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38814+ if (!temp)
38815+ return -ENOMEM;
38816+ err = probe_kernel_read(temp, kbuf, sz);
38817+#else
38818+ temp = kbuf;
38819+#endif
38820+
38821+ if (!err)
38822+ err = copy_to_user(buf, temp, sz);
38823+
38824+#ifdef CONFIG_PAX_USERCOPY
38825+ kfree(temp);
38826+#endif
38827+
38828+ if (err)
38829 return -EFAULT;
38830 buf += sz;
38831 p += sz;
38832@@ -800,6 +849,9 @@ static const struct memdev {
38833 #ifdef CONFIG_PRINTK
38834 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38835 #endif
38836+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38837+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38838+#endif
38839 };
38840
38841 static int memory_open(struct inode *inode, struct file *filp)
38842@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38843 continue;
38844
38845 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38846- NULL, devlist[minor].name);
38847+ NULL, "%s", devlist[minor].name);
38848 }
38849
38850 return tty_init();
38851diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38852index 9df78e2..01ba9ae 100644
38853--- a/drivers/char/nvram.c
38854+++ b/drivers/char/nvram.c
38855@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38856
38857 spin_unlock_irq(&rtc_lock);
38858
38859- if (copy_to_user(buf, contents, tmp - contents))
38860+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38861 return -EFAULT;
38862
38863 *ppos = i;
38864diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38865index 0ea9986..e7b07e4 100644
38866--- a/drivers/char/pcmcia/synclink_cs.c
38867+++ b/drivers/char/pcmcia/synclink_cs.c
38868@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38869
38870 if (debug_level >= DEBUG_LEVEL_INFO)
38871 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38872- __FILE__, __LINE__, info->device_name, port->count);
38873+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38874
38875 if (tty_port_close_start(port, tty, filp) == 0)
38876 goto cleanup;
38877@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38878 cleanup:
38879 if (debug_level >= DEBUG_LEVEL_INFO)
38880 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38881- tty->driver->name, port->count);
38882+ tty->driver->name, atomic_read(&port->count));
38883 }
38884
38885 /* Wait until the transmitter is empty.
38886@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38887
38888 if (debug_level >= DEBUG_LEVEL_INFO)
38889 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38890- __FILE__, __LINE__, tty->driver->name, port->count);
38891+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38892
38893 /* If port is closing, signal caller to try again */
38894 if (port->flags & ASYNC_CLOSING){
38895@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38896 goto cleanup;
38897 }
38898 spin_lock(&port->lock);
38899- port->count++;
38900+ atomic_inc(&port->count);
38901 spin_unlock(&port->lock);
38902 spin_unlock_irqrestore(&info->netlock, flags);
38903
38904- if (port->count == 1) {
38905+ if (atomic_read(&port->count) == 1) {
38906 /* 1st open on this device, init hardware */
38907 retval = startup(info, tty);
38908 if (retval < 0)
38909@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38910 unsigned short new_crctype;
38911
38912 /* return error if TTY interface open */
38913- if (info->port.count)
38914+ if (atomic_read(&info->port.count))
38915 return -EBUSY;
38916
38917 switch (encoding)
38918@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
38919
38920 /* arbitrate between network and tty opens */
38921 spin_lock_irqsave(&info->netlock, flags);
38922- if (info->port.count != 0 || info->netcount != 0) {
38923+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38924 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38925 spin_unlock_irqrestore(&info->netlock, flags);
38926 return -EBUSY;
38927@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38928 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38929
38930 /* return error if TTY interface open */
38931- if (info->port.count)
38932+ if (atomic_read(&info->port.count))
38933 return -EBUSY;
38934
38935 if (cmd != SIOCWANDEV)
38936diff --git a/drivers/char/random.c b/drivers/char/random.c
38937index 9cd6968..6416f00 100644
38938--- a/drivers/char/random.c
38939+++ b/drivers/char/random.c
38940@@ -289,9 +289,6 @@
38941 /*
38942 * To allow fractional bits to be tracked, the entropy_count field is
38943 * denominated in units of 1/8th bits.
38944- *
38945- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38946- * credit_entropy_bits() needs to be 64 bits wide.
38947 */
38948 #define ENTROPY_SHIFT 3
38949 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38950@@ -439,9 +436,9 @@ struct entropy_store {
38951 };
38952
38953 static void push_to_pool(struct work_struct *work);
38954-static __u32 input_pool_data[INPUT_POOL_WORDS];
38955-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
38956-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
38957+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
38958+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38959+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38960
38961 static struct entropy_store input_pool = {
38962 .poolinfo = &poolinfo_table[0],
38963@@ -635,7 +632,7 @@ retry:
38964 /* The +2 corresponds to the /4 in the denominator */
38965
38966 do {
38967- unsigned int anfrac = min(pnfrac, pool_size/2);
38968+ u64 anfrac = min(pnfrac, pool_size/2);
38969 unsigned int add =
38970 ((pool_size - entropy_count)*anfrac*3) >> s;
38971
38972@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38973
38974 extract_buf(r, tmp);
38975 i = min_t(int, nbytes, EXTRACT_SIZE);
38976- if (copy_to_user(buf, tmp, i)) {
38977+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38978 ret = -EFAULT;
38979 break;
38980 }
38981@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
38982 static int proc_do_uuid(struct ctl_table *table, int write,
38983 void __user *buffer, size_t *lenp, loff_t *ppos)
38984 {
38985- struct ctl_table fake_table;
38986+ ctl_table_no_const fake_table;
38987 unsigned char buf[64], tmp_uuid[16], *uuid;
38988
38989 uuid = table->data;
38990@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38991 static int proc_do_entropy(struct ctl_table *table, int write,
38992 void __user *buffer, size_t *lenp, loff_t *ppos)
38993 {
38994- struct ctl_table fake_table;
38995+ ctl_table_no_const fake_table;
38996 int entropy_count;
38997
38998 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
38999diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39000index e496dae..b793e7d 100644
39001--- a/drivers/char/sonypi.c
39002+++ b/drivers/char/sonypi.c
39003@@ -54,6 +54,7 @@
39004
39005 #include <asm/uaccess.h>
39006 #include <asm/io.h>
39007+#include <asm/local.h>
39008
39009 #include <linux/sonypi.h>
39010
39011@@ -490,7 +491,7 @@ static struct sonypi_device {
39012 spinlock_t fifo_lock;
39013 wait_queue_head_t fifo_proc_list;
39014 struct fasync_struct *fifo_async;
39015- int open_count;
39016+ local_t open_count;
39017 int model;
39018 struct input_dev *input_jog_dev;
39019 struct input_dev *input_key_dev;
39020@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39021 static int sonypi_misc_release(struct inode *inode, struct file *file)
39022 {
39023 mutex_lock(&sonypi_device.lock);
39024- sonypi_device.open_count--;
39025+ local_dec(&sonypi_device.open_count);
39026 mutex_unlock(&sonypi_device.lock);
39027 return 0;
39028 }
39029@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39030 {
39031 mutex_lock(&sonypi_device.lock);
39032 /* Flush input queue on first open */
39033- if (!sonypi_device.open_count)
39034+ if (!local_read(&sonypi_device.open_count))
39035 kfifo_reset(&sonypi_device.fifo);
39036- sonypi_device.open_count++;
39037+ local_inc(&sonypi_device.open_count);
39038 mutex_unlock(&sonypi_device.lock);
39039
39040 return 0;
39041diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39042index 565a947..dcdc06e 100644
39043--- a/drivers/char/tpm/tpm_acpi.c
39044+++ b/drivers/char/tpm/tpm_acpi.c
39045@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39046 virt = acpi_os_map_iomem(start, len);
39047 if (!virt) {
39048 kfree(log->bios_event_log);
39049+ log->bios_event_log = NULL;
39050 printk("%s: ERROR - Unable to map memory\n", __func__);
39051 return -EIO;
39052 }
39053
39054- memcpy_fromio(log->bios_event_log, virt, len);
39055+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39056
39057 acpi_os_unmap_iomem(virt, len);
39058 return 0;
39059diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39060index 3a56a13..f8cbd25 100644
39061--- a/drivers/char/tpm/tpm_eventlog.c
39062+++ b/drivers/char/tpm/tpm_eventlog.c
39063@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39064 event = addr;
39065
39066 if ((event->event_type == 0 && event->event_size == 0) ||
39067- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39068+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39069 return NULL;
39070
39071 return addr;
39072@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39073 return NULL;
39074
39075 if ((event->event_type == 0 && event->event_size == 0) ||
39076- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39077+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39078 return NULL;
39079
39080 (*pos)++;
39081@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39082 int i;
39083
39084 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39085- seq_putc(m, data[i]);
39086+ if (!seq_putc(m, data[i]))
39087+ return -EFAULT;
39088
39089 return 0;
39090 }
39091diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39092index c3aac4c..88de09f9 100644
39093--- a/drivers/char/virtio_console.c
39094+++ b/drivers/char/virtio_console.c
39095@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39096 if (to_user) {
39097 ssize_t ret;
39098
39099- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39100+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39101 if (ret)
39102 return -EFAULT;
39103 } else {
39104@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39105 if (!port_has_data(port) && !port->host_connected)
39106 return 0;
39107
39108- return fill_readbuf(port, ubuf, count, true);
39109+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39110 }
39111
39112 static int wait_port_writable(struct port *port, bool nonblock)
39113diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39114index 4386697..754ceca 100644
39115--- a/drivers/clk/clk-composite.c
39116+++ b/drivers/clk/clk-composite.c
39117@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39118 struct clk *clk;
39119 struct clk_init_data init;
39120 struct clk_composite *composite;
39121- struct clk_ops *clk_composite_ops;
39122+ clk_ops_no_const *clk_composite_ops;
39123
39124 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39125 if (!composite) {
39126diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39127index dd3a78c..386d49c 100644
39128--- a/drivers/clk/socfpga/clk-gate.c
39129+++ b/drivers/clk/socfpga/clk-gate.c
39130@@ -22,6 +22,7 @@
39131 #include <linux/mfd/syscon.h>
39132 #include <linux/of.h>
39133 #include <linux/regmap.h>
39134+#include <asm/pgtable.h>
39135
39136 #include "clk.h"
39137
39138@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39139 return 0;
39140 }
39141
39142-static struct clk_ops gateclk_ops = {
39143+static clk_ops_no_const gateclk_ops __read_only = {
39144 .prepare = socfpga_clk_prepare,
39145 .recalc_rate = socfpga_clk_recalc_rate,
39146 .get_parent = socfpga_clk_get_parent,
39147@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39148 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39149 socfpga_clk->hw.bit_idx = clk_gate[1];
39150
39151- gateclk_ops.enable = clk_gate_ops.enable;
39152- gateclk_ops.disable = clk_gate_ops.disable;
39153+ pax_open_kernel();
39154+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39155+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39156+ pax_close_kernel();
39157 }
39158
39159 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39160diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39161index de6da95..c98278b 100644
39162--- a/drivers/clk/socfpga/clk-pll.c
39163+++ b/drivers/clk/socfpga/clk-pll.c
39164@@ -21,6 +21,7 @@
39165 #include <linux/io.h>
39166 #include <linux/of.h>
39167 #include <linux/of_address.h>
39168+#include <asm/pgtable.h>
39169
39170 #include "clk.h"
39171
39172@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39173 CLK_MGR_PLL_CLK_SRC_MASK;
39174 }
39175
39176-static struct clk_ops clk_pll_ops = {
39177+static clk_ops_no_const clk_pll_ops __read_only = {
39178 .recalc_rate = clk_pll_recalc_rate,
39179 .get_parent = clk_pll_get_parent,
39180 };
39181@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39182 pll_clk->hw.hw.init = &init;
39183
39184 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39185- clk_pll_ops.enable = clk_gate_ops.enable;
39186- clk_pll_ops.disable = clk_gate_ops.disable;
39187+ pax_open_kernel();
39188+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39189+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39190+ pax_close_kernel();
39191
39192 clk = clk_register(NULL, &pll_clk->hw.hw);
39193 if (WARN_ON(IS_ERR(clk))) {
39194diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39195index b0c18ed..1713a80 100644
39196--- a/drivers/cpufreq/acpi-cpufreq.c
39197+++ b/drivers/cpufreq/acpi-cpufreq.c
39198@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39199 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39200 per_cpu(acfreq_data, cpu) = data;
39201
39202- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39203- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39204+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39205+ pax_open_kernel();
39206+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39207+ pax_close_kernel();
39208+ }
39209
39210 result = acpi_processor_register_performance(data->acpi_data, cpu);
39211 if (result)
39212@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39213 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39214 break;
39215 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39216- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39217+ pax_open_kernel();
39218+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39219+ pax_close_kernel();
39220 break;
39221 default:
39222 break;
39223@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39224 if (!msrs)
39225 return;
39226
39227- acpi_cpufreq_driver.boost_supported = true;
39228- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39229+ pax_open_kernel();
39230+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39231+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39232+ pax_close_kernel();
39233
39234 cpu_notifier_register_begin();
39235
39236diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39237index fde97d6..3631eca 100644
39238--- a/drivers/cpufreq/cpufreq-dt.c
39239+++ b/drivers/cpufreq/cpufreq-dt.c
39240@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39241 if (!IS_ERR(cpu_reg))
39242 regulator_put(cpu_reg);
39243
39244- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39245+ pax_open_kernel();
39246+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39247+ pax_close_kernel();
39248
39249 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39250 if (ret)
39251diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39252index 7030c40..3a97de6 100644
39253--- a/drivers/cpufreq/cpufreq.c
39254+++ b/drivers/cpufreq/cpufreq.c
39255@@ -2135,7 +2135,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39256 }
39257
39258 mutex_lock(&cpufreq_governor_mutex);
39259- list_del(&governor->governor_list);
39260+ pax_list_del(&governor->governor_list);
39261 mutex_unlock(&cpufreq_governor_mutex);
39262 return;
39263 }
39264@@ -2351,7 +2351,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39265 return NOTIFY_OK;
39266 }
39267
39268-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39269+static struct notifier_block cpufreq_cpu_notifier = {
39270 .notifier_call = cpufreq_cpu_callback,
39271 };
39272
39273@@ -2391,13 +2391,17 @@ int cpufreq_boost_trigger_state(int state)
39274 return 0;
39275
39276 write_lock_irqsave(&cpufreq_driver_lock, flags);
39277- cpufreq_driver->boost_enabled = state;
39278+ pax_open_kernel();
39279+ *(bool *)&cpufreq_driver->boost_enabled = state;
39280+ pax_close_kernel();
39281 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39282
39283 ret = cpufreq_driver->set_boost(state);
39284 if (ret) {
39285 write_lock_irqsave(&cpufreq_driver_lock, flags);
39286- cpufreq_driver->boost_enabled = !state;
39287+ pax_open_kernel();
39288+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39289+ pax_close_kernel();
39290 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39291
39292 pr_err("%s: Cannot %s BOOST\n",
39293@@ -2454,8 +2458,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39294
39295 pr_debug("trying to register driver %s\n", driver_data->name);
39296
39297- if (driver_data->setpolicy)
39298- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39299+ if (driver_data->setpolicy) {
39300+ pax_open_kernel();
39301+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39302+ pax_close_kernel();
39303+ }
39304
39305 write_lock_irqsave(&cpufreq_driver_lock, flags);
39306 if (cpufreq_driver) {
39307@@ -2470,8 +2477,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39308 * Check if driver provides function to enable boost -
39309 * if not, use cpufreq_boost_set_sw as default
39310 */
39311- if (!cpufreq_driver->set_boost)
39312- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39313+ if (!cpufreq_driver->set_boost) {
39314+ pax_open_kernel();
39315+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39316+ pax_close_kernel();
39317+ }
39318
39319 ret = cpufreq_sysfs_create_file(&boost.attr);
39320 if (ret) {
39321diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39322index 1b44496..b80ff5e 100644
39323--- a/drivers/cpufreq/cpufreq_governor.c
39324+++ b/drivers/cpufreq/cpufreq_governor.c
39325@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39326 struct dbs_data *dbs_data;
39327 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39328 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39329- struct od_ops *od_ops = NULL;
39330+ const struct od_ops *od_ops = NULL;
39331 struct od_dbs_tuners *od_tuners = NULL;
39332 struct cs_dbs_tuners *cs_tuners = NULL;
39333 struct cpu_dbs_common_info *cpu_cdbs;
39334@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39335
39336 if ((cdata->governor == GOV_CONSERVATIVE) &&
39337 (!policy->governor->initialized)) {
39338- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39339+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39340
39341 cpufreq_register_notifier(cs_ops->notifier_block,
39342 CPUFREQ_TRANSITION_NOTIFIER);
39343@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39344
39345 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39346 (policy->governor->initialized == 1)) {
39347- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39348+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39349
39350 cpufreq_unregister_notifier(cs_ops->notifier_block,
39351 CPUFREQ_TRANSITION_NOTIFIER);
39352diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39353index cc401d1..8197340 100644
39354--- a/drivers/cpufreq/cpufreq_governor.h
39355+++ b/drivers/cpufreq/cpufreq_governor.h
39356@@ -212,7 +212,7 @@ struct common_dbs_data {
39357 void (*exit)(struct dbs_data *dbs_data);
39358
39359 /* Governor specific ops, see below */
39360- void *gov_ops;
39361+ const void *gov_ops;
39362 };
39363
39364 /* Governor Per policy data */
39365@@ -232,7 +232,7 @@ struct od_ops {
39366 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39367 unsigned int freq_next, unsigned int relation);
39368 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39369-};
39370+} __no_const;
39371
39372 struct cs_ops {
39373 struct notifier_block *notifier_block;
39374diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39375index ad3f38f..8f086cd 100644
39376--- a/drivers/cpufreq/cpufreq_ondemand.c
39377+++ b/drivers/cpufreq/cpufreq_ondemand.c
39378@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39379
39380 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39381
39382-static struct od_ops od_ops = {
39383+static struct od_ops od_ops __read_only = {
39384 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39385 .powersave_bias_target = generic_powersave_bias_target,
39386 .freq_increase = dbs_freq_increase,
39387@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39388 (struct cpufreq_policy *, unsigned int, unsigned int),
39389 unsigned int powersave_bias)
39390 {
39391- od_ops.powersave_bias_target = f;
39392+ pax_open_kernel();
39393+ *(void **)&od_ops.powersave_bias_target = f;
39394+ pax_close_kernel();
39395 od_set_powersave_bias(powersave_bias);
39396 }
39397 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39398
39399 void od_unregister_powersave_bias_handler(void)
39400 {
39401- od_ops.powersave_bias_target = generic_powersave_bias_target;
39402+ pax_open_kernel();
39403+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39404+ pax_close_kernel();
39405 od_set_powersave_bias(0);
39406 }
39407 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39408diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39409index 742eefb..e2fcfc8 100644
39410--- a/drivers/cpufreq/intel_pstate.c
39411+++ b/drivers/cpufreq/intel_pstate.c
39412@@ -133,10 +133,10 @@ struct pstate_funcs {
39413 struct cpu_defaults {
39414 struct pstate_adjust_policy pid_policy;
39415 struct pstate_funcs funcs;
39416-};
39417+} __do_const;
39418
39419 static struct pstate_adjust_policy pid_params;
39420-static struct pstate_funcs pstate_funcs;
39421+static struct pstate_funcs *pstate_funcs;
39422 static int hwp_active;
39423
39424 struct perf_limits {
39425@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39426
39427 cpu->pstate.current_pstate = pstate;
39428
39429- pstate_funcs.set(cpu, pstate);
39430+ pstate_funcs->set(cpu, pstate);
39431 }
39432
39433 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39434 {
39435- cpu->pstate.min_pstate = pstate_funcs.get_min();
39436- cpu->pstate.max_pstate = pstate_funcs.get_max();
39437- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39438- cpu->pstate.scaling = pstate_funcs.get_scaling();
39439+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39440+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39441+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39442+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39443
39444- if (pstate_funcs.get_vid)
39445- pstate_funcs.get_vid(cpu);
39446+ if (pstate_funcs->get_vid)
39447+ pstate_funcs->get_vid(cpu);
39448 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39449 }
39450
39451@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39452 rdmsrl(MSR_IA32_APERF, aperf);
39453 rdmsrl(MSR_IA32_MPERF, mperf);
39454
39455- if (!pstate_funcs.get_max() ||
39456- !pstate_funcs.get_min() ||
39457- !pstate_funcs.get_turbo())
39458+ if (!pstate_funcs->get_max() ||
39459+ !pstate_funcs->get_min() ||
39460+ !pstate_funcs->get_turbo())
39461 return -ENODEV;
39462
39463 rdmsrl(MSR_IA32_APERF, tmp);
39464@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39465 return 0;
39466 }
39467
39468-static void copy_pid_params(struct pstate_adjust_policy *policy)
39469+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39470 {
39471 pid_params.sample_rate_ms = policy->sample_rate_ms;
39472 pid_params.p_gain_pct = policy->p_gain_pct;
39473@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39474
39475 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39476 {
39477- pstate_funcs.get_max = funcs->get_max;
39478- pstate_funcs.get_min = funcs->get_min;
39479- pstate_funcs.get_turbo = funcs->get_turbo;
39480- pstate_funcs.get_scaling = funcs->get_scaling;
39481- pstate_funcs.set = funcs->set;
39482- pstate_funcs.get_vid = funcs->get_vid;
39483+ pstate_funcs = funcs;
39484 }
39485
39486 #if IS_ENABLED(CONFIG_ACPI)
39487diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39488index 529cfd9..0e28fff 100644
39489--- a/drivers/cpufreq/p4-clockmod.c
39490+++ b/drivers/cpufreq/p4-clockmod.c
39491@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39492 case 0x0F: /* Core Duo */
39493 case 0x16: /* Celeron Core */
39494 case 0x1C: /* Atom */
39495- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39496+ pax_open_kernel();
39497+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39498+ pax_close_kernel();
39499 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39500 case 0x0D: /* Pentium M (Dothan) */
39501- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39502+ pax_open_kernel();
39503+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39504+ pax_close_kernel();
39505 /* fall through */
39506 case 0x09: /* Pentium M (Banias) */
39507 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39508@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39509
39510 /* on P-4s, the TSC runs with constant frequency independent whether
39511 * throttling is active or not. */
39512- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39513+ pax_open_kernel();
39514+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39515+ pax_close_kernel();
39516
39517 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39518 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39519diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39520index 9bb42ba..b01b4a2 100644
39521--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39522+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39523@@ -18,14 +18,12 @@
39524 #include <asm/head.h>
39525 #include <asm/timer.h>
39526
39527-static struct cpufreq_driver *cpufreq_us3_driver;
39528-
39529 struct us3_freq_percpu_info {
39530 struct cpufreq_frequency_table table[4];
39531 };
39532
39533 /* Indexed by cpu number. */
39534-static struct us3_freq_percpu_info *us3_freq_table;
39535+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39536
39537 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39538 * in the Safari config register.
39539@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39540
39541 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39542 {
39543- if (cpufreq_us3_driver)
39544- us3_freq_target(policy, 0);
39545+ us3_freq_target(policy, 0);
39546
39547 return 0;
39548 }
39549
39550+static int __init us3_freq_init(void);
39551+static void __exit us3_freq_exit(void);
39552+
39553+static struct cpufreq_driver cpufreq_us3_driver = {
39554+ .init = us3_freq_cpu_init,
39555+ .verify = cpufreq_generic_frequency_table_verify,
39556+ .target_index = us3_freq_target,
39557+ .get = us3_freq_get,
39558+ .exit = us3_freq_cpu_exit,
39559+ .name = "UltraSPARC-III",
39560+
39561+};
39562+
39563 static int __init us3_freq_init(void)
39564 {
39565 unsigned long manuf, impl, ver;
39566- int ret;
39567
39568 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39569 return -ENODEV;
39570@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39571 (impl == CHEETAH_IMPL ||
39572 impl == CHEETAH_PLUS_IMPL ||
39573 impl == JAGUAR_IMPL ||
39574- impl == PANTHER_IMPL)) {
39575- struct cpufreq_driver *driver;
39576-
39577- ret = -ENOMEM;
39578- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39579- if (!driver)
39580- goto err_out;
39581-
39582- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39583- GFP_KERNEL);
39584- if (!us3_freq_table)
39585- goto err_out;
39586-
39587- driver->init = us3_freq_cpu_init;
39588- driver->verify = cpufreq_generic_frequency_table_verify;
39589- driver->target_index = us3_freq_target;
39590- driver->get = us3_freq_get;
39591- driver->exit = us3_freq_cpu_exit;
39592- strcpy(driver->name, "UltraSPARC-III");
39593-
39594- cpufreq_us3_driver = driver;
39595- ret = cpufreq_register_driver(driver);
39596- if (ret)
39597- goto err_out;
39598-
39599- return 0;
39600-
39601-err_out:
39602- if (driver) {
39603- kfree(driver);
39604- cpufreq_us3_driver = NULL;
39605- }
39606- kfree(us3_freq_table);
39607- us3_freq_table = NULL;
39608- return ret;
39609- }
39610+ impl == PANTHER_IMPL))
39611+ return cpufreq_register_driver(&cpufreq_us3_driver);
39612
39613 return -ENODEV;
39614 }
39615
39616 static void __exit us3_freq_exit(void)
39617 {
39618- if (cpufreq_us3_driver) {
39619- cpufreq_unregister_driver(cpufreq_us3_driver);
39620- kfree(cpufreq_us3_driver);
39621- cpufreq_us3_driver = NULL;
39622- kfree(us3_freq_table);
39623- us3_freq_table = NULL;
39624- }
39625+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39626 }
39627
39628 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39629diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39630index 7d4a315..21bb886 100644
39631--- a/drivers/cpufreq/speedstep-centrino.c
39632+++ b/drivers/cpufreq/speedstep-centrino.c
39633@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39634 !cpu_has(cpu, X86_FEATURE_EST))
39635 return -ENODEV;
39636
39637- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39638- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39639+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39640+ pax_open_kernel();
39641+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39642+ pax_close_kernel();
39643+ }
39644
39645 if (policy->cpu != 0)
39646 return -ENODEV;
39647diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39648index 2697e87..c32476c 100644
39649--- a/drivers/cpuidle/driver.c
39650+++ b/drivers/cpuidle/driver.c
39651@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39652
39653 static void poll_idle_init(struct cpuidle_driver *drv)
39654 {
39655- struct cpuidle_state *state = &drv->states[0];
39656+ cpuidle_state_no_const *state = &drv->states[0];
39657
39658 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39659 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39660diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39661index fb9f511..213e6cc 100644
39662--- a/drivers/cpuidle/governor.c
39663+++ b/drivers/cpuidle/governor.c
39664@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39665 mutex_lock(&cpuidle_lock);
39666 if (__cpuidle_find_governor(gov->name) == NULL) {
39667 ret = 0;
39668- list_add_tail(&gov->governor_list, &cpuidle_governors);
39669+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39670 if (!cpuidle_curr_governor ||
39671 cpuidle_curr_governor->rating < gov->rating)
39672 cpuidle_switch_governor(gov);
39673diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39674index 97c5903..023ad23 100644
39675--- a/drivers/cpuidle/sysfs.c
39676+++ b/drivers/cpuidle/sysfs.c
39677@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39678 NULL
39679 };
39680
39681-static struct attribute_group cpuidle_attr_group = {
39682+static attribute_group_no_const cpuidle_attr_group = {
39683 .attrs = cpuidle_default_attrs,
39684 .name = "cpuidle",
39685 };
39686diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39687index 8d2a772..33826c9 100644
39688--- a/drivers/crypto/hifn_795x.c
39689+++ b/drivers/crypto/hifn_795x.c
39690@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39691 MODULE_PARM_DESC(hifn_pll_ref,
39692 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39693
39694-static atomic_t hifn_dev_number;
39695+static atomic_unchecked_t hifn_dev_number;
39696
39697 #define ACRYPTO_OP_DECRYPT 0
39698 #define ACRYPTO_OP_ENCRYPT 1
39699@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39700 goto err_out_disable_pci_device;
39701
39702 snprintf(name, sizeof(name), "hifn%d",
39703- atomic_inc_return(&hifn_dev_number)-1);
39704+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39705
39706 err = pci_request_regions(pdev, name);
39707 if (err)
39708diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39709index 30b538d8..1610d75 100644
39710--- a/drivers/devfreq/devfreq.c
39711+++ b/drivers/devfreq/devfreq.c
39712@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39713 goto err_out;
39714 }
39715
39716- list_add(&governor->node, &devfreq_governor_list);
39717+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39718
39719 list_for_each_entry(devfreq, &devfreq_list, node) {
39720 int ret = 0;
39721@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39722 }
39723 }
39724
39725- list_del(&governor->node);
39726+ pax_list_del((struct list_head *)&governor->node);
39727 err_out:
39728 mutex_unlock(&devfreq_list_lock);
39729
39730diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39731index 3a2adb1..b3be9a3 100644
39732--- a/drivers/dma/sh/shdma-base.c
39733+++ b/drivers/dma/sh/shdma-base.c
39734@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39735 schan->slave_id = -EINVAL;
39736 }
39737
39738- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39739- sdev->desc_size, GFP_KERNEL);
39740+ schan->desc = kcalloc(sdev->desc_size,
39741+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39742 if (!schan->desc) {
39743 ret = -ENOMEM;
39744 goto edescalloc;
39745diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39746index aec8a84..7b45a1f 100644
39747--- a/drivers/dma/sh/shdmac.c
39748+++ b/drivers/dma/sh/shdmac.c
39749@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39750 return ret;
39751 }
39752
39753-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39754+static struct notifier_block sh_dmae_nmi_notifier = {
39755 .notifier_call = sh_dmae_nmi_handler,
39756
39757 /* Run before NMI debug handler and KGDB */
39758diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39759index 592af5f..bb1d583 100644
39760--- a/drivers/edac/edac_device.c
39761+++ b/drivers/edac/edac_device.c
39762@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39763 */
39764 int edac_device_alloc_index(void)
39765 {
39766- static atomic_t device_indexes = ATOMIC_INIT(0);
39767+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39768
39769- return atomic_inc_return(&device_indexes) - 1;
39770+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39771 }
39772 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39773
39774diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39775index 670d282..6675f4d 100644
39776--- a/drivers/edac/edac_mc_sysfs.c
39777+++ b/drivers/edac/edac_mc_sysfs.c
39778@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39779 struct dev_ch_attribute {
39780 struct device_attribute attr;
39781 int channel;
39782-};
39783+} __do_const;
39784
39785 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39786 struct dev_ch_attribute dev_attr_legacy_##_name = \
39787@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39788 }
39789
39790 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39791+ pax_open_kernel();
39792 if (mci->get_sdram_scrub_rate) {
39793- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39794- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39795+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39796+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39797 }
39798 if (mci->set_sdram_scrub_rate) {
39799- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39800- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39801+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39802+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39803 }
39804+ pax_close_kernel();
39805 err = device_create_file(&mci->dev,
39806 &dev_attr_sdram_scrub_rate);
39807 if (err) {
39808diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39809index 2cf44b4d..6dd2dc7 100644
39810--- a/drivers/edac/edac_pci.c
39811+++ b/drivers/edac/edac_pci.c
39812@@ -29,7 +29,7 @@
39813
39814 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39815 static LIST_HEAD(edac_pci_list);
39816-static atomic_t pci_indexes = ATOMIC_INIT(0);
39817+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39818
39819 /*
39820 * edac_pci_alloc_ctl_info
39821@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39822 */
39823 int edac_pci_alloc_index(void)
39824 {
39825- return atomic_inc_return(&pci_indexes) - 1;
39826+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39827 }
39828 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39829
39830diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39831index 24d877f..4e30133 100644
39832--- a/drivers/edac/edac_pci_sysfs.c
39833+++ b/drivers/edac/edac_pci_sysfs.c
39834@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39835 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39836 static int edac_pci_poll_msec = 1000; /* one second workq period */
39837
39838-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39839-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39840+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39841+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39842
39843 static struct kobject *edac_pci_top_main_kobj;
39844 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39845@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39846 void *value;
39847 ssize_t(*show) (void *, char *);
39848 ssize_t(*store) (void *, const char *, size_t);
39849-};
39850+} __do_const;
39851
39852 /* Set of show/store abstract level functions for PCI Parity object */
39853 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39854@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39855 edac_printk(KERN_CRIT, EDAC_PCI,
39856 "Signaled System Error on %s\n",
39857 pci_name(dev));
39858- atomic_inc(&pci_nonparity_count);
39859+ atomic_inc_unchecked(&pci_nonparity_count);
39860 }
39861
39862 if (status & (PCI_STATUS_PARITY)) {
39863@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39864 "Master Data Parity Error on %s\n",
39865 pci_name(dev));
39866
39867- atomic_inc(&pci_parity_count);
39868+ atomic_inc_unchecked(&pci_parity_count);
39869 }
39870
39871 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39872@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39873 "Detected Parity Error on %s\n",
39874 pci_name(dev));
39875
39876- atomic_inc(&pci_parity_count);
39877+ atomic_inc_unchecked(&pci_parity_count);
39878 }
39879 }
39880
39881@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39882 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39883 "Signaled System Error on %s\n",
39884 pci_name(dev));
39885- atomic_inc(&pci_nonparity_count);
39886+ atomic_inc_unchecked(&pci_nonparity_count);
39887 }
39888
39889 if (status & (PCI_STATUS_PARITY)) {
39890@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39891 "Master Data Parity Error on "
39892 "%s\n", pci_name(dev));
39893
39894- atomic_inc(&pci_parity_count);
39895+ atomic_inc_unchecked(&pci_parity_count);
39896 }
39897
39898 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39899@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39900 "Detected Parity Error on %s\n",
39901 pci_name(dev));
39902
39903- atomic_inc(&pci_parity_count);
39904+ atomic_inc_unchecked(&pci_parity_count);
39905 }
39906 }
39907 }
39908@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
39909 if (!check_pci_errors)
39910 return;
39911
39912- before_count = atomic_read(&pci_parity_count);
39913+ before_count = atomic_read_unchecked(&pci_parity_count);
39914
39915 /* scan all PCI devices looking for a Parity Error on devices and
39916 * bridges.
39917@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
39918 /* Only if operator has selected panic on PCI Error */
39919 if (edac_pci_get_panic_on_pe()) {
39920 /* If the count is different 'after' from 'before' */
39921- if (before_count != atomic_read(&pci_parity_count))
39922+ if (before_count != atomic_read_unchecked(&pci_parity_count))
39923 panic("EDAC: PCI Parity Error");
39924 }
39925 }
39926diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39927index c2359a1..8bd119d 100644
39928--- a/drivers/edac/mce_amd.h
39929+++ b/drivers/edac/mce_amd.h
39930@@ -74,7 +74,7 @@ struct amd_decoder_ops {
39931 bool (*mc0_mce)(u16, u8);
39932 bool (*mc1_mce)(u16, u8);
39933 bool (*mc2_mce)(u16, u8);
39934-};
39935+} __no_const;
39936
39937 void amd_report_gart_errors(bool);
39938 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
39939diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
39940index 57ea7f4..af06b76 100644
39941--- a/drivers/firewire/core-card.c
39942+++ b/drivers/firewire/core-card.c
39943@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
39944 const struct fw_card_driver *driver,
39945 struct device *device)
39946 {
39947- static atomic_t index = ATOMIC_INIT(-1);
39948+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
39949
39950- card->index = atomic_inc_return(&index);
39951+ card->index = atomic_inc_return_unchecked(&index);
39952 card->driver = driver;
39953 card->device = device;
39954 card->current_tlabel = 0;
39955@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
39956
39957 void fw_core_remove_card(struct fw_card *card)
39958 {
39959- struct fw_card_driver dummy_driver = dummy_driver_template;
39960+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
39961
39962 card->driver->update_phy_reg(card, 4,
39963 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
39964diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
39965index f9e3aee..269dbdb 100644
39966--- a/drivers/firewire/core-device.c
39967+++ b/drivers/firewire/core-device.c
39968@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
39969 struct config_rom_attribute {
39970 struct device_attribute attr;
39971 u32 key;
39972-};
39973+} __do_const;
39974
39975 static ssize_t show_immediate(struct device *dev,
39976 struct device_attribute *dattr, char *buf)
39977diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
39978index eb6935c..3cc2bfa 100644
39979--- a/drivers/firewire/core-transaction.c
39980+++ b/drivers/firewire/core-transaction.c
39981@@ -38,6 +38,7 @@
39982 #include <linux/timer.h>
39983 #include <linux/types.h>
39984 #include <linux/workqueue.h>
39985+#include <linux/sched.h>
39986
39987 #include <asm/byteorder.h>
39988
39989diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
39990index e1480ff6..1a429bd 100644
39991--- a/drivers/firewire/core.h
39992+++ b/drivers/firewire/core.h
39993@@ -111,6 +111,7 @@ struct fw_card_driver {
39994
39995 int (*stop_iso)(struct fw_iso_context *ctx);
39996 };
39997+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
39998
39999 void fw_card_initialize(struct fw_card *card,
40000 const struct fw_card_driver *driver, struct device *device);
40001diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40002index aff9018..fc87ded 100644
40003--- a/drivers/firewire/ohci.c
40004+++ b/drivers/firewire/ohci.c
40005@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
40006 be32_to_cpu(ohci->next_header));
40007 }
40008
40009+#ifndef CONFIG_GRKERNSEC
40010 if (param_remote_dma) {
40011 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40012 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40013 }
40014+#endif
40015
40016 spin_unlock_irq(&ohci->lock);
40017
40018@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40019 unsigned long flags;
40020 int n, ret = 0;
40021
40022+#ifndef CONFIG_GRKERNSEC
40023 if (param_remote_dma)
40024 return 0;
40025+#endif
40026
40027 /*
40028 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40029diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40030index 94a58a0..f5eba42 100644
40031--- a/drivers/firmware/dmi-id.c
40032+++ b/drivers/firmware/dmi-id.c
40033@@ -16,7 +16,7 @@
40034 struct dmi_device_attribute{
40035 struct device_attribute dev_attr;
40036 int field;
40037-};
40038+} __do_const;
40039 #define to_dmi_dev_attr(_dev_attr) \
40040 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40041
40042diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40043index 69fac06..820f0c9a 100644
40044--- a/drivers/firmware/dmi_scan.c
40045+++ b/drivers/firmware/dmi_scan.c
40046@@ -901,7 +901,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40047 if (buf == NULL)
40048 return -1;
40049
40050- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40051+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40052
40053 dmi_unmap(buf);
40054 return 0;
40055diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40056index 4fd9961..52d60ce 100644
40057--- a/drivers/firmware/efi/cper.c
40058+++ b/drivers/firmware/efi/cper.c
40059@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40060 */
40061 u64 cper_next_record_id(void)
40062 {
40063- static atomic64_t seq;
40064+ static atomic64_unchecked_t seq;
40065
40066- if (!atomic64_read(&seq))
40067- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40068+ if (!atomic64_read_unchecked(&seq))
40069+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40070
40071- return atomic64_inc_return(&seq);
40072+ return atomic64_inc_return_unchecked(&seq);
40073 }
40074 EXPORT_SYMBOL_GPL(cper_next_record_id);
40075
40076diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40077index 9035c1b..aff45f8 100644
40078--- a/drivers/firmware/efi/efi.c
40079+++ b/drivers/firmware/efi/efi.c
40080@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
40081 };
40082
40083 static struct efivars generic_efivars;
40084-static struct efivar_operations generic_ops;
40085+static efivar_operations_no_const generic_ops __read_only;
40086
40087 static int generic_ops_register(void)
40088 {
40089- generic_ops.get_variable = efi.get_variable;
40090- generic_ops.set_variable = efi.set_variable;
40091- generic_ops.get_next_variable = efi.get_next_variable;
40092- generic_ops.query_variable_store = efi_query_variable_store;
40093+ pax_open_kernel();
40094+ *(void **)&generic_ops.get_variable = efi.get_variable;
40095+ *(void **)&generic_ops.set_variable = efi.set_variable;
40096+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40097+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40098+ pax_close_kernel();
40099
40100 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40101 }
40102diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40103index f256ecd..387dcb1 100644
40104--- a/drivers/firmware/efi/efivars.c
40105+++ b/drivers/firmware/efi/efivars.c
40106@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40107 static int
40108 create_efivars_bin_attributes(void)
40109 {
40110- struct bin_attribute *attr;
40111+ bin_attribute_no_const *attr;
40112 int error;
40113
40114 /* new_var */
40115diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40116index 2f569aa..c95f4fb 100644
40117--- a/drivers/firmware/google/memconsole.c
40118+++ b/drivers/firmware/google/memconsole.c
40119@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40120 if (!found_memconsole())
40121 return -ENODEV;
40122
40123- memconsole_bin_attr.size = memconsole_length;
40124+ pax_open_kernel();
40125+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40126+ pax_close_kernel();
40127+
40128 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40129 }
40130
40131diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40132index 3cfcfc6..09d6f117 100644
40133--- a/drivers/gpio/gpio-em.c
40134+++ b/drivers/gpio/gpio-em.c
40135@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40136 struct em_gio_priv *p;
40137 struct resource *io[2], *irq[2];
40138 struct gpio_chip *gpio_chip;
40139- struct irq_chip *irq_chip;
40140+ irq_chip_no_const *irq_chip;
40141 const char *name = dev_name(&pdev->dev);
40142 int ret;
40143
40144diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40145index 7818cd1..1be40e5 100644
40146--- a/drivers/gpio/gpio-ich.c
40147+++ b/drivers/gpio/gpio-ich.c
40148@@ -94,7 +94,7 @@ struct ichx_desc {
40149 * this option allows driver caching written output values
40150 */
40151 bool use_outlvl_cache;
40152-};
40153+} __do_const;
40154
40155 static struct {
40156 spinlock_t lock;
40157diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40158index f476ae2..05e1bdd 100644
40159--- a/drivers/gpio/gpio-omap.c
40160+++ b/drivers/gpio/gpio-omap.c
40161@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40162 const struct omap_gpio_platform_data *pdata;
40163 struct resource *res;
40164 struct gpio_bank *bank;
40165- struct irq_chip *irqc;
40166+ irq_chip_no_const *irqc;
40167 int ret;
40168
40169 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40170diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40171index 584484e..e26ebd6 100644
40172--- a/drivers/gpio/gpio-rcar.c
40173+++ b/drivers/gpio/gpio-rcar.c
40174@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40175 struct gpio_rcar_priv *p;
40176 struct resource *io, *irq;
40177 struct gpio_chip *gpio_chip;
40178- struct irq_chip *irq_chip;
40179+ irq_chip_no_const *irq_chip;
40180 struct device *dev = &pdev->dev;
40181 const char *name = dev_name(dev);
40182 int ret;
40183diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40184index c1caa45..f0f97d2 100644
40185--- a/drivers/gpio/gpio-vr41xx.c
40186+++ b/drivers/gpio/gpio-vr41xx.c
40187@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40188 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40189 maskl, pendl, maskh, pendh);
40190
40191- atomic_inc(&irq_err_count);
40192+ atomic_inc_unchecked(&irq_err_count);
40193
40194 return -EINVAL;
40195 }
40196diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40197index 568aa2b..d1204d8 100644
40198--- a/drivers/gpio/gpiolib.c
40199+++ b/drivers/gpio/gpiolib.c
40200@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40201 }
40202
40203 if (gpiochip->irqchip) {
40204- gpiochip->irqchip->irq_request_resources = NULL;
40205- gpiochip->irqchip->irq_release_resources = NULL;
40206+ pax_open_kernel();
40207+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40208+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40209+ pax_close_kernel();
40210 gpiochip->irqchip = NULL;
40211 }
40212 }
40213@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40214 gpiochip->irqchip = NULL;
40215 return -EINVAL;
40216 }
40217- irqchip->irq_request_resources = gpiochip_irq_reqres;
40218- irqchip->irq_release_resources = gpiochip_irq_relres;
40219+
40220+ pax_open_kernel();
40221+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40222+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40223+ pax_close_kernel();
40224
40225 /*
40226 * Prepare the mapping since the irqchip shall be orthogonal to
40227diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40228index 29168fa..c9baec6 100644
40229--- a/drivers/gpu/drm/drm_crtc.c
40230+++ b/drivers/gpu/drm/drm_crtc.c
40231@@ -3964,7 +3964,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40232 goto done;
40233 }
40234
40235- if (copy_to_user(&enum_ptr[copied].name,
40236+ if (copy_to_user(enum_ptr[copied].name,
40237 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40238 ret = -EFAULT;
40239 goto done;
40240diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40241index 4f41377..ee33f40 100644
40242--- a/drivers/gpu/drm/drm_drv.c
40243+++ b/drivers/gpu/drm/drm_drv.c
40244@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40245
40246 drm_device_set_unplugged(dev);
40247
40248- if (dev->open_count == 0) {
40249+ if (local_read(&dev->open_count) == 0) {
40250 drm_put_dev(dev);
40251 }
40252 mutex_unlock(&drm_global_mutex);
40253diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40254index 0b9514b..6acd174 100644
40255--- a/drivers/gpu/drm/drm_fops.c
40256+++ b/drivers/gpu/drm/drm_fops.c
40257@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40258 return PTR_ERR(minor);
40259
40260 dev = minor->dev;
40261- if (!dev->open_count++)
40262+ if (local_inc_return(&dev->open_count) == 1)
40263 need_setup = 1;
40264
40265 /* share address_space across all char-devs of a single device */
40266@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40267 return 0;
40268
40269 err_undo:
40270- dev->open_count--;
40271+ local_dec(&dev->open_count);
40272 drm_minor_release(minor);
40273 return retcode;
40274 }
40275@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40276
40277 mutex_lock(&drm_global_mutex);
40278
40279- DRM_DEBUG("open_count = %d\n", dev->open_count);
40280+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40281
40282 mutex_lock(&dev->struct_mutex);
40283 list_del(&file_priv->lhead);
40284@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40285 * Begin inline drm_release
40286 */
40287
40288- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40289+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40290 task_pid_nr(current),
40291 (long)old_encode_dev(file_priv->minor->kdev->devt),
40292- dev->open_count);
40293+ local_read(&dev->open_count));
40294
40295 /* Release any auth tokens that might point to this file_priv,
40296 (do that under the drm_global_mutex) */
40297@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40298 * End inline drm_release
40299 */
40300
40301- if (!--dev->open_count) {
40302+ if (local_dec_and_test(&dev->open_count)) {
40303 retcode = drm_lastclose(dev);
40304 if (drm_device_is_unplugged(dev))
40305 drm_put_dev(dev);
40306diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40307index 3d2e91c..d31c4c9 100644
40308--- a/drivers/gpu/drm/drm_global.c
40309+++ b/drivers/gpu/drm/drm_global.c
40310@@ -36,7 +36,7 @@
40311 struct drm_global_item {
40312 struct mutex mutex;
40313 void *object;
40314- int refcount;
40315+ atomic_t refcount;
40316 };
40317
40318 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40319@@ -49,7 +49,7 @@ void drm_global_init(void)
40320 struct drm_global_item *item = &glob[i];
40321 mutex_init(&item->mutex);
40322 item->object = NULL;
40323- item->refcount = 0;
40324+ atomic_set(&item->refcount, 0);
40325 }
40326 }
40327
40328@@ -59,7 +59,7 @@ void drm_global_release(void)
40329 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40330 struct drm_global_item *item = &glob[i];
40331 BUG_ON(item->object != NULL);
40332- BUG_ON(item->refcount != 0);
40333+ BUG_ON(atomic_read(&item->refcount) != 0);
40334 }
40335 }
40336
40337@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40338 struct drm_global_item *item = &glob[ref->global_type];
40339
40340 mutex_lock(&item->mutex);
40341- if (item->refcount == 0) {
40342+ if (atomic_read(&item->refcount) == 0) {
40343 item->object = kzalloc(ref->size, GFP_KERNEL);
40344 if (unlikely(item->object == NULL)) {
40345 ret = -ENOMEM;
40346@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40347 goto out_err;
40348
40349 }
40350- ++item->refcount;
40351+ atomic_inc(&item->refcount);
40352 ref->object = item->object;
40353 mutex_unlock(&item->mutex);
40354 return 0;
40355@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40356 struct drm_global_item *item = &glob[ref->global_type];
40357
40358 mutex_lock(&item->mutex);
40359- BUG_ON(item->refcount == 0);
40360+ BUG_ON(atomic_read(&item->refcount) == 0);
40361 BUG_ON(ref->object != item->object);
40362- if (--item->refcount == 0) {
40363+ if (atomic_dec_and_test(&item->refcount)) {
40364 ref->release(ref);
40365 item->object = NULL;
40366 }
40367diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40368index 51efebd..2b70935 100644
40369--- a/drivers/gpu/drm/drm_info.c
40370+++ b/drivers/gpu/drm/drm_info.c
40371@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40372 struct drm_local_map *map;
40373 struct drm_map_list *r_list;
40374
40375- /* Hardcoded from _DRM_FRAME_BUFFER,
40376- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40377- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40378- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40379+ static const char * const types[] = {
40380+ [_DRM_FRAME_BUFFER] = "FB",
40381+ [_DRM_REGISTERS] = "REG",
40382+ [_DRM_SHM] = "SHM",
40383+ [_DRM_AGP] = "AGP",
40384+ [_DRM_SCATTER_GATHER] = "SG",
40385+ [_DRM_CONSISTENT] = "PCI"};
40386 const char *type;
40387 int i;
40388
40389@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40390 map = r_list->map;
40391 if (!map)
40392 continue;
40393- if (map->type < 0 || map->type > 5)
40394+ if (map->type >= ARRAY_SIZE(types))
40395 type = "??";
40396 else
40397 type = types[map->type];
40398diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40399index 2f4c4343..dd12cd2 100644
40400--- a/drivers/gpu/drm/drm_ioc32.c
40401+++ b/drivers/gpu/drm/drm_ioc32.c
40402@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40403 request = compat_alloc_user_space(nbytes);
40404 if (!access_ok(VERIFY_WRITE, request, nbytes))
40405 return -EFAULT;
40406- list = (struct drm_buf_desc *) (request + 1);
40407+ list = (struct drm_buf_desc __user *) (request + 1);
40408
40409 if (__put_user(count, &request->count)
40410 || __put_user(list, &request->list))
40411@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40412 request = compat_alloc_user_space(nbytes);
40413 if (!access_ok(VERIFY_WRITE, request, nbytes))
40414 return -EFAULT;
40415- list = (struct drm_buf_pub *) (request + 1);
40416+ list = (struct drm_buf_pub __user *) (request + 1);
40417
40418 if (__put_user(count, &request->count)
40419 || __put_user(list, &request->list))
40420@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40421 return 0;
40422 }
40423
40424-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40425+drm_ioctl_compat_t drm_compat_ioctls[] = {
40426 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40427 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40428 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40429@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40430 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40431 {
40432 unsigned int nr = DRM_IOCTL_NR(cmd);
40433- drm_ioctl_compat_t *fn;
40434 int ret;
40435
40436 /* Assume that ioctls without an explicit compat routine will just
40437@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40438 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40439 return drm_ioctl(filp, cmd, arg);
40440
40441- fn = drm_compat_ioctls[nr];
40442-
40443- if (fn != NULL)
40444- ret = (*fn) (filp, cmd, arg);
40445+ if (drm_compat_ioctls[nr] != NULL)
40446+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40447 else
40448 ret = drm_ioctl(filp, cmd, arg);
40449
40450diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40451index 00587a1..57a65ca 100644
40452--- a/drivers/gpu/drm/drm_ioctl.c
40453+++ b/drivers/gpu/drm/drm_ioctl.c
40454@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40455 struct drm_file *file_priv = filp->private_data;
40456 struct drm_device *dev;
40457 const struct drm_ioctl_desc *ioctl = NULL;
40458- drm_ioctl_t *func;
40459+ drm_ioctl_no_const_t func;
40460 unsigned int nr = DRM_IOCTL_NR(cmd);
40461 int retcode = -EINVAL;
40462 char stack_kdata[128];
40463diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40464index 93ec5dc..82acbaf 100644
40465--- a/drivers/gpu/drm/i810/i810_drv.h
40466+++ b/drivers/gpu/drm/i810/i810_drv.h
40467@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40468 int page_flipping;
40469
40470 wait_queue_head_t irq_queue;
40471- atomic_t irq_received;
40472- atomic_t irq_emitted;
40473+ atomic_unchecked_t irq_received;
40474+ atomic_unchecked_t irq_emitted;
40475
40476 int front_offset;
40477 } drm_i810_private_t;
40478diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40479index ecee3bc..ad5ae67 100644
40480--- a/drivers/gpu/drm/i915/i915_dma.c
40481+++ b/drivers/gpu/drm/i915/i915_dma.c
40482@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40483 * locking inversion with the driver load path. And the access here is
40484 * completely racy anyway. So don't bother with locking for now.
40485 */
40486- return dev->open_count == 0;
40487+ return local_read(&dev->open_count) == 0;
40488 }
40489
40490 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40491diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40492index 1173831..7dfb389 100644
40493--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40494+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40495@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40496 static int
40497 validate_exec_list(struct drm_device *dev,
40498 struct drm_i915_gem_exec_object2 *exec,
40499- int count)
40500+ unsigned int count)
40501 {
40502 unsigned relocs_total = 0;
40503 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40504 unsigned invalid_flags;
40505- int i;
40506+ unsigned int i;
40507
40508 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40509 if (USES_FULL_PPGTT(dev))
40510diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40511index 176de63..1ef9ac7 100644
40512--- a/drivers/gpu/drm/i915/i915_ioc32.c
40513+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40514@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40515 (unsigned long)request);
40516 }
40517
40518-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40519+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40520 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40521 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40522 [DRM_I915_GETPARAM] = compat_i915_getparam,
40523@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40524 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40525 {
40526 unsigned int nr = DRM_IOCTL_NR(cmd);
40527- drm_ioctl_compat_t *fn = NULL;
40528 int ret;
40529
40530 if (nr < DRM_COMMAND_BASE)
40531 return drm_compat_ioctl(filp, cmd, arg);
40532
40533- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40534- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40535-
40536- if (fn != NULL)
40537+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40538+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40539 ret = (*fn) (filp, cmd, arg);
40540- else
40541+ } else
40542 ret = drm_ioctl(filp, cmd, arg);
40543
40544 return ret;
40545diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40546index c10b52e..e5e27ff 100644
40547--- a/drivers/gpu/drm/i915/intel_display.c
40548+++ b/drivers/gpu/drm/i915/intel_display.c
40549@@ -12935,13 +12935,13 @@ struct intel_quirk {
40550 int subsystem_vendor;
40551 int subsystem_device;
40552 void (*hook)(struct drm_device *dev);
40553-};
40554+} __do_const;
40555
40556 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40557 struct intel_dmi_quirk {
40558 void (*hook)(struct drm_device *dev);
40559 const struct dmi_system_id (*dmi_id_list)[];
40560-};
40561+} __do_const;
40562
40563 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40564 {
40565@@ -12949,18 +12949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40566 return 1;
40567 }
40568
40569-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40570+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40571 {
40572- .dmi_id_list = &(const struct dmi_system_id[]) {
40573- {
40574- .callback = intel_dmi_reverse_brightness,
40575- .ident = "NCR Corporation",
40576- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40577- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40578- },
40579- },
40580- { } /* terminating entry */
40581+ .callback = intel_dmi_reverse_brightness,
40582+ .ident = "NCR Corporation",
40583+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40584+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40585 },
40586+ },
40587+ { } /* terminating entry */
40588+};
40589+
40590+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40591+ {
40592+ .dmi_id_list = &intel_dmi_quirks_table,
40593 .hook = quirk_invert_brightness,
40594 },
40595 };
40596diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40597index b250130..98df2a4 100644
40598--- a/drivers/gpu/drm/imx/imx-drm-core.c
40599+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40600@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40601 if (imxdrm->pipes >= MAX_CRTC)
40602 return -EINVAL;
40603
40604- if (imxdrm->drm->open_count)
40605+ if (local_read(&imxdrm->drm->open_count))
40606 return -EBUSY;
40607
40608 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40609diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40610index b4a2014..219ab78 100644
40611--- a/drivers/gpu/drm/mga/mga_drv.h
40612+++ b/drivers/gpu/drm/mga/mga_drv.h
40613@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40614 u32 clear_cmd;
40615 u32 maccess;
40616
40617- atomic_t vbl_received; /**< Number of vblanks received. */
40618+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40619 wait_queue_head_t fence_queue;
40620- atomic_t last_fence_retired;
40621+ atomic_unchecked_t last_fence_retired;
40622 u32 next_fence_to_post;
40623
40624 unsigned int fb_cpp;
40625diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40626index 729bfd5..ead8823 100644
40627--- a/drivers/gpu/drm/mga/mga_ioc32.c
40628+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40629@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40630 return 0;
40631 }
40632
40633-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40634+drm_ioctl_compat_t mga_compat_ioctls[] = {
40635 [DRM_MGA_INIT] = compat_mga_init,
40636 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40637 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40638@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40639 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40640 {
40641 unsigned int nr = DRM_IOCTL_NR(cmd);
40642- drm_ioctl_compat_t *fn = NULL;
40643 int ret;
40644
40645 if (nr < DRM_COMMAND_BASE)
40646 return drm_compat_ioctl(filp, cmd, arg);
40647
40648- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40649- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40650-
40651- if (fn != NULL)
40652+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40653+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40654 ret = (*fn) (filp, cmd, arg);
40655- else
40656+ } else
40657 ret = drm_ioctl(filp, cmd, arg);
40658
40659 return ret;
40660diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40661index 1b071b8..de8601a 100644
40662--- a/drivers/gpu/drm/mga/mga_irq.c
40663+++ b/drivers/gpu/drm/mga/mga_irq.c
40664@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40665 if (crtc != 0)
40666 return 0;
40667
40668- return atomic_read(&dev_priv->vbl_received);
40669+ return atomic_read_unchecked(&dev_priv->vbl_received);
40670 }
40671
40672
40673@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40674 /* VBLANK interrupt */
40675 if (status & MGA_VLINEPEN) {
40676 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40677- atomic_inc(&dev_priv->vbl_received);
40678+ atomic_inc_unchecked(&dev_priv->vbl_received);
40679 drm_handle_vblank(dev, 0);
40680 handled = 1;
40681 }
40682@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40683 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40684 MGA_WRITE(MGA_PRIMEND, prim_end);
40685
40686- atomic_inc(&dev_priv->last_fence_retired);
40687+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40688 wake_up(&dev_priv->fence_queue);
40689 handled = 1;
40690 }
40691@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40692 * using fences.
40693 */
40694 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40695- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40696+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40697 - *sequence) <= (1 << 23)));
40698
40699 *sequence = cur_fence;
40700diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40701index 7df6acc..84bbe52 100644
40702--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40703+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40704@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40705 struct bit_table {
40706 const char id;
40707 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40708-};
40709+} __no_const;
40710
40711 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40712
40713diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40714index 8ae36f2..1147a30 100644
40715--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40716+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40717@@ -121,7 +121,6 @@ struct nouveau_drm {
40718 struct drm_global_reference mem_global_ref;
40719 struct ttm_bo_global_ref bo_global_ref;
40720 struct ttm_bo_device bdev;
40721- atomic_t validate_sequence;
40722 int (*move)(struct nouveau_channel *,
40723 struct ttm_buffer_object *,
40724 struct ttm_mem_reg *, struct ttm_mem_reg *);
40725diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40726index 462679a..88e32a7 100644
40727--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40728+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40729@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40730 unsigned long arg)
40731 {
40732 unsigned int nr = DRM_IOCTL_NR(cmd);
40733- drm_ioctl_compat_t *fn = NULL;
40734+ drm_ioctl_compat_t fn = NULL;
40735 int ret;
40736
40737 if (nr < DRM_COMMAND_BASE)
40738diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40739index 3d1cfcb..0542700 100644
40740--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40741+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40742@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40743 }
40744
40745 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40746- nouveau_vram_manager_init,
40747- nouveau_vram_manager_fini,
40748- nouveau_vram_manager_new,
40749- nouveau_vram_manager_del,
40750- nouveau_vram_manager_debug
40751+ .init = nouveau_vram_manager_init,
40752+ .takedown = nouveau_vram_manager_fini,
40753+ .get_node = nouveau_vram_manager_new,
40754+ .put_node = nouveau_vram_manager_del,
40755+ .debug = nouveau_vram_manager_debug
40756 };
40757
40758 static int
40759@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40760 }
40761
40762 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40763- nouveau_gart_manager_init,
40764- nouveau_gart_manager_fini,
40765- nouveau_gart_manager_new,
40766- nouveau_gart_manager_del,
40767- nouveau_gart_manager_debug
40768+ .init = nouveau_gart_manager_init,
40769+ .takedown = nouveau_gart_manager_fini,
40770+ .get_node = nouveau_gart_manager_new,
40771+ .put_node = nouveau_gart_manager_del,
40772+ .debug = nouveau_gart_manager_debug
40773 };
40774
40775 /*XXX*/
40776@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40777 }
40778
40779 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40780- nv04_gart_manager_init,
40781- nv04_gart_manager_fini,
40782- nv04_gart_manager_new,
40783- nv04_gart_manager_del,
40784- nv04_gart_manager_debug
40785+ .init = nv04_gart_manager_init,
40786+ .takedown = nv04_gart_manager_fini,
40787+ .get_node = nv04_gart_manager_new,
40788+ .put_node = nv04_gart_manager_del,
40789+ .debug = nv04_gart_manager_debug
40790 };
40791
40792 int
40793diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40794index c7592ec..dd45ebc 100644
40795--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40796+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40797@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40798 * locking inversion with the driver load path. And the access here is
40799 * completely racy anyway. So don't bother with locking for now.
40800 */
40801- return dev->open_count == 0;
40802+ return local_read(&dev->open_count) == 0;
40803 }
40804
40805 static const struct vga_switcheroo_client_ops
40806diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40807index 9782364..89bd954 100644
40808--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40809+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40810@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40811 int ret;
40812
40813 mutex_lock(&qdev->async_io_mutex);
40814- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40815+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40816 if (qdev->last_sent_io_cmd > irq_num) {
40817 if (intr)
40818 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40819- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40820+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40821 else
40822 ret = wait_event_timeout(qdev->io_cmd_event,
40823- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40824+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40825 /* 0 is timeout, just bail the "hw" has gone away */
40826 if (ret <= 0)
40827 goto out;
40828- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40829+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40830 }
40831 outb(val, addr);
40832 qdev->last_sent_io_cmd = irq_num + 1;
40833 if (intr)
40834 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40835- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40836+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40837 else
40838 ret = wait_event_timeout(qdev->io_cmd_event,
40839- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40840+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40841 out:
40842 if (ret > 0)
40843 ret = 0;
40844diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40845index 6911b8c..89d6867 100644
40846--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40847+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40848@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40849 struct drm_info_node *node = (struct drm_info_node *) m->private;
40850 struct qxl_device *qdev = node->minor->dev->dev_private;
40851
40852- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40853- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40854- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40855- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40856+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40857+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40858+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40859+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40860 seq_printf(m, "%d\n", qdev->irq_received_error);
40861 return 0;
40862 }
40863diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40864index 7c6cafe..460f542 100644
40865--- a/drivers/gpu/drm/qxl/qxl_drv.h
40866+++ b/drivers/gpu/drm/qxl/qxl_drv.h
40867@@ -290,10 +290,10 @@ struct qxl_device {
40868 unsigned int last_sent_io_cmd;
40869
40870 /* interrupt handling */
40871- atomic_t irq_received;
40872- atomic_t irq_received_display;
40873- atomic_t irq_received_cursor;
40874- atomic_t irq_received_io_cmd;
40875+ atomic_unchecked_t irq_received;
40876+ atomic_unchecked_t irq_received_display;
40877+ atomic_unchecked_t irq_received_cursor;
40878+ atomic_unchecked_t irq_received_io_cmd;
40879 unsigned irq_received_error;
40880 wait_queue_head_t display_event;
40881 wait_queue_head_t cursor_event;
40882diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40883index b110883..dd06418 100644
40884--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40885+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40886@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40887
40888 /* TODO copy slow path code from i915 */
40889 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40890- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40891+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40892
40893 {
40894 struct qxl_drawable *draw = fb_cmd;
40895@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40896 struct drm_qxl_reloc reloc;
40897
40898 if (copy_from_user(&reloc,
40899- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40900+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40901 sizeof(reloc))) {
40902 ret = -EFAULT;
40903 goto out_free_bos;
40904@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40905
40906 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
40907
40908- struct drm_qxl_command *commands =
40909- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40910+ struct drm_qxl_command __user *commands =
40911+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
40912
40913- if (copy_from_user(&user_cmd, &commands[cmd_num],
40914+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40915 sizeof(user_cmd)))
40916 return -EFAULT;
40917
40918diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40919index 0bf1e20..42a7310 100644
40920--- a/drivers/gpu/drm/qxl/qxl_irq.c
40921+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40922@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
40923 if (!pending)
40924 return IRQ_NONE;
40925
40926- atomic_inc(&qdev->irq_received);
40927+ atomic_inc_unchecked(&qdev->irq_received);
40928
40929 if (pending & QXL_INTERRUPT_DISPLAY) {
40930- atomic_inc(&qdev->irq_received_display);
40931+ atomic_inc_unchecked(&qdev->irq_received_display);
40932 wake_up_all(&qdev->display_event);
40933 qxl_queue_garbage_collect(qdev, false);
40934 }
40935 if (pending & QXL_INTERRUPT_CURSOR) {
40936- atomic_inc(&qdev->irq_received_cursor);
40937+ atomic_inc_unchecked(&qdev->irq_received_cursor);
40938 wake_up_all(&qdev->cursor_event);
40939 }
40940 if (pending & QXL_INTERRUPT_IO_CMD) {
40941- atomic_inc(&qdev->irq_received_io_cmd);
40942+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40943 wake_up_all(&qdev->io_cmd_event);
40944 }
40945 if (pending & QXL_INTERRUPT_ERROR) {
40946@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40947 init_waitqueue_head(&qdev->io_cmd_event);
40948 INIT_WORK(&qdev->client_monitors_config_work,
40949 qxl_client_monitors_config_work_func);
40950- atomic_set(&qdev->irq_received, 0);
40951- atomic_set(&qdev->irq_received_display, 0);
40952- atomic_set(&qdev->irq_received_cursor, 0);
40953- atomic_set(&qdev->irq_received_io_cmd, 0);
40954+ atomic_set_unchecked(&qdev->irq_received, 0);
40955+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40956+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40957+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40958 qdev->irq_received_error = 0;
40959 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
40960 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40961diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40962index 0cbc4c9..0e46686 100644
40963--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40964+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40965@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40966 }
40967 }
40968
40969-static struct vm_operations_struct qxl_ttm_vm_ops;
40970+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40971 static const struct vm_operations_struct *ttm_vm_ops;
40972
40973 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40974@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40975 return r;
40976 if (unlikely(ttm_vm_ops == NULL)) {
40977 ttm_vm_ops = vma->vm_ops;
40978+ pax_open_kernel();
40979 qxl_ttm_vm_ops = *ttm_vm_ops;
40980 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40981+ pax_close_kernel();
40982 }
40983 vma->vm_ops = &qxl_ttm_vm_ops;
40984 return 0;
40985@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40986 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40987 {
40988 #if defined(CONFIG_DEBUG_FS)
40989- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40990- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40991- unsigned i;
40992+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40993+ {
40994+ .name = "qxl_mem_mm",
40995+ .show = &qxl_mm_dump_table,
40996+ },
40997+ {
40998+ .name = "qxl_surf_mm",
40999+ .show = &qxl_mm_dump_table,
41000+ }
41001+ };
41002
41003- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41004- if (i == 0)
41005- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41006- else
41007- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41008- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41009- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41010- qxl_mem_types_list[i].driver_features = 0;
41011- if (i == 0)
41012- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41013- else
41014- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41015+ pax_open_kernel();
41016+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41017+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41018+ pax_close_kernel();
41019
41020- }
41021- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41022+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41023 #else
41024 return 0;
41025 #endif
41026diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41027index 2c45ac9..5d740f8 100644
41028--- a/drivers/gpu/drm/r128/r128_cce.c
41029+++ b/drivers/gpu/drm/r128/r128_cce.c
41030@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41031
41032 /* GH: Simple idle check.
41033 */
41034- atomic_set(&dev_priv->idle_count, 0);
41035+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41036
41037 /* We don't support anything other than bus-mastering ring mode,
41038 * but the ring can be in either AGP or PCI space for the ring
41039diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41040index 723e5d6..102dbaf 100644
41041--- a/drivers/gpu/drm/r128/r128_drv.h
41042+++ b/drivers/gpu/drm/r128/r128_drv.h
41043@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41044 int is_pci;
41045 unsigned long cce_buffers_offset;
41046
41047- atomic_t idle_count;
41048+ atomic_unchecked_t idle_count;
41049
41050 int page_flipping;
41051 int current_page;
41052 u32 crtc_offset;
41053 u32 crtc_offset_cntl;
41054
41055- atomic_t vbl_received;
41056+ atomic_unchecked_t vbl_received;
41057
41058 u32 color_fmt;
41059 unsigned int front_offset;
41060diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41061index 663f38c..c689495 100644
41062--- a/drivers/gpu/drm/r128/r128_ioc32.c
41063+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41064@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41065 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41066 }
41067
41068-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41069+drm_ioctl_compat_t r128_compat_ioctls[] = {
41070 [DRM_R128_INIT] = compat_r128_init,
41071 [DRM_R128_DEPTH] = compat_r128_depth,
41072 [DRM_R128_STIPPLE] = compat_r128_stipple,
41073@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41074 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41075 {
41076 unsigned int nr = DRM_IOCTL_NR(cmd);
41077- drm_ioctl_compat_t *fn = NULL;
41078 int ret;
41079
41080 if (nr < DRM_COMMAND_BASE)
41081 return drm_compat_ioctl(filp, cmd, arg);
41082
41083- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41084- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41085-
41086- if (fn != NULL)
41087+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41088+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41089 ret = (*fn) (filp, cmd, arg);
41090- else
41091+ } else
41092 ret = drm_ioctl(filp, cmd, arg);
41093
41094 return ret;
41095diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41096index c2ae496..30b5993 100644
41097--- a/drivers/gpu/drm/r128/r128_irq.c
41098+++ b/drivers/gpu/drm/r128/r128_irq.c
41099@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41100 if (crtc != 0)
41101 return 0;
41102
41103- return atomic_read(&dev_priv->vbl_received);
41104+ return atomic_read_unchecked(&dev_priv->vbl_received);
41105 }
41106
41107 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41108@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41109 /* VBLANK interrupt */
41110 if (status & R128_CRTC_VBLANK_INT) {
41111 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41112- atomic_inc(&dev_priv->vbl_received);
41113+ atomic_inc_unchecked(&dev_priv->vbl_received);
41114 drm_handle_vblank(dev, 0);
41115 return IRQ_HANDLED;
41116 }
41117diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41118index 8fd2d9f..18c9660 100644
41119--- a/drivers/gpu/drm/r128/r128_state.c
41120+++ b/drivers/gpu/drm/r128/r128_state.c
41121@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41122
41123 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41124 {
41125- if (atomic_read(&dev_priv->idle_count) == 0)
41126+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41127 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41128 else
41129- atomic_set(&dev_priv->idle_count, 0);
41130+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41131 }
41132
41133 #endif
41134diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41135index b928c17..e5d9400 100644
41136--- a/drivers/gpu/drm/radeon/mkregtable.c
41137+++ b/drivers/gpu/drm/radeon/mkregtable.c
41138@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41139 regex_t mask_rex;
41140 regmatch_t match[4];
41141 char buf[1024];
41142- size_t end;
41143+ long end;
41144 int len;
41145 int done = 0;
41146 int r;
41147 unsigned o;
41148 struct offset *offset;
41149 char last_reg_s[10];
41150- int last_reg;
41151+ unsigned long last_reg;
41152
41153 if (regcomp
41154 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41155diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41156index bd7519f..e1c2cd95 100644
41157--- a/drivers/gpu/drm/radeon/radeon_device.c
41158+++ b/drivers/gpu/drm/radeon/radeon_device.c
41159@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41160 * locking inversion with the driver load path. And the access here is
41161 * completely racy anyway. So don't bother with locking for now.
41162 */
41163- return dev->open_count == 0;
41164+ return local_read(&dev->open_count) == 0;
41165 }
41166
41167 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41168diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41169index 46bd393..6ae4719 100644
41170--- a/drivers/gpu/drm/radeon/radeon_drv.h
41171+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41172@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41173
41174 /* SW interrupt */
41175 wait_queue_head_t swi_queue;
41176- atomic_t swi_emitted;
41177+ atomic_unchecked_t swi_emitted;
41178 int vblank_crtc;
41179 uint32_t irq_enable_reg;
41180 uint32_t r500_disp_irq_reg;
41181diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41182index 0b98ea1..0881827 100644
41183--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41184+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41185@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41186 request = compat_alloc_user_space(sizeof(*request));
41187 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41188 || __put_user(req32.param, &request->param)
41189- || __put_user((void __user *)(unsigned long)req32.value,
41190+ || __put_user((unsigned long)req32.value,
41191 &request->value))
41192 return -EFAULT;
41193
41194@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41195 #define compat_radeon_cp_setparam NULL
41196 #endif /* X86_64 || IA64 */
41197
41198-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41199+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41200 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41201 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41202 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41203@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41204 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41205 {
41206 unsigned int nr = DRM_IOCTL_NR(cmd);
41207- drm_ioctl_compat_t *fn = NULL;
41208 int ret;
41209
41210 if (nr < DRM_COMMAND_BASE)
41211 return drm_compat_ioctl(filp, cmd, arg);
41212
41213- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41214- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41215-
41216- if (fn != NULL)
41217+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41218+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41219 ret = (*fn) (filp, cmd, arg);
41220- else
41221+ } else
41222 ret = drm_ioctl(filp, cmd, arg);
41223
41224 return ret;
41225diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41226index 244b19b..c19226d 100644
41227--- a/drivers/gpu/drm/radeon/radeon_irq.c
41228+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41229@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41230 unsigned int ret;
41231 RING_LOCALS;
41232
41233- atomic_inc(&dev_priv->swi_emitted);
41234- ret = atomic_read(&dev_priv->swi_emitted);
41235+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41236+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41237
41238 BEGIN_RING(4);
41239 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41240@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41241 drm_radeon_private_t *dev_priv =
41242 (drm_radeon_private_t *) dev->dev_private;
41243
41244- atomic_set(&dev_priv->swi_emitted, 0);
41245+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41246 init_waitqueue_head(&dev_priv->swi_queue);
41247
41248 dev->max_vblank_count = 0x001fffff;
41249diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41250index 15aee72..cda326e 100644
41251--- a/drivers/gpu/drm/radeon/radeon_state.c
41252+++ b/drivers/gpu/drm/radeon/radeon_state.c
41253@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41254 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41255 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41256
41257- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41258+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41259 sarea_priv->nbox * sizeof(depth_boxes[0])))
41260 return -EFAULT;
41261
41262@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41263 {
41264 drm_radeon_private_t *dev_priv = dev->dev_private;
41265 drm_radeon_getparam_t *param = data;
41266- int value;
41267+ int value = 0;
41268
41269 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41270
41271diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41272index d02aa1d..ca19e2c 100644
41273--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41274+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41275@@ -959,7 +959,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41276 man->size = size >> PAGE_SHIFT;
41277 }
41278
41279-static struct vm_operations_struct radeon_ttm_vm_ops;
41280+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41281 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41282
41283 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41284@@ -1000,8 +1000,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41285 }
41286 if (unlikely(ttm_vm_ops == NULL)) {
41287 ttm_vm_ops = vma->vm_ops;
41288+ pax_open_kernel();
41289 radeon_ttm_vm_ops = *ttm_vm_ops;
41290 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41291+ pax_close_kernel();
41292 }
41293 vma->vm_ops = &radeon_ttm_vm_ops;
41294 return 0;
41295diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41296index 978993f..e36e50e 100644
41297--- a/drivers/gpu/drm/tegra/dc.c
41298+++ b/drivers/gpu/drm/tegra/dc.c
41299@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41300 }
41301
41302 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41303- dc->debugfs_files[i].data = dc;
41304+ *(void **)&dc->debugfs_files[i].data = dc;
41305
41306 err = drm_debugfs_create_files(dc->debugfs_files,
41307 ARRAY_SIZE(debugfs_files),
41308diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41309index 33f67fd..55ee9761 100644
41310--- a/drivers/gpu/drm/tegra/dsi.c
41311+++ b/drivers/gpu/drm/tegra/dsi.c
41312@@ -39,7 +39,7 @@ struct tegra_dsi {
41313 struct clk *clk_lp;
41314 struct clk *clk;
41315
41316- struct drm_info_list *debugfs_files;
41317+ drm_info_list_no_const *debugfs_files;
41318 struct drm_minor *minor;
41319 struct dentry *debugfs;
41320
41321diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41322index ffe2654..03c7b1c 100644
41323--- a/drivers/gpu/drm/tegra/hdmi.c
41324+++ b/drivers/gpu/drm/tegra/hdmi.c
41325@@ -60,7 +60,7 @@ struct tegra_hdmi {
41326 bool stereo;
41327 bool dvi;
41328
41329- struct drm_info_list *debugfs_files;
41330+ drm_info_list_no_const *debugfs_files;
41331 struct drm_minor *minor;
41332 struct dentry *debugfs;
41333 };
41334diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41335index aa0bd054..aea6a01 100644
41336--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41337+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41338@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41339 }
41340
41341 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41342- ttm_bo_man_init,
41343- ttm_bo_man_takedown,
41344- ttm_bo_man_get_node,
41345- ttm_bo_man_put_node,
41346- ttm_bo_man_debug
41347+ .init = ttm_bo_man_init,
41348+ .takedown = ttm_bo_man_takedown,
41349+ .get_node = ttm_bo_man_get_node,
41350+ .put_node = ttm_bo_man_put_node,
41351+ .debug = ttm_bo_man_debug
41352 };
41353 EXPORT_SYMBOL(ttm_bo_manager_func);
41354diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41355index a1803fb..c53f6b0 100644
41356--- a/drivers/gpu/drm/ttm/ttm_memory.c
41357+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41358@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41359 zone->glob = glob;
41360 glob->zone_kernel = zone;
41361 ret = kobject_init_and_add(
41362- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41363+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41364 if (unlikely(ret != 0)) {
41365 kobject_put(&zone->kobj);
41366 return ret;
41367@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41368 zone->glob = glob;
41369 glob->zone_dma32 = zone;
41370 ret = kobject_init_and_add(
41371- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41372+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41373 if (unlikely(ret != 0)) {
41374 kobject_put(&zone->kobj);
41375 return ret;
41376diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41377index 025c429..314062f 100644
41378--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41379+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41380@@ -54,7 +54,7 @@
41381
41382 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41383 #define SMALL_ALLOCATION 16
41384-#define FREE_ALL_PAGES (~0U)
41385+#define FREE_ALL_PAGES (~0UL)
41386 /* times are in msecs */
41387 #define PAGE_FREE_INTERVAL 1000
41388
41389@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41390 * @free_all: If set to true will free all pages in pool
41391 * @use_static: Safe to use static buffer
41392 **/
41393-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41394+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41395 bool use_static)
41396 {
41397 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41398 unsigned long irq_flags;
41399 struct page *p;
41400 struct page **pages_to_free;
41401- unsigned freed_pages = 0,
41402- npages_to_free = nr_free;
41403+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41404
41405 if (NUM_PAGES_TO_ALLOC < nr_free)
41406 npages_to_free = NUM_PAGES_TO_ALLOC;
41407@@ -371,7 +370,8 @@ restart:
41408 __list_del(&p->lru, &pool->list);
41409
41410 ttm_pool_update_free_locked(pool, freed_pages);
41411- nr_free -= freed_pages;
41412+ if (likely(nr_free != FREE_ALL_PAGES))
41413+ nr_free -= freed_pages;
41414 }
41415
41416 spin_unlock_irqrestore(&pool->lock, irq_flags);
41417@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41418 unsigned i;
41419 unsigned pool_offset;
41420 struct ttm_page_pool *pool;
41421- int shrink_pages = sc->nr_to_scan;
41422+ unsigned long shrink_pages = sc->nr_to_scan;
41423 unsigned long freed = 0;
41424
41425 if (!mutex_trylock(&lock))
41426@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41427 pool_offset = ++start_pool % NUM_POOLS;
41428 /* select start pool in round robin fashion */
41429 for (i = 0; i < NUM_POOLS; ++i) {
41430- unsigned nr_free = shrink_pages;
41431+ unsigned long nr_free = shrink_pages;
41432 if (shrink_pages == 0)
41433 break;
41434 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41435@@ -673,7 +673,7 @@ out:
41436 }
41437
41438 /* Put all pages in pages list to correct pool to wait for reuse */
41439-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41440+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41441 enum ttm_caching_state cstate)
41442 {
41443 unsigned long irq_flags;
41444@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41445 struct list_head plist;
41446 struct page *p = NULL;
41447 gfp_t gfp_flags = GFP_USER;
41448- unsigned count;
41449+ unsigned long count;
41450 int r;
41451
41452 /* set zero flag for page allocation if required */
41453diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41454index 01e1d27..aaa018a 100644
41455--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41456+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41457@@ -56,7 +56,7 @@
41458
41459 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41460 #define SMALL_ALLOCATION 4
41461-#define FREE_ALL_PAGES (~0U)
41462+#define FREE_ALL_PAGES (~0UL)
41463 /* times are in msecs */
41464 #define IS_UNDEFINED (0)
41465 #define IS_WC (1<<1)
41466@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41467 * @nr_free: If set to true will free all pages in pool
41468 * @use_static: Safe to use static buffer
41469 **/
41470-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41471+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41472 bool use_static)
41473 {
41474 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41475@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41476 struct dma_page *dma_p, *tmp;
41477 struct page **pages_to_free;
41478 struct list_head d_pages;
41479- unsigned freed_pages = 0,
41480- npages_to_free = nr_free;
41481+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41482
41483 if (NUM_PAGES_TO_ALLOC < nr_free)
41484 npages_to_free = NUM_PAGES_TO_ALLOC;
41485@@ -499,7 +498,8 @@ restart:
41486 /* remove range of pages from the pool */
41487 if (freed_pages) {
41488 ttm_pool_update_free_locked(pool, freed_pages);
41489- nr_free -= freed_pages;
41490+ if (likely(nr_free != FREE_ALL_PAGES))
41491+ nr_free -= freed_pages;
41492 }
41493
41494 spin_unlock_irqrestore(&pool->lock, irq_flags);
41495@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41496 struct dma_page *d_page, *next;
41497 enum pool_type type;
41498 bool is_cached = false;
41499- unsigned count = 0, i, npages = 0;
41500+ unsigned long count = 0, i, npages = 0;
41501 unsigned long irq_flags;
41502
41503 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41504@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41505 static unsigned start_pool;
41506 unsigned idx = 0;
41507 unsigned pool_offset;
41508- unsigned shrink_pages = sc->nr_to_scan;
41509+ unsigned long shrink_pages = sc->nr_to_scan;
41510 struct device_pools *p;
41511 unsigned long freed = 0;
41512
41513@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41514 goto out;
41515 pool_offset = ++start_pool % _manager->npools;
41516 list_for_each_entry(p, &_manager->pools, pools) {
41517- unsigned nr_free;
41518+ unsigned long nr_free;
41519
41520 if (!p->dev)
41521 continue;
41522@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41523 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41524 freed += nr_free - shrink_pages;
41525
41526- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41527+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41528 p->pool->dev_name, p->pool->name, current->pid,
41529 nr_free, shrink_pages);
41530 }
41531diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41532index 8cbcb45..a4d9cf7 100644
41533--- a/drivers/gpu/drm/udl/udl_fb.c
41534+++ b/drivers/gpu/drm/udl/udl_fb.c
41535@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41536 fb_deferred_io_cleanup(info);
41537 kfree(info->fbdefio);
41538 info->fbdefio = NULL;
41539- info->fbops->fb_mmap = udl_fb_mmap;
41540 }
41541
41542 pr_warn("released /dev/fb%d user=%d count=%d\n",
41543diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41544index ef8c500..01030c8 100644
41545--- a/drivers/gpu/drm/via/via_drv.h
41546+++ b/drivers/gpu/drm/via/via_drv.h
41547@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41548 typedef uint32_t maskarray_t[5];
41549
41550 typedef struct drm_via_irq {
41551- atomic_t irq_received;
41552+ atomic_unchecked_t irq_received;
41553 uint32_t pending_mask;
41554 uint32_t enable_mask;
41555 wait_queue_head_t irq_queue;
41556@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41557 struct timeval last_vblank;
41558 int last_vblank_valid;
41559 unsigned usec_per_vblank;
41560- atomic_t vbl_received;
41561+ atomic_unchecked_t vbl_received;
41562 drm_via_state_t hc_state;
41563 char pci_buf[VIA_PCI_BUF_SIZE];
41564 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41565diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41566index 1319433..a993b0c 100644
41567--- a/drivers/gpu/drm/via/via_irq.c
41568+++ b/drivers/gpu/drm/via/via_irq.c
41569@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41570 if (crtc != 0)
41571 return 0;
41572
41573- return atomic_read(&dev_priv->vbl_received);
41574+ return atomic_read_unchecked(&dev_priv->vbl_received);
41575 }
41576
41577 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41578@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41579
41580 status = VIA_READ(VIA_REG_INTERRUPT);
41581 if (status & VIA_IRQ_VBLANK_PENDING) {
41582- atomic_inc(&dev_priv->vbl_received);
41583- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41584+ atomic_inc_unchecked(&dev_priv->vbl_received);
41585+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41586 do_gettimeofday(&cur_vblank);
41587 if (dev_priv->last_vblank_valid) {
41588 dev_priv->usec_per_vblank =
41589@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41590 dev_priv->last_vblank = cur_vblank;
41591 dev_priv->last_vblank_valid = 1;
41592 }
41593- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41594+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41595 DRM_DEBUG("US per vblank is: %u\n",
41596 dev_priv->usec_per_vblank);
41597 }
41598@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41599
41600 for (i = 0; i < dev_priv->num_irqs; ++i) {
41601 if (status & cur_irq->pending_mask) {
41602- atomic_inc(&cur_irq->irq_received);
41603+ atomic_inc_unchecked(&cur_irq->irq_received);
41604 wake_up(&cur_irq->irq_queue);
41605 handled = 1;
41606 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41607@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41608 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41609 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41610 masks[irq][4]));
41611- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41612+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41613 } else {
41614 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41615 (((cur_irq_sequence =
41616- atomic_read(&cur_irq->irq_received)) -
41617+ atomic_read_unchecked(&cur_irq->irq_received)) -
41618 *sequence) <= (1 << 23)));
41619 }
41620 *sequence = cur_irq_sequence;
41621@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41622 }
41623
41624 for (i = 0; i < dev_priv->num_irqs; ++i) {
41625- atomic_set(&cur_irq->irq_received, 0);
41626+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41627 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41628 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41629 init_waitqueue_head(&cur_irq->irq_queue);
41630@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41631 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41632 case VIA_IRQ_RELATIVE:
41633 irqwait->request.sequence +=
41634- atomic_read(&cur_irq->irq_received);
41635+ atomic_read_unchecked(&cur_irq->irq_received);
41636 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41637 case VIA_IRQ_ABSOLUTE:
41638 break;
41639diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41640index d26a6da..5fa41ed 100644
41641--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41642+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41643@@ -447,7 +447,7 @@ struct vmw_private {
41644 * Fencing and IRQs.
41645 */
41646
41647- atomic_t marker_seq;
41648+ atomic_unchecked_t marker_seq;
41649 wait_queue_head_t fence_queue;
41650 wait_queue_head_t fifo_queue;
41651 spinlock_t waiter_lock;
41652diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41653index 39f2b03..d1b0a64 100644
41654--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41655+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41656@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41657 (unsigned int) min,
41658 (unsigned int) fifo->capabilities);
41659
41660- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41661+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41662 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41663 vmw_marker_queue_init(&fifo->marker_queue);
41664 return vmw_fifo_send_fence(dev_priv, &dummy);
41665@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41666 if (reserveable)
41667 iowrite32(bytes, fifo_mem +
41668 SVGA_FIFO_RESERVED);
41669- return fifo_mem + (next_cmd >> 2);
41670+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41671 } else {
41672 need_bounce = true;
41673 }
41674@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41675
41676 fm = vmw_fifo_reserve(dev_priv, bytes);
41677 if (unlikely(fm == NULL)) {
41678- *seqno = atomic_read(&dev_priv->marker_seq);
41679+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41680 ret = -ENOMEM;
41681 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41682 false, 3*HZ);
41683@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41684 }
41685
41686 do {
41687- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41688+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41689 } while (*seqno == 0);
41690
41691 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41692diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41693index 170b61b..fec7348 100644
41694--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41695+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41696@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41697 }
41698
41699 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41700- vmw_gmrid_man_init,
41701- vmw_gmrid_man_takedown,
41702- vmw_gmrid_man_get_node,
41703- vmw_gmrid_man_put_node,
41704- vmw_gmrid_man_debug
41705+ .init = vmw_gmrid_man_init,
41706+ .takedown = vmw_gmrid_man_takedown,
41707+ .get_node = vmw_gmrid_man_get_node,
41708+ .put_node = vmw_gmrid_man_put_node,
41709+ .debug = vmw_gmrid_man_debug
41710 };
41711diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41712index 69c8ce2..cacb0ab 100644
41713--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41714+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41715@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41716 int ret;
41717
41718 num_clips = arg->num_clips;
41719- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41720+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41721
41722 if (unlikely(num_clips == 0))
41723 return 0;
41724@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41725 int ret;
41726
41727 num_clips = arg->num_clips;
41728- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41729+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41730
41731 if (unlikely(num_clips == 0))
41732 return 0;
41733diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41734index 9fe9827..0aa2fc0 100644
41735--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41736+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41737@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41738 * emitted. Then the fence is stale and signaled.
41739 */
41740
41741- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41742+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41743 > VMW_FENCE_WRAP);
41744
41745 return ret;
41746@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41747
41748 if (fifo_idle)
41749 down_read(&fifo_state->rwsem);
41750- signal_seq = atomic_read(&dev_priv->marker_seq);
41751+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41752 ret = 0;
41753
41754 for (;;) {
41755diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41756index efd1ffd..0ae13ca 100644
41757--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41758+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41759@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41760 while (!vmw_lag_lt(queue, us)) {
41761 spin_lock(&queue->lock);
41762 if (list_empty(&queue->head))
41763- seqno = atomic_read(&dev_priv->marker_seq);
41764+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41765 else {
41766 marker = list_first_entry(&queue->head,
41767 struct vmw_marker, head);
41768diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41769index 37ac7b5..d52a5c9 100644
41770--- a/drivers/gpu/vga/vga_switcheroo.c
41771+++ b/drivers/gpu/vga/vga_switcheroo.c
41772@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41773
41774 /* this version is for the case where the power switch is separate
41775 to the device being powered down. */
41776-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41777+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41778 {
41779 /* copy over all the bus versions */
41780 if (dev->bus && dev->bus->pm) {
41781@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41782 return ret;
41783 }
41784
41785-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41786+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41787 {
41788 /* copy over all the bus versions */
41789 if (dev->bus && dev->bus->pm) {
41790diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41791index 8b63879..a5a5e72 100644
41792--- a/drivers/hid/hid-core.c
41793+++ b/drivers/hid/hid-core.c
41794@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41795
41796 int hid_add_device(struct hid_device *hdev)
41797 {
41798- static atomic_t id = ATOMIC_INIT(0);
41799+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41800 int ret;
41801
41802 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41803@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41804 /* XXX hack, any other cleaner solution after the driver core
41805 * is converted to allow more than 20 bytes as the device name? */
41806 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41807- hdev->vendor, hdev->product, atomic_inc_return(&id));
41808+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41809
41810 hid_debug_register(hdev, dev_name(&hdev->dev));
41811 ret = device_add(&hdev->dev);
41812diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41813index 5bc6d80..e47b55a 100644
41814--- a/drivers/hid/hid-logitech-dj.c
41815+++ b/drivers/hid/hid-logitech-dj.c
41816@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41817 * case we forward it to the correct hid device (via hid_input_report()
41818 * ) and return 1 so hid-core does not anything else with it.
41819 */
41820+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41821+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41822+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41823+ __func__, dj_report->device_index);
41824+ return false;
41825+ }
41826
41827 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41828 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41829diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41830index c13fb5b..55a3802 100644
41831--- a/drivers/hid/hid-wiimote-debug.c
41832+++ b/drivers/hid/hid-wiimote-debug.c
41833@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41834 else if (size == 0)
41835 return -EIO;
41836
41837- if (copy_to_user(u, buf, size))
41838+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41839 return -EFAULT;
41840
41841 *off += size;
41842diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41843index 433f72a..2926005 100644
41844--- a/drivers/hv/channel.c
41845+++ b/drivers/hv/channel.c
41846@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41847 unsigned long flags;
41848 int ret = 0;
41849
41850- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41851- atomic_inc(&vmbus_connection.next_gpadl_handle);
41852+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41853+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41854
41855 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41856 if (ret)
41857diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41858index 3e4235c..877d0e5 100644
41859--- a/drivers/hv/hv.c
41860+++ b/drivers/hv/hv.c
41861@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41862 u64 output_address = (output) ? virt_to_phys(output) : 0;
41863 u32 output_address_hi = output_address >> 32;
41864 u32 output_address_lo = output_address & 0xFFFFFFFF;
41865- void *hypercall_page = hv_context.hypercall_page;
41866+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41867
41868 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41869 "=a"(hv_status_lo) : "d" (control_hi),
41870@@ -156,7 +156,7 @@ int hv_init(void)
41871 /* See if the hypercall page is already set */
41872 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
41873
41874- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
41875+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
41876
41877 if (!virtaddr)
41878 goto cleanup;
41879diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41880index b958ded..b2452bb 100644
41881--- a/drivers/hv/hv_balloon.c
41882+++ b/drivers/hv/hv_balloon.c
41883@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41884
41885 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41886 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41887-static atomic_t trans_id = ATOMIC_INIT(0);
41888+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41889
41890 static int dm_ring_size = (5 * PAGE_SIZE);
41891
41892@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
41893 pr_info("Memory hot add failed\n");
41894
41895 dm->state = DM_INITIALIZED;
41896- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41897+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41898 vmbus_sendpacket(dm->dev->channel, &resp,
41899 sizeof(struct dm_hot_add_response),
41900 (unsigned long)NULL,
41901@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
41902 memset(&status, 0, sizeof(struct dm_status));
41903 status.hdr.type = DM_STATUS_REPORT;
41904 status.hdr.size = sizeof(struct dm_status);
41905- status.hdr.trans_id = atomic_inc_return(&trans_id);
41906+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41907
41908 /*
41909 * The host expects the guest to report free memory.
41910@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
41911 * send the status. This can happen if we were interrupted
41912 * after we picked our transaction ID.
41913 */
41914- if (status.hdr.trans_id != atomic_read(&trans_id))
41915+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41916 return;
41917
41918 /*
41919@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
41920 */
41921
41922 do {
41923- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41924+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41925 ret = vmbus_sendpacket(dm_device.dev->channel,
41926 bl_resp,
41927 bl_resp->hdr.size,
41928@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41929
41930 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41931 resp.hdr.type = DM_UNBALLOON_RESPONSE;
41932- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41933+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41934 resp.hdr.size = sizeof(struct dm_unballoon_response);
41935
41936 vmbus_sendpacket(dm_device.dev->channel, &resp,
41937@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
41938 memset(&version_req, 0, sizeof(struct dm_version_request));
41939 version_req.hdr.type = DM_VERSION_REQUEST;
41940 version_req.hdr.size = sizeof(struct dm_version_request);
41941- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41942+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41943 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
41944 version_req.is_last_attempt = 1;
41945
41946@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
41947 memset(&version_req, 0, sizeof(struct dm_version_request));
41948 version_req.hdr.type = DM_VERSION_REQUEST;
41949 version_req.hdr.size = sizeof(struct dm_version_request);
41950- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41951+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41952 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
41953 version_req.is_last_attempt = 0;
41954
41955@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
41956 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
41957 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
41958 cap_msg.hdr.size = sizeof(struct dm_capabilities);
41959- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
41960+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41961
41962 cap_msg.caps.cap_bits.balloon = 1;
41963 cap_msg.caps.cap_bits.hot_add = 1;
41964diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
41965index c386d8d..d6004c4 100644
41966--- a/drivers/hv/hyperv_vmbus.h
41967+++ b/drivers/hv/hyperv_vmbus.h
41968@@ -611,7 +611,7 @@ enum vmbus_connect_state {
41969 struct vmbus_connection {
41970 enum vmbus_connect_state conn_state;
41971
41972- atomic_t next_gpadl_handle;
41973+ atomic_unchecked_t next_gpadl_handle;
41974
41975 /*
41976 * Represents channel interrupts. Each bit position represents a
41977diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
41978index 4d6b269..2e23b86 100644
41979--- a/drivers/hv/vmbus_drv.c
41980+++ b/drivers/hv/vmbus_drv.c
41981@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
41982 {
41983 int ret = 0;
41984
41985- static atomic_t device_num = ATOMIC_INIT(0);
41986+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41987
41988 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
41989- atomic_inc_return(&device_num));
41990+ atomic_inc_return_unchecked(&device_num));
41991
41992 child_device_obj->device.bus = &hv_bus;
41993 child_device_obj->device.parent = &hv_acpi_dev->dev;
41994diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41995index 579bdf9..75118b5 100644
41996--- a/drivers/hwmon/acpi_power_meter.c
41997+++ b/drivers/hwmon/acpi_power_meter.c
41998@@ -116,7 +116,7 @@ struct sensor_template {
41999 struct device_attribute *devattr,
42000 const char *buf, size_t count);
42001 int index;
42002-};
42003+} __do_const;
42004
42005 /* Averaging interval */
42006 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42007@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42008 struct sensor_template *attrs)
42009 {
42010 struct device *dev = &resource->acpi_dev->dev;
42011- struct sensor_device_attribute *sensors =
42012+ sensor_device_attribute_no_const *sensors =
42013 &resource->sensors[resource->num_sensors];
42014 int res = 0;
42015
42016diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42017index 0af63da..05a183a 100644
42018--- a/drivers/hwmon/applesmc.c
42019+++ b/drivers/hwmon/applesmc.c
42020@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42021 {
42022 struct applesmc_node_group *grp;
42023 struct applesmc_dev_attr *node;
42024- struct attribute *attr;
42025+ attribute_no_const *attr;
42026 int ret, i;
42027
42028 for (grp = groups; grp->format; grp++) {
42029diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42030index cccef87..06ce8ec 100644
42031--- a/drivers/hwmon/asus_atk0110.c
42032+++ b/drivers/hwmon/asus_atk0110.c
42033@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42034 struct atk_sensor_data {
42035 struct list_head list;
42036 struct atk_data *data;
42037- struct device_attribute label_attr;
42038- struct device_attribute input_attr;
42039- struct device_attribute limit1_attr;
42040- struct device_attribute limit2_attr;
42041+ device_attribute_no_const label_attr;
42042+ device_attribute_no_const input_attr;
42043+ device_attribute_no_const limit1_attr;
42044+ device_attribute_no_const limit2_attr;
42045 char label_attr_name[ATTR_NAME_SIZE];
42046 char input_attr_name[ATTR_NAME_SIZE];
42047 char limit1_attr_name[ATTR_NAME_SIZE];
42048@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42049 static struct device_attribute atk_name_attr =
42050 __ATTR(name, 0444, atk_name_show, NULL);
42051
42052-static void atk_init_attribute(struct device_attribute *attr, char *name,
42053+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42054 sysfs_show_func show)
42055 {
42056 sysfs_attr_init(&attr->attr);
42057diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42058index 5b7fec8..05c957a 100644
42059--- a/drivers/hwmon/coretemp.c
42060+++ b/drivers/hwmon/coretemp.c
42061@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42062 return NOTIFY_OK;
42063 }
42064
42065-static struct notifier_block coretemp_cpu_notifier __refdata = {
42066+static struct notifier_block coretemp_cpu_notifier = {
42067 .notifier_call = coretemp_cpu_callback,
42068 };
42069
42070diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42071index 7a8a6fb..015c1fd 100644
42072--- a/drivers/hwmon/ibmaem.c
42073+++ b/drivers/hwmon/ibmaem.c
42074@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42075 struct aem_rw_sensor_template *rw)
42076 {
42077 struct device *dev = &data->pdev->dev;
42078- struct sensor_device_attribute *sensors = data->sensors;
42079+ sensor_device_attribute_no_const *sensors = data->sensors;
42080 int err;
42081
42082 /* Set up read-only sensors */
42083diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42084index 17ae2eb..21b71dd 100644
42085--- a/drivers/hwmon/iio_hwmon.c
42086+++ b/drivers/hwmon/iio_hwmon.c
42087@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42088 {
42089 struct device *dev = &pdev->dev;
42090 struct iio_hwmon_state *st;
42091- struct sensor_device_attribute *a;
42092+ sensor_device_attribute_no_const *a;
42093 int ret, i;
42094 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42095 enum iio_chan_type type;
42096diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42097index f3830db..9f4d6d5 100644
42098--- a/drivers/hwmon/nct6683.c
42099+++ b/drivers/hwmon/nct6683.c
42100@@ -397,11 +397,11 @@ static struct attribute_group *
42101 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42102 int repeat)
42103 {
42104- struct sensor_device_attribute_2 *a2;
42105- struct sensor_device_attribute *a;
42106+ sensor_device_attribute_2_no_const *a2;
42107+ sensor_device_attribute_no_const *a;
42108 struct sensor_device_template **t;
42109 struct sensor_device_attr_u *su;
42110- struct attribute_group *group;
42111+ attribute_group_no_const *group;
42112 struct attribute **attrs;
42113 int i, j, count;
42114
42115diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42116index 1be4117..88ae1e1 100644
42117--- a/drivers/hwmon/nct6775.c
42118+++ b/drivers/hwmon/nct6775.c
42119@@ -952,10 +952,10 @@ static struct attribute_group *
42120 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42121 int repeat)
42122 {
42123- struct attribute_group *group;
42124+ attribute_group_no_const *group;
42125 struct sensor_device_attr_u *su;
42126- struct sensor_device_attribute *a;
42127- struct sensor_device_attribute_2 *a2;
42128+ sensor_device_attribute_no_const *a;
42129+ sensor_device_attribute_2_no_const *a2;
42130 struct attribute **attrs;
42131 struct sensor_device_template **t;
42132 int i, count;
42133diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42134index f2e47c7..45d7941 100644
42135--- a/drivers/hwmon/pmbus/pmbus_core.c
42136+++ b/drivers/hwmon/pmbus/pmbus_core.c
42137@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42138 return 0;
42139 }
42140
42141-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42142+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42143 const char *name,
42144 umode_t mode,
42145 ssize_t (*show)(struct device *dev,
42146@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42147 dev_attr->store = store;
42148 }
42149
42150-static void pmbus_attr_init(struct sensor_device_attribute *a,
42151+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42152 const char *name,
42153 umode_t mode,
42154 ssize_t (*show)(struct device *dev,
42155@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42156 u16 reg, u8 mask)
42157 {
42158 struct pmbus_boolean *boolean;
42159- struct sensor_device_attribute *a;
42160+ sensor_device_attribute_no_const *a;
42161
42162 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42163 if (!boolean)
42164@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42165 bool update, bool readonly)
42166 {
42167 struct pmbus_sensor *sensor;
42168- struct device_attribute *a;
42169+ device_attribute_no_const *a;
42170
42171 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42172 if (!sensor)
42173@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42174 const char *lstring, int index)
42175 {
42176 struct pmbus_label *label;
42177- struct device_attribute *a;
42178+ device_attribute_no_const *a;
42179
42180 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42181 if (!label)
42182diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42183index d4f0935..7420593 100644
42184--- a/drivers/hwmon/sht15.c
42185+++ b/drivers/hwmon/sht15.c
42186@@ -169,7 +169,7 @@ struct sht15_data {
42187 int supply_uv;
42188 bool supply_uv_valid;
42189 struct work_struct update_supply_work;
42190- atomic_t interrupt_handled;
42191+ atomic_unchecked_t interrupt_handled;
42192 };
42193
42194 /**
42195@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42196 ret = gpio_direction_input(data->pdata->gpio_data);
42197 if (ret)
42198 return ret;
42199- atomic_set(&data->interrupt_handled, 0);
42200+ atomic_set_unchecked(&data->interrupt_handled, 0);
42201
42202 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42203 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42204 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42205 /* Only relevant if the interrupt hasn't occurred. */
42206- if (!atomic_read(&data->interrupt_handled))
42207+ if (!atomic_read_unchecked(&data->interrupt_handled))
42208 schedule_work(&data->read_work);
42209 }
42210 ret = wait_event_timeout(data->wait_queue,
42211@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42212
42213 /* First disable the interrupt */
42214 disable_irq_nosync(irq);
42215- atomic_inc(&data->interrupt_handled);
42216+ atomic_inc_unchecked(&data->interrupt_handled);
42217 /* Then schedule a reading work struct */
42218 if (data->state != SHT15_READING_NOTHING)
42219 schedule_work(&data->read_work);
42220@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42221 * If not, then start the interrupt again - care here as could
42222 * have gone low in meantime so verify it hasn't!
42223 */
42224- atomic_set(&data->interrupt_handled, 0);
42225+ atomic_set_unchecked(&data->interrupt_handled, 0);
42226 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42227 /* If still not occurred or another handler was scheduled */
42228 if (gpio_get_value(data->pdata->gpio_data)
42229- || atomic_read(&data->interrupt_handled))
42230+ || atomic_read_unchecked(&data->interrupt_handled))
42231 return;
42232 }
42233
42234diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42235index ac91c07..8e69663 100644
42236--- a/drivers/hwmon/via-cputemp.c
42237+++ b/drivers/hwmon/via-cputemp.c
42238@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42239 return NOTIFY_OK;
42240 }
42241
42242-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42243+static struct notifier_block via_cputemp_cpu_notifier = {
42244 .notifier_call = via_cputemp_cpu_callback,
42245 };
42246
42247diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42248index 65e3240..e6c511d 100644
42249--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42250+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42251@@ -39,7 +39,7 @@
42252 extern struct i2c_adapter amd756_smbus;
42253
42254 static struct i2c_adapter *s4882_adapter;
42255-static struct i2c_algorithm *s4882_algo;
42256+static i2c_algorithm_no_const *s4882_algo;
42257
42258 /* Wrapper access functions for multiplexed SMBus */
42259 static DEFINE_MUTEX(amd756_lock);
42260diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42261index b19a310..d6eece0 100644
42262--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42263+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42264@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42265 /* usb layer */
42266
42267 /* Send command to device, and get response. */
42268-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42269+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42270 {
42271 int ret = 0;
42272 int actual;
42273diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42274index 88eda09..cf40434 100644
42275--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42276+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42277@@ -37,7 +37,7 @@
42278 extern struct i2c_adapter *nforce2_smbus;
42279
42280 static struct i2c_adapter *s4985_adapter;
42281-static struct i2c_algorithm *s4985_algo;
42282+static i2c_algorithm_no_const *s4985_algo;
42283
42284 /* Wrapper access functions for multiplexed SMBus */
42285 static DEFINE_MUTEX(nforce2_lock);
42286diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42287index 71c7a39..71dd3e0 100644
42288--- a/drivers/i2c/i2c-dev.c
42289+++ b/drivers/i2c/i2c-dev.c
42290@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42291 break;
42292 }
42293
42294- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42295+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42296 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42297 if (IS_ERR(rdwr_pa[i].buf)) {
42298 res = PTR_ERR(rdwr_pa[i].buf);
42299diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42300index 0b510ba..4fbb5085 100644
42301--- a/drivers/ide/ide-cd.c
42302+++ b/drivers/ide/ide-cd.c
42303@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42304 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42305 if ((unsigned long)buf & alignment
42306 || blk_rq_bytes(rq) & q->dma_pad_mask
42307- || object_is_on_stack(buf))
42308+ || object_starts_on_stack(buf))
42309 drive->dma = 0;
42310 }
42311 }
42312diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42313index af3e76d..96dfe5e 100644
42314--- a/drivers/iio/industrialio-core.c
42315+++ b/drivers/iio/industrialio-core.c
42316@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42317 }
42318
42319 static
42320-int __iio_device_attr_init(struct device_attribute *dev_attr,
42321+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42322 const char *postfix,
42323 struct iio_chan_spec const *chan,
42324 ssize_t (*readfunc)(struct device *dev,
42325diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42326index e28a494..f7c2671 100644
42327--- a/drivers/infiniband/core/cm.c
42328+++ b/drivers/infiniband/core/cm.c
42329@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42330
42331 struct cm_counter_group {
42332 struct kobject obj;
42333- atomic_long_t counter[CM_ATTR_COUNT];
42334+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42335 };
42336
42337 struct cm_counter_attribute {
42338@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42339 struct ib_mad_send_buf *msg = NULL;
42340 int ret;
42341
42342- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42343+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42344 counter[CM_REQ_COUNTER]);
42345
42346 /* Quick state check to discard duplicate REQs. */
42347@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42348 if (!cm_id_priv)
42349 return;
42350
42351- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42352+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42353 counter[CM_REP_COUNTER]);
42354 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42355 if (ret)
42356@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42357 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42358 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42359 spin_unlock_irq(&cm_id_priv->lock);
42360- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42361+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42362 counter[CM_RTU_COUNTER]);
42363 goto out;
42364 }
42365@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42366 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42367 dreq_msg->local_comm_id);
42368 if (!cm_id_priv) {
42369- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42370+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42371 counter[CM_DREQ_COUNTER]);
42372 cm_issue_drep(work->port, work->mad_recv_wc);
42373 return -EINVAL;
42374@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42375 case IB_CM_MRA_REP_RCVD:
42376 break;
42377 case IB_CM_TIMEWAIT:
42378- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42379+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42380 counter[CM_DREQ_COUNTER]);
42381 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42382 goto unlock;
42383@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42384 cm_free_msg(msg);
42385 goto deref;
42386 case IB_CM_DREQ_RCVD:
42387- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42388+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42389 counter[CM_DREQ_COUNTER]);
42390 goto unlock;
42391 default:
42392@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42393 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42394 cm_id_priv->msg, timeout)) {
42395 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42396- atomic_long_inc(&work->port->
42397+ atomic_long_inc_unchecked(&work->port->
42398 counter_group[CM_RECV_DUPLICATES].
42399 counter[CM_MRA_COUNTER]);
42400 goto out;
42401@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42402 break;
42403 case IB_CM_MRA_REQ_RCVD:
42404 case IB_CM_MRA_REP_RCVD:
42405- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42406+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42407 counter[CM_MRA_COUNTER]);
42408 /* fall through */
42409 default:
42410@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42411 case IB_CM_LAP_IDLE:
42412 break;
42413 case IB_CM_MRA_LAP_SENT:
42414- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42415+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42416 counter[CM_LAP_COUNTER]);
42417 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42418 goto unlock;
42419@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42420 cm_free_msg(msg);
42421 goto deref;
42422 case IB_CM_LAP_RCVD:
42423- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42424+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42425 counter[CM_LAP_COUNTER]);
42426 goto unlock;
42427 default:
42428@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42429 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42430 if (cur_cm_id_priv) {
42431 spin_unlock_irq(&cm.lock);
42432- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42433+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42434 counter[CM_SIDR_REQ_COUNTER]);
42435 goto out; /* Duplicate message. */
42436 }
42437@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42438 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42439 msg->retries = 1;
42440
42441- atomic_long_add(1 + msg->retries,
42442+ atomic_long_add_unchecked(1 + msg->retries,
42443 &port->counter_group[CM_XMIT].counter[attr_index]);
42444 if (msg->retries)
42445- atomic_long_add(msg->retries,
42446+ atomic_long_add_unchecked(msg->retries,
42447 &port->counter_group[CM_XMIT_RETRIES].
42448 counter[attr_index]);
42449
42450@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42451 }
42452
42453 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42454- atomic_long_inc(&port->counter_group[CM_RECV].
42455+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42456 counter[attr_id - CM_ATTR_ID_OFFSET]);
42457
42458 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42459@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42460 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42461
42462 return sprintf(buf, "%ld\n",
42463- atomic_long_read(&group->counter[cm_attr->index]));
42464+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42465 }
42466
42467 static const struct sysfs_ops cm_counter_ops = {
42468diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42469index 9f5ad7c..588cd84 100644
42470--- a/drivers/infiniband/core/fmr_pool.c
42471+++ b/drivers/infiniband/core/fmr_pool.c
42472@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42473
42474 struct task_struct *thread;
42475
42476- atomic_t req_ser;
42477- atomic_t flush_ser;
42478+ atomic_unchecked_t req_ser;
42479+ atomic_unchecked_t flush_ser;
42480
42481 wait_queue_head_t force_wait;
42482 };
42483@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42484 struct ib_fmr_pool *pool = pool_ptr;
42485
42486 do {
42487- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42488+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42489 ib_fmr_batch_release(pool);
42490
42491- atomic_inc(&pool->flush_ser);
42492+ atomic_inc_unchecked(&pool->flush_ser);
42493 wake_up_interruptible(&pool->force_wait);
42494
42495 if (pool->flush_function)
42496@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42497 }
42498
42499 set_current_state(TASK_INTERRUPTIBLE);
42500- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42501+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42502 !kthread_should_stop())
42503 schedule();
42504 __set_current_state(TASK_RUNNING);
42505@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42506 pool->dirty_watermark = params->dirty_watermark;
42507 pool->dirty_len = 0;
42508 spin_lock_init(&pool->pool_lock);
42509- atomic_set(&pool->req_ser, 0);
42510- atomic_set(&pool->flush_ser, 0);
42511+ atomic_set_unchecked(&pool->req_ser, 0);
42512+ atomic_set_unchecked(&pool->flush_ser, 0);
42513 init_waitqueue_head(&pool->force_wait);
42514
42515 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42516@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42517 }
42518 spin_unlock_irq(&pool->pool_lock);
42519
42520- serial = atomic_inc_return(&pool->req_ser);
42521+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42522 wake_up_process(pool->thread);
42523
42524 if (wait_event_interruptible(pool->force_wait,
42525- atomic_read(&pool->flush_ser) - serial >= 0))
42526+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42527 return -EINTR;
42528
42529 return 0;
42530@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42531 } else {
42532 list_add_tail(&fmr->list, &pool->dirty_list);
42533 if (++pool->dirty_len >= pool->dirty_watermark) {
42534- atomic_inc(&pool->req_ser);
42535+ atomic_inc_unchecked(&pool->req_ser);
42536 wake_up_process(pool->thread);
42537 }
42538 }
42539diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
42540index aec7a6a..8c014b5 100644
42541--- a/drivers/infiniband/core/umem.c
42542+++ b/drivers/infiniband/core/umem.c
42543@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
42544 if (dmasync)
42545 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
42546
42547+ /*
42548+ * If the combination of the addr and size requested for this memory
42549+ * region causes an integer overflow, return error.
42550+ */
42551+ if ((PAGE_ALIGN(addr + size) <= size) ||
42552+ (PAGE_ALIGN(addr + size) <= addr))
42553+ return ERR_PTR(-EINVAL);
42554+
42555 if (!can_do_mlock())
42556 return ERR_PTR(-EPERM);
42557
42558diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42559index cb43c22..2e12dd7 100644
42560--- a/drivers/infiniband/hw/cxgb4/mem.c
42561+++ b/drivers/infiniband/hw/cxgb4/mem.c
42562@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42563 int err;
42564 struct fw_ri_tpte tpt;
42565 u32 stag_idx;
42566- static atomic_t key;
42567+ static atomic_unchecked_t key;
42568
42569 if (c4iw_fatal_error(rdev))
42570 return -EIO;
42571@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42572 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42573 rdev->stats.stag.max = rdev->stats.stag.cur;
42574 mutex_unlock(&rdev->stats.lock);
42575- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42576+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42577 }
42578 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42579 __func__, stag_state, type, pdid, stag_idx);
42580diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42581index 79b3dbc..96e5fcc 100644
42582--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42583+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42584@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42585 struct ib_atomic_eth *ateth;
42586 struct ipath_ack_entry *e;
42587 u64 vaddr;
42588- atomic64_t *maddr;
42589+ atomic64_unchecked_t *maddr;
42590 u64 sdata;
42591 u32 rkey;
42592 u8 next;
42593@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42594 IB_ACCESS_REMOTE_ATOMIC)))
42595 goto nack_acc_unlck;
42596 /* Perform atomic OP and save result. */
42597- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42598+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42599 sdata = be64_to_cpu(ateth->swap_data);
42600 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42601 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42602- (u64) atomic64_add_return(sdata, maddr) - sdata :
42603+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42604 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42605 be64_to_cpu(ateth->compare_data),
42606 sdata);
42607diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42608index 1f95bba..9530f87 100644
42609--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42610+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42611@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42612 unsigned long flags;
42613 struct ib_wc wc;
42614 u64 sdata;
42615- atomic64_t *maddr;
42616+ atomic64_unchecked_t *maddr;
42617 enum ib_wc_status send_status;
42618
42619 /*
42620@@ -382,11 +382,11 @@ again:
42621 IB_ACCESS_REMOTE_ATOMIC)))
42622 goto acc_err;
42623 /* Perform atomic OP and save result. */
42624- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42625+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42626 sdata = wqe->wr.wr.atomic.compare_add;
42627 *(u64 *) sqp->s_sge.sge.vaddr =
42628 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42629- (u64) atomic64_add_return(sdata, maddr) - sdata :
42630+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42631 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42632 sdata, wqe->wr.wr.atomic.swap);
42633 goto send_comp;
42634diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42635index 82a7dd8..8fb6ba6 100644
42636--- a/drivers/infiniband/hw/mlx4/mad.c
42637+++ b/drivers/infiniband/hw/mlx4/mad.c
42638@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42639
42640 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42641 {
42642- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42643+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42644 cpu_to_be64(0xff00000000000000LL);
42645 }
42646
42647diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42648index ed327e6..ca1739e0 100644
42649--- a/drivers/infiniband/hw/mlx4/mcg.c
42650+++ b/drivers/infiniband/hw/mlx4/mcg.c
42651@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42652 {
42653 char name[20];
42654
42655- atomic_set(&ctx->tid, 0);
42656+ atomic_set_unchecked(&ctx->tid, 0);
42657 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42658 ctx->mcg_wq = create_singlethread_workqueue(name);
42659 if (!ctx->mcg_wq)
42660diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42661index 6eb743f..a7b0f6d 100644
42662--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42663+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42664@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42665 struct list_head mcg_mgid0_list;
42666 struct workqueue_struct *mcg_wq;
42667 struct mlx4_ib_demux_pv_ctx **tun;
42668- atomic_t tid;
42669+ atomic_unchecked_t tid;
42670 int flushing; /* flushing the work queue */
42671 };
42672
42673diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42674index 9d3e5c1..6f166df 100644
42675--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42676+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42677@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42678 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42679 }
42680
42681-int mthca_QUERY_FW(struct mthca_dev *dev)
42682+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42683 {
42684 struct mthca_mailbox *mailbox;
42685 u32 *outbox;
42686@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42687 CMD_TIME_CLASS_B);
42688 }
42689
42690-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42691+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42692 int num_mtt)
42693 {
42694 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42695@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42696 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42697 }
42698
42699-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42700+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42701 int eq_num)
42702 {
42703 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42704@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42705 CMD_TIME_CLASS_B);
42706 }
42707
42708-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42709+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42710 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42711 void *in_mad, void *response_mad)
42712 {
42713diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42714index ded76c1..0cf0a08 100644
42715--- a/drivers/infiniband/hw/mthca/mthca_main.c
42716+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42717@@ -692,7 +692,7 @@ err_close:
42718 return err;
42719 }
42720
42721-static int mthca_setup_hca(struct mthca_dev *dev)
42722+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42723 {
42724 int err;
42725
42726diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42727index ed9a989..6aa5dc2 100644
42728--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42729+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42730@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42731 * through the bitmaps)
42732 */
42733
42734-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42735+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42736 {
42737 int o;
42738 int m;
42739@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42740 return key;
42741 }
42742
42743-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42744+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42745 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42746 {
42747 struct mthca_mailbox *mailbox;
42748@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42749 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42750 }
42751
42752-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42753+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42754 u64 *buffer_list, int buffer_size_shift,
42755 int list_len, u64 iova, u64 total_size,
42756 u32 access, struct mthca_mr *mr)
42757diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42758index 415f8e1..e34214e 100644
42759--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42760+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42761@@ -764,7 +764,7 @@ unlock:
42762 return 0;
42763 }
42764
42765-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42766+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42767 {
42768 struct mthca_dev *dev = to_mdev(ibcq->device);
42769 struct mthca_cq *cq = to_mcq(ibcq);
42770diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42771index 3b2a6dc..bce26ff 100644
42772--- a/drivers/infiniband/hw/nes/nes.c
42773+++ b/drivers/infiniband/hw/nes/nes.c
42774@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42775 LIST_HEAD(nes_adapter_list);
42776 static LIST_HEAD(nes_dev_list);
42777
42778-atomic_t qps_destroyed;
42779+atomic_unchecked_t qps_destroyed;
42780
42781 static unsigned int ee_flsh_adapter;
42782 static unsigned int sysfs_nonidx_addr;
42783@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42784 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42785 struct nes_adapter *nesadapter = nesdev->nesadapter;
42786
42787- atomic_inc(&qps_destroyed);
42788+ atomic_inc_unchecked(&qps_destroyed);
42789
42790 /* Free the control structures */
42791
42792diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42793index bd9d132..70d84f4 100644
42794--- a/drivers/infiniband/hw/nes/nes.h
42795+++ b/drivers/infiniband/hw/nes/nes.h
42796@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42797 extern unsigned int wqm_quanta;
42798 extern struct list_head nes_adapter_list;
42799
42800-extern atomic_t cm_connects;
42801-extern atomic_t cm_accepts;
42802-extern atomic_t cm_disconnects;
42803-extern atomic_t cm_closes;
42804-extern atomic_t cm_connecteds;
42805-extern atomic_t cm_connect_reqs;
42806-extern atomic_t cm_rejects;
42807-extern atomic_t mod_qp_timouts;
42808-extern atomic_t qps_created;
42809-extern atomic_t qps_destroyed;
42810-extern atomic_t sw_qps_destroyed;
42811+extern atomic_unchecked_t cm_connects;
42812+extern atomic_unchecked_t cm_accepts;
42813+extern atomic_unchecked_t cm_disconnects;
42814+extern atomic_unchecked_t cm_closes;
42815+extern atomic_unchecked_t cm_connecteds;
42816+extern atomic_unchecked_t cm_connect_reqs;
42817+extern atomic_unchecked_t cm_rejects;
42818+extern atomic_unchecked_t mod_qp_timouts;
42819+extern atomic_unchecked_t qps_created;
42820+extern atomic_unchecked_t qps_destroyed;
42821+extern atomic_unchecked_t sw_qps_destroyed;
42822 extern u32 mh_detected;
42823 extern u32 mh_pauses_sent;
42824 extern u32 cm_packets_sent;
42825@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42826 extern u32 cm_packets_received;
42827 extern u32 cm_packets_dropped;
42828 extern u32 cm_packets_retrans;
42829-extern atomic_t cm_listens_created;
42830-extern atomic_t cm_listens_destroyed;
42831+extern atomic_unchecked_t cm_listens_created;
42832+extern atomic_unchecked_t cm_listens_destroyed;
42833 extern u32 cm_backlog_drops;
42834-extern atomic_t cm_loopbacks;
42835-extern atomic_t cm_nodes_created;
42836-extern atomic_t cm_nodes_destroyed;
42837-extern atomic_t cm_accel_dropped_pkts;
42838-extern atomic_t cm_resets_recvd;
42839-extern atomic_t pau_qps_created;
42840-extern atomic_t pau_qps_destroyed;
42841+extern atomic_unchecked_t cm_loopbacks;
42842+extern atomic_unchecked_t cm_nodes_created;
42843+extern atomic_unchecked_t cm_nodes_destroyed;
42844+extern atomic_unchecked_t cm_accel_dropped_pkts;
42845+extern atomic_unchecked_t cm_resets_recvd;
42846+extern atomic_unchecked_t pau_qps_created;
42847+extern atomic_unchecked_t pau_qps_destroyed;
42848
42849 extern u32 int_mod_timer_init;
42850 extern u32 int_mod_cq_depth_256;
42851diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42852index 6f09a72..cf4399d 100644
42853--- a/drivers/infiniband/hw/nes/nes_cm.c
42854+++ b/drivers/infiniband/hw/nes/nes_cm.c
42855@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42856 u32 cm_packets_retrans;
42857 u32 cm_packets_created;
42858 u32 cm_packets_received;
42859-atomic_t cm_listens_created;
42860-atomic_t cm_listens_destroyed;
42861+atomic_unchecked_t cm_listens_created;
42862+atomic_unchecked_t cm_listens_destroyed;
42863 u32 cm_backlog_drops;
42864-atomic_t cm_loopbacks;
42865-atomic_t cm_nodes_created;
42866-atomic_t cm_nodes_destroyed;
42867-atomic_t cm_accel_dropped_pkts;
42868-atomic_t cm_resets_recvd;
42869+atomic_unchecked_t cm_loopbacks;
42870+atomic_unchecked_t cm_nodes_created;
42871+atomic_unchecked_t cm_nodes_destroyed;
42872+atomic_unchecked_t cm_accel_dropped_pkts;
42873+atomic_unchecked_t cm_resets_recvd;
42874
42875 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42876 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42877@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
42878 /* instance of function pointers for client API */
42879 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42880 static struct nes_cm_ops nes_cm_api = {
42881- mini_cm_accelerated,
42882- mini_cm_listen,
42883- mini_cm_del_listen,
42884- mini_cm_connect,
42885- mini_cm_close,
42886- mini_cm_accept,
42887- mini_cm_reject,
42888- mini_cm_recv_pkt,
42889- mini_cm_dealloc_core,
42890- mini_cm_get,
42891- mini_cm_set
42892+ .accelerated = mini_cm_accelerated,
42893+ .listen = mini_cm_listen,
42894+ .stop_listener = mini_cm_del_listen,
42895+ .connect = mini_cm_connect,
42896+ .close = mini_cm_close,
42897+ .accept = mini_cm_accept,
42898+ .reject = mini_cm_reject,
42899+ .recv_pkt = mini_cm_recv_pkt,
42900+ .destroy_cm_core = mini_cm_dealloc_core,
42901+ .get = mini_cm_get,
42902+ .set = mini_cm_set
42903 };
42904
42905 static struct nes_cm_core *g_cm_core;
42906
42907-atomic_t cm_connects;
42908-atomic_t cm_accepts;
42909-atomic_t cm_disconnects;
42910-atomic_t cm_closes;
42911-atomic_t cm_connecteds;
42912-atomic_t cm_connect_reqs;
42913-atomic_t cm_rejects;
42914+atomic_unchecked_t cm_connects;
42915+atomic_unchecked_t cm_accepts;
42916+atomic_unchecked_t cm_disconnects;
42917+atomic_unchecked_t cm_closes;
42918+atomic_unchecked_t cm_connecteds;
42919+atomic_unchecked_t cm_connect_reqs;
42920+atomic_unchecked_t cm_rejects;
42921
42922 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
42923 {
42924@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
42925 kfree(listener);
42926 listener = NULL;
42927 ret = 0;
42928- atomic_inc(&cm_listens_destroyed);
42929+ atomic_inc_unchecked(&cm_listens_destroyed);
42930 } else {
42931 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
42932 }
42933@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
42934 cm_node->rem_mac);
42935
42936 add_hte_node(cm_core, cm_node);
42937- atomic_inc(&cm_nodes_created);
42938+ atomic_inc_unchecked(&cm_nodes_created);
42939
42940 return cm_node;
42941 }
42942@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
42943 }
42944
42945 atomic_dec(&cm_core->node_cnt);
42946- atomic_inc(&cm_nodes_destroyed);
42947+ atomic_inc_unchecked(&cm_nodes_destroyed);
42948 nesqp = cm_node->nesqp;
42949 if (nesqp) {
42950 nesqp->cm_node = NULL;
42951@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
42952
42953 static void drop_packet(struct sk_buff *skb)
42954 {
42955- atomic_inc(&cm_accel_dropped_pkts);
42956+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42957 dev_kfree_skb_any(skb);
42958 }
42959
42960@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
42961 {
42962
42963 int reset = 0; /* whether to send reset in case of err.. */
42964- atomic_inc(&cm_resets_recvd);
42965+ atomic_inc_unchecked(&cm_resets_recvd);
42966 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
42967 " refcnt=%d\n", cm_node, cm_node->state,
42968 atomic_read(&cm_node->ref_count));
42969@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
42970 rem_ref_cm_node(cm_node->cm_core, cm_node);
42971 return NULL;
42972 }
42973- atomic_inc(&cm_loopbacks);
42974+ atomic_inc_unchecked(&cm_loopbacks);
42975 loopbackremotenode->loopbackpartner = cm_node;
42976 loopbackremotenode->tcp_cntxt.rcv_wscale =
42977 NES_CM_DEFAULT_RCV_WND_SCALE;
42978@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
42979 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
42980 else {
42981 rem_ref_cm_node(cm_core, cm_node);
42982- atomic_inc(&cm_accel_dropped_pkts);
42983+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42984 dev_kfree_skb_any(skb);
42985 }
42986 break;
42987@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42988
42989 if ((cm_id) && (cm_id->event_handler)) {
42990 if (issue_disconn) {
42991- atomic_inc(&cm_disconnects);
42992+ atomic_inc_unchecked(&cm_disconnects);
42993 cm_event.event = IW_CM_EVENT_DISCONNECT;
42994 cm_event.status = disconn_status;
42995 cm_event.local_addr = cm_id->local_addr;
42996@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42997 }
42998
42999 if (issue_close) {
43000- atomic_inc(&cm_closes);
43001+ atomic_inc_unchecked(&cm_closes);
43002 nes_disconnect(nesqp, 1);
43003
43004 cm_id->provider_data = nesqp;
43005@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43006
43007 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43008 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43009- atomic_inc(&cm_accepts);
43010+ atomic_inc_unchecked(&cm_accepts);
43011
43012 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43013 netdev_refcnt_read(nesvnic->netdev));
43014@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43015 struct nes_cm_core *cm_core;
43016 u8 *start_buff;
43017
43018- atomic_inc(&cm_rejects);
43019+ atomic_inc_unchecked(&cm_rejects);
43020 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43021 loopback = cm_node->loopbackpartner;
43022 cm_core = cm_node->cm_core;
43023@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43024 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43025 ntohs(laddr->sin_port));
43026
43027- atomic_inc(&cm_connects);
43028+ atomic_inc_unchecked(&cm_connects);
43029 nesqp->active_conn = 1;
43030
43031 /* cache the cm_id in the qp */
43032@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43033 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43034 return err;
43035 }
43036- atomic_inc(&cm_listens_created);
43037+ atomic_inc_unchecked(&cm_listens_created);
43038 }
43039
43040 cm_id->add_ref(cm_id);
43041@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43042
43043 if (nesqp->destroyed)
43044 return;
43045- atomic_inc(&cm_connecteds);
43046+ atomic_inc_unchecked(&cm_connecteds);
43047 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43048 " local port 0x%04X. jiffies = %lu.\n",
43049 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43050@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43051
43052 cm_id->add_ref(cm_id);
43053 ret = cm_id->event_handler(cm_id, &cm_event);
43054- atomic_inc(&cm_closes);
43055+ atomic_inc_unchecked(&cm_closes);
43056 cm_event.event = IW_CM_EVENT_CLOSE;
43057 cm_event.status = 0;
43058 cm_event.provider_data = cm_id->provider_data;
43059@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43060 return;
43061 cm_id = cm_node->cm_id;
43062
43063- atomic_inc(&cm_connect_reqs);
43064+ atomic_inc_unchecked(&cm_connect_reqs);
43065 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43066 cm_node, cm_id, jiffies);
43067
43068@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43069 return;
43070 cm_id = cm_node->cm_id;
43071
43072- atomic_inc(&cm_connect_reqs);
43073+ atomic_inc_unchecked(&cm_connect_reqs);
43074 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43075 cm_node, cm_id, jiffies);
43076
43077diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43078index 4166452..fc952c3 100644
43079--- a/drivers/infiniband/hw/nes/nes_mgt.c
43080+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43081@@ -40,8 +40,8 @@
43082 #include "nes.h"
43083 #include "nes_mgt.h"
43084
43085-atomic_t pau_qps_created;
43086-atomic_t pau_qps_destroyed;
43087+atomic_unchecked_t pau_qps_created;
43088+atomic_unchecked_t pau_qps_destroyed;
43089
43090 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43091 {
43092@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43093 {
43094 struct sk_buff *skb;
43095 unsigned long flags;
43096- atomic_inc(&pau_qps_destroyed);
43097+ atomic_inc_unchecked(&pau_qps_destroyed);
43098
43099 /* Free packets that have not yet been forwarded */
43100 /* Lock is acquired by skb_dequeue when removing the skb */
43101@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43102 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43103 skb_queue_head_init(&nesqp->pau_list);
43104 spin_lock_init(&nesqp->pau_lock);
43105- atomic_inc(&pau_qps_created);
43106+ atomic_inc_unchecked(&pau_qps_created);
43107 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43108 }
43109
43110diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43111index 49eb511..a774366 100644
43112--- a/drivers/infiniband/hw/nes/nes_nic.c
43113+++ b/drivers/infiniband/hw/nes/nes_nic.c
43114@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43115 target_stat_values[++index] = mh_detected;
43116 target_stat_values[++index] = mh_pauses_sent;
43117 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43118- target_stat_values[++index] = atomic_read(&cm_connects);
43119- target_stat_values[++index] = atomic_read(&cm_accepts);
43120- target_stat_values[++index] = atomic_read(&cm_disconnects);
43121- target_stat_values[++index] = atomic_read(&cm_connecteds);
43122- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43123- target_stat_values[++index] = atomic_read(&cm_rejects);
43124- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43125- target_stat_values[++index] = atomic_read(&qps_created);
43126- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43127- target_stat_values[++index] = atomic_read(&qps_destroyed);
43128- target_stat_values[++index] = atomic_read(&cm_closes);
43129+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43130+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43131+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43132+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43133+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43134+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43135+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43136+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43137+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43138+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43139+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43140 target_stat_values[++index] = cm_packets_sent;
43141 target_stat_values[++index] = cm_packets_bounced;
43142 target_stat_values[++index] = cm_packets_created;
43143 target_stat_values[++index] = cm_packets_received;
43144 target_stat_values[++index] = cm_packets_dropped;
43145 target_stat_values[++index] = cm_packets_retrans;
43146- target_stat_values[++index] = atomic_read(&cm_listens_created);
43147- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43148+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43149+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43150 target_stat_values[++index] = cm_backlog_drops;
43151- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43152- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43153- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43154- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43155- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43156+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43157+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43158+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43159+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43160+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43161 target_stat_values[++index] = nesadapter->free_4kpbl;
43162 target_stat_values[++index] = nesadapter->free_256pbl;
43163 target_stat_values[++index] = int_mod_timer_init;
43164 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43165 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43166 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43167- target_stat_values[++index] = atomic_read(&pau_qps_created);
43168- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43169+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43170+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43171 }
43172
43173 /**
43174diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43175index c0d0296..3185f57 100644
43176--- a/drivers/infiniband/hw/nes/nes_verbs.c
43177+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43178@@ -46,9 +46,9 @@
43179
43180 #include <rdma/ib_umem.h>
43181
43182-atomic_t mod_qp_timouts;
43183-atomic_t qps_created;
43184-atomic_t sw_qps_destroyed;
43185+atomic_unchecked_t mod_qp_timouts;
43186+atomic_unchecked_t qps_created;
43187+atomic_unchecked_t sw_qps_destroyed;
43188
43189 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43190
43191@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43192 if (init_attr->create_flags)
43193 return ERR_PTR(-EINVAL);
43194
43195- atomic_inc(&qps_created);
43196+ atomic_inc_unchecked(&qps_created);
43197 switch (init_attr->qp_type) {
43198 case IB_QPT_RC:
43199 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43200@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43201 struct iw_cm_event cm_event;
43202 int ret = 0;
43203
43204- atomic_inc(&sw_qps_destroyed);
43205+ atomic_inc_unchecked(&sw_qps_destroyed);
43206 nesqp->destroyed = 1;
43207
43208 /* Blow away the connection if it exists. */
43209diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43210index b218254..1d1aa3c 100644
43211--- a/drivers/infiniband/hw/qib/qib.h
43212+++ b/drivers/infiniband/hw/qib/qib.h
43213@@ -52,6 +52,7 @@
43214 #include <linux/kref.h>
43215 #include <linux/sched.h>
43216 #include <linux/kthread.h>
43217+#include <linux/slab.h>
43218
43219 #include "qib_common.h"
43220 #include "qib_verbs.h"
43221diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43222index cdc7df4..a2fdfdb 100644
43223--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43224+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43225@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43226 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43227 }
43228
43229-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43230+static struct rtnl_link_ops ipoib_link_ops = {
43231 .kind = "ipoib",
43232 .maxtype = IFLA_IPOIB_MAX,
43233 .policy = ipoib_policy,
43234diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43235index e853a21..56fc5a8 100644
43236--- a/drivers/input/gameport/gameport.c
43237+++ b/drivers/input/gameport/gameport.c
43238@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43239 */
43240 static void gameport_init_port(struct gameport *gameport)
43241 {
43242- static atomic_t gameport_no = ATOMIC_INIT(-1);
43243+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43244
43245 __module_get(THIS_MODULE);
43246
43247 mutex_init(&gameport->drv_mutex);
43248 device_initialize(&gameport->dev);
43249 dev_set_name(&gameport->dev, "gameport%lu",
43250- (unsigned long)atomic_inc_return(&gameport_no));
43251+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43252 gameport->dev.bus = &gameport_bus;
43253 gameport->dev.release = gameport_release_port;
43254 if (gameport->parent)
43255diff --git a/drivers/input/input.c b/drivers/input/input.c
43256index 213e3a1..4fea837 100644
43257--- a/drivers/input/input.c
43258+++ b/drivers/input/input.c
43259@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43260 */
43261 struct input_dev *input_allocate_device(void)
43262 {
43263- static atomic_t input_no = ATOMIC_INIT(-1);
43264+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43265 struct input_dev *dev;
43266
43267 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43268@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43269 INIT_LIST_HEAD(&dev->node);
43270
43271 dev_set_name(&dev->dev, "input%lu",
43272- (unsigned long)atomic_inc_return(&input_no));
43273+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43274
43275 __module_get(THIS_MODULE);
43276 }
43277diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43278index 4a95b22..874c182 100644
43279--- a/drivers/input/joystick/sidewinder.c
43280+++ b/drivers/input/joystick/sidewinder.c
43281@@ -30,6 +30,7 @@
43282 #include <linux/kernel.h>
43283 #include <linux/module.h>
43284 #include <linux/slab.h>
43285+#include <linux/sched.h>
43286 #include <linux/input.h>
43287 #include <linux/gameport.h>
43288 #include <linux/jiffies.h>
43289diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43290index 3aa2f3f..53c00ea 100644
43291--- a/drivers/input/joystick/xpad.c
43292+++ b/drivers/input/joystick/xpad.c
43293@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43294
43295 static int xpad_led_probe(struct usb_xpad *xpad)
43296 {
43297- static atomic_t led_seq = ATOMIC_INIT(-1);
43298+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43299 unsigned long led_no;
43300 struct xpad_led *led;
43301 struct led_classdev *led_cdev;
43302@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43303 if (!led)
43304 return -ENOMEM;
43305
43306- led_no = atomic_inc_return(&led_seq);
43307+ led_no = atomic_inc_return_unchecked(&led_seq);
43308
43309 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43310 led->xpad = xpad;
43311diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43312index ac1fa5f..5f7502c 100644
43313--- a/drivers/input/misc/ims-pcu.c
43314+++ b/drivers/input/misc/ims-pcu.c
43315@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43316
43317 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43318 {
43319- static atomic_t device_no = ATOMIC_INIT(-1);
43320+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43321
43322 const struct ims_pcu_device_info *info;
43323 int error;
43324@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43325 }
43326
43327 /* Device appears to be operable, complete initialization */
43328- pcu->device_no = atomic_inc_return(&device_no);
43329+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43330
43331 /*
43332 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43333diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43334index f4cf664..3204fda 100644
43335--- a/drivers/input/mouse/psmouse.h
43336+++ b/drivers/input/mouse/psmouse.h
43337@@ -117,7 +117,7 @@ struct psmouse_attribute {
43338 ssize_t (*set)(struct psmouse *psmouse, void *data,
43339 const char *buf, size_t count);
43340 bool protect;
43341-};
43342+} __do_const;
43343 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43344
43345 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43346diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43347index b604564..3f14ae4 100644
43348--- a/drivers/input/mousedev.c
43349+++ b/drivers/input/mousedev.c
43350@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43351
43352 spin_unlock_irq(&client->packet_lock);
43353
43354- if (copy_to_user(buffer, data, count))
43355+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43356 return -EFAULT;
43357
43358 return count;
43359diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43360index a05a517..323a2fd 100644
43361--- a/drivers/input/serio/serio.c
43362+++ b/drivers/input/serio/serio.c
43363@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43364 */
43365 static void serio_init_port(struct serio *serio)
43366 {
43367- static atomic_t serio_no = ATOMIC_INIT(-1);
43368+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43369
43370 __module_get(THIS_MODULE);
43371
43372@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43373 mutex_init(&serio->drv_mutex);
43374 device_initialize(&serio->dev);
43375 dev_set_name(&serio->dev, "serio%lu",
43376- (unsigned long)atomic_inc_return(&serio_no));
43377+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43378 serio->dev.bus = &serio_bus;
43379 serio->dev.release = serio_release_port;
43380 serio->dev.groups = serio_device_attr_groups;
43381diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43382index 71ef5d6..93380a9 100644
43383--- a/drivers/input/serio/serio_raw.c
43384+++ b/drivers/input/serio/serio_raw.c
43385@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43386
43387 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43388 {
43389- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43390+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43391 struct serio_raw *serio_raw;
43392 int err;
43393
43394@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43395 }
43396
43397 snprintf(serio_raw->name, sizeof(serio_raw->name),
43398- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43399+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43400 kref_init(&serio_raw->kref);
43401 INIT_LIST_HEAD(&serio_raw->client_list);
43402 init_waitqueue_head(&serio_raw->wait);
43403diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43404index 9802485..2e9941d 100644
43405--- a/drivers/iommu/amd_iommu.c
43406+++ b/drivers/iommu/amd_iommu.c
43407@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43408
43409 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43410 {
43411+ phys_addr_t physaddr;
43412 WARN_ON(address & 0x7ULL);
43413
43414 memset(cmd, 0, sizeof(*cmd));
43415- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43416- cmd->data[1] = upper_32_bits(__pa(address));
43417+
43418+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43419+ if (object_starts_on_stack((void *)address)) {
43420+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43421+ physaddr = __pa((u64)adjbuf);
43422+ } else
43423+#endif
43424+ physaddr = __pa(address);
43425+
43426+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43427+ cmd->data[1] = upper_32_bits(physaddr);
43428 cmd->data[2] = 1;
43429 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43430 }
43431diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43432index 6cd47b7..264d14a 100644
43433--- a/drivers/iommu/arm-smmu.c
43434+++ b/drivers/iommu/arm-smmu.c
43435@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43436 cfg->irptndx = cfg->cbndx;
43437 }
43438
43439- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43440+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43441 arm_smmu_init_context_bank(smmu_domain);
43442 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43443
43444diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43445index f7718d7..3ef740b 100644
43446--- a/drivers/iommu/iommu.c
43447+++ b/drivers/iommu/iommu.c
43448@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43449 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43450 {
43451 int err;
43452- struct notifier_block *nb;
43453+ notifier_block_no_const *nb;
43454 struct iommu_callback_data cb = {
43455 .ops = ops,
43456 };
43457diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43458index 89c4846..1de796f 100644
43459--- a/drivers/iommu/irq_remapping.c
43460+++ b/drivers/iommu/irq_remapping.c
43461@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43462 void panic_if_irq_remap(const char *msg)
43463 {
43464 if (irq_remapping_enabled)
43465- panic(msg);
43466+ panic("%s", msg);
43467 }
43468
43469 static void ir_ack_apic_edge(struct irq_data *data)
43470@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43471
43472 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43473 {
43474- chip->irq_print_chip = ir_print_prefix;
43475- chip->irq_ack = ir_ack_apic_edge;
43476- chip->irq_eoi = ir_ack_apic_level;
43477- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43478+ pax_open_kernel();
43479+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43480+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43481+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43482+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43483+ pax_close_kernel();
43484 }
43485
43486 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
43487diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43488index d617ee5..df8be8b 100644
43489--- a/drivers/irqchip/irq-gic.c
43490+++ b/drivers/irqchip/irq-gic.c
43491@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43492 * Supported arch specific GIC irq extension.
43493 * Default make them NULL.
43494 */
43495-struct irq_chip gic_arch_extn = {
43496+irq_chip_no_const gic_arch_extn = {
43497 .irq_eoi = NULL,
43498 .irq_mask = NULL,
43499 .irq_unmask = NULL,
43500@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43501 chained_irq_exit(chip, desc);
43502 }
43503
43504-static struct irq_chip gic_chip = {
43505+static irq_chip_no_const gic_chip __read_only = {
43506 .name = "GIC",
43507 .irq_mask = gic_mask_irq,
43508 .irq_unmask = gic_unmask_irq,
43509diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43510index 078cac5..fb0f846 100644
43511--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43512+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43513@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43514 struct intc_irqpin_iomem *i;
43515 struct resource *io[INTC_IRQPIN_REG_NR];
43516 struct resource *irq;
43517- struct irq_chip *irq_chip;
43518+ irq_chip_no_const *irq_chip;
43519 void (*enable_fn)(struct irq_data *d);
43520 void (*disable_fn)(struct irq_data *d);
43521 const char *name = dev_name(dev);
43522diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43523index 384e6ed..7a771b2 100644
43524--- a/drivers/irqchip/irq-renesas-irqc.c
43525+++ b/drivers/irqchip/irq-renesas-irqc.c
43526@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43527 struct irqc_priv *p;
43528 struct resource *io;
43529 struct resource *irq;
43530- struct irq_chip *irq_chip;
43531+ irq_chip_no_const *irq_chip;
43532 const char *name = dev_name(&pdev->dev);
43533 int ret;
43534 int k;
43535diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43536index 6a2df32..dc962f1 100644
43537--- a/drivers/isdn/capi/capi.c
43538+++ b/drivers/isdn/capi/capi.c
43539@@ -81,8 +81,8 @@ struct capiminor {
43540
43541 struct capi20_appl *ap;
43542 u32 ncci;
43543- atomic_t datahandle;
43544- atomic_t msgid;
43545+ atomic_unchecked_t datahandle;
43546+ atomic_unchecked_t msgid;
43547
43548 struct tty_port port;
43549 int ttyinstop;
43550@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43551 capimsg_setu16(s, 2, mp->ap->applid);
43552 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43553 capimsg_setu8 (s, 5, CAPI_RESP);
43554- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43555+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43556 capimsg_setu32(s, 8, mp->ncci);
43557 capimsg_setu16(s, 12, datahandle);
43558 }
43559@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43560 mp->outbytes -= len;
43561 spin_unlock_bh(&mp->outlock);
43562
43563- datahandle = atomic_inc_return(&mp->datahandle);
43564+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43565 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43566 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43567 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43568 capimsg_setu16(skb->data, 2, mp->ap->applid);
43569 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43570 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43571- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43572+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43573 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43574 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43575 capimsg_setu16(skb->data, 16, len); /* Data length */
43576diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43577index aecec6d..11e13c5 100644
43578--- a/drivers/isdn/gigaset/bas-gigaset.c
43579+++ b/drivers/isdn/gigaset/bas-gigaset.c
43580@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43581
43582
43583 static const struct gigaset_ops gigops = {
43584- gigaset_write_cmd,
43585- gigaset_write_room,
43586- gigaset_chars_in_buffer,
43587- gigaset_brkchars,
43588- gigaset_init_bchannel,
43589- gigaset_close_bchannel,
43590- gigaset_initbcshw,
43591- gigaset_freebcshw,
43592- gigaset_reinitbcshw,
43593- gigaset_initcshw,
43594- gigaset_freecshw,
43595- gigaset_set_modem_ctrl,
43596- gigaset_baud_rate,
43597- gigaset_set_line_ctrl,
43598- gigaset_isoc_send_skb,
43599- gigaset_isoc_input,
43600+ .write_cmd = gigaset_write_cmd,
43601+ .write_room = gigaset_write_room,
43602+ .chars_in_buffer = gigaset_chars_in_buffer,
43603+ .brkchars = gigaset_brkchars,
43604+ .init_bchannel = gigaset_init_bchannel,
43605+ .close_bchannel = gigaset_close_bchannel,
43606+ .initbcshw = gigaset_initbcshw,
43607+ .freebcshw = gigaset_freebcshw,
43608+ .reinitbcshw = gigaset_reinitbcshw,
43609+ .initcshw = gigaset_initcshw,
43610+ .freecshw = gigaset_freecshw,
43611+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43612+ .baud_rate = gigaset_baud_rate,
43613+ .set_line_ctrl = gigaset_set_line_ctrl,
43614+ .send_skb = gigaset_isoc_send_skb,
43615+ .handle_input = gigaset_isoc_input,
43616 };
43617
43618 /* bas_gigaset_init
43619diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43620index 600c79b..3752bab 100644
43621--- a/drivers/isdn/gigaset/interface.c
43622+++ b/drivers/isdn/gigaset/interface.c
43623@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43624 }
43625 tty->driver_data = cs;
43626
43627- ++cs->port.count;
43628+ atomic_inc(&cs->port.count);
43629
43630- if (cs->port.count == 1) {
43631+ if (atomic_read(&cs->port.count) == 1) {
43632 tty_port_tty_set(&cs->port, tty);
43633 cs->port.low_latency = 1;
43634 }
43635@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43636
43637 if (!cs->connected)
43638 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43639- else if (!cs->port.count)
43640+ else if (!atomic_read(&cs->port.count))
43641 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43642- else if (!--cs->port.count)
43643+ else if (!atomic_dec_return(&cs->port.count))
43644 tty_port_tty_set(&cs->port, NULL);
43645
43646 mutex_unlock(&cs->mutex);
43647diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43648index 8c91fd5..14f13ce 100644
43649--- a/drivers/isdn/gigaset/ser-gigaset.c
43650+++ b/drivers/isdn/gigaset/ser-gigaset.c
43651@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43652 }
43653
43654 static const struct gigaset_ops ops = {
43655- gigaset_write_cmd,
43656- gigaset_write_room,
43657- gigaset_chars_in_buffer,
43658- gigaset_brkchars,
43659- gigaset_init_bchannel,
43660- gigaset_close_bchannel,
43661- gigaset_initbcshw,
43662- gigaset_freebcshw,
43663- gigaset_reinitbcshw,
43664- gigaset_initcshw,
43665- gigaset_freecshw,
43666- gigaset_set_modem_ctrl,
43667- gigaset_baud_rate,
43668- gigaset_set_line_ctrl,
43669- gigaset_m10x_send_skb, /* asyncdata.c */
43670- gigaset_m10x_input, /* asyncdata.c */
43671+ .write_cmd = gigaset_write_cmd,
43672+ .write_room = gigaset_write_room,
43673+ .chars_in_buffer = gigaset_chars_in_buffer,
43674+ .brkchars = gigaset_brkchars,
43675+ .init_bchannel = gigaset_init_bchannel,
43676+ .close_bchannel = gigaset_close_bchannel,
43677+ .initbcshw = gigaset_initbcshw,
43678+ .freebcshw = gigaset_freebcshw,
43679+ .reinitbcshw = gigaset_reinitbcshw,
43680+ .initcshw = gigaset_initcshw,
43681+ .freecshw = gigaset_freecshw,
43682+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43683+ .baud_rate = gigaset_baud_rate,
43684+ .set_line_ctrl = gigaset_set_line_ctrl,
43685+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43686+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43687 };
43688
43689
43690diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43691index 5f306e2..5342f88 100644
43692--- a/drivers/isdn/gigaset/usb-gigaset.c
43693+++ b/drivers/isdn/gigaset/usb-gigaset.c
43694@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43695 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43696 memcpy(cs->hw.usb->bchars, buf, 6);
43697 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43698- 0, 0, &buf, 6, 2000);
43699+ 0, 0, buf, 6, 2000);
43700 }
43701
43702 static void gigaset_freebcshw(struct bc_state *bcs)
43703@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43704 }
43705
43706 static const struct gigaset_ops ops = {
43707- gigaset_write_cmd,
43708- gigaset_write_room,
43709- gigaset_chars_in_buffer,
43710- gigaset_brkchars,
43711- gigaset_init_bchannel,
43712- gigaset_close_bchannel,
43713- gigaset_initbcshw,
43714- gigaset_freebcshw,
43715- gigaset_reinitbcshw,
43716- gigaset_initcshw,
43717- gigaset_freecshw,
43718- gigaset_set_modem_ctrl,
43719- gigaset_baud_rate,
43720- gigaset_set_line_ctrl,
43721- gigaset_m10x_send_skb,
43722- gigaset_m10x_input,
43723+ .write_cmd = gigaset_write_cmd,
43724+ .write_room = gigaset_write_room,
43725+ .chars_in_buffer = gigaset_chars_in_buffer,
43726+ .brkchars = gigaset_brkchars,
43727+ .init_bchannel = gigaset_init_bchannel,
43728+ .close_bchannel = gigaset_close_bchannel,
43729+ .initbcshw = gigaset_initbcshw,
43730+ .freebcshw = gigaset_freebcshw,
43731+ .reinitbcshw = gigaset_reinitbcshw,
43732+ .initcshw = gigaset_initcshw,
43733+ .freecshw = gigaset_freecshw,
43734+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43735+ .baud_rate = gigaset_baud_rate,
43736+ .set_line_ctrl = gigaset_set_line_ctrl,
43737+ .send_skb = gigaset_m10x_send_skb,
43738+ .handle_input = gigaset_m10x_input,
43739 };
43740
43741 /*
43742diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43743index 4d9b195..455075c 100644
43744--- a/drivers/isdn/hardware/avm/b1.c
43745+++ b/drivers/isdn/hardware/avm/b1.c
43746@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43747 }
43748 if (left) {
43749 if (t4file->user) {
43750- if (copy_from_user(buf, dp, left))
43751+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43752 return -EFAULT;
43753 } else {
43754 memcpy(buf, dp, left);
43755@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43756 }
43757 if (left) {
43758 if (config->user) {
43759- if (copy_from_user(buf, dp, left))
43760+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43761 return -EFAULT;
43762 } else {
43763 memcpy(buf, dp, left);
43764diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43765index 9b856e1..fa03c92 100644
43766--- a/drivers/isdn/i4l/isdn_common.c
43767+++ b/drivers/isdn/i4l/isdn_common.c
43768@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43769 } else
43770 return -EINVAL;
43771 case IIOCDBGVAR:
43772+ if (!capable(CAP_SYS_RAWIO))
43773+ return -EPERM;
43774 if (arg) {
43775 if (copy_to_user(argp, &dev, sizeof(ulong)))
43776 return -EFAULT;
43777diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43778index 91d5730..336523e 100644
43779--- a/drivers/isdn/i4l/isdn_concap.c
43780+++ b/drivers/isdn/i4l/isdn_concap.c
43781@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43782 }
43783
43784 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43785- &isdn_concap_dl_data_req,
43786- &isdn_concap_dl_connect_req,
43787- &isdn_concap_dl_disconn_req
43788+ .data_req = &isdn_concap_dl_data_req,
43789+ .connect_req = &isdn_concap_dl_connect_req,
43790+ .disconn_req = &isdn_concap_dl_disconn_req
43791 };
43792
43793 /* The following should better go into a dedicated source file such that
43794diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43795index bc91261..2ef7e36 100644
43796--- a/drivers/isdn/i4l/isdn_tty.c
43797+++ b/drivers/isdn/i4l/isdn_tty.c
43798@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43799
43800 #ifdef ISDN_DEBUG_MODEM_OPEN
43801 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43802- port->count);
43803+ atomic_read(&port->count));
43804 #endif
43805- port->count++;
43806+ atomic_inc(&port->count);
43807 port->tty = tty;
43808 /*
43809 * Start up serial port
43810@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43811 #endif
43812 return;
43813 }
43814- if ((tty->count == 1) && (port->count != 1)) {
43815+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43816 /*
43817 * Uh, oh. tty->count is 1, which means that the tty
43818 * structure will be freed. Info->count should always
43819@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43820 * serial port won't be shutdown.
43821 */
43822 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43823- "info->count is %d\n", port->count);
43824- port->count = 1;
43825+ "info->count is %d\n", atomic_read(&port->count));
43826+ atomic_set(&port->count, 1);
43827 }
43828- if (--port->count < 0) {
43829+ if (atomic_dec_return(&port->count) < 0) {
43830 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43831- info->line, port->count);
43832- port->count = 0;
43833+ info->line, atomic_read(&port->count));
43834+ atomic_set(&port->count, 0);
43835 }
43836- if (port->count) {
43837+ if (atomic_read(&port->count)) {
43838 #ifdef ISDN_DEBUG_MODEM_OPEN
43839 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43840 #endif
43841@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43842 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43843 return;
43844 isdn_tty_shutdown(info);
43845- port->count = 0;
43846+ atomic_set(&port->count, 0);
43847 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43848 port->tty = NULL;
43849 wake_up_interruptible(&port->open_wait);
43850@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43851 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43852 modem_info *info = &dev->mdm.info[i];
43853
43854- if (info->port.count == 0)
43855+ if (atomic_read(&info->port.count) == 0)
43856 continue;
43857 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43858 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
43859diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43860index e2d4e58..40cd045 100644
43861--- a/drivers/isdn/i4l/isdn_x25iface.c
43862+++ b/drivers/isdn/i4l/isdn_x25iface.c
43863@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43864
43865
43866 static struct concap_proto_ops ix25_pops = {
43867- &isdn_x25iface_proto_new,
43868- &isdn_x25iface_proto_del,
43869- &isdn_x25iface_proto_restart,
43870- &isdn_x25iface_proto_close,
43871- &isdn_x25iface_xmit,
43872- &isdn_x25iface_receive,
43873- &isdn_x25iface_connect_ind,
43874- &isdn_x25iface_disconn_ind
43875+ .proto_new = &isdn_x25iface_proto_new,
43876+ .proto_del = &isdn_x25iface_proto_del,
43877+ .restart = &isdn_x25iface_proto_restart,
43878+ .close = &isdn_x25iface_proto_close,
43879+ .encap_and_xmit = &isdn_x25iface_xmit,
43880+ .data_ind = &isdn_x25iface_receive,
43881+ .connect_ind = &isdn_x25iface_connect_ind,
43882+ .disconn_ind = &isdn_x25iface_disconn_ind
43883 };
43884
43885 /* error message helper function */
43886diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43887index 6a7447c..b4987ea 100644
43888--- a/drivers/isdn/icn/icn.c
43889+++ b/drivers/isdn/icn/icn.c
43890@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43891 if (count > len)
43892 count = len;
43893 if (user) {
43894- if (copy_from_user(msg, buf, count))
43895+ if (count > sizeof msg || copy_from_user(msg, buf, count))
43896 return -EFAULT;
43897 } else
43898 memcpy(msg, buf, count);
43899@@ -1609,7 +1609,7 @@ icn_setup(char *line)
43900 if (ints[0] > 1)
43901 membase = (unsigned long)ints[2];
43902 if (str && *str) {
43903- strcpy(sid, str);
43904+ strlcpy(sid, str, sizeof(sid));
43905 icn_id = sid;
43906 if ((p = strchr(sid, ','))) {
43907 *p++ = 0;
43908diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43909index 87f7dff..7300125 100644
43910--- a/drivers/isdn/mISDN/dsp_cmx.c
43911+++ b/drivers/isdn/mISDN/dsp_cmx.c
43912@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43913 static u16 dsp_count; /* last sample count */
43914 static int dsp_count_valid; /* if we have last sample count */
43915
43916-void
43917+void __intentional_overflow(-1)
43918 dsp_cmx_send(void *arg)
43919 {
43920 struct dsp_conf *conf;
43921diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
43922index 0f9ed1e..2715d6f 100644
43923--- a/drivers/leds/leds-clevo-mail.c
43924+++ b/drivers/leds/leds-clevo-mail.c
43925@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
43926 * detected as working, but in reality it is not) as low as
43927 * possible.
43928 */
43929-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
43930+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
43931 {
43932 .callback = clevo_mail_led_dmi_callback,
43933 .ident = "Clevo D410J",
43934diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
43935index 046cb70..6b20d39 100644
43936--- a/drivers/leds/leds-ss4200.c
43937+++ b/drivers/leds/leds-ss4200.c
43938@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
43939 * detected as working, but in reality it is not) as low as
43940 * possible.
43941 */
43942-static struct dmi_system_id nas_led_whitelist[] __initdata = {
43943+static struct dmi_system_id nas_led_whitelist[] __initconst = {
43944 {
43945 .callback = ss4200_led_dmi_callback,
43946 .ident = "Intel SS4200-E",
43947diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
43948index 6590558..a74c5dd 100644
43949--- a/drivers/lguest/core.c
43950+++ b/drivers/lguest/core.c
43951@@ -96,9 +96,17 @@ static __init int map_switcher(void)
43952 * The end address needs +1 because __get_vm_area allocates an
43953 * extra guard page, so we need space for that.
43954 */
43955+
43956+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
43957+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43958+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
43959+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43960+#else
43961 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43962 VM_ALLOC, switcher_addr, switcher_addr
43963 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43964+#endif
43965+
43966 if (!switcher_vma) {
43967 err = -ENOMEM;
43968 printk("lguest: could not map switcher pages high\n");
43969@@ -121,7 +129,7 @@ static __init int map_switcher(void)
43970 * Now the Switcher is mapped at the right address, we can't fail!
43971 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
43972 */
43973- memcpy(switcher_vma->addr, start_switcher_text,
43974+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
43975 end_switcher_text - start_switcher_text);
43976
43977 printk(KERN_INFO "lguest: mapped switcher at %p\n",
43978diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
43979index e8b55c3..3514c37 100644
43980--- a/drivers/lguest/page_tables.c
43981+++ b/drivers/lguest/page_tables.c
43982@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
43983 /*:*/
43984
43985 #ifdef CONFIG_X86_PAE
43986-static void release_pmd(pmd_t *spmd)
43987+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
43988 {
43989 /* If the entry's not present, there's nothing to release. */
43990 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
43991diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
43992index 922a1ac..9dd0c2a 100644
43993--- a/drivers/lguest/x86/core.c
43994+++ b/drivers/lguest/x86/core.c
43995@@ -59,7 +59,7 @@ static struct {
43996 /* Offset from where switcher.S was compiled to where we've copied it */
43997 static unsigned long switcher_offset(void)
43998 {
43999- return switcher_addr - (unsigned long)start_switcher_text;
44000+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44001 }
44002
44003 /* This cpu's struct lguest_pages (after the Switcher text page) */
44004@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44005 * These copies are pretty cheap, so we do them unconditionally: */
44006 /* Save the current Host top-level page directory.
44007 */
44008+
44009+#ifdef CONFIG_PAX_PER_CPU_PGD
44010+ pages->state.host_cr3 = read_cr3();
44011+#else
44012 pages->state.host_cr3 = __pa(current->mm->pgd);
44013+#endif
44014+
44015 /*
44016 * Set up the Guest's page tables to see this CPU's pages (and no
44017 * other CPU's pages).
44018@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44019 * compiled-in switcher code and the high-mapped copy we just made.
44020 */
44021 for (i = 0; i < IDT_ENTRIES; i++)
44022- default_idt_entries[i] += switcher_offset();
44023+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44024
44025 /*
44026 * Set up the Switcher's per-cpu areas.
44027@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44028 * it will be undisturbed when we switch. To change %cs and jump we
44029 * need this structure to feed to Intel's "lcall" instruction.
44030 */
44031- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44032+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44033 lguest_entry.segment = LGUEST_CS;
44034
44035 /*
44036diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44037index 40634b0..4f5855e 100644
44038--- a/drivers/lguest/x86/switcher_32.S
44039+++ b/drivers/lguest/x86/switcher_32.S
44040@@ -87,6 +87,7 @@
44041 #include <asm/page.h>
44042 #include <asm/segment.h>
44043 #include <asm/lguest.h>
44044+#include <asm/processor-flags.h>
44045
44046 // We mark the start of the code to copy
44047 // It's placed in .text tho it's never run here
44048@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44049 // Changes type when we load it: damn Intel!
44050 // For after we switch over our page tables
44051 // That entry will be read-only: we'd crash.
44052+
44053+#ifdef CONFIG_PAX_KERNEXEC
44054+ mov %cr0, %edx
44055+ xor $X86_CR0_WP, %edx
44056+ mov %edx, %cr0
44057+#endif
44058+
44059 movl $(GDT_ENTRY_TSS*8), %edx
44060 ltr %dx
44061
44062@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44063 // Let's clear it again for our return.
44064 // The GDT descriptor of the Host
44065 // Points to the table after two "size" bytes
44066- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44067+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44068 // Clear "used" from type field (byte 5, bit 2)
44069- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44070+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44071+
44072+#ifdef CONFIG_PAX_KERNEXEC
44073+ mov %cr0, %eax
44074+ xor $X86_CR0_WP, %eax
44075+ mov %eax, %cr0
44076+#endif
44077
44078 // Once our page table's switched, the Guest is live!
44079 // The Host fades as we run this final step.
44080@@ -295,13 +309,12 @@ deliver_to_host:
44081 // I consulted gcc, and it gave
44082 // These instructions, which I gladly credit:
44083 leal (%edx,%ebx,8), %eax
44084- movzwl (%eax),%edx
44085- movl 4(%eax), %eax
44086- xorw %ax, %ax
44087- orl %eax, %edx
44088+ movl 4(%eax), %edx
44089+ movw (%eax), %dx
44090 // Now the address of the handler's in %edx
44091 // We call it now: its "iret" drops us home.
44092- jmp *%edx
44093+ ljmp $__KERNEL_CS, $1f
44094+1: jmp *%edx
44095
44096 // Every interrupt can come to us here
44097 // But we must truly tell each apart.
44098diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44099index a08e3ee..df8ade2 100644
44100--- a/drivers/md/bcache/closure.h
44101+++ b/drivers/md/bcache/closure.h
44102@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44103 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44104 struct workqueue_struct *wq)
44105 {
44106- BUG_ON(object_is_on_stack(cl));
44107+ BUG_ON(object_starts_on_stack(cl));
44108 closure_set_ip(cl);
44109 cl->fn = fn;
44110 cl->wq = wq;
44111diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44112index 1695ee5..89f18ab 100644
44113--- a/drivers/md/bitmap.c
44114+++ b/drivers/md/bitmap.c
44115@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44116 chunk_kb ? "KB" : "B");
44117 if (bitmap->storage.file) {
44118 seq_printf(seq, ", file: ");
44119- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44120+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44121 }
44122
44123 seq_printf(seq, "\n");
44124diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44125index 73f791b..8c5d3ac 100644
44126--- a/drivers/md/dm-ioctl.c
44127+++ b/drivers/md/dm-ioctl.c
44128@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44129 cmd == DM_LIST_VERSIONS_CMD)
44130 return 0;
44131
44132- if ((cmd == DM_DEV_CREATE_CMD)) {
44133+ if (cmd == DM_DEV_CREATE_CMD) {
44134 if (!*param->name) {
44135 DMWARN("name not supplied when creating device");
44136 return -EINVAL;
44137diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44138index 089d627..ef7352e 100644
44139--- a/drivers/md/dm-raid1.c
44140+++ b/drivers/md/dm-raid1.c
44141@@ -40,7 +40,7 @@ enum dm_raid1_error {
44142
44143 struct mirror {
44144 struct mirror_set *ms;
44145- atomic_t error_count;
44146+ atomic_unchecked_t error_count;
44147 unsigned long error_type;
44148 struct dm_dev *dev;
44149 sector_t offset;
44150@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44151 struct mirror *m;
44152
44153 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44154- if (!atomic_read(&m->error_count))
44155+ if (!atomic_read_unchecked(&m->error_count))
44156 return m;
44157
44158 return NULL;
44159@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44160 * simple way to tell if a device has encountered
44161 * errors.
44162 */
44163- atomic_inc(&m->error_count);
44164+ atomic_inc_unchecked(&m->error_count);
44165
44166 if (test_and_set_bit(error_type, &m->error_type))
44167 return;
44168@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44169 struct mirror *m = get_default_mirror(ms);
44170
44171 do {
44172- if (likely(!atomic_read(&m->error_count)))
44173+ if (likely(!atomic_read_unchecked(&m->error_count)))
44174 return m;
44175
44176 if (m-- == ms->mirror)
44177@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44178 {
44179 struct mirror *default_mirror = get_default_mirror(m->ms);
44180
44181- return !atomic_read(&default_mirror->error_count);
44182+ return !atomic_read_unchecked(&default_mirror->error_count);
44183 }
44184
44185 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44186@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44187 */
44188 if (likely(region_in_sync(ms, region, 1)))
44189 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44190- else if (m && atomic_read(&m->error_count))
44191+ else if (m && atomic_read_unchecked(&m->error_count))
44192 m = NULL;
44193
44194 if (likely(m))
44195@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44196 }
44197
44198 ms->mirror[mirror].ms = ms;
44199- atomic_set(&(ms->mirror[mirror].error_count), 0);
44200+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44201 ms->mirror[mirror].error_type = 0;
44202 ms->mirror[mirror].offset = offset;
44203
44204@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44205 */
44206 static char device_status_char(struct mirror *m)
44207 {
44208- if (!atomic_read(&(m->error_count)))
44209+ if (!atomic_read_unchecked(&(m->error_count)))
44210 return 'A';
44211
44212 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
44213diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44214index f478a4c..4b8e5ef 100644
44215--- a/drivers/md/dm-stats.c
44216+++ b/drivers/md/dm-stats.c
44217@@ -382,7 +382,7 @@ do_sync_free:
44218 synchronize_rcu_expedited();
44219 dm_stat_free(&s->rcu_head);
44220 } else {
44221- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44222+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44223 call_rcu(&s->rcu_head, dm_stat_free);
44224 }
44225 return 0;
44226@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44227 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44228 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44229 ));
44230- ACCESS_ONCE(last->last_sector) = end_sector;
44231- ACCESS_ONCE(last->last_rw) = bi_rw;
44232+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44233+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44234 }
44235
44236 rcu_read_lock();
44237diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44238index f8b37d4..5c5cafd 100644
44239--- a/drivers/md/dm-stripe.c
44240+++ b/drivers/md/dm-stripe.c
44241@@ -21,7 +21,7 @@ struct stripe {
44242 struct dm_dev *dev;
44243 sector_t physical_start;
44244
44245- atomic_t error_count;
44246+ atomic_unchecked_t error_count;
44247 };
44248
44249 struct stripe_c {
44250@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44251 kfree(sc);
44252 return r;
44253 }
44254- atomic_set(&(sc->stripe[i].error_count), 0);
44255+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44256 }
44257
44258 ti->private = sc;
44259@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44260 DMEMIT("%d ", sc->stripes);
44261 for (i = 0; i < sc->stripes; i++) {
44262 DMEMIT("%s ", sc->stripe[i].dev->name);
44263- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44264+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44265 'D' : 'A';
44266 }
44267 buffer[i] = '\0';
44268@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44269 */
44270 for (i = 0; i < sc->stripes; i++)
44271 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44272- atomic_inc(&(sc->stripe[i].error_count));
44273- if (atomic_read(&(sc->stripe[i].error_count)) <
44274+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44275+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44276 DM_IO_ERROR_THRESHOLD)
44277 schedule_work(&sc->trigger_event);
44278 }
44279diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44280index 3afae9e..4e1c954 100644
44281--- a/drivers/md/dm-table.c
44282+++ b/drivers/md/dm-table.c
44283@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44284 if (!dev_size)
44285 return 0;
44286
44287- if ((start >= dev_size) || (start + len > dev_size)) {
44288+ if ((start >= dev_size) || (len > dev_size - start)) {
44289 DMWARN("%s: %s too small for target: "
44290 "start=%llu, len=%llu, dev_size=%llu",
44291 dm_device_name(ti->table->md), bdevname(bdev, b),
44292diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44293index 43adbb8..7b34305 100644
44294--- a/drivers/md/dm-thin-metadata.c
44295+++ b/drivers/md/dm-thin-metadata.c
44296@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44297 {
44298 pmd->info.tm = pmd->tm;
44299 pmd->info.levels = 2;
44300- pmd->info.value_type.context = pmd->data_sm;
44301+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44302 pmd->info.value_type.size = sizeof(__le64);
44303 pmd->info.value_type.inc = data_block_inc;
44304 pmd->info.value_type.dec = data_block_dec;
44305@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44306
44307 pmd->bl_info.tm = pmd->tm;
44308 pmd->bl_info.levels = 1;
44309- pmd->bl_info.value_type.context = pmd->data_sm;
44310+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44311 pmd->bl_info.value_type.size = sizeof(__le64);
44312 pmd->bl_info.value_type.inc = data_block_inc;
44313 pmd->bl_info.value_type.dec = data_block_dec;
44314diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44315index 64b10e0..07db8f4 100644
44316--- a/drivers/md/dm.c
44317+++ b/drivers/md/dm.c
44318@@ -185,9 +185,9 @@ struct mapped_device {
44319 /*
44320 * Event handling.
44321 */
44322- atomic_t event_nr;
44323+ atomic_unchecked_t event_nr;
44324 wait_queue_head_t eventq;
44325- atomic_t uevent_seq;
44326+ atomic_unchecked_t uevent_seq;
44327 struct list_head uevent_list;
44328 spinlock_t uevent_lock; /* Protect access to uevent_list */
44329
44330@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44331 spin_lock_init(&md->deferred_lock);
44332 atomic_set(&md->holders, 1);
44333 atomic_set(&md->open_count, 0);
44334- atomic_set(&md->event_nr, 0);
44335- atomic_set(&md->uevent_seq, 0);
44336+ atomic_set_unchecked(&md->event_nr, 0);
44337+ atomic_set_unchecked(&md->uevent_seq, 0);
44338 INIT_LIST_HEAD(&md->uevent_list);
44339 INIT_LIST_HEAD(&md->table_devices);
44340 spin_lock_init(&md->uevent_lock);
44341@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44342
44343 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44344
44345- atomic_inc(&md->event_nr);
44346+ atomic_inc_unchecked(&md->event_nr);
44347 wake_up(&md->eventq);
44348 }
44349
44350@@ -3034,18 +3034,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44351
44352 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44353 {
44354- return atomic_add_return(1, &md->uevent_seq);
44355+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44356 }
44357
44358 uint32_t dm_get_event_nr(struct mapped_device *md)
44359 {
44360- return atomic_read(&md->event_nr);
44361+ return atomic_read_unchecked(&md->event_nr);
44362 }
44363
44364 int dm_wait_event(struct mapped_device *md, int event_nr)
44365 {
44366 return wait_event_interruptible(md->eventq,
44367- (event_nr != atomic_read(&md->event_nr)));
44368+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44369 }
44370
44371 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44372diff --git a/drivers/md/md.c b/drivers/md/md.c
44373index 709755f..5bc3fa4 100644
44374--- a/drivers/md/md.c
44375+++ b/drivers/md/md.c
44376@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44377 * start build, activate spare
44378 */
44379 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44380-static atomic_t md_event_count;
44381+static atomic_unchecked_t md_event_count;
44382 void md_new_event(struct mddev *mddev)
44383 {
44384- atomic_inc(&md_event_count);
44385+ atomic_inc_unchecked(&md_event_count);
44386 wake_up(&md_event_waiters);
44387 }
44388 EXPORT_SYMBOL_GPL(md_new_event);
44389@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44390 */
44391 static void md_new_event_inintr(struct mddev *mddev)
44392 {
44393- atomic_inc(&md_event_count);
44394+ atomic_inc_unchecked(&md_event_count);
44395 wake_up(&md_event_waiters);
44396 }
44397
44398@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44399 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44400 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44401 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44402- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44403+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44404
44405 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44406 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44407@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44408 else
44409 sb->resync_offset = cpu_to_le64(0);
44410
44411- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44412+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44413
44414 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44415 sb->size = cpu_to_le64(mddev->dev_sectors);
44416@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44417 static ssize_t
44418 errors_show(struct md_rdev *rdev, char *page)
44419 {
44420- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44421+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44422 }
44423
44424 static ssize_t
44425@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44426 char *e;
44427 unsigned long n = simple_strtoul(buf, &e, 10);
44428 if (*buf && (*e == 0 || *e == '\n')) {
44429- atomic_set(&rdev->corrected_errors, n);
44430+ atomic_set_unchecked(&rdev->corrected_errors, n);
44431 return len;
44432 }
44433 return -EINVAL;
44434@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44435 rdev->sb_loaded = 0;
44436 rdev->bb_page = NULL;
44437 atomic_set(&rdev->nr_pending, 0);
44438- atomic_set(&rdev->read_errors, 0);
44439- atomic_set(&rdev->corrected_errors, 0);
44440+ atomic_set_unchecked(&rdev->read_errors, 0);
44441+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44442
44443 INIT_LIST_HEAD(&rdev->same_set);
44444 init_waitqueue_head(&rdev->blocked_wait);
44445@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44446
44447 spin_unlock(&pers_lock);
44448 seq_printf(seq, "\n");
44449- seq->poll_event = atomic_read(&md_event_count);
44450+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44451 return 0;
44452 }
44453 if (v == (void*)2) {
44454@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44455 return error;
44456
44457 seq = file->private_data;
44458- seq->poll_event = atomic_read(&md_event_count);
44459+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44460 return error;
44461 }
44462
44463@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44464 /* always allow read */
44465 mask = POLLIN | POLLRDNORM;
44466
44467- if (seq->poll_event != atomic_read(&md_event_count))
44468+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44469 mask |= POLLERR | POLLPRI;
44470 return mask;
44471 }
44472@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44473 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44474 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44475 (int)part_stat_read(&disk->part0, sectors[1]) -
44476- atomic_read(&disk->sync_io);
44477+ atomic_read_unchecked(&disk->sync_io);
44478 /* sync IO will cause sync_io to increase before the disk_stats
44479 * as sync_io is counted when a request starts, and
44480 * disk_stats is counted when it completes.
44481diff --git a/drivers/md/md.h b/drivers/md/md.h
44482index 03cec5b..0a658c1 100644
44483--- a/drivers/md/md.h
44484+++ b/drivers/md/md.h
44485@@ -94,13 +94,13 @@ struct md_rdev {
44486 * only maintained for arrays that
44487 * support hot removal
44488 */
44489- atomic_t read_errors; /* number of consecutive read errors that
44490+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44491 * we have tried to ignore.
44492 */
44493 struct timespec last_read_error; /* monotonic time since our
44494 * last read error
44495 */
44496- atomic_t corrected_errors; /* number of corrected read errors,
44497+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44498 * for reporting to userspace and storing
44499 * in superblock.
44500 */
44501@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44502
44503 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44504 {
44505- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44506+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44507 }
44508
44509 struct md_personality
44510diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44511index e8a9042..35bd145 100644
44512--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44513+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44514@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44515 * Flick into a mode where all blocks get allocated in the new area.
44516 */
44517 smm->begin = old_len;
44518- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44519+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44520
44521 /*
44522 * Extend.
44523@@ -714,7 +714,7 @@ out:
44524 /*
44525 * Switch back to normal behaviour.
44526 */
44527- memcpy(sm, &ops, sizeof(*sm));
44528+ memcpy((void *)sm, &ops, sizeof(*sm));
44529 return r;
44530 }
44531
44532diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44533index 3e6d115..ffecdeb 100644
44534--- a/drivers/md/persistent-data/dm-space-map.h
44535+++ b/drivers/md/persistent-data/dm-space-map.h
44536@@ -71,6 +71,7 @@ struct dm_space_map {
44537 dm_sm_threshold_fn fn,
44538 void *context);
44539 };
44540+typedef struct dm_space_map __no_const dm_space_map_no_const;
44541
44542 /*----------------------------------------------------------------*/
44543
44544diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44545index 2f2f38f..f6a8ebe 100644
44546--- a/drivers/md/raid1.c
44547+++ b/drivers/md/raid1.c
44548@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44549 if (r1_sync_page_io(rdev, sect, s,
44550 bio->bi_io_vec[idx].bv_page,
44551 READ) != 0)
44552- atomic_add(s, &rdev->corrected_errors);
44553+ atomic_add_unchecked(s, &rdev->corrected_errors);
44554 }
44555 sectors -= s;
44556 sect += s;
44557@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44558 !test_bit(Faulty, &rdev->flags)) {
44559 if (r1_sync_page_io(rdev, sect, s,
44560 conf->tmppage, READ)) {
44561- atomic_add(s, &rdev->corrected_errors);
44562+ atomic_add_unchecked(s, &rdev->corrected_errors);
44563 printk(KERN_INFO
44564 "md/raid1:%s: read error corrected "
44565 "(%d sectors at %llu on %s)\n",
44566diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44567index 32e282f..5cec803 100644
44568--- a/drivers/md/raid10.c
44569+++ b/drivers/md/raid10.c
44570@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44571 /* The write handler will notice the lack of
44572 * R10BIO_Uptodate and record any errors etc
44573 */
44574- atomic_add(r10_bio->sectors,
44575+ atomic_add_unchecked(r10_bio->sectors,
44576 &conf->mirrors[d].rdev->corrected_errors);
44577
44578 /* for reconstruct, we always reschedule after a read.
44579@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44580 {
44581 struct timespec cur_time_mon;
44582 unsigned long hours_since_last;
44583- unsigned int read_errors = atomic_read(&rdev->read_errors);
44584+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44585
44586 ktime_get_ts(&cur_time_mon);
44587
44588@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44589 * overflowing the shift of read_errors by hours_since_last.
44590 */
44591 if (hours_since_last >= 8 * sizeof(read_errors))
44592- atomic_set(&rdev->read_errors, 0);
44593+ atomic_set_unchecked(&rdev->read_errors, 0);
44594 else
44595- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44596+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44597 }
44598
44599 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44600@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44601 return;
44602
44603 check_decay_read_errors(mddev, rdev);
44604- atomic_inc(&rdev->read_errors);
44605- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44606+ atomic_inc_unchecked(&rdev->read_errors);
44607+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44608 char b[BDEVNAME_SIZE];
44609 bdevname(rdev->bdev, b);
44610
44611@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44612 "md/raid10:%s: %s: Raid device exceeded "
44613 "read_error threshold [cur %d:max %d]\n",
44614 mdname(mddev), b,
44615- atomic_read(&rdev->read_errors), max_read_errors);
44616+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44617 printk(KERN_NOTICE
44618 "md/raid10:%s: %s: Failing raid device\n",
44619 mdname(mddev), b);
44620@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44621 sect +
44622 choose_data_offset(r10_bio, rdev)),
44623 bdevname(rdev->bdev, b));
44624- atomic_add(s, &rdev->corrected_errors);
44625+ atomic_add_unchecked(s, &rdev->corrected_errors);
44626 }
44627
44628 rdev_dec_pending(rdev, mddev);
44629diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44630index 8577cc7..e80e05d 100644
44631--- a/drivers/md/raid5.c
44632+++ b/drivers/md/raid5.c
44633@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44634 return 1;
44635 }
44636
44637+#ifdef CONFIG_GRKERNSEC_HIDESYM
44638+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44639+#endif
44640+
44641 static int grow_stripes(struct r5conf *conf, int num)
44642 {
44643 struct kmem_cache *sc;
44644@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44645 "raid%d-%s", conf->level, mdname(conf->mddev));
44646 else
44647 sprintf(conf->cache_name[0],
44648+#ifdef CONFIG_GRKERNSEC_HIDESYM
44649+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44650+#else
44651 "raid%d-%p", conf->level, conf->mddev);
44652+#endif
44653 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44654
44655 conf->active_name = 0;
44656@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44657 mdname(conf->mddev), STRIPE_SECTORS,
44658 (unsigned long long)s,
44659 bdevname(rdev->bdev, b));
44660- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44661+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44662 clear_bit(R5_ReadError, &sh->dev[i].flags);
44663 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44664 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44665 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44666
44667- if (atomic_read(&rdev->read_errors))
44668- atomic_set(&rdev->read_errors, 0);
44669+ if (atomic_read_unchecked(&rdev->read_errors))
44670+ atomic_set_unchecked(&rdev->read_errors, 0);
44671 } else {
44672 const char *bdn = bdevname(rdev->bdev, b);
44673 int retry = 0;
44674 int set_bad = 0;
44675
44676 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44677- atomic_inc(&rdev->read_errors);
44678+ atomic_inc_unchecked(&rdev->read_errors);
44679 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44680 printk_ratelimited(
44681 KERN_WARNING
44682@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44683 mdname(conf->mddev),
44684 (unsigned long long)s,
44685 bdn);
44686- } else if (atomic_read(&rdev->read_errors)
44687+ } else if (atomic_read_unchecked(&rdev->read_errors)
44688 > conf->max_nr_stripes)
44689 printk(KERN_WARNING
44690 "md/raid:%s: Too many read errors, failing device %s.\n",
44691diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44692index 983db75..ef9248c 100644
44693--- a/drivers/media/dvb-core/dvbdev.c
44694+++ b/drivers/media/dvb-core/dvbdev.c
44695@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44696 const struct dvb_device *template, void *priv, int type)
44697 {
44698 struct dvb_device *dvbdev;
44699- struct file_operations *dvbdevfops;
44700+ file_operations_no_const *dvbdevfops;
44701 struct device *clsdev;
44702 int minor;
44703 int id;
44704diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44705index 6ad22b6..6e90e2a 100644
44706--- a/drivers/media/dvb-frontends/af9033.h
44707+++ b/drivers/media/dvb-frontends/af9033.h
44708@@ -96,6 +96,6 @@ struct af9033_ops {
44709 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44710 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44711 int onoff);
44712-};
44713+} __no_const;
44714
44715 #endif /* AF9033_H */
44716diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44717index 9b6c3bb..baeb5c7 100644
44718--- a/drivers/media/dvb-frontends/dib3000.h
44719+++ b/drivers/media/dvb-frontends/dib3000.h
44720@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44721 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44722 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44723 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44724-};
44725+} __no_const;
44726
44727 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44728 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44729diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44730index 1fea0e9..321ce8f 100644
44731--- a/drivers/media/dvb-frontends/dib7000p.h
44732+++ b/drivers/media/dvb-frontends/dib7000p.h
44733@@ -64,7 +64,7 @@ struct dib7000p_ops {
44734 int (*get_adc_power)(struct dvb_frontend *fe);
44735 int (*slave_reset)(struct dvb_frontend *fe);
44736 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44737-};
44738+} __no_const;
44739
44740 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44741 void *dib7000p_attach(struct dib7000p_ops *ops);
44742diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44743index 84cc103..5780c54 100644
44744--- a/drivers/media/dvb-frontends/dib8000.h
44745+++ b/drivers/media/dvb-frontends/dib8000.h
44746@@ -61,7 +61,7 @@ struct dib8000_ops {
44747 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44748 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44749 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44750-};
44751+} __no_const;
44752
44753 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44754 void *dib8000_attach(struct dib8000_ops *ops);
44755diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44756index 860c98fc..497fa25 100644
44757--- a/drivers/media/pci/cx88/cx88-video.c
44758+++ b/drivers/media/pci/cx88/cx88-video.c
44759@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44760
44761 /* ------------------------------------------------------------------ */
44762
44763-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44764-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44765-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44766+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44767+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44768+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44769
44770 module_param_array(video_nr, int, NULL, 0444);
44771 module_param_array(vbi_nr, int, NULL, 0444);
44772diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44773index 802642d..5534900 100644
44774--- a/drivers/media/pci/ivtv/ivtv-driver.c
44775+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44776@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44777 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44778
44779 /* ivtv instance counter */
44780-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44781+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44782
44783 /* Parameter declarations */
44784 static int cardtype[IVTV_MAX_CARDS];
44785diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44786index 8cbe6b4..ea3601c 100644
44787--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44788+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44789@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44790
44791 static int solo_sysfs_init(struct solo_dev *solo_dev)
44792 {
44793- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44794+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44795 struct device *dev = &solo_dev->dev;
44796 const char *driver;
44797 int i;
44798diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44799index c7141f2..5301fec 100644
44800--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44801+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44802@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44803
44804 int solo_g723_init(struct solo_dev *solo_dev)
44805 {
44806- static struct snd_device_ops ops = { NULL };
44807+ static struct snd_device_ops ops = { };
44808 struct snd_card *card;
44809 struct snd_kcontrol_new kctl;
44810 char name[32];
44811diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44812index 8c84846..27b4f83 100644
44813--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44814+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44815@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44816
44817 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44818 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44819- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44820+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44821 if (p2m_id < 0)
44822 p2m_id = -p2m_id;
44823 }
44824diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44825index bd8edfa..e82ed85 100644
44826--- a/drivers/media/pci/solo6x10/solo6x10.h
44827+++ b/drivers/media/pci/solo6x10/solo6x10.h
44828@@ -220,7 +220,7 @@ struct solo_dev {
44829
44830 /* P2M DMA Engine */
44831 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44832- atomic_t p2m_count;
44833+ atomic_unchecked_t p2m_count;
44834 int p2m_jiffies;
44835 unsigned int p2m_timeouts;
44836
44837diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44838index c135165..dc69499 100644
44839--- a/drivers/media/pci/tw68/tw68-core.c
44840+++ b/drivers/media/pci/tw68/tw68-core.c
44841@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
44842 module_param_array(card, int, NULL, 0444);
44843 MODULE_PARM_DESC(card, "card type");
44844
44845-static atomic_t tw68_instance = ATOMIC_INIT(0);
44846+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
44847
44848 /* ------------------------------------------------------------------ */
44849
44850diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44851index ba2d8f9..1566684 100644
44852--- a/drivers/media/platform/omap/omap_vout.c
44853+++ b/drivers/media/platform/omap/omap_vout.c
44854@@ -63,7 +63,6 @@ enum omap_vout_channels {
44855 OMAP_VIDEO2,
44856 };
44857
44858-static struct videobuf_queue_ops video_vbq_ops;
44859 /* Variables configurable through module params*/
44860 static u32 video1_numbuffers = 3;
44861 static u32 video2_numbuffers = 3;
44862@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
44863 {
44864 struct videobuf_queue *q;
44865 struct omap_vout_device *vout = NULL;
44866+ static struct videobuf_queue_ops video_vbq_ops = {
44867+ .buf_setup = omap_vout_buffer_setup,
44868+ .buf_prepare = omap_vout_buffer_prepare,
44869+ .buf_release = omap_vout_buffer_release,
44870+ .buf_queue = omap_vout_buffer_queue,
44871+ };
44872
44873 vout = video_drvdata(file);
44874 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44875@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
44876 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44877
44878 q = &vout->vbq;
44879- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44880- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44881- video_vbq_ops.buf_release = omap_vout_buffer_release;
44882- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44883 spin_lock_init(&vout->vbq_lock);
44884
44885 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
44886diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44887index fb2acc5..a2fcbdc4 100644
44888--- a/drivers/media/platform/s5p-tv/mixer.h
44889+++ b/drivers/media/platform/s5p-tv/mixer.h
44890@@ -156,7 +156,7 @@ struct mxr_layer {
44891 /** layer index (unique identifier) */
44892 int idx;
44893 /** callbacks for layer methods */
44894- struct mxr_layer_ops ops;
44895+ struct mxr_layer_ops *ops;
44896 /** format array */
44897 const struct mxr_format **fmt_array;
44898 /** size of format array */
44899diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44900index 74344c7..a39e70e 100644
44901--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44902+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44903@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44904 {
44905 struct mxr_layer *layer;
44906 int ret;
44907- struct mxr_layer_ops ops = {
44908+ static struct mxr_layer_ops ops = {
44909 .release = mxr_graph_layer_release,
44910 .buffer_set = mxr_graph_buffer_set,
44911 .stream_set = mxr_graph_stream_set,
44912diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44913index b713403..53cb5ad 100644
44914--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44915+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44916@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44917 layer->update_buf = next;
44918 }
44919
44920- layer->ops.buffer_set(layer, layer->update_buf);
44921+ layer->ops->buffer_set(layer, layer->update_buf);
44922
44923 if (done && done != layer->shadow_buf)
44924 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44925diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44926index b4d2696..91df48e 100644
44927--- a/drivers/media/platform/s5p-tv/mixer_video.c
44928+++ b/drivers/media/platform/s5p-tv/mixer_video.c
44929@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44930 layer->geo.src.height = layer->geo.src.full_height;
44931
44932 mxr_geometry_dump(mdev, &layer->geo);
44933- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44934+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44935 mxr_geometry_dump(mdev, &layer->geo);
44936 }
44937
44938@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44939 layer->geo.dst.full_width = mbus_fmt.width;
44940 layer->geo.dst.full_height = mbus_fmt.height;
44941 layer->geo.dst.field = mbus_fmt.field;
44942- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44943+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44944
44945 mxr_geometry_dump(mdev, &layer->geo);
44946 }
44947@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44948 /* set source size to highest accepted value */
44949 geo->src.full_width = max(geo->dst.full_width, pix->width);
44950 geo->src.full_height = max(geo->dst.full_height, pix->height);
44951- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44952+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44953 mxr_geometry_dump(mdev, &layer->geo);
44954 /* set cropping to total visible screen */
44955 geo->src.width = pix->width;
44956@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44957 geo->src.x_offset = 0;
44958 geo->src.y_offset = 0;
44959 /* assure consistency of geometry */
44960- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44961+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44962 mxr_geometry_dump(mdev, &layer->geo);
44963 /* set full size to lowest possible value */
44964 geo->src.full_width = 0;
44965 geo->src.full_height = 0;
44966- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44967+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44968 mxr_geometry_dump(mdev, &layer->geo);
44969
44970 /* returning results */
44971@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44972 target->width = s->r.width;
44973 target->height = s->r.height;
44974
44975- layer->ops.fix_geometry(layer, stage, s->flags);
44976+ layer->ops->fix_geometry(layer, stage, s->flags);
44977
44978 /* retrieve update selection rectangle */
44979 res.left = target->x_offset;
44980@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
44981 mxr_output_get(mdev);
44982
44983 mxr_layer_update_output(layer);
44984- layer->ops.format_set(layer);
44985+ layer->ops->format_set(layer);
44986 /* enabling layer in hardware */
44987 spin_lock_irqsave(&layer->enq_slock, flags);
44988 layer->state = MXR_LAYER_STREAMING;
44989 spin_unlock_irqrestore(&layer->enq_slock, flags);
44990
44991- layer->ops.stream_set(layer, MXR_ENABLE);
44992+ layer->ops->stream_set(layer, MXR_ENABLE);
44993 mxr_streamer_get(mdev);
44994
44995 return 0;
44996@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
44997 spin_unlock_irqrestore(&layer->enq_slock, flags);
44998
44999 /* disabling layer in hardware */
45000- layer->ops.stream_set(layer, MXR_DISABLE);
45001+ layer->ops->stream_set(layer, MXR_DISABLE);
45002 /* remove one streamer */
45003 mxr_streamer_put(mdev);
45004 /* allow changes in output configuration */
45005@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45006
45007 void mxr_layer_release(struct mxr_layer *layer)
45008 {
45009- if (layer->ops.release)
45010- layer->ops.release(layer);
45011+ if (layer->ops->release)
45012+ layer->ops->release(layer);
45013 }
45014
45015 void mxr_base_layer_release(struct mxr_layer *layer)
45016@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45017
45018 layer->mdev = mdev;
45019 layer->idx = idx;
45020- layer->ops = *ops;
45021+ layer->ops = ops;
45022
45023 spin_lock_init(&layer->enq_slock);
45024 INIT_LIST_HEAD(&layer->enq_list);
45025diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45026index c9388c4..ce71ece 100644
45027--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45028+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45029@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45030 {
45031 struct mxr_layer *layer;
45032 int ret;
45033- struct mxr_layer_ops ops = {
45034+ static struct mxr_layer_ops ops = {
45035 .release = mxr_vp_layer_release,
45036 .buffer_set = mxr_vp_buffer_set,
45037 .stream_set = mxr_vp_stream_set,
45038diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45039index 82affae..42833ec 100644
45040--- a/drivers/media/radio/radio-cadet.c
45041+++ b/drivers/media/radio/radio-cadet.c
45042@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45043 unsigned char readbuf[RDS_BUFFER];
45044 int i = 0;
45045
45046+ if (count > RDS_BUFFER)
45047+ return -EFAULT;
45048 mutex_lock(&dev->lock);
45049 if (dev->rdsstat == 0)
45050 cadet_start_rds(dev);
45051@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45052 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45053 mutex_unlock(&dev->lock);
45054
45055- if (i && copy_to_user(data, readbuf, i))
45056- return -EFAULT;
45057+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45058+ i = -EFAULT;
45059+
45060 return i;
45061 }
45062
45063diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45064index 5236035..c622c74 100644
45065--- a/drivers/media/radio/radio-maxiradio.c
45066+++ b/drivers/media/radio/radio-maxiradio.c
45067@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45068 /* TEA5757 pin mappings */
45069 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45070
45071-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45072+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45073
45074 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45075 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45076diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45077index 050b3bb..79f62b9 100644
45078--- a/drivers/media/radio/radio-shark.c
45079+++ b/drivers/media/radio/radio-shark.c
45080@@ -79,7 +79,7 @@ struct shark_device {
45081 u32 last_val;
45082 };
45083
45084-static atomic_t shark_instance = ATOMIC_INIT(0);
45085+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45086
45087 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45088 {
45089diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45090index 8654e0d..0608a64 100644
45091--- a/drivers/media/radio/radio-shark2.c
45092+++ b/drivers/media/radio/radio-shark2.c
45093@@ -74,7 +74,7 @@ struct shark_device {
45094 u8 *transfer_buffer;
45095 };
45096
45097-static atomic_t shark_instance = ATOMIC_INIT(0);
45098+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45099
45100 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45101 {
45102diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45103index dccf586..d5db411 100644
45104--- a/drivers/media/radio/radio-si476x.c
45105+++ b/drivers/media/radio/radio-si476x.c
45106@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45107 struct si476x_radio *radio;
45108 struct v4l2_ctrl *ctrl;
45109
45110- static atomic_t instance = ATOMIC_INIT(0);
45111+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45112
45113 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45114 if (!radio)
45115diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45116index 704397f..4d05977 100644
45117--- a/drivers/media/radio/wl128x/fmdrv_common.c
45118+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45119@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45120 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45121
45122 /* Radio Nr */
45123-static u32 radio_nr = -1;
45124+static int radio_nr = -1;
45125 module_param(radio_nr, int, 0444);
45126 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45127
45128diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45129index 9fd1527..8927230 100644
45130--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45131+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45132@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45133
45134 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45135 {
45136- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45137- char result[64];
45138- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45139- sizeof(result), 0);
45140+ char *buf;
45141+ char *result;
45142+ int retval;
45143+
45144+ buf = kmalloc(2, GFP_KERNEL);
45145+ if (buf == NULL)
45146+ return -ENOMEM;
45147+ result = kmalloc(64, GFP_KERNEL);
45148+ if (result == NULL) {
45149+ kfree(buf);
45150+ return -ENOMEM;
45151+ }
45152+
45153+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45154+ buf[1] = enable ? 1 : 0;
45155+
45156+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45157+
45158+ kfree(buf);
45159+ kfree(result);
45160+ return retval;
45161 }
45162
45163 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45164 {
45165- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45166- char state[3];
45167- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45168+ char *buf;
45169+ char *state;
45170+ int retval;
45171+
45172+ buf = kmalloc(2, GFP_KERNEL);
45173+ if (buf == NULL)
45174+ return -ENOMEM;
45175+ state = kmalloc(3, GFP_KERNEL);
45176+ if (state == NULL) {
45177+ kfree(buf);
45178+ return -ENOMEM;
45179+ }
45180+
45181+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45182+ buf[1] = enable ? 1 : 0;
45183+
45184+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45185+
45186+ kfree(buf);
45187+ kfree(state);
45188+ return retval;
45189 }
45190
45191 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45192 {
45193- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45194- char state[3];
45195+ char *query;
45196+ char *state;
45197 int ret;
45198+ query = kmalloc(1, GFP_KERNEL);
45199+ if (query == NULL)
45200+ return -ENOMEM;
45201+ state = kmalloc(3, GFP_KERNEL);
45202+ if (state == NULL) {
45203+ kfree(query);
45204+ return -ENOMEM;
45205+ }
45206+
45207+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45208
45209 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45210
45211- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45212- sizeof(state), 0);
45213+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45214 if (ret < 0) {
45215 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45216 "state info\n");
45217@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45218
45219 /* Copy this pointer as we are gonna need it in the release phase */
45220 cinergyt2_usb_device = adap->dev;
45221-
45222+ kfree(query);
45223+ kfree(state);
45224 return 0;
45225 }
45226
45227@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45228 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45229 {
45230 struct cinergyt2_state *st = d->priv;
45231- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45232+ u8 *key, *cmd;
45233 int i;
45234
45235+ cmd = kmalloc(1, GFP_KERNEL);
45236+ if (cmd == NULL)
45237+ return -EINVAL;
45238+ key = kzalloc(5, GFP_KERNEL);
45239+ if (key == NULL) {
45240+ kfree(cmd);
45241+ return -EINVAL;
45242+ }
45243+
45244+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45245+
45246 *state = REMOTE_NO_KEY_PRESSED;
45247
45248- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45249+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45250 if (key[4] == 0xff) {
45251 /* key repeat */
45252 st->rc_counter++;
45253@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45254 *event = d->last_event;
45255 deb_rc("repeat key, event %x\n",
45256 *event);
45257- return 0;
45258+ goto out;
45259 }
45260 }
45261 deb_rc("repeated key (non repeatable)\n");
45262 }
45263- return 0;
45264+ goto out;
45265 }
45266
45267 /* hack to pass checksum on the custom field */
45268@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45269
45270 deb_rc("key: %*ph\n", 5, key);
45271 }
45272+out:
45273+ kfree(cmd);
45274+ kfree(key);
45275 return 0;
45276 }
45277
45278diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45279index c890fe4..f9b2ae6 100644
45280--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45281+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45282@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45283 fe_status_t *status)
45284 {
45285 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45286- struct dvbt_get_status_msg result;
45287- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45288+ struct dvbt_get_status_msg *result;
45289+ u8 *cmd;
45290 int ret;
45291
45292- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45293- sizeof(result), 0);
45294+ cmd = kmalloc(1, GFP_KERNEL);
45295+ if (cmd == NULL)
45296+ return -ENOMEM;
45297+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45298+ if (result == NULL) {
45299+ kfree(cmd);
45300+ return -ENOMEM;
45301+ }
45302+
45303+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45304+
45305+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45306+ sizeof(*result), 0);
45307 if (ret < 0)
45308- return ret;
45309+ goto out;
45310
45311 *status = 0;
45312
45313- if (0xffff - le16_to_cpu(result.gain) > 30)
45314+ if (0xffff - le16_to_cpu(result->gain) > 30)
45315 *status |= FE_HAS_SIGNAL;
45316- if (result.lock_bits & (1 << 6))
45317+ if (result->lock_bits & (1 << 6))
45318 *status |= FE_HAS_LOCK;
45319- if (result.lock_bits & (1 << 5))
45320+ if (result->lock_bits & (1 << 5))
45321 *status |= FE_HAS_SYNC;
45322- if (result.lock_bits & (1 << 4))
45323+ if (result->lock_bits & (1 << 4))
45324 *status |= FE_HAS_CARRIER;
45325- if (result.lock_bits & (1 << 1))
45326+ if (result->lock_bits & (1 << 1))
45327 *status |= FE_HAS_VITERBI;
45328
45329 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45330 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45331 *status &= ~FE_HAS_LOCK;
45332
45333- return 0;
45334+out:
45335+ kfree(cmd);
45336+ kfree(result);
45337+ return ret;
45338 }
45339
45340 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45341 {
45342 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45343- struct dvbt_get_status_msg status;
45344- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45345+ struct dvbt_get_status_msg *status;
45346+ char *cmd;
45347 int ret;
45348
45349- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45350- sizeof(status), 0);
45351+ cmd = kmalloc(1, GFP_KERNEL);
45352+ if (cmd == NULL)
45353+ return -ENOMEM;
45354+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45355+ if (status == NULL) {
45356+ kfree(cmd);
45357+ return -ENOMEM;
45358+ }
45359+
45360+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45361+
45362+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45363+ sizeof(*status), 0);
45364 if (ret < 0)
45365- return ret;
45366+ goto out;
45367
45368- *ber = le32_to_cpu(status.viterbi_error_rate);
45369+ *ber = le32_to_cpu(status->viterbi_error_rate);
45370+out:
45371+ kfree(cmd);
45372+ kfree(status);
45373 return 0;
45374 }
45375
45376 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45377 {
45378 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45379- struct dvbt_get_status_msg status;
45380- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45381+ struct dvbt_get_status_msg *status;
45382+ u8 *cmd;
45383 int ret;
45384
45385- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45386- sizeof(status), 0);
45387+ cmd = kmalloc(1, GFP_KERNEL);
45388+ if (cmd == NULL)
45389+ return -ENOMEM;
45390+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45391+ if (status == NULL) {
45392+ kfree(cmd);
45393+ return -ENOMEM;
45394+ }
45395+
45396+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45397+
45398+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45399+ sizeof(*status), 0);
45400 if (ret < 0) {
45401 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45402 ret);
45403- return ret;
45404+ goto out;
45405 }
45406- *unc = le32_to_cpu(status.uncorrected_block_count);
45407- return 0;
45408+ *unc = le32_to_cpu(status->uncorrected_block_count);
45409+
45410+out:
45411+ kfree(cmd);
45412+ kfree(status);
45413+ return ret;
45414 }
45415
45416 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45417 u16 *strength)
45418 {
45419 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45420- struct dvbt_get_status_msg status;
45421- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45422+ struct dvbt_get_status_msg *status;
45423+ char *cmd;
45424 int ret;
45425
45426- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45427- sizeof(status), 0);
45428+ cmd = kmalloc(1, GFP_KERNEL);
45429+ if (cmd == NULL)
45430+ return -ENOMEM;
45431+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45432+ if (status == NULL) {
45433+ kfree(cmd);
45434+ return -ENOMEM;
45435+ }
45436+
45437+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45438+
45439+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45440+ sizeof(*status), 0);
45441 if (ret < 0) {
45442 err("cinergyt2_fe_read_signal_strength() Failed!"
45443 " (Error=%d)\n", ret);
45444- return ret;
45445+ goto out;
45446 }
45447- *strength = (0xffff - le16_to_cpu(status.gain));
45448+ *strength = (0xffff - le16_to_cpu(status->gain));
45449+
45450+out:
45451+ kfree(cmd);
45452+ kfree(status);
45453 return 0;
45454 }
45455
45456 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45457 {
45458 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45459- struct dvbt_get_status_msg status;
45460- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45461+ struct dvbt_get_status_msg *status;
45462+ char *cmd;
45463 int ret;
45464
45465- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45466- sizeof(status), 0);
45467+ cmd = kmalloc(1, GFP_KERNEL);
45468+ if (cmd == NULL)
45469+ return -ENOMEM;
45470+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45471+ if (status == NULL) {
45472+ kfree(cmd);
45473+ return -ENOMEM;
45474+ }
45475+
45476+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45477+
45478+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45479+ sizeof(*status), 0);
45480 if (ret < 0) {
45481 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45482- return ret;
45483+ goto out;
45484 }
45485- *snr = (status.snr << 8) | status.snr;
45486- return 0;
45487+ *snr = (status->snr << 8) | status->snr;
45488+
45489+out:
45490+ kfree(cmd);
45491+ kfree(status);
45492+ return ret;
45493 }
45494
45495 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45496@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45497 {
45498 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45499 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45500- struct dvbt_set_parameters_msg param;
45501- char result[2];
45502+ struct dvbt_set_parameters_msg *param;
45503+ char *result;
45504 int err;
45505
45506- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45507- param.tps = cpu_to_le16(compute_tps(fep));
45508- param.freq = cpu_to_le32(fep->frequency / 1000);
45509- param.flags = 0;
45510+ result = kmalloc(2, GFP_KERNEL);
45511+ if (result == NULL)
45512+ return -ENOMEM;
45513+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45514+ if (param == NULL) {
45515+ kfree(result);
45516+ return -ENOMEM;
45517+ }
45518+
45519+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45520+ param->tps = cpu_to_le16(compute_tps(fep));
45521+ param->freq = cpu_to_le32(fep->frequency / 1000);
45522+ param->flags = 0;
45523
45524 switch (fep->bandwidth_hz) {
45525 default:
45526 case 8000000:
45527- param.bandwidth = 8;
45528+ param->bandwidth = 8;
45529 break;
45530 case 7000000:
45531- param.bandwidth = 7;
45532+ param->bandwidth = 7;
45533 break;
45534 case 6000000:
45535- param.bandwidth = 6;
45536+ param->bandwidth = 6;
45537 break;
45538 }
45539
45540 err = dvb_usb_generic_rw(state->d,
45541- (char *)&param, sizeof(param),
45542- result, sizeof(result), 0);
45543+ (char *)param, sizeof(*param),
45544+ result, 2, 0);
45545 if (err < 0)
45546 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45547
45548- return (err < 0) ? err : 0;
45549+ kfree(result);
45550+ kfree(param);
45551+ return err;
45552 }
45553
45554 static void cinergyt2_fe_release(struct dvb_frontend *fe)
45555diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45556index 733a7ff..f8b52e3 100644
45557--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45558+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45559@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45560
45561 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45562 {
45563- struct hexline hx;
45564- u8 reset;
45565+ struct hexline *hx;
45566+ u8 *reset;
45567 int ret,pos=0;
45568
45569+ reset = kmalloc(1, GFP_KERNEL);
45570+ if (reset == NULL)
45571+ return -ENOMEM;
45572+
45573+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45574+ if (hx == NULL) {
45575+ kfree(reset);
45576+ return -ENOMEM;
45577+ }
45578+
45579 /* stop the CPU */
45580- reset = 1;
45581- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45582+ reset[0] = 1;
45583+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45584 err("could not stop the USB controller CPU.");
45585
45586- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45587- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45588- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45589+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45590+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45591+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45592
45593- if (ret != hx.len) {
45594+ if (ret != hx->len) {
45595 err("error while transferring firmware "
45596 "(transferred size: %d, block size: %d)",
45597- ret,hx.len);
45598+ ret,hx->len);
45599 ret = -EINVAL;
45600 break;
45601 }
45602 }
45603 if (ret < 0) {
45604 err("firmware download failed at %d with %d",pos,ret);
45605+ kfree(reset);
45606+ kfree(hx);
45607 return ret;
45608 }
45609
45610 if (ret == 0) {
45611 /* restart the CPU */
45612- reset = 0;
45613- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45614+ reset[0] = 0;
45615+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45616 err("could not restart the USB controller CPU.");
45617 ret = -EINVAL;
45618 }
45619 } else
45620 ret = -EIO;
45621
45622+ kfree(reset);
45623+ kfree(hx);
45624+
45625 return ret;
45626 }
45627 EXPORT_SYMBOL(usb_cypress_load_firmware);
45628diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45629index 1a3df10..57997a5 100644
45630--- a/drivers/media/usb/dvb-usb/dw2102.c
45631+++ b/drivers/media/usb/dvb-usb/dw2102.c
45632@@ -118,7 +118,7 @@ struct su3000_state {
45633
45634 struct s6x0_state {
45635 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45636-};
45637+} __no_const;
45638
45639 /* debug */
45640 static int dvb_usb_dw2102_debug;
45641diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45642index 5801ae7..83f71fa 100644
45643--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45644+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45645@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45646 static int technisat_usb2_i2c_access(struct usb_device *udev,
45647 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45648 {
45649- u8 b[64];
45650- int ret, actual_length;
45651+ u8 *b = kmalloc(64, GFP_KERNEL);
45652+ int ret, actual_length, error = 0;
45653+
45654+ if (b == NULL)
45655+ return -ENOMEM;
45656
45657 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45658 debug_dump(tx, txlen, deb_i2c);
45659@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45660
45661 if (ret < 0) {
45662 err("i2c-error: out failed %02x = %d", device_addr, ret);
45663- return -ENODEV;
45664+ error = -ENODEV;
45665+ goto out;
45666 }
45667
45668 ret = usb_bulk_msg(udev,
45669@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45670 b, 64, &actual_length, 1000);
45671 if (ret < 0) {
45672 err("i2c-error: in failed %02x = %d", device_addr, ret);
45673- return -ENODEV;
45674+ error = -ENODEV;
45675+ goto out;
45676 }
45677
45678 if (b[0] != I2C_STATUS_OK) {
45679@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45680 /* handle tuner-i2c-nak */
45681 if (!(b[0] == I2C_STATUS_NAK &&
45682 device_addr == 0x60
45683- /* && device_is_technisat_usb2 */))
45684- return -ENODEV;
45685+ /* && device_is_technisat_usb2 */)) {
45686+ error = -ENODEV;
45687+ goto out;
45688+ }
45689 }
45690
45691 deb_i2c("status: %d, ", b[0]);
45692@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45693
45694 deb_i2c("\n");
45695
45696- return 0;
45697+out:
45698+ kfree(b);
45699+ return error;
45700 }
45701
45702 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45703@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45704 {
45705 int ret;
45706
45707- u8 led[8] = {
45708- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45709- 0
45710- };
45711+ u8 *led = kzalloc(8, GFP_KERNEL);
45712+
45713+ if (led == NULL)
45714+ return -ENOMEM;
45715
45716 if (disable_led_control && state != TECH_LED_OFF)
45717 return 0;
45718
45719+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45720+
45721 switch (state) {
45722 case TECH_LED_ON:
45723 led[1] = 0x82;
45724@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45725 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45726 USB_TYPE_VENDOR | USB_DIR_OUT,
45727 0, 0,
45728- led, sizeof(led), 500);
45729+ led, 8, 500);
45730
45731 mutex_unlock(&d->i2c_mutex);
45732+
45733+ kfree(led);
45734+
45735 return ret;
45736 }
45737
45738 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45739 {
45740 int ret;
45741- u8 b = 0;
45742+ u8 *b = kzalloc(1, GFP_KERNEL);
45743+
45744+ if (b == NULL)
45745+ return -ENOMEM;
45746
45747 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45748 return -EAGAIN;
45749@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45750 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45751 USB_TYPE_VENDOR | USB_DIR_OUT,
45752 (red << 8) | green, 0,
45753- &b, 1, 500);
45754+ b, 1, 500);
45755
45756 mutex_unlock(&d->i2c_mutex);
45757
45758+ kfree(b);
45759+
45760 return ret;
45761 }
45762
45763@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45764 struct dvb_usb_device_description **desc, int *cold)
45765 {
45766 int ret;
45767- u8 version[3];
45768+ u8 *version = kmalloc(3, GFP_KERNEL);
45769
45770 /* first select the interface */
45771 if (usb_set_interface(udev, 0, 1) != 0)
45772@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45773
45774 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45775
45776+ if (version == NULL)
45777+ return 0;
45778+
45779 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45780 GET_VERSION_INFO_VENDOR_REQUEST,
45781 USB_TYPE_VENDOR | USB_DIR_IN,
45782 0, 0,
45783- version, sizeof(version), 500);
45784+ version, 3, 500);
45785
45786 if (ret < 0)
45787 *cold = 1;
45788@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45789 *cold = 0;
45790 }
45791
45792+ kfree(version);
45793+
45794 return 0;
45795 }
45796
45797@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45798
45799 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45800 {
45801- u8 buf[62], *b;
45802+ u8 *buf, *b;
45803 int ret;
45804 struct ir_raw_event ev;
45805
45806+ buf = kmalloc(62, GFP_KERNEL);
45807+
45808+ if (buf == NULL)
45809+ return -ENOMEM;
45810+
45811 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45812 buf[1] = 0x08;
45813 buf[2] = 0x8f;
45814@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45815 GET_IR_DATA_VENDOR_REQUEST,
45816 USB_TYPE_VENDOR | USB_DIR_IN,
45817 0x8080, 0,
45818- buf, sizeof(buf), 500);
45819+ buf, 62, 500);
45820
45821 unlock:
45822 mutex_unlock(&d->i2c_mutex);
45823
45824- if (ret < 0)
45825+ if (ret < 0) {
45826+ kfree(buf);
45827 return ret;
45828+ }
45829
45830- if (ret == 1)
45831+ if (ret == 1) {
45832+ kfree(buf);
45833 return 0; /* no key pressed */
45834+ }
45835
45836 /* decoding */
45837 b = buf+1;
45838@@ -656,6 +689,8 @@ unlock:
45839
45840 ir_raw_event_handle(d->rc_dev);
45841
45842+ kfree(buf);
45843+
45844 return 1;
45845 }
45846
45847diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45848index af63543..0436f20 100644
45849--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45850+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45851@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45852 * by passing a very big num_planes value */
45853 uplane = compat_alloc_user_space(num_planes *
45854 sizeof(struct v4l2_plane));
45855- kp->m.planes = (__force struct v4l2_plane *)uplane;
45856+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
45857
45858 while (--num_planes >= 0) {
45859 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
45860@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45861 if (num_planes == 0)
45862 return 0;
45863
45864- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
45865+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
45866 if (get_user(p, &up->m.planes))
45867 return -EFAULT;
45868 uplane32 = compat_ptr(p);
45869@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
45870 get_user(kp->flags, &up->flags) ||
45871 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
45872 return -EFAULT;
45873- kp->base = (__force void *)compat_ptr(tmp);
45874+ kp->base = (__force_kernel void *)compat_ptr(tmp);
45875 return 0;
45876 }
45877
45878@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45879 n * sizeof(struct v4l2_ext_control32)))
45880 return -EFAULT;
45881 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
45882- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
45883+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
45884 while (--n >= 0) {
45885 u32 id;
45886
45887@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45888 {
45889 struct v4l2_ext_control32 __user *ucontrols;
45890 struct v4l2_ext_control __user *kcontrols =
45891- (__force struct v4l2_ext_control __user *)kp->controls;
45892+ (struct v4l2_ext_control __force_user *)kp->controls;
45893 int n = kp->count;
45894 compat_caddr_t p;
45895
45896@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
45897 get_user(tmp, &up->edid) ||
45898 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
45899 return -EFAULT;
45900- kp->edid = (__force u8 *)compat_ptr(tmp);
45901+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
45902 return 0;
45903 }
45904
45905diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
45906index 015f92a..59e311e 100644
45907--- a/drivers/media/v4l2-core/v4l2-device.c
45908+++ b/drivers/media/v4l2-core/v4l2-device.c
45909@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
45910 EXPORT_SYMBOL_GPL(v4l2_device_put);
45911
45912 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
45913- atomic_t *instance)
45914+ atomic_unchecked_t *instance)
45915 {
45916- int num = atomic_inc_return(instance) - 1;
45917+ int num = atomic_inc_return_unchecked(instance) - 1;
45918 int len = strlen(basename);
45919
45920 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
45921diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
45922index faac2f4..e39dcd9 100644
45923--- a/drivers/media/v4l2-core/v4l2-ioctl.c
45924+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
45925@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
45926 struct file *file, void *fh, void *p);
45927 } u;
45928 void (*debug)(const void *arg, bool write_only);
45929-};
45930+} __do_const;
45931+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
45932
45933 /* This control needs a priority check */
45934 #define INFO_FL_PRIO (1 << 0)
45935@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
45936 struct video_device *vfd = video_devdata(file);
45937 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
45938 bool write_only = false;
45939- struct v4l2_ioctl_info default_info;
45940+ v4l2_ioctl_info_no_const default_info;
45941 const struct v4l2_ioctl_info *info;
45942 void *fh = file->private_data;
45943 struct v4l2_fh *vfh = NULL;
45944@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45945 ret = -EINVAL;
45946 break;
45947 }
45948- *user_ptr = (void __user *)buf->m.planes;
45949+ *user_ptr = (void __force_user *)buf->m.planes;
45950 *kernel_ptr = (void **)&buf->m.planes;
45951 *array_size = sizeof(struct v4l2_plane) * buf->length;
45952 ret = 1;
45953@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45954 ret = -EINVAL;
45955 break;
45956 }
45957- *user_ptr = (void __user *)edid->edid;
45958+ *user_ptr = (void __force_user *)edid->edid;
45959 *kernel_ptr = (void **)&edid->edid;
45960 *array_size = edid->blocks * 128;
45961 ret = 1;
45962@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45963 ret = -EINVAL;
45964 break;
45965 }
45966- *user_ptr = (void __user *)ctrls->controls;
45967+ *user_ptr = (void __force_user *)ctrls->controls;
45968 *kernel_ptr = (void **)&ctrls->controls;
45969 *array_size = sizeof(struct v4l2_ext_control)
45970 * ctrls->count;
45971@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
45972 }
45973
45974 if (has_array_args) {
45975- *kernel_ptr = (void __force *)user_ptr;
45976+ *kernel_ptr = (void __force_kernel *)user_ptr;
45977 if (copy_to_user(user_ptr, mbuf, array_size))
45978 err = -EFAULT;
45979 goto out_array_args;
45980diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
45981index 24696f5..3637780 100644
45982--- a/drivers/memory/omap-gpmc.c
45983+++ b/drivers/memory/omap-gpmc.c
45984@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
45985 };
45986
45987 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
45988-static struct irq_chip gpmc_irq_chip;
45989 static int gpmc_irq_start;
45990
45991 static struct resource gpmc_mem_root;
45992@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
45993
45994 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
45995
45996+static struct irq_chip gpmc_irq_chip = {
45997+ .name = "gpmc",
45998+ .irq_startup = gpmc_irq_noop_ret,
45999+ .irq_enable = gpmc_irq_enable,
46000+ .irq_disable = gpmc_irq_disable,
46001+ .irq_shutdown = gpmc_irq_noop,
46002+ .irq_ack = gpmc_irq_noop,
46003+ .irq_mask = gpmc_irq_noop,
46004+ .irq_unmask = gpmc_irq_noop,
46005+};
46006+
46007 static int gpmc_setup_irq(void)
46008 {
46009 int i;
46010@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
46011 return gpmc_irq_start;
46012 }
46013
46014- gpmc_irq_chip.name = "gpmc";
46015- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
46016- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
46017- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
46018- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
46019- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
46020- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46021- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46022-
46023 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46024 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46025
46026diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46027index 187f836..679544b 100644
46028--- a/drivers/message/fusion/mptbase.c
46029+++ b/drivers/message/fusion/mptbase.c
46030@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46031 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46032 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46033
46034+#ifdef CONFIG_GRKERNSEC_HIDESYM
46035+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46036+#else
46037 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46038 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46039+#endif
46040+
46041 /*
46042 * Rounding UP to nearest 4-kB boundary here...
46043 */
46044@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46045 ioc->facts.GlobalCredits);
46046
46047 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46048+#ifdef CONFIG_GRKERNSEC_HIDESYM
46049+ NULL, NULL);
46050+#else
46051 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46052+#endif
46053 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46054 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46055 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
46056diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46057index 5bdaae1..eced16f 100644
46058--- a/drivers/message/fusion/mptsas.c
46059+++ b/drivers/message/fusion/mptsas.c
46060@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46061 return 0;
46062 }
46063
46064+static inline void
46065+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46066+{
46067+ if (phy_info->port_details) {
46068+ phy_info->port_details->rphy = rphy;
46069+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46070+ ioc->name, rphy));
46071+ }
46072+
46073+ if (rphy) {
46074+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46075+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46076+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46077+ ioc->name, rphy, rphy->dev.release));
46078+ }
46079+}
46080+
46081 /* no mutex */
46082 static void
46083 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46084@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46085 return NULL;
46086 }
46087
46088-static inline void
46089-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46090-{
46091- if (phy_info->port_details) {
46092- phy_info->port_details->rphy = rphy;
46093- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46094- ioc->name, rphy));
46095- }
46096-
46097- if (rphy) {
46098- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46099- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46100- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46101- ioc->name, rphy, rphy->dev.release));
46102- }
46103-}
46104-
46105 static inline struct sas_port *
46106 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46107 {
46108diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46109index b7d87cd..3fb36da 100644
46110--- a/drivers/message/i2o/i2o_proc.c
46111+++ b/drivers/message/i2o/i2o_proc.c
46112@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46113 "Array Controller Device"
46114 };
46115
46116-static char *chtostr(char *tmp, u8 *chars, int n)
46117-{
46118- tmp[0] = 0;
46119- return strncat(tmp, (char *)chars, n);
46120-}
46121-
46122 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46123 char *group)
46124 {
46125@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46126 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46127 {
46128 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46129- static u32 work32[5];
46130- static u8 *work8 = (u8 *) work32;
46131- static u16 *work16 = (u16 *) work32;
46132+ u32 work32[5];
46133+ u8 *work8 = (u8 *) work32;
46134+ u16 *work16 = (u16 *) work32;
46135 int token;
46136 u32 hwcap;
46137
46138@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46139 } *result;
46140
46141 i2o_exec_execute_ddm_table ddm_table;
46142- char tmp[28 + 1];
46143
46144 result = kmalloc(sizeof(*result), GFP_KERNEL);
46145 if (!result)
46146@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46147
46148 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46149 seq_printf(seq, "%-#8x", ddm_table.module_id);
46150- seq_printf(seq, "%-29s",
46151- chtostr(tmp, ddm_table.module_name_version, 28));
46152+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46153 seq_printf(seq, "%9d ", ddm_table.data_size);
46154 seq_printf(seq, "%8d", ddm_table.code_size);
46155
46156@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46157
46158 i2o_driver_result_table *result;
46159 i2o_driver_store_table *dst;
46160- char tmp[28 + 1];
46161
46162 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46163 if (result == NULL)
46164@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46165
46166 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46167 seq_printf(seq, "%-#8x", dst->module_id);
46168- seq_printf(seq, "%-29s",
46169- chtostr(tmp, dst->module_name_version, 28));
46170- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46171+ seq_printf(seq, "%-.28s", dst->module_name_version);
46172+ seq_printf(seq, "%-.8s", dst->date);
46173 seq_printf(seq, "%8d ", dst->module_size);
46174 seq_printf(seq, "%8d ", dst->mpb_size);
46175 seq_printf(seq, "0x%04x", dst->module_flags);
46176@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46177 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46178 {
46179 struct i2o_device *d = (struct i2o_device *)seq->private;
46180- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46181+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46182 // == (allow) 512d bytes (max)
46183- static u16 *work16 = (u16 *) work32;
46184+ u16 *work16 = (u16 *) work32;
46185 int token;
46186- char tmp[16 + 1];
46187
46188 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46189
46190@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46191 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46192 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46193 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46194- seq_printf(seq, "Vendor info : %s\n",
46195- chtostr(tmp, (u8 *) (work32 + 2), 16));
46196- seq_printf(seq, "Product info : %s\n",
46197- chtostr(tmp, (u8 *) (work32 + 6), 16));
46198- seq_printf(seq, "Description : %s\n",
46199- chtostr(tmp, (u8 *) (work32 + 10), 16));
46200- seq_printf(seq, "Product rev. : %s\n",
46201- chtostr(tmp, (u8 *) (work32 + 14), 8));
46202+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46203+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46204+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46205+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46206
46207 seq_printf(seq, "Serial number : ");
46208 print_serial_number(seq, (u8 *) (work32 + 16),
46209@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46210 u8 pad[256]; // allow up to 256 byte (max) serial number
46211 } result;
46212
46213- char tmp[24 + 1];
46214-
46215 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46216
46217 if (token < 0) {
46218@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46219 }
46220
46221 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46222- seq_printf(seq, "Module name : %s\n",
46223- chtostr(tmp, result.module_name, 24));
46224- seq_printf(seq, "Module revision : %s\n",
46225- chtostr(tmp, result.module_rev, 8));
46226+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46227+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46228
46229 seq_printf(seq, "Serial number : ");
46230 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46231@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46232 u8 instance_number[4];
46233 } result;
46234
46235- char tmp[64 + 1];
46236-
46237 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46238
46239 if (token < 0) {
46240@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46241 return 0;
46242 }
46243
46244- seq_printf(seq, "Device name : %s\n",
46245- chtostr(tmp, result.device_name, 64));
46246- seq_printf(seq, "Service name : %s\n",
46247- chtostr(tmp, result.service_name, 64));
46248- seq_printf(seq, "Physical name : %s\n",
46249- chtostr(tmp, result.physical_location, 64));
46250- seq_printf(seq, "Instance number : %s\n",
46251- chtostr(tmp, result.instance_number, 4));
46252+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46253+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46254+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46255+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46256
46257 return 0;
46258 }
46259@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46260 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46261 {
46262 struct i2o_device *d = (struct i2o_device *)seq->private;
46263- static u32 work32[12];
46264- static u16 *work16 = (u16 *) work32;
46265- static u8 *work8 = (u8 *) work32;
46266+ u32 work32[12];
46267+ u16 *work16 = (u16 *) work32;
46268+ u8 *work8 = (u8 *) work32;
46269 int token;
46270
46271 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
46272diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46273index 92752fb..a7494f6 100644
46274--- a/drivers/message/i2o/iop.c
46275+++ b/drivers/message/i2o/iop.c
46276@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46277
46278 spin_lock_irqsave(&c->context_list_lock, flags);
46279
46280- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46281- atomic_inc(&c->context_list_counter);
46282+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46283+ atomic_inc_unchecked(&c->context_list_counter);
46284
46285- entry->context = atomic_read(&c->context_list_counter);
46286+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46287
46288 list_add(&entry->list, &c->context_list);
46289
46290@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46291
46292 #if BITS_PER_LONG == 64
46293 spin_lock_init(&c->context_list_lock);
46294- atomic_set(&c->context_list_counter, 0);
46295+ atomic_set_unchecked(&c->context_list_counter, 0);
46296 INIT_LIST_HEAD(&c->context_list);
46297 #endif
46298
46299diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46300index 9a8e185..27ff17d 100644
46301--- a/drivers/mfd/ab8500-debugfs.c
46302+++ b/drivers/mfd/ab8500-debugfs.c
46303@@ -100,7 +100,7 @@ static int irq_last;
46304 static u32 *irq_count;
46305 static int num_irqs;
46306
46307-static struct device_attribute **dev_attr;
46308+static device_attribute_no_const **dev_attr;
46309 static char **event_name;
46310
46311 static u8 avg_sample = SAMPLE_16;
46312diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46313index c880c89..45a7c68 100644
46314--- a/drivers/mfd/max8925-i2c.c
46315+++ b/drivers/mfd/max8925-i2c.c
46316@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46317 const struct i2c_device_id *id)
46318 {
46319 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46320- static struct max8925_chip *chip;
46321+ struct max8925_chip *chip;
46322 struct device_node *node = client->dev.of_node;
46323
46324 if (node && !pdata) {
46325diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46326index 7612d89..70549c2 100644
46327--- a/drivers/mfd/tps65910.c
46328+++ b/drivers/mfd/tps65910.c
46329@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46330 struct tps65910_platform_data *pdata)
46331 {
46332 int ret = 0;
46333- static struct regmap_irq_chip *tps6591x_irqs_chip;
46334+ struct regmap_irq_chip *tps6591x_irqs_chip;
46335
46336 if (!irq) {
46337 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46338diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46339index 1b772ef..01e77d33 100644
46340--- a/drivers/mfd/twl4030-irq.c
46341+++ b/drivers/mfd/twl4030-irq.c
46342@@ -34,6 +34,7 @@
46343 #include <linux/of.h>
46344 #include <linux/irqdomain.h>
46345 #include <linux/i2c/twl.h>
46346+#include <asm/pgtable.h>
46347
46348 #include "twl-core.h"
46349
46350@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46351 * Install an irq handler for each of the SIH modules;
46352 * clone dummy irq_chip since PIH can't *do* anything
46353 */
46354- twl4030_irq_chip = dummy_irq_chip;
46355- twl4030_irq_chip.name = "twl4030";
46356+ pax_open_kernel();
46357+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46358+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46359
46360- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46361+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46362+ pax_close_kernel();
46363
46364 for (i = irq_base; i < irq_end; i++) {
46365 irq_set_chip_and_handler(i, &twl4030_irq_chip,
46366diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46367index 464419b..64bae8d 100644
46368--- a/drivers/misc/c2port/core.c
46369+++ b/drivers/misc/c2port/core.c
46370@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46371 goto error_idr_alloc;
46372 c2dev->id = ret;
46373
46374- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46375+ pax_open_kernel();
46376+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46377+ pax_close_kernel();
46378
46379 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46380 "c2port%d", c2dev->id);
46381diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46382index 8385177..2f54635 100644
46383--- a/drivers/misc/eeprom/sunxi_sid.c
46384+++ b/drivers/misc/eeprom/sunxi_sid.c
46385@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46386
46387 platform_set_drvdata(pdev, sid_data);
46388
46389- sid_bin_attr.size = sid_data->keysize;
46390+ pax_open_kernel();
46391+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46392+ pax_close_kernel();
46393 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46394 return -ENODEV;
46395
46396diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46397index 36f5d52..32311c3 100644
46398--- a/drivers/misc/kgdbts.c
46399+++ b/drivers/misc/kgdbts.c
46400@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46401 char before[BREAK_INSTR_SIZE];
46402 char after[BREAK_INSTR_SIZE];
46403
46404- probe_kernel_read(before, (char *)kgdbts_break_test,
46405+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46406 BREAK_INSTR_SIZE);
46407 init_simple_test();
46408 ts.tst = plant_and_detach_test;
46409@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46410 /* Activate test with initial breakpoint */
46411 if (!is_early)
46412 kgdb_breakpoint();
46413- probe_kernel_read(after, (char *)kgdbts_break_test,
46414+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46415 BREAK_INSTR_SIZE);
46416 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46417 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
46418diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46419index 3ef4627..8d00486 100644
46420--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46421+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46422@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46423 * the lid is closed. This leads to interrupts as soon as a little move
46424 * is done.
46425 */
46426- atomic_inc(&lis3->count);
46427+ atomic_inc_unchecked(&lis3->count);
46428
46429 wake_up_interruptible(&lis3->misc_wait);
46430 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46431@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46432 if (lis3->pm_dev)
46433 pm_runtime_get_sync(lis3->pm_dev);
46434
46435- atomic_set(&lis3->count, 0);
46436+ atomic_set_unchecked(&lis3->count, 0);
46437 return 0;
46438 }
46439
46440@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46441 add_wait_queue(&lis3->misc_wait, &wait);
46442 while (true) {
46443 set_current_state(TASK_INTERRUPTIBLE);
46444- data = atomic_xchg(&lis3->count, 0);
46445+ data = atomic_xchg_unchecked(&lis3->count, 0);
46446 if (data)
46447 break;
46448
46449@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46450 struct lis3lv02d, miscdev);
46451
46452 poll_wait(file, &lis3->misc_wait, wait);
46453- if (atomic_read(&lis3->count))
46454+ if (atomic_read_unchecked(&lis3->count))
46455 return POLLIN | POLLRDNORM;
46456 return 0;
46457 }
46458diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46459index c439c82..1f20f57 100644
46460--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46461+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46462@@ -297,7 +297,7 @@ struct lis3lv02d {
46463 struct input_polled_dev *idev; /* input device */
46464 struct platform_device *pdev; /* platform device */
46465 struct regulator_bulk_data regulators[2];
46466- atomic_t count; /* interrupt count after last read */
46467+ atomic_unchecked_t count; /* interrupt count after last read */
46468 union axis_conversion ac; /* hw -> logical axis */
46469 int mapped_btns[3];
46470
46471diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46472index 2f30bad..c4c13d0 100644
46473--- a/drivers/misc/sgi-gru/gruhandles.c
46474+++ b/drivers/misc/sgi-gru/gruhandles.c
46475@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46476 unsigned long nsec;
46477
46478 nsec = CLKS2NSEC(clks);
46479- atomic_long_inc(&mcs_op_statistics[op].count);
46480- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46481+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46482+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46483 if (mcs_op_statistics[op].max < nsec)
46484 mcs_op_statistics[op].max = nsec;
46485 }
46486diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46487index 4f76359..cdfcb2e 100644
46488--- a/drivers/misc/sgi-gru/gruprocfs.c
46489+++ b/drivers/misc/sgi-gru/gruprocfs.c
46490@@ -32,9 +32,9 @@
46491
46492 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46493
46494-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46495+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46496 {
46497- unsigned long val = atomic_long_read(v);
46498+ unsigned long val = atomic_long_read_unchecked(v);
46499
46500 seq_printf(s, "%16lu %s\n", val, id);
46501 }
46502@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46503
46504 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46505 for (op = 0; op < mcsop_last; op++) {
46506- count = atomic_long_read(&mcs_op_statistics[op].count);
46507- total = atomic_long_read(&mcs_op_statistics[op].total);
46508+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46509+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46510 max = mcs_op_statistics[op].max;
46511 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46512 count ? total / count : 0, max);
46513diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46514index 5c3ce24..4915ccb 100644
46515--- a/drivers/misc/sgi-gru/grutables.h
46516+++ b/drivers/misc/sgi-gru/grutables.h
46517@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46518 * GRU statistics.
46519 */
46520 struct gru_stats_s {
46521- atomic_long_t vdata_alloc;
46522- atomic_long_t vdata_free;
46523- atomic_long_t gts_alloc;
46524- atomic_long_t gts_free;
46525- atomic_long_t gms_alloc;
46526- atomic_long_t gms_free;
46527- atomic_long_t gts_double_allocate;
46528- atomic_long_t assign_context;
46529- atomic_long_t assign_context_failed;
46530- atomic_long_t free_context;
46531- atomic_long_t load_user_context;
46532- atomic_long_t load_kernel_context;
46533- atomic_long_t lock_kernel_context;
46534- atomic_long_t unlock_kernel_context;
46535- atomic_long_t steal_user_context;
46536- atomic_long_t steal_kernel_context;
46537- atomic_long_t steal_context_failed;
46538- atomic_long_t nopfn;
46539- atomic_long_t asid_new;
46540- atomic_long_t asid_next;
46541- atomic_long_t asid_wrap;
46542- atomic_long_t asid_reuse;
46543- atomic_long_t intr;
46544- atomic_long_t intr_cbr;
46545- atomic_long_t intr_tfh;
46546- atomic_long_t intr_spurious;
46547- atomic_long_t intr_mm_lock_failed;
46548- atomic_long_t call_os;
46549- atomic_long_t call_os_wait_queue;
46550- atomic_long_t user_flush_tlb;
46551- atomic_long_t user_unload_context;
46552- atomic_long_t user_exception;
46553- atomic_long_t set_context_option;
46554- atomic_long_t check_context_retarget_intr;
46555- atomic_long_t check_context_unload;
46556- atomic_long_t tlb_dropin;
46557- atomic_long_t tlb_preload_page;
46558- atomic_long_t tlb_dropin_fail_no_asid;
46559- atomic_long_t tlb_dropin_fail_upm;
46560- atomic_long_t tlb_dropin_fail_invalid;
46561- atomic_long_t tlb_dropin_fail_range_active;
46562- atomic_long_t tlb_dropin_fail_idle;
46563- atomic_long_t tlb_dropin_fail_fmm;
46564- atomic_long_t tlb_dropin_fail_no_exception;
46565- atomic_long_t tfh_stale_on_fault;
46566- atomic_long_t mmu_invalidate_range;
46567- atomic_long_t mmu_invalidate_page;
46568- atomic_long_t flush_tlb;
46569- atomic_long_t flush_tlb_gru;
46570- atomic_long_t flush_tlb_gru_tgh;
46571- atomic_long_t flush_tlb_gru_zero_asid;
46572+ atomic_long_unchecked_t vdata_alloc;
46573+ atomic_long_unchecked_t vdata_free;
46574+ atomic_long_unchecked_t gts_alloc;
46575+ atomic_long_unchecked_t gts_free;
46576+ atomic_long_unchecked_t gms_alloc;
46577+ atomic_long_unchecked_t gms_free;
46578+ atomic_long_unchecked_t gts_double_allocate;
46579+ atomic_long_unchecked_t assign_context;
46580+ atomic_long_unchecked_t assign_context_failed;
46581+ atomic_long_unchecked_t free_context;
46582+ atomic_long_unchecked_t load_user_context;
46583+ atomic_long_unchecked_t load_kernel_context;
46584+ atomic_long_unchecked_t lock_kernel_context;
46585+ atomic_long_unchecked_t unlock_kernel_context;
46586+ atomic_long_unchecked_t steal_user_context;
46587+ atomic_long_unchecked_t steal_kernel_context;
46588+ atomic_long_unchecked_t steal_context_failed;
46589+ atomic_long_unchecked_t nopfn;
46590+ atomic_long_unchecked_t asid_new;
46591+ atomic_long_unchecked_t asid_next;
46592+ atomic_long_unchecked_t asid_wrap;
46593+ atomic_long_unchecked_t asid_reuse;
46594+ atomic_long_unchecked_t intr;
46595+ atomic_long_unchecked_t intr_cbr;
46596+ atomic_long_unchecked_t intr_tfh;
46597+ atomic_long_unchecked_t intr_spurious;
46598+ atomic_long_unchecked_t intr_mm_lock_failed;
46599+ atomic_long_unchecked_t call_os;
46600+ atomic_long_unchecked_t call_os_wait_queue;
46601+ atomic_long_unchecked_t user_flush_tlb;
46602+ atomic_long_unchecked_t user_unload_context;
46603+ atomic_long_unchecked_t user_exception;
46604+ atomic_long_unchecked_t set_context_option;
46605+ atomic_long_unchecked_t check_context_retarget_intr;
46606+ atomic_long_unchecked_t check_context_unload;
46607+ atomic_long_unchecked_t tlb_dropin;
46608+ atomic_long_unchecked_t tlb_preload_page;
46609+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46610+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46611+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46612+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46613+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46614+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46615+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46616+ atomic_long_unchecked_t tfh_stale_on_fault;
46617+ atomic_long_unchecked_t mmu_invalidate_range;
46618+ atomic_long_unchecked_t mmu_invalidate_page;
46619+ atomic_long_unchecked_t flush_tlb;
46620+ atomic_long_unchecked_t flush_tlb_gru;
46621+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46622+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46623
46624- atomic_long_t copy_gpa;
46625- atomic_long_t read_gpa;
46626+ atomic_long_unchecked_t copy_gpa;
46627+ atomic_long_unchecked_t read_gpa;
46628
46629- atomic_long_t mesq_receive;
46630- atomic_long_t mesq_receive_none;
46631- atomic_long_t mesq_send;
46632- atomic_long_t mesq_send_failed;
46633- atomic_long_t mesq_noop;
46634- atomic_long_t mesq_send_unexpected_error;
46635- atomic_long_t mesq_send_lb_overflow;
46636- atomic_long_t mesq_send_qlimit_reached;
46637- atomic_long_t mesq_send_amo_nacked;
46638- atomic_long_t mesq_send_put_nacked;
46639- atomic_long_t mesq_page_overflow;
46640- atomic_long_t mesq_qf_locked;
46641- atomic_long_t mesq_qf_noop_not_full;
46642- atomic_long_t mesq_qf_switch_head_failed;
46643- atomic_long_t mesq_qf_unexpected_error;
46644- atomic_long_t mesq_noop_unexpected_error;
46645- atomic_long_t mesq_noop_lb_overflow;
46646- atomic_long_t mesq_noop_qlimit_reached;
46647- atomic_long_t mesq_noop_amo_nacked;
46648- atomic_long_t mesq_noop_put_nacked;
46649- atomic_long_t mesq_noop_page_overflow;
46650+ atomic_long_unchecked_t mesq_receive;
46651+ atomic_long_unchecked_t mesq_receive_none;
46652+ atomic_long_unchecked_t mesq_send;
46653+ atomic_long_unchecked_t mesq_send_failed;
46654+ atomic_long_unchecked_t mesq_noop;
46655+ atomic_long_unchecked_t mesq_send_unexpected_error;
46656+ atomic_long_unchecked_t mesq_send_lb_overflow;
46657+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46658+ atomic_long_unchecked_t mesq_send_amo_nacked;
46659+ atomic_long_unchecked_t mesq_send_put_nacked;
46660+ atomic_long_unchecked_t mesq_page_overflow;
46661+ atomic_long_unchecked_t mesq_qf_locked;
46662+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46663+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46664+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46665+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46666+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46667+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46668+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46669+ atomic_long_unchecked_t mesq_noop_put_nacked;
46670+ atomic_long_unchecked_t mesq_noop_page_overflow;
46671
46672 };
46673
46674@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46675 tghop_invalidate, mcsop_last};
46676
46677 struct mcs_op_statistic {
46678- atomic_long_t count;
46679- atomic_long_t total;
46680+ atomic_long_unchecked_t count;
46681+ atomic_long_unchecked_t total;
46682 unsigned long max;
46683 };
46684
46685@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46686
46687 #define STAT(id) do { \
46688 if (gru_options & OPT_STATS) \
46689- atomic_long_inc(&gru_stats.id); \
46690+ atomic_long_inc_unchecked(&gru_stats.id); \
46691 } while (0)
46692
46693 #ifdef CONFIG_SGI_GRU_DEBUG
46694diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46695index c862cd4..0d176fe 100644
46696--- a/drivers/misc/sgi-xp/xp.h
46697+++ b/drivers/misc/sgi-xp/xp.h
46698@@ -288,7 +288,7 @@ struct xpc_interface {
46699 xpc_notify_func, void *);
46700 void (*received) (short, int, void *);
46701 enum xp_retval (*partid_to_nasids) (short, void *);
46702-};
46703+} __no_const;
46704
46705 extern struct xpc_interface xpc_interface;
46706
46707diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46708index 01be66d..e3a0c7e 100644
46709--- a/drivers/misc/sgi-xp/xp_main.c
46710+++ b/drivers/misc/sgi-xp/xp_main.c
46711@@ -78,13 +78,13 @@ xpc_notloaded(void)
46712 }
46713
46714 struct xpc_interface xpc_interface = {
46715- (void (*)(int))xpc_notloaded,
46716- (void (*)(int))xpc_notloaded,
46717- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46718- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46719+ .connect = (void (*)(int))xpc_notloaded,
46720+ .disconnect = (void (*)(int))xpc_notloaded,
46721+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46722+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46723 void *))xpc_notloaded,
46724- (void (*)(short, int, void *))xpc_notloaded,
46725- (enum xp_retval(*)(short, void *))xpc_notloaded
46726+ .received = (void (*)(short, int, void *))xpc_notloaded,
46727+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46728 };
46729 EXPORT_SYMBOL_GPL(xpc_interface);
46730
46731diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46732index b94d5f7..7f494c5 100644
46733--- a/drivers/misc/sgi-xp/xpc.h
46734+++ b/drivers/misc/sgi-xp/xpc.h
46735@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46736 void (*received_payload) (struct xpc_channel *, void *);
46737 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46738 };
46739+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46740
46741 /* struct xpc_partition act_state values (for XPC HB) */
46742
46743@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46744 /* found in xpc_main.c */
46745 extern struct device *xpc_part;
46746 extern struct device *xpc_chan;
46747-extern struct xpc_arch_operations xpc_arch_ops;
46748+extern xpc_arch_operations_no_const xpc_arch_ops;
46749 extern int xpc_disengage_timelimit;
46750 extern int xpc_disengage_timedout;
46751 extern int xpc_activate_IRQ_rcvd;
46752diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46753index 82dc574..8539ab2 100644
46754--- a/drivers/misc/sgi-xp/xpc_main.c
46755+++ b/drivers/misc/sgi-xp/xpc_main.c
46756@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46757 .notifier_call = xpc_system_die,
46758 };
46759
46760-struct xpc_arch_operations xpc_arch_ops;
46761+xpc_arch_operations_no_const xpc_arch_ops;
46762
46763 /*
46764 * Timer function to enforce the timelimit on the partition disengage.
46765@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46766
46767 if (((die_args->trapnr == X86_TRAP_MF) ||
46768 (die_args->trapnr == X86_TRAP_XF)) &&
46769- !user_mode_vm(die_args->regs))
46770+ !user_mode(die_args->regs))
46771 xpc_die_deactivate();
46772
46773 break;
46774diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46775index 4409d79..d7766d0 100644
46776--- a/drivers/mmc/card/block.c
46777+++ b/drivers/mmc/card/block.c
46778@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46779 if (idata->ic.postsleep_min_us)
46780 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46781
46782- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46783+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46784 err = -EFAULT;
46785 goto cmd_rel_host;
46786 }
46787diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46788index 0d0f7a2..45b8d60 100644
46789--- a/drivers/mmc/host/dw_mmc.h
46790+++ b/drivers/mmc/host/dw_mmc.h
46791@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46792 int (*parse_dt)(struct dw_mci *host);
46793 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46794 struct dw_mci_tuning_data *tuning_data);
46795-};
46796+} __do_const;
46797 #endif /* _DW_MMC_H_ */
46798diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46799index 8232e9a..7776006 100644
46800--- a/drivers/mmc/host/mmci.c
46801+++ b/drivers/mmc/host/mmci.c
46802@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46803 mmc->caps |= MMC_CAP_CMD23;
46804
46805 if (variant->busy_detect) {
46806- mmci_ops.card_busy = mmci_card_busy;
46807+ pax_open_kernel();
46808+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46809+ pax_close_kernel();
46810 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46811 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46812 mmc->max_busy_timeout = 0;
46813diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46814index 7c71dcd..74cb746 100644
46815--- a/drivers/mmc/host/omap_hsmmc.c
46816+++ b/drivers/mmc/host/omap_hsmmc.c
46817@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46818
46819 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46820 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46821- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46822+ pax_open_kernel();
46823+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46824+ pax_close_kernel();
46825 }
46826
46827 pm_runtime_enable(host->dev);
46828diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46829index af1f7c0..00d368a 100644
46830--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46831+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46832@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46833 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46834 }
46835
46836- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46837- sdhci_esdhc_ops.platform_execute_tuning =
46838+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46839+ pax_open_kernel();
46840+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46841 esdhc_executing_tuning;
46842+ pax_close_kernel();
46843+ }
46844
46845 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46846 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46847diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46848index c45b893..fba0144 100644
46849--- a/drivers/mmc/host/sdhci-s3c.c
46850+++ b/drivers/mmc/host/sdhci-s3c.c
46851@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46852 * we can use overriding functions instead of default.
46853 */
46854 if (sc->no_divider) {
46855- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46856- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46857- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46858+ pax_open_kernel();
46859+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46860+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46861+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46862+ pax_close_kernel();
46863 }
46864
46865 /* It supports additional host capabilities if needed */
46866diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46867index 423666b..81ff5eb 100644
46868--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46869+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46870@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46871 size_t totlen = 0, thislen;
46872 int ret = 0;
46873 size_t buflen = 0;
46874- static char *buffer;
46875+ char *buffer;
46876
46877 if (!ECCBUF_SIZE) {
46878 /* We should fall back to a general writev implementation.
46879diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46880index b3b7ca1..5dd4634 100644
46881--- a/drivers/mtd/nand/denali.c
46882+++ b/drivers/mtd/nand/denali.c
46883@@ -24,6 +24,7 @@
46884 #include <linux/slab.h>
46885 #include <linux/mtd/mtd.h>
46886 #include <linux/module.h>
46887+#include <linux/slab.h>
46888
46889 #include "denali.h"
46890
46891diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46892index 4f3851a..f477a23 100644
46893--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46894+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46895@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46896
46897 /* first try to map the upper buffer directly */
46898 if (virt_addr_valid(this->upper_buf) &&
46899- !object_is_on_stack(this->upper_buf)) {
46900+ !object_starts_on_stack(this->upper_buf)) {
46901 sg_init_one(sgl, this->upper_buf, this->upper_len);
46902 ret = dma_map_sg(this->dev, sgl, 1, dr);
46903 if (ret == 0)
46904diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
46905index 51b9d6a..52af9a7 100644
46906--- a/drivers/mtd/nftlmount.c
46907+++ b/drivers/mtd/nftlmount.c
46908@@ -24,6 +24,7 @@
46909 #include <asm/errno.h>
46910 #include <linux/delay.h>
46911 #include <linux/slab.h>
46912+#include <linux/sched.h>
46913 #include <linux/mtd/mtd.h>
46914 #include <linux/mtd/nand.h>
46915 #include <linux/mtd/nftl.h>
46916diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
46917index c23184a..4115c41 100644
46918--- a/drivers/mtd/sm_ftl.c
46919+++ b/drivers/mtd/sm_ftl.c
46920@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
46921 #define SM_CIS_VENDOR_OFFSET 0x59
46922 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
46923 {
46924- struct attribute_group *attr_group;
46925+ attribute_group_no_const *attr_group;
46926 struct attribute **attributes;
46927 struct sm_sysfs_attribute *vendor_attribute;
46928 char *vendor;
46929diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
46930index 7b11243..b3278a3 100644
46931--- a/drivers/net/bonding/bond_netlink.c
46932+++ b/drivers/net/bonding/bond_netlink.c
46933@@ -585,7 +585,7 @@ nla_put_failure:
46934 return -EMSGSIZE;
46935 }
46936
46937-struct rtnl_link_ops bond_link_ops __read_mostly = {
46938+struct rtnl_link_ops bond_link_ops = {
46939 .kind = "bond",
46940 .priv_size = sizeof(struct bonding),
46941 .setup = bond_setup,
46942diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
46943index b3b922a..80bba38 100644
46944--- a/drivers/net/caif/caif_hsi.c
46945+++ b/drivers/net/caif/caif_hsi.c
46946@@ -1444,7 +1444,7 @@ err:
46947 return -ENODEV;
46948 }
46949
46950-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
46951+static struct rtnl_link_ops caif_hsi_link_ops = {
46952 .kind = "cfhsi",
46953 .priv_size = sizeof(struct cfhsi),
46954 .setup = cfhsi_setup,
46955diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
46956index 98d73aa..63ef9da 100644
46957--- a/drivers/net/can/Kconfig
46958+++ b/drivers/net/can/Kconfig
46959@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
46960
46961 config CAN_FLEXCAN
46962 tristate "Support for Freescale FLEXCAN based chips"
46963- depends on ARM || PPC
46964+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
46965 ---help---
46966 Say Y here if you want to support for Freescale FlexCAN.
46967
46968diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
46969index 62ca0e8..3bed607 100644
46970--- a/drivers/net/can/dev.c
46971+++ b/drivers/net/can/dev.c
46972@@ -958,7 +958,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
46973 return -EOPNOTSUPP;
46974 }
46975
46976-static struct rtnl_link_ops can_link_ops __read_mostly = {
46977+static struct rtnl_link_ops can_link_ops = {
46978 .kind = "can",
46979 .maxtype = IFLA_CAN_MAX,
46980 .policy = can_policy,
46981diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
46982index 674f367..ec3a31f 100644
46983--- a/drivers/net/can/vcan.c
46984+++ b/drivers/net/can/vcan.c
46985@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
46986 dev->destructor = free_netdev;
46987 }
46988
46989-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
46990+static struct rtnl_link_ops vcan_link_ops = {
46991 .kind = "vcan",
46992 .setup = vcan_setup,
46993 };
46994diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
46995index 49adbf1..fff7ff8 100644
46996--- a/drivers/net/dummy.c
46997+++ b/drivers/net/dummy.c
46998@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
46999 return 0;
47000 }
47001
47002-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47003+static struct rtnl_link_ops dummy_link_ops = {
47004 .kind = DRV_NAME,
47005 .setup = dummy_setup,
47006 .validate = dummy_validate,
47007diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47008index 0443654..4f0aa18 100644
47009--- a/drivers/net/ethernet/8390/ax88796.c
47010+++ b/drivers/net/ethernet/8390/ax88796.c
47011@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47012 if (ax->plat->reg_offsets)
47013 ei_local->reg_offset = ax->plat->reg_offsets;
47014 else {
47015+ resource_size_t _mem_size = mem_size;
47016+ do_div(_mem_size, 0x18);
47017 ei_local->reg_offset = ax->reg_offsets;
47018 for (ret = 0; ret < 0x18; ret++)
47019- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47020+ ax->reg_offsets[ret] = _mem_size * ret;
47021 }
47022
47023 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
47024diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47025index 760c72c..a99728c 100644
47026--- a/drivers/net/ethernet/altera/altera_tse_main.c
47027+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47028@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
47029 return 0;
47030 }
47031
47032-static struct net_device_ops altera_tse_netdev_ops = {
47033+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47034 .ndo_open = tse_open,
47035 .ndo_stop = tse_shutdown,
47036 .ndo_start_xmit = tse_start_xmit,
47037@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47038 ndev->netdev_ops = &altera_tse_netdev_ops;
47039 altera_tse_set_ethtool_ops(ndev);
47040
47041+ pax_open_kernel();
47042 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47043
47044 if (priv->hash_filter)
47045 altera_tse_netdev_ops.ndo_set_rx_mode =
47046 tse_set_rx_mode_hashfilter;
47047+ pax_close_kernel();
47048
47049 /* Scatter/gather IO is not supported,
47050 * so it is turned off
47051diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47052index 29a0927..5a348e24 100644
47053--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47054+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47055@@ -1122,14 +1122,14 @@ do { \
47056 * operations, everything works on mask values.
47057 */
47058 #define XMDIO_READ(_pdata, _mmd, _reg) \
47059- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47060+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47061 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47062
47063 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47064 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47065
47066 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47067- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47068+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47069 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47070
47071 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47072diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47073index 8a50b01..39c1ad0 100644
47074--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47075+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47076@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47077
47078 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47079
47080- pdata->hw_if.config_dcb_tc(pdata);
47081+ pdata->hw_if->config_dcb_tc(pdata);
47082
47083 return 0;
47084 }
47085@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47086
47087 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47088
47089- pdata->hw_if.config_dcb_pfc(pdata);
47090+ pdata->hw_if->config_dcb_pfc(pdata);
47091
47092 return 0;
47093 }
47094diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47095index a50891f..b26fe24 100644
47096--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47097+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47098@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47099
47100 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47101 {
47102- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47103+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47104 struct xgbe_channel *channel;
47105 struct xgbe_ring *ring;
47106 struct xgbe_ring_data *rdata;
47107@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47108
47109 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47110 {
47111- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47112+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47113 struct xgbe_channel *channel;
47114 struct xgbe_ring *ring;
47115 struct xgbe_ring_desc *rdesc;
47116@@ -624,7 +624,7 @@ err_out:
47117 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47118 {
47119 struct xgbe_prv_data *pdata = channel->pdata;
47120- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47121+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47122 struct xgbe_ring *ring = channel->rx_ring;
47123 struct xgbe_ring_data *rdata;
47124 int i;
47125@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47126 DBGPR("<--xgbe_realloc_rx_buffer\n");
47127 }
47128
47129-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47130-{
47131- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47132-
47133- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47134- desc_if->free_ring_resources = xgbe_free_ring_resources;
47135- desc_if->map_tx_skb = xgbe_map_tx_skb;
47136- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47137- desc_if->unmap_rdata = xgbe_unmap_rdata;
47138- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47139- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47140-
47141- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47142-}
47143+const struct xgbe_desc_if default_xgbe_desc_if = {
47144+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47145+ .free_ring_resources = xgbe_free_ring_resources,
47146+ .map_tx_skb = xgbe_map_tx_skb,
47147+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47148+ .unmap_rdata = xgbe_unmap_rdata,
47149+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47150+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47151+};
47152diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47153index 4c66cd1..1a20aab 100644
47154--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47155+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47156@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47157
47158 static int xgbe_init(struct xgbe_prv_data *pdata)
47159 {
47160- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47161+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47162 int ret;
47163
47164 DBGPR("-->xgbe_init\n");
47165@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47166 return 0;
47167 }
47168
47169-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47170-{
47171- DBGPR("-->xgbe_init_function_ptrs\n");
47172-
47173- hw_if->tx_complete = xgbe_tx_complete;
47174-
47175- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47176- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47177- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47178- hw_if->set_mac_address = xgbe_set_mac_address;
47179-
47180- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47181- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47182-
47183- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47184- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47185- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47186- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47187- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47188-
47189- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47190- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47191-
47192- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47193- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47194- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47195-
47196- hw_if->enable_tx = xgbe_enable_tx;
47197- hw_if->disable_tx = xgbe_disable_tx;
47198- hw_if->enable_rx = xgbe_enable_rx;
47199- hw_if->disable_rx = xgbe_disable_rx;
47200-
47201- hw_if->powerup_tx = xgbe_powerup_tx;
47202- hw_if->powerdown_tx = xgbe_powerdown_tx;
47203- hw_if->powerup_rx = xgbe_powerup_rx;
47204- hw_if->powerdown_rx = xgbe_powerdown_rx;
47205-
47206- hw_if->dev_xmit = xgbe_dev_xmit;
47207- hw_if->dev_read = xgbe_dev_read;
47208- hw_if->enable_int = xgbe_enable_int;
47209- hw_if->disable_int = xgbe_disable_int;
47210- hw_if->init = xgbe_init;
47211- hw_if->exit = xgbe_exit;
47212+const struct xgbe_hw_if default_xgbe_hw_if = {
47213+ .tx_complete = xgbe_tx_complete,
47214+
47215+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47216+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47217+ .add_mac_addresses = xgbe_add_mac_addresses,
47218+ .set_mac_address = xgbe_set_mac_address,
47219+
47220+ .enable_rx_csum = xgbe_enable_rx_csum,
47221+ .disable_rx_csum = xgbe_disable_rx_csum,
47222+
47223+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47224+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47225+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47226+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47227+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47228+
47229+ .read_mmd_regs = xgbe_read_mmd_regs,
47230+ .write_mmd_regs = xgbe_write_mmd_regs,
47231+
47232+ .set_gmii_speed = xgbe_set_gmii_speed,
47233+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47234+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47235+
47236+ .enable_tx = xgbe_enable_tx,
47237+ .disable_tx = xgbe_disable_tx,
47238+ .enable_rx = xgbe_enable_rx,
47239+ .disable_rx = xgbe_disable_rx,
47240+
47241+ .powerup_tx = xgbe_powerup_tx,
47242+ .powerdown_tx = xgbe_powerdown_tx,
47243+ .powerup_rx = xgbe_powerup_rx,
47244+ .powerdown_rx = xgbe_powerdown_rx,
47245+
47246+ .dev_xmit = xgbe_dev_xmit,
47247+ .dev_read = xgbe_dev_read,
47248+ .enable_int = xgbe_enable_int,
47249+ .disable_int = xgbe_disable_int,
47250+ .init = xgbe_init,
47251+ .exit = xgbe_exit,
47252
47253 /* Descriptor related Sequences have to be initialized here */
47254- hw_if->tx_desc_init = xgbe_tx_desc_init;
47255- hw_if->rx_desc_init = xgbe_rx_desc_init;
47256- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47257- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47258- hw_if->is_last_desc = xgbe_is_last_desc;
47259- hw_if->is_context_desc = xgbe_is_context_desc;
47260- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47261+ .tx_desc_init = xgbe_tx_desc_init,
47262+ .rx_desc_init = xgbe_rx_desc_init,
47263+ .tx_desc_reset = xgbe_tx_desc_reset,
47264+ .rx_desc_reset = xgbe_rx_desc_reset,
47265+ .is_last_desc = xgbe_is_last_desc,
47266+ .is_context_desc = xgbe_is_context_desc,
47267+ .tx_start_xmit = xgbe_tx_start_xmit,
47268
47269 /* For FLOW ctrl */
47270- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47271- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47272+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47273+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47274
47275 /* For RX coalescing */
47276- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47277- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47278- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47279- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47280+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47281+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47282+ .usec_to_riwt = xgbe_usec_to_riwt,
47283+ .riwt_to_usec = xgbe_riwt_to_usec,
47284
47285 /* For RX and TX threshold config */
47286- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47287- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47288+ .config_rx_threshold = xgbe_config_rx_threshold,
47289+ .config_tx_threshold = xgbe_config_tx_threshold,
47290
47291 /* For RX and TX Store and Forward Mode config */
47292- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47293- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47294+ .config_rsf_mode = xgbe_config_rsf_mode,
47295+ .config_tsf_mode = xgbe_config_tsf_mode,
47296
47297 /* For TX DMA Operating on Second Frame config */
47298- hw_if->config_osp_mode = xgbe_config_osp_mode;
47299+ .config_osp_mode = xgbe_config_osp_mode,
47300
47301 /* For RX and TX PBL config */
47302- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47303- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47304- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47305- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47306- hw_if->config_pblx8 = xgbe_config_pblx8;
47307+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47308+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47309+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47310+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47311+ .config_pblx8 = xgbe_config_pblx8,
47312
47313 /* For MMC statistics support */
47314- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47315- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47316- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47317+ .tx_mmc_int = xgbe_tx_mmc_int,
47318+ .rx_mmc_int = xgbe_rx_mmc_int,
47319+ .read_mmc_stats = xgbe_read_mmc_stats,
47320
47321 /* For PTP config */
47322- hw_if->config_tstamp = xgbe_config_tstamp;
47323- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47324- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47325- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47326- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47327+ .config_tstamp = xgbe_config_tstamp,
47328+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47329+ .set_tstamp_time = xgbe_set_tstamp_time,
47330+ .get_tstamp_time = xgbe_get_tstamp_time,
47331+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47332
47333 /* For Data Center Bridging config */
47334- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47335- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47336+ .config_dcb_tc = xgbe_config_dcb_tc,
47337+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47338
47339 /* For Receive Side Scaling */
47340- hw_if->enable_rss = xgbe_enable_rss;
47341- hw_if->disable_rss = xgbe_disable_rss;
47342- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47343- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47344-
47345- DBGPR("<--xgbe_init_function_ptrs\n");
47346-}
47347+ .enable_rss = xgbe_enable_rss,
47348+ .disable_rss = xgbe_disable_rss,
47349+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47350+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47351+};
47352diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47353index e5ffb2c..e56d30b 100644
47354--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47355+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47356@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47357 * support, tell it now
47358 */
47359 if (ring->tx.xmit_more)
47360- pdata->hw_if.tx_start_xmit(channel, ring);
47361+ pdata->hw_if->tx_start_xmit(channel, ring);
47362
47363 return NETDEV_TX_BUSY;
47364 }
47365@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47366
47367 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47368 {
47369- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47370+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47371 struct xgbe_channel *channel;
47372 enum xgbe_int int_id;
47373 unsigned int i;
47374@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47375
47376 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47377 {
47378- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47379+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47380 struct xgbe_channel *channel;
47381 enum xgbe_int int_id;
47382 unsigned int i;
47383@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47384 static irqreturn_t xgbe_isr(int irq, void *data)
47385 {
47386 struct xgbe_prv_data *pdata = data;
47387- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47388+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47389 struct xgbe_channel *channel;
47390 unsigned int dma_isr, dma_ch_isr;
47391 unsigned int mac_isr, mac_tssr;
47392@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47393
47394 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47395 {
47396- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47397+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47398
47399 DBGPR("-->xgbe_init_tx_coalesce\n");
47400
47401@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47402
47403 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47404 {
47405- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47406+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47407
47408 DBGPR("-->xgbe_init_rx_coalesce\n");
47409
47410@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47411
47412 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47413 {
47414- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47415+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47416 struct xgbe_channel *channel;
47417 struct xgbe_ring *ring;
47418 struct xgbe_ring_data *rdata;
47419@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47420
47421 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47422 {
47423- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47424+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47425 struct xgbe_channel *channel;
47426 struct xgbe_ring *ring;
47427 struct xgbe_ring_data *rdata;
47428@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47429 static void xgbe_adjust_link(struct net_device *netdev)
47430 {
47431 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47432- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47433+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47434 struct phy_device *phydev = pdata->phydev;
47435 int new_state = 0;
47436
47437@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47438 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47439 {
47440 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47441- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47442+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47443 unsigned long flags;
47444
47445 DBGPR("-->xgbe_powerdown\n");
47446@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47447 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47448 {
47449 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47450- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47451+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47452 unsigned long flags;
47453
47454 DBGPR("-->xgbe_powerup\n");
47455@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47456
47457 static int xgbe_start(struct xgbe_prv_data *pdata)
47458 {
47459- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47460+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47461 struct net_device *netdev = pdata->netdev;
47462
47463 DBGPR("-->xgbe_start\n");
47464@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47465
47466 static void xgbe_stop(struct xgbe_prv_data *pdata)
47467 {
47468- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47469+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47470 struct xgbe_channel *channel;
47471 struct net_device *netdev = pdata->netdev;
47472 struct netdev_queue *txq;
47473@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47474 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47475 {
47476 struct xgbe_channel *channel;
47477- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47478+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47479 unsigned int i;
47480
47481 DBGPR("-->xgbe_restart_dev\n");
47482@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47483 return -ERANGE;
47484 }
47485
47486- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47487+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47488
47489 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47490
47491@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47492 static int xgbe_open(struct net_device *netdev)
47493 {
47494 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47495- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47496- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47497+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47498+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47499 struct xgbe_channel *channel = NULL;
47500 unsigned int i = 0;
47501 int ret;
47502@@ -1400,8 +1400,8 @@ err_phy_init:
47503 static int xgbe_close(struct net_device *netdev)
47504 {
47505 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47506- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47507- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47508+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47509+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47510 struct xgbe_channel *channel;
47511 unsigned int i;
47512
47513@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47514 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47515 {
47516 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47517- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47518- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47519+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47520+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47521 struct xgbe_channel *channel;
47522 struct xgbe_ring *ring;
47523 struct xgbe_packet_data *packet;
47524@@ -1518,7 +1518,7 @@ tx_netdev_return:
47525 static void xgbe_set_rx_mode(struct net_device *netdev)
47526 {
47527 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47528- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47529+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47530 unsigned int pr_mode, am_mode;
47531
47532 DBGPR("-->xgbe_set_rx_mode\n");
47533@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47534 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47535 {
47536 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47537- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47538+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47539 struct sockaddr *saddr = addr;
47540
47541 DBGPR("-->xgbe_set_mac_address\n");
47542@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47543
47544 DBGPR("-->%s\n", __func__);
47545
47546- pdata->hw_if.read_mmc_stats(pdata);
47547+ pdata->hw_if->read_mmc_stats(pdata);
47548
47549 s->rx_packets = pstats->rxframecount_gb;
47550 s->rx_bytes = pstats->rxoctetcount_gb;
47551@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47552 u16 vid)
47553 {
47554 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47555- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47556+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47557
47558 DBGPR("-->%s\n", __func__);
47559
47560@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47561 u16 vid)
47562 {
47563 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47564- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47565+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47566
47567 DBGPR("-->%s\n", __func__);
47568
47569@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47570 netdev_features_t features)
47571 {
47572 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47573- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47574+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47575 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47576 int ret = 0;
47577
47578@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47579 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47580 {
47581 struct xgbe_prv_data *pdata = channel->pdata;
47582- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47583+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47584 struct xgbe_ring *ring = channel->rx_ring;
47585 struct xgbe_ring_data *rdata;
47586
47587@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47588 static int xgbe_tx_poll(struct xgbe_channel *channel)
47589 {
47590 struct xgbe_prv_data *pdata = channel->pdata;
47591- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47592- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47593+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47594+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47595 struct xgbe_ring *ring = channel->tx_ring;
47596 struct xgbe_ring_data *rdata;
47597 struct xgbe_ring_desc *rdesc;
47598@@ -1891,7 +1891,7 @@ unlock:
47599 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47600 {
47601 struct xgbe_prv_data *pdata = channel->pdata;
47602- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47603+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47604 struct xgbe_ring *ring = channel->rx_ring;
47605 struct xgbe_ring_data *rdata;
47606 struct xgbe_packet_data *packet;
47607diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47608index ebf4893..28108c7 100644
47609--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47610+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47611@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47612
47613 DBGPR("-->%s\n", __func__);
47614
47615- pdata->hw_if.read_mmc_stats(pdata);
47616+ pdata->hw_if->read_mmc_stats(pdata);
47617 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47618 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47619 *data++ = *(u64 *)stat;
47620@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47621 struct ethtool_coalesce *ec)
47622 {
47623 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47624- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47625+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47626 unsigned int riwt;
47627
47628 DBGPR("-->xgbe_get_coalesce\n");
47629@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47630 struct ethtool_coalesce *ec)
47631 {
47632 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47633- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47634+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47635 unsigned int rx_frames, rx_riwt, rx_usecs;
47636 unsigned int tx_frames, tx_usecs;
47637
47638diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47639index dbd3850..4e31b38 100644
47640--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47641+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47642@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47643 DBGPR("<--xgbe_default_config\n");
47644 }
47645
47646-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47647-{
47648- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47649- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47650-}
47651-
47652 static int xgbe_probe(struct platform_device *pdev)
47653 {
47654 struct xgbe_prv_data *pdata;
47655@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47656 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47657
47658 /* Set all the function pointers */
47659- xgbe_init_all_fptrs(pdata);
47660- hw_if = &pdata->hw_if;
47661- desc_if = &pdata->desc_if;
47662+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47663+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47664
47665 /* Issue software reset to device */
47666 hw_if->exit(pdata);
47667diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47668index 363b210..b241389 100644
47669--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47670+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47671@@ -126,7 +126,7 @@
47672 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47673 {
47674 struct xgbe_prv_data *pdata = mii->priv;
47675- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47676+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47677 int mmd_data;
47678
47679 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47680@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47681 u16 mmd_val)
47682 {
47683 struct xgbe_prv_data *pdata = mii->priv;
47684- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47685+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47686 int mmd_data = mmd_val;
47687
47688 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47689diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47690index a1bf9d1c..84adcab 100644
47691--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47692+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47693@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47694 tstamp_cc);
47695 u64 nsec;
47696
47697- nsec = pdata->hw_if.get_tstamp_time(pdata);
47698+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47699
47700 return nsec;
47701 }
47702@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47703
47704 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47705
47706- pdata->hw_if.update_tstamp_addend(pdata, addend);
47707+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47708
47709 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47710
47711diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47712index f9ec762..988c969 100644
47713--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47714+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47715@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47716 int dev_irq;
47717 unsigned int per_channel_irq;
47718
47719- struct xgbe_hw_if hw_if;
47720- struct xgbe_desc_if desc_if;
47721+ const struct xgbe_hw_if *hw_if;
47722+ const struct xgbe_desc_if *desc_if;
47723
47724 /* AXI DMA settings */
47725 unsigned int axdomain;
47726@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47727 #endif
47728 };
47729
47730+extern const struct xgbe_hw_if default_xgbe_hw_if;
47731+extern const struct xgbe_desc_if default_xgbe_desc_if;
47732+
47733 /* Function prototypes*/
47734
47735 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
47736diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47737index adcacda..fa6e0ae 100644
47738--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47739+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47740@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47741 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47742 {
47743 /* RX_MODE controlling object */
47744- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47745+ bnx2x_init_rx_mode_obj(bp);
47746
47747 /* multicast configuration controlling object */
47748 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47749diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47750index 07cdf9b..b08ecc7 100644
47751--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47752+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47753@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47754 return rc;
47755 }
47756
47757-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47758- struct bnx2x_rx_mode_obj *o)
47759+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47760 {
47761 if (CHIP_IS_E1x(bp)) {
47762- o->wait_comp = bnx2x_empty_rx_mode_wait;
47763- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47764+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47765+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47766 } else {
47767- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47768- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47769+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47770+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47771 }
47772 }
47773
47774diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47775index 86baecb..ff3bb46 100644
47776--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47777+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47778@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47779
47780 /********************* RX MODE ****************/
47781
47782-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47783- struct bnx2x_rx_mode_obj *o);
47784+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47785
47786 /**
47787 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
47788diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47789index 31c9f82..e65e986 100644
47790--- a/drivers/net/ethernet/broadcom/tg3.h
47791+++ b/drivers/net/ethernet/broadcom/tg3.h
47792@@ -150,6 +150,7 @@
47793 #define CHIPREV_ID_5750_A0 0x4000
47794 #define CHIPREV_ID_5750_A1 0x4001
47795 #define CHIPREV_ID_5750_A3 0x4003
47796+#define CHIPREV_ID_5750_C1 0x4201
47797 #define CHIPREV_ID_5750_C2 0x4202
47798 #define CHIPREV_ID_5752_A0_HW 0x5000
47799 #define CHIPREV_ID_5752_A0 0x6000
47800diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47801index 903466e..b285864 100644
47802--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47803+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47804@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47805 }
47806
47807 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47808- bna_cb_ioceth_enable,
47809- bna_cb_ioceth_disable,
47810- bna_cb_ioceth_hbfail,
47811- bna_cb_ioceth_reset
47812+ .enable_cbfn = bna_cb_ioceth_enable,
47813+ .disable_cbfn = bna_cb_ioceth_disable,
47814+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47815+ .reset_cbfn = bna_cb_ioceth_reset
47816 };
47817
47818 static void bna_attr_init(struct bna_ioceth *ioceth)
47819diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47820index 8cffcdf..aadf043 100644
47821--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47822+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47823@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47824 */
47825 struct l2t_skb_cb {
47826 arp_failure_handler_func arp_failure_handler;
47827-};
47828+} __no_const;
47829
47830 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47831
47832diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47833index ccf3436..b720d77 100644
47834--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47835+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47836@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47837
47838 int i;
47839 struct adapter *ap = netdev2adap(dev);
47840- static const unsigned int *reg_ranges;
47841+ const unsigned int *reg_ranges;
47842 int arr_size = 0, buf_size = 0;
47843
47844 if (is_t4(ap->params.chip)) {
47845diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
47846index badff18..e15c4ec 100644
47847--- a/drivers/net/ethernet/dec/tulip/de4x5.c
47848+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
47849@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47850 for (i=0; i<ETH_ALEN; i++) {
47851 tmp.addr[i] = dev->dev_addr[i];
47852 }
47853- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47854+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47855 break;
47856
47857 case DE4X5_SET_HWADDR: /* Set the hardware address */
47858@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47859 spin_lock_irqsave(&lp->lock, flags);
47860 memcpy(&statbuf, &lp->pktStats, ioc->len);
47861 spin_unlock_irqrestore(&lp->lock, flags);
47862- if (copy_to_user(ioc->data, &statbuf, ioc->len))
47863+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
47864 return -EFAULT;
47865 break;
47866 }
47867diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
47868index d48806b..41cd80f 100644
47869--- a/drivers/net/ethernet/emulex/benet/be_main.c
47870+++ b/drivers/net/ethernet/emulex/benet/be_main.c
47871@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
47872
47873 if (wrapped)
47874 newacc += 65536;
47875- ACCESS_ONCE(*acc) = newacc;
47876+ ACCESS_ONCE_RW(*acc) = newacc;
47877 }
47878
47879 static void populate_erx_stats(struct be_adapter *adapter,
47880diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
47881index 6d0c5d5..55be363 100644
47882--- a/drivers/net/ethernet/faraday/ftgmac100.c
47883+++ b/drivers/net/ethernet/faraday/ftgmac100.c
47884@@ -30,6 +30,8 @@
47885 #include <linux/netdevice.h>
47886 #include <linux/phy.h>
47887 #include <linux/platform_device.h>
47888+#include <linux/interrupt.h>
47889+#include <linux/irqreturn.h>
47890 #include <net/ip.h>
47891
47892 #include "ftgmac100.h"
47893diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
47894index dce5f7b..2433466 100644
47895--- a/drivers/net/ethernet/faraday/ftmac100.c
47896+++ b/drivers/net/ethernet/faraday/ftmac100.c
47897@@ -31,6 +31,8 @@
47898 #include <linux/module.h>
47899 #include <linux/netdevice.h>
47900 #include <linux/platform_device.h>
47901+#include <linux/interrupt.h>
47902+#include <linux/irqreturn.h>
47903
47904 #include "ftmac100.h"
47905
47906diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47907index 6d1ec92..4d5d97d 100644
47908--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47909+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47910@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
47911 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
47912
47913 /* Update the base adjustement value. */
47914- ACCESS_ONCE(pf->ptp_base_adj) = incval;
47915+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
47916 smp_mb(); /* Force the above update. */
47917 }
47918
47919diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47920index 5fd4b52..87aa34b 100644
47921--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47922+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
47923@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
47924 }
47925
47926 /* update the base incval used to calculate frequency adjustment */
47927- ACCESS_ONCE(adapter->base_incval) = incval;
47928+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
47929 smp_mb();
47930
47931 /* need lock to prevent incorrect read while modifying cyclecounter */
47932diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47933index e3357bf..d4d5348 100644
47934--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47935+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
47936@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
47937 wmb();
47938
47939 /* we want to dirty this cache line once */
47940- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
47941- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
47942+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
47943+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
47944
47945 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
47946
47947diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47948index 2bbd01f..e8baa64 100644
47949--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
47950+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
47951@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47952 struct __vxge_hw_fifo *fifo;
47953 struct vxge_hw_fifo_config *config;
47954 u32 txdl_size, txdl_per_memblock;
47955- struct vxge_hw_mempool_cbs fifo_mp_callback;
47956+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
47957+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
47958+ };
47959+
47960 struct __vxge_hw_virtualpath *vpath;
47961
47962 if ((vp == NULL) || (attr == NULL)) {
47963@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
47964 goto exit;
47965 }
47966
47967- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
47968-
47969 fifo->mempool =
47970 __vxge_hw_mempool_create(vpath->hldev,
47971 fifo->config->memblock_size,
47972diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47973index 2bb48d5..d1a865d 100644
47974--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47975+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
47976@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
47977 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
47978 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
47979 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
47980- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47981+ pax_open_kernel();
47982+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
47983+ pax_close_kernel();
47984 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47985 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
47986 max_tx_rings = QLCNIC_MAX_TX_RINGS;
47987diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47988index be7d7a6..a8983f8 100644
47989--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47990+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
47991@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
47992 case QLCNIC_NON_PRIV_FUNC:
47993 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
47994 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
47995- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47996+ pax_open_kernel();
47997+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
47998+ pax_close_kernel();
47999 break;
48000 case QLCNIC_PRIV_FUNC:
48001 ahw->op_mode = QLCNIC_PRIV_FUNC;
48002 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48003- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48004+ pax_open_kernel();
48005+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48006+ pax_close_kernel();
48007 break;
48008 case QLCNIC_MGMT_FUNC:
48009 ahw->op_mode = QLCNIC_MGMT_FUNC;
48010 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48011- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48012+ pax_open_kernel();
48013+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48014+ pax_close_kernel();
48015 break;
48016 default:
48017 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48018diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48019index c9f57fb..208bdc1 100644
48020--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48021+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48022@@ -1285,7 +1285,7 @@ flash_temp:
48023 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48024 {
48025 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48026- static const struct qlcnic_dump_operations *fw_dump_ops;
48027+ const struct qlcnic_dump_operations *fw_dump_ops;
48028 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48029 u32 entry_offset, dump, no_entries, buf_offset = 0;
48030 int i, k, ops_cnt, ops_index, dump_size = 0;
48031diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48032index 2e2cf80..ebc796d 100644
48033--- a/drivers/net/ethernet/realtek/r8169.c
48034+++ b/drivers/net/ethernet/realtek/r8169.c
48035@@ -788,22 +788,22 @@ struct rtl8169_private {
48036 struct mdio_ops {
48037 void (*write)(struct rtl8169_private *, int, int);
48038 int (*read)(struct rtl8169_private *, int);
48039- } mdio_ops;
48040+ } __no_const mdio_ops;
48041
48042 struct pll_power_ops {
48043 void (*down)(struct rtl8169_private *);
48044 void (*up)(struct rtl8169_private *);
48045- } pll_power_ops;
48046+ } __no_const pll_power_ops;
48047
48048 struct jumbo_ops {
48049 void (*enable)(struct rtl8169_private *);
48050 void (*disable)(struct rtl8169_private *);
48051- } jumbo_ops;
48052+ } __no_const jumbo_ops;
48053
48054 struct csi_ops {
48055 void (*write)(struct rtl8169_private *, int, int);
48056 u32 (*read)(struct rtl8169_private *, int);
48057- } csi_ops;
48058+ } __no_const csi_ops;
48059
48060 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48061 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48062diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48063index 6b861e3..204ac86 100644
48064--- a/drivers/net/ethernet/sfc/ptp.c
48065+++ b/drivers/net/ethernet/sfc/ptp.c
48066@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48067 ptp->start.dma_addr);
48068
48069 /* Clear flag that signals MC ready */
48070- ACCESS_ONCE(*start) = 0;
48071+ ACCESS_ONCE_RW(*start) = 0;
48072 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48073 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48074 EFX_BUG_ON_PARANOID(rc);
48075diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48076index 08c483b..2c4a553 100644
48077--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48078+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48079@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48080
48081 writel(value, ioaddr + MMC_CNTRL);
48082
48083- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48084- MMC_CNTRL, value);
48085+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48086+// MMC_CNTRL, value);
48087 }
48088
48089 /* To mask all all interrupts.*/
48090diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48091index 384ca4f..dd7d4f9 100644
48092--- a/drivers/net/hyperv/hyperv_net.h
48093+++ b/drivers/net/hyperv/hyperv_net.h
48094@@ -171,7 +171,7 @@ struct rndis_device {
48095 enum rndis_device_state state;
48096 bool link_state;
48097 bool link_change;
48098- atomic_t new_req_id;
48099+ atomic_unchecked_t new_req_id;
48100
48101 spinlock_t request_lock;
48102 struct list_head req_list;
48103diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48104index ec0c40a..c9e42eb 100644
48105--- a/drivers/net/hyperv/rndis_filter.c
48106+++ b/drivers/net/hyperv/rndis_filter.c
48107@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48108 * template
48109 */
48110 set = &rndis_msg->msg.set_req;
48111- set->req_id = atomic_inc_return(&dev->new_req_id);
48112+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48113
48114 /* Add to the request list */
48115 spin_lock_irqsave(&dev->request_lock, flags);
48116@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48117
48118 /* Setup the rndis set */
48119 halt = &request->request_msg.msg.halt_req;
48120- halt->req_id = atomic_inc_return(&dev->new_req_id);
48121+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48122
48123 /* Ignore return since this msg is optional. */
48124 rndis_filter_send_request(dev, request);
48125diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48126index 34f846b..4a0d5b1 100644
48127--- a/drivers/net/ifb.c
48128+++ b/drivers/net/ifb.c
48129@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48130 return 0;
48131 }
48132
48133-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48134+static struct rtnl_link_ops ifb_link_ops = {
48135 .kind = "ifb",
48136 .priv_size = sizeof(struct ifb_private),
48137 .setup = ifb_setup,
48138diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48139index 612e073..a9f5eda 100644
48140--- a/drivers/net/macvlan.c
48141+++ b/drivers/net/macvlan.c
48142@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48143 free_nskb:
48144 kfree_skb(nskb);
48145 err:
48146- atomic_long_inc(&skb->dev->rx_dropped);
48147+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48148 }
48149
48150 static void macvlan_flush_sources(struct macvlan_port *port,
48151@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48152 int macvlan_link_register(struct rtnl_link_ops *ops)
48153 {
48154 /* common fields */
48155- ops->priv_size = sizeof(struct macvlan_dev);
48156- ops->validate = macvlan_validate;
48157- ops->maxtype = IFLA_MACVLAN_MAX;
48158- ops->policy = macvlan_policy;
48159- ops->changelink = macvlan_changelink;
48160- ops->get_size = macvlan_get_size;
48161- ops->fill_info = macvlan_fill_info;
48162+ pax_open_kernel();
48163+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48164+ *(void **)&ops->validate = macvlan_validate;
48165+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48166+ *(const void **)&ops->policy = macvlan_policy;
48167+ *(void **)&ops->changelink = macvlan_changelink;
48168+ *(void **)&ops->get_size = macvlan_get_size;
48169+ *(void **)&ops->fill_info = macvlan_fill_info;
48170+ pax_close_kernel();
48171
48172 return rtnl_link_register(ops);
48173 };
48174@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48175 return NOTIFY_DONE;
48176 }
48177
48178-static struct notifier_block macvlan_notifier_block __read_mostly = {
48179+static struct notifier_block macvlan_notifier_block = {
48180 .notifier_call = macvlan_device_event,
48181 };
48182
48183diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48184index 4d050ee..012f6dd 100644
48185--- a/drivers/net/macvtap.c
48186+++ b/drivers/net/macvtap.c
48187@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48188 dev->tx_queue_len = TUN_READQ_SIZE;
48189 }
48190
48191-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48192+static struct rtnl_link_ops macvtap_link_ops = {
48193 .kind = "macvtap",
48194 .setup = macvtap_setup,
48195 .newlink = macvtap_newlink,
48196@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48197
48198 ret = 0;
48199 u = q->flags;
48200- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48201+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48202 put_user(u, &ifr->ifr_flags))
48203 ret = -EFAULT;
48204 macvtap_put_vlan(vlan);
48205@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48206 return NOTIFY_DONE;
48207 }
48208
48209-static struct notifier_block macvtap_notifier_block __read_mostly = {
48210+static struct notifier_block macvtap_notifier_block = {
48211 .notifier_call = macvtap_device_event,
48212 };
48213
48214diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48215index 34924df..a747360 100644
48216--- a/drivers/net/nlmon.c
48217+++ b/drivers/net/nlmon.c
48218@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48219 return 0;
48220 }
48221
48222-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48223+static struct rtnl_link_ops nlmon_link_ops = {
48224 .kind = "nlmon",
48225 .priv_size = sizeof(struct nlmon),
48226 .setup = nlmon_setup,
48227diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48228index 3fc91e8..6c36337 100644
48229--- a/drivers/net/phy/phy_device.c
48230+++ b/drivers/net/phy/phy_device.c
48231@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48232 * zero on success.
48233 *
48234 */
48235-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48236+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48237 struct phy_c45_device_ids *c45_ids) {
48238 int phy_reg;
48239 int i, reg_addr;
48240@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48241 * its return value is in turn returned.
48242 *
48243 */
48244-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48245+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48246 bool is_c45, struct phy_c45_device_ids *c45_ids)
48247 {
48248 int phy_reg;
48249@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48250 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48251 {
48252 struct phy_c45_device_ids c45_ids = {0};
48253- u32 phy_id = 0;
48254+ int phy_id = 0;
48255 int r;
48256
48257 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48258diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48259index af034db..1611c0b2 100644
48260--- a/drivers/net/ppp/ppp_generic.c
48261+++ b/drivers/net/ppp/ppp_generic.c
48262@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48263 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48264 struct ppp_stats stats;
48265 struct ppp_comp_stats cstats;
48266- char *vers;
48267
48268 switch (cmd) {
48269 case SIOCGPPPSTATS:
48270@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48271 break;
48272
48273 case SIOCGPPPVER:
48274- vers = PPP_VERSION;
48275- if (copy_to_user(addr, vers, strlen(vers) + 1))
48276+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48277 break;
48278 err = 0;
48279 break;
48280diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48281index 079f7ad..b2a2bfa7 100644
48282--- a/drivers/net/slip/slhc.c
48283+++ b/drivers/net/slip/slhc.c
48284@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48285 register struct tcphdr *thp;
48286 register struct iphdr *ip;
48287 register struct cstate *cs;
48288- int len, hdrlen;
48289+ long len, hdrlen;
48290 unsigned char *cp = icp;
48291
48292 /* We've got a compressed packet; read the change byte */
48293diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48294index 2c087ef..4859007 100644
48295--- a/drivers/net/team/team.c
48296+++ b/drivers/net/team/team.c
48297@@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
48298 return TEAM_DEFAULT_NUM_RX_QUEUES;
48299 }
48300
48301-static struct rtnl_link_ops team_link_ops __read_mostly = {
48302+static struct rtnl_link_ops team_link_ops = {
48303 .kind = DRV_NAME,
48304 .priv_size = sizeof(struct team),
48305 .setup = team_setup,
48306@@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
48307 return NOTIFY_DONE;
48308 }
48309
48310-static struct notifier_block team_notifier_block __read_mostly = {
48311+static struct notifier_block team_notifier_block = {
48312 .notifier_call = team_device_event,
48313 };
48314
48315diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48316index 10f9e40..3515e7e 100644
48317--- a/drivers/net/tun.c
48318+++ b/drivers/net/tun.c
48319@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48320 return -EINVAL;
48321 }
48322
48323-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48324+static struct rtnl_link_ops tun_link_ops = {
48325 .kind = DRV_NAME,
48326 .priv_size = sizeof(struct tun_struct),
48327 .setup = tun_setup,
48328@@ -1827,7 +1827,7 @@ unlock:
48329 }
48330
48331 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48332- unsigned long arg, int ifreq_len)
48333+ unsigned long arg, size_t ifreq_len)
48334 {
48335 struct tun_file *tfile = file->private_data;
48336 struct tun_struct *tun;
48337@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48338 int le;
48339 int ret;
48340
48341+ if (ifreq_len > sizeof ifr)
48342+ return -EFAULT;
48343+
48344 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48345 if (copy_from_user(&ifr, argp, ifreq_len))
48346 return -EFAULT;
48347diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48348index 9c5aa92..8cd0405 100644
48349--- a/drivers/net/usb/hso.c
48350+++ b/drivers/net/usb/hso.c
48351@@ -71,7 +71,7 @@
48352 #include <asm/byteorder.h>
48353 #include <linux/serial_core.h>
48354 #include <linux/serial.h>
48355-
48356+#include <asm/local.h>
48357
48358 #define MOD_AUTHOR "Option Wireless"
48359 #define MOD_DESCRIPTION "USB High Speed Option driver"
48360@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48361 struct urb *urb;
48362
48363 urb = serial->rx_urb[0];
48364- if (serial->port.count > 0) {
48365+ if (atomic_read(&serial->port.count) > 0) {
48366 count = put_rxbuf_data(urb, serial);
48367 if (count == -1)
48368 return;
48369@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48370 DUMP1(urb->transfer_buffer, urb->actual_length);
48371
48372 /* Anyone listening? */
48373- if (serial->port.count == 0)
48374+ if (atomic_read(&serial->port.count) == 0)
48375 return;
48376
48377 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48378@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48379 tty_port_tty_set(&serial->port, tty);
48380
48381 /* check for port already opened, if not set the termios */
48382- serial->port.count++;
48383- if (serial->port.count == 1) {
48384+ if (atomic_inc_return(&serial->port.count) == 1) {
48385 serial->rx_state = RX_IDLE;
48386 /* Force default termio settings */
48387 _hso_serial_set_termios(tty, NULL);
48388@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48389 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48390 if (result) {
48391 hso_stop_serial_device(serial->parent);
48392- serial->port.count--;
48393+ atomic_dec(&serial->port.count);
48394 kref_put(&serial->parent->ref, hso_serial_ref_free);
48395 }
48396 } else {
48397@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48398
48399 /* reset the rts and dtr */
48400 /* do the actual close */
48401- serial->port.count--;
48402+ atomic_dec(&serial->port.count);
48403
48404- if (serial->port.count <= 0) {
48405- serial->port.count = 0;
48406+ if (atomic_read(&serial->port.count) <= 0) {
48407+ atomic_set(&serial->port.count, 0);
48408 tty_port_tty_set(&serial->port, NULL);
48409 if (!usb_gone)
48410 hso_stop_serial_device(serial->parent);
48411@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48412
48413 /* the actual setup */
48414 spin_lock_irqsave(&serial->serial_lock, flags);
48415- if (serial->port.count)
48416+ if (atomic_read(&serial->port.count))
48417 _hso_serial_set_termios(tty, old);
48418 else
48419 tty->termios = *old;
48420@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48421 D1("Pending read interrupt on port %d\n", i);
48422 spin_lock(&serial->serial_lock);
48423 if (serial->rx_state == RX_IDLE &&
48424- serial->port.count > 0) {
48425+ atomic_read(&serial->port.count) > 0) {
48426 /* Setup and send a ctrl req read on
48427 * port i */
48428 if (!serial->rx_urb_filled[0]) {
48429@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48430 /* Start all serial ports */
48431 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48432 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48433- if (dev2ser(serial_table[i])->port.count) {
48434+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48435 result =
48436 hso_start_serial_device(serial_table[i], GFP_NOIO);
48437 hso_kick_transmit(dev2ser(serial_table[i]));
48438diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48439index bf405f1..fd847ee 100644
48440--- a/drivers/net/usb/r8152.c
48441+++ b/drivers/net/usb/r8152.c
48442@@ -571,7 +571,7 @@ struct r8152 {
48443 void (*unload)(struct r8152 *);
48444 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48445 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48446- } rtl_ops;
48447+ } __no_const rtl_ops;
48448
48449 int intr_interval;
48450 u32 saved_wolopts;
48451diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48452index a2515887..6d13233 100644
48453--- a/drivers/net/usb/sierra_net.c
48454+++ b/drivers/net/usb/sierra_net.c
48455@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48456 /* atomic counter partially included in MAC address to make sure 2 devices
48457 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48458 */
48459-static atomic_t iface_counter = ATOMIC_INIT(0);
48460+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48461
48462 /*
48463 * SYNC Timer Delay definition used to set the expiry time
48464@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48465 dev->net->netdev_ops = &sierra_net_device_ops;
48466
48467 /* change MAC addr to include, ifacenum, and to be unique */
48468- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48469+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48470 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48471
48472 /* we will have to manufacture ethernet headers, prepare template */
48473diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48474index 0ad6c0c..4013638 100644
48475--- a/drivers/net/virtio_net.c
48476+++ b/drivers/net/virtio_net.c
48477@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48478 #define RECEIVE_AVG_WEIGHT 64
48479
48480 /* Minimum alignment for mergeable packet buffers. */
48481-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48482+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48483
48484 #define VIRTNET_DRIVER_VERSION "1.0.0"
48485
48486diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48487index a8c755d..a988b71 100644
48488--- a/drivers/net/vxlan.c
48489+++ b/drivers/net/vxlan.c
48490@@ -2702,7 +2702,7 @@ nla_put_failure:
48491 return -EMSGSIZE;
48492 }
48493
48494-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48495+static struct rtnl_link_ops vxlan_link_ops = {
48496 .kind = "vxlan",
48497 .maxtype = IFLA_VXLAN_MAX,
48498 .policy = vxlan_policy,
48499@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48500 return NOTIFY_DONE;
48501 }
48502
48503-static struct notifier_block vxlan_notifier_block __read_mostly = {
48504+static struct notifier_block vxlan_notifier_block = {
48505 .notifier_call = vxlan_lowerdev_event,
48506 };
48507
48508diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48509index 5920c99..ff2e4a5 100644
48510--- a/drivers/net/wan/lmc/lmc_media.c
48511+++ b/drivers/net/wan/lmc/lmc_media.c
48512@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48513 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48514
48515 lmc_media_t lmc_ds3_media = {
48516- lmc_ds3_init, /* special media init stuff */
48517- lmc_ds3_default, /* reset to default state */
48518- lmc_ds3_set_status, /* reset status to state provided */
48519- lmc_dummy_set_1, /* set clock source */
48520- lmc_dummy_set2_1, /* set line speed */
48521- lmc_ds3_set_100ft, /* set cable length */
48522- lmc_ds3_set_scram, /* set scrambler */
48523- lmc_ds3_get_link_status, /* get link status */
48524- lmc_dummy_set_1, /* set link status */
48525- lmc_ds3_set_crc_length, /* set CRC length */
48526- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48527- lmc_ds3_watchdog
48528+ .init = lmc_ds3_init, /* special media init stuff */
48529+ .defaults = lmc_ds3_default, /* reset to default state */
48530+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48531+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48532+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48533+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48534+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48535+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48536+ .set_link_status = lmc_dummy_set_1, /* set link status */
48537+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48538+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48539+ .watchdog = lmc_ds3_watchdog
48540 };
48541
48542 lmc_media_t lmc_hssi_media = {
48543- lmc_hssi_init, /* special media init stuff */
48544- lmc_hssi_default, /* reset to default state */
48545- lmc_hssi_set_status, /* reset status to state provided */
48546- lmc_hssi_set_clock, /* set clock source */
48547- lmc_dummy_set2_1, /* set line speed */
48548- lmc_dummy_set_1, /* set cable length */
48549- lmc_dummy_set_1, /* set scrambler */
48550- lmc_hssi_get_link_status, /* get link status */
48551- lmc_hssi_set_link_status, /* set link status */
48552- lmc_hssi_set_crc_length, /* set CRC length */
48553- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48554- lmc_hssi_watchdog
48555+ .init = lmc_hssi_init, /* special media init stuff */
48556+ .defaults = lmc_hssi_default, /* reset to default state */
48557+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48558+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48559+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48560+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48561+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48562+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48563+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48564+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48565+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48566+ .watchdog = lmc_hssi_watchdog
48567 };
48568
48569-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48570- lmc_ssi_default, /* reset to default state */
48571- lmc_ssi_set_status, /* reset status to state provided */
48572- lmc_ssi_set_clock, /* set clock source */
48573- lmc_ssi_set_speed, /* set line speed */
48574- lmc_dummy_set_1, /* set cable length */
48575- lmc_dummy_set_1, /* set scrambler */
48576- lmc_ssi_get_link_status, /* get link status */
48577- lmc_ssi_set_link_status, /* set link status */
48578- lmc_ssi_set_crc_length, /* set CRC length */
48579- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48580- lmc_ssi_watchdog
48581+lmc_media_t lmc_ssi_media = {
48582+ .init = lmc_ssi_init, /* special media init stuff */
48583+ .defaults = lmc_ssi_default, /* reset to default state */
48584+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48585+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48586+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48587+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48588+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48589+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48590+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48591+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48592+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48593+ .watchdog = lmc_ssi_watchdog
48594 };
48595
48596 lmc_media_t lmc_t1_media = {
48597- lmc_t1_init, /* special media init stuff */
48598- lmc_t1_default, /* reset to default state */
48599- lmc_t1_set_status, /* reset status to state provided */
48600- lmc_t1_set_clock, /* set clock source */
48601- lmc_dummy_set2_1, /* set line speed */
48602- lmc_dummy_set_1, /* set cable length */
48603- lmc_dummy_set_1, /* set scrambler */
48604- lmc_t1_get_link_status, /* get link status */
48605- lmc_dummy_set_1, /* set link status */
48606- lmc_t1_set_crc_length, /* set CRC length */
48607- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48608- lmc_t1_watchdog
48609+ .init = lmc_t1_init, /* special media init stuff */
48610+ .defaults = lmc_t1_default, /* reset to default state */
48611+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48612+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48613+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48614+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48615+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48616+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48617+ .set_link_status = lmc_dummy_set_1, /* set link status */
48618+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48619+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48620+ .watchdog = lmc_t1_watchdog
48621 };
48622
48623 static void
48624diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48625index feacc3b..5bac0de 100644
48626--- a/drivers/net/wan/z85230.c
48627+++ b/drivers/net/wan/z85230.c
48628@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48629
48630 struct z8530_irqhandler z8530_sync =
48631 {
48632- z8530_rx,
48633- z8530_tx,
48634- z8530_status
48635+ .rx = z8530_rx,
48636+ .tx = z8530_tx,
48637+ .status = z8530_status
48638 };
48639
48640 EXPORT_SYMBOL(z8530_sync);
48641@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48642 }
48643
48644 static struct z8530_irqhandler z8530_dma_sync = {
48645- z8530_dma_rx,
48646- z8530_dma_tx,
48647- z8530_dma_status
48648+ .rx = z8530_dma_rx,
48649+ .tx = z8530_dma_tx,
48650+ .status = z8530_dma_status
48651 };
48652
48653 static struct z8530_irqhandler z8530_txdma_sync = {
48654- z8530_rx,
48655- z8530_dma_tx,
48656- z8530_dma_status
48657+ .rx = z8530_rx,
48658+ .tx = z8530_dma_tx,
48659+ .status = z8530_dma_status
48660 };
48661
48662 /**
48663@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48664
48665 struct z8530_irqhandler z8530_nop=
48666 {
48667- z8530_rx_clear,
48668- z8530_tx_clear,
48669- z8530_status_clear
48670+ .rx = z8530_rx_clear,
48671+ .tx = z8530_tx_clear,
48672+ .status = z8530_status_clear
48673 };
48674
48675
48676diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48677index 0b60295..b8bfa5b 100644
48678--- a/drivers/net/wimax/i2400m/rx.c
48679+++ b/drivers/net/wimax/i2400m/rx.c
48680@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48681 if (i2400m->rx_roq == NULL)
48682 goto error_roq_alloc;
48683
48684- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48685+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48686 GFP_KERNEL);
48687 if (rd == NULL) {
48688 result = -ENOMEM;
48689diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48690index e71a2ce..2268d61 100644
48691--- a/drivers/net/wireless/airo.c
48692+++ b/drivers/net/wireless/airo.c
48693@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48694 struct airo_info *ai = dev->ml_priv;
48695 int ridcode;
48696 int enabled;
48697- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48698+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48699 unsigned char *iobuf;
48700
48701 /* Only super-user can write RIDs */
48702diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48703index da92bfa..5a9001a 100644
48704--- a/drivers/net/wireless/at76c50x-usb.c
48705+++ b/drivers/net/wireless/at76c50x-usb.c
48706@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48707 }
48708
48709 /* Convert timeout from the DFU status to jiffies */
48710-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48711+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48712 {
48713 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48714 | (s->poll_timeout[1] << 8)
48715diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48716index f1946a6..cd367fb 100644
48717--- a/drivers/net/wireless/ath/ath10k/htc.c
48718+++ b/drivers/net/wireless/ath/ath10k/htc.c
48719@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48720 /* registered target arrival callback from the HIF layer */
48721 int ath10k_htc_init(struct ath10k *ar)
48722 {
48723- struct ath10k_hif_cb htc_callbacks;
48724+ static struct ath10k_hif_cb htc_callbacks = {
48725+ .rx_completion = ath10k_htc_rx_completion_handler,
48726+ .tx_completion = ath10k_htc_tx_completion_handler,
48727+ };
48728 struct ath10k_htc_ep *ep = NULL;
48729 struct ath10k_htc *htc = &ar->htc;
48730
48731@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48732 ath10k_htc_reset_endpoint_states(htc);
48733
48734 /* setup HIF layer callbacks */
48735- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48736- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48737 htc->ar = ar;
48738
48739 /* Get HIF default pipe for HTC message exchange */
48740diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48741index 527179c..a890150 100644
48742--- a/drivers/net/wireless/ath/ath10k/htc.h
48743+++ b/drivers/net/wireless/ath/ath10k/htc.h
48744@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48745
48746 struct ath10k_htc_ops {
48747 void (*target_send_suspend_complete)(struct ath10k *ar);
48748-};
48749+} __no_const;
48750
48751 struct ath10k_htc_ep_ops {
48752 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48753 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48754 void (*ep_tx_credits)(struct ath10k *);
48755-};
48756+} __no_const;
48757
48758 /* service connection information */
48759 struct ath10k_htc_svc_conn_req {
48760diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48761index f816909..e56cd8b 100644
48762--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48763+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48764@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48765 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48766 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48767
48768- ACCESS_ONCE(ads->ds_link) = i->link;
48769- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48770+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48771+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48772
48773 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48774 ctl6 = SM(i->keytype, AR_EncrType);
48775@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48776
48777 if ((i->is_first || i->is_last) &&
48778 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48779- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48780+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48781 | set11nTries(i->rates, 1)
48782 | set11nTries(i->rates, 2)
48783 | set11nTries(i->rates, 3)
48784 | (i->dur_update ? AR_DurUpdateEna : 0)
48785 | SM(0, AR_BurstDur);
48786
48787- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48788+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48789 | set11nRate(i->rates, 1)
48790 | set11nRate(i->rates, 2)
48791 | set11nRate(i->rates, 3);
48792 } else {
48793- ACCESS_ONCE(ads->ds_ctl2) = 0;
48794- ACCESS_ONCE(ads->ds_ctl3) = 0;
48795+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48796+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48797 }
48798
48799 if (!i->is_first) {
48800- ACCESS_ONCE(ads->ds_ctl0) = 0;
48801- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48802- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48803+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48804+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48805+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48806 return;
48807 }
48808
48809@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48810 break;
48811 }
48812
48813- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48814+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48815 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48816 | SM(i->txpower[0], AR_XmitPower0)
48817 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48818@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48819 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48820 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48821
48822- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48823- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48824+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48825+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48826
48827 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48828 return;
48829
48830- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48831+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48832 | set11nPktDurRTSCTS(i->rates, 1);
48833
48834- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48835+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48836 | set11nPktDurRTSCTS(i->rates, 3);
48837
48838- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48839+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48840 | set11nRateFlags(i->rates, 1)
48841 | set11nRateFlags(i->rates, 2)
48842 | set11nRateFlags(i->rates, 3)
48843 | SM(i->rtscts_rate, AR_RTSCTSRate);
48844
48845- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48846- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48847- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48848+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48849+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48850+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48851 }
48852
48853 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
48854diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48855index da84b70..83e4978 100644
48856--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48857+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48858@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48859 (i->qcu << AR_TxQcuNum_S) | desc_len;
48860
48861 checksum += val;
48862- ACCESS_ONCE(ads->info) = val;
48863+ ACCESS_ONCE_RW(ads->info) = val;
48864
48865 checksum += i->link;
48866- ACCESS_ONCE(ads->link) = i->link;
48867+ ACCESS_ONCE_RW(ads->link) = i->link;
48868
48869 checksum += i->buf_addr[0];
48870- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48871+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48872 checksum += i->buf_addr[1];
48873- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48874+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48875 checksum += i->buf_addr[2];
48876- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48877+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48878 checksum += i->buf_addr[3];
48879- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48880+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48881
48882 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48883- ACCESS_ONCE(ads->ctl3) = val;
48884+ ACCESS_ONCE_RW(ads->ctl3) = val;
48885 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
48886- ACCESS_ONCE(ads->ctl5) = val;
48887+ ACCESS_ONCE_RW(ads->ctl5) = val;
48888 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
48889- ACCESS_ONCE(ads->ctl7) = val;
48890+ ACCESS_ONCE_RW(ads->ctl7) = val;
48891 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
48892- ACCESS_ONCE(ads->ctl9) = val;
48893+ ACCESS_ONCE_RW(ads->ctl9) = val;
48894
48895 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
48896- ACCESS_ONCE(ads->ctl10) = checksum;
48897+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
48898
48899 if (i->is_first || i->is_last) {
48900- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
48901+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
48902 | set11nTries(i->rates, 1)
48903 | set11nTries(i->rates, 2)
48904 | set11nTries(i->rates, 3)
48905 | (i->dur_update ? AR_DurUpdateEna : 0)
48906 | SM(0, AR_BurstDur);
48907
48908- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
48909+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
48910 | set11nRate(i->rates, 1)
48911 | set11nRate(i->rates, 2)
48912 | set11nRate(i->rates, 3);
48913 } else {
48914- ACCESS_ONCE(ads->ctl13) = 0;
48915- ACCESS_ONCE(ads->ctl14) = 0;
48916+ ACCESS_ONCE_RW(ads->ctl13) = 0;
48917+ ACCESS_ONCE_RW(ads->ctl14) = 0;
48918 }
48919
48920 ads->ctl20 = 0;
48921@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48922
48923 ctl17 = SM(i->keytype, AR_EncrType);
48924 if (!i->is_first) {
48925- ACCESS_ONCE(ads->ctl11) = 0;
48926- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48927- ACCESS_ONCE(ads->ctl15) = 0;
48928- ACCESS_ONCE(ads->ctl16) = 0;
48929- ACCESS_ONCE(ads->ctl17) = ctl17;
48930- ACCESS_ONCE(ads->ctl18) = 0;
48931- ACCESS_ONCE(ads->ctl19) = 0;
48932+ ACCESS_ONCE_RW(ads->ctl11) = 0;
48933+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
48934+ ACCESS_ONCE_RW(ads->ctl15) = 0;
48935+ ACCESS_ONCE_RW(ads->ctl16) = 0;
48936+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48937+ ACCESS_ONCE_RW(ads->ctl18) = 0;
48938+ ACCESS_ONCE_RW(ads->ctl19) = 0;
48939 return;
48940 }
48941
48942- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48943+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
48944 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48945 | SM(i->txpower[0], AR_XmitPower0)
48946 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48947@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48948 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
48949 ctl12 |= SM(val, AR_PAPRDChainMask);
48950
48951- ACCESS_ONCE(ads->ctl12) = ctl12;
48952- ACCESS_ONCE(ads->ctl17) = ctl17;
48953+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
48954+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
48955
48956- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48957+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
48958 | set11nPktDurRTSCTS(i->rates, 1);
48959
48960- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48961+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
48962 | set11nPktDurRTSCTS(i->rates, 3);
48963
48964- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
48965+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
48966 | set11nRateFlags(i->rates, 1)
48967 | set11nRateFlags(i->rates, 2)
48968 | set11nRateFlags(i->rates, 3)
48969 | SM(i->rtscts_rate, AR_RTSCTSRate);
48970
48971- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
48972+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
48973
48974- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48975- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48976- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48977+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
48978+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
48979+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
48980 }
48981
48982 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
48983diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
48984index 1cbd335..27dfb40 100644
48985--- a/drivers/net/wireless/ath/ath9k/hw.h
48986+++ b/drivers/net/wireless/ath/ath9k/hw.h
48987@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
48988
48989 /* ANI */
48990 void (*ani_cache_ini_regs)(struct ath_hw *ah);
48991-};
48992+} __no_const;
48993
48994 /**
48995 * struct ath_spec_scan - parameters for Atheros spectral scan
48996@@ -716,7 +716,7 @@ struct ath_hw_ops {
48997 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
48998 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
48999 #endif
49000-};
49001+} __no_const;
49002
49003 struct ath_nf_limits {
49004 s16 max;
49005diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49006index 62b0bf4..4ae094c 100644
49007--- a/drivers/net/wireless/ath/ath9k/main.c
49008+++ b/drivers/net/wireless/ath/ath9k/main.c
49009@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
49010 if (!ath9k_is_chanctx_enabled())
49011 return;
49012
49013- ath9k_ops.hw_scan = ath9k_hw_scan;
49014- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49015- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49016- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49017- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49018- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49019- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49020- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49021- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49022- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49023+ pax_open_kernel();
49024+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49025+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49026+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49027+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49028+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49029+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49030+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49031+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49032+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49033+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49034+ pax_close_kernel();
49035 }
49036
49037 #endif
49038diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49039index 058a9f2..d5cb1ba 100644
49040--- a/drivers/net/wireless/b43/phy_lp.c
49041+++ b/drivers/net/wireless/b43/phy_lp.c
49042@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49043 {
49044 struct ssb_bus *bus = dev->dev->sdev->bus;
49045
49046- static const struct b206x_channel *chandata = NULL;
49047+ const struct b206x_channel *chandata = NULL;
49048 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49049 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49050 u16 old_comm15, scale;
49051diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49052index dc1d20c..f7a4f06 100644
49053--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49054+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49055@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49056 */
49057 if (il3945_mod_params.disable_hw_scan) {
49058 D_INFO("Disabling hw_scan\n");
49059- il3945_mac_ops.hw_scan = NULL;
49060+ pax_open_kernel();
49061+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49062+ pax_close_kernel();
49063 }
49064
49065 D_INFO("*** LOAD DRIVER ***\n");
49066diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49067index 0ffb6ff..c0b7f0e 100644
49068--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49069+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49070@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49071 {
49072 struct iwl_priv *priv = file->private_data;
49073 char buf[64];
49074- int buf_size;
49075+ size_t buf_size;
49076 u32 offset, len;
49077
49078 memset(buf, 0, sizeof(buf));
49079@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49080 struct iwl_priv *priv = file->private_data;
49081
49082 char buf[8];
49083- int buf_size;
49084+ size_t buf_size;
49085 u32 reset_flag;
49086
49087 memset(buf, 0, sizeof(buf));
49088@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49089 {
49090 struct iwl_priv *priv = file->private_data;
49091 char buf[8];
49092- int buf_size;
49093+ size_t buf_size;
49094 int ht40;
49095
49096 memset(buf, 0, sizeof(buf));
49097@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49098 {
49099 struct iwl_priv *priv = file->private_data;
49100 char buf[8];
49101- int buf_size;
49102+ size_t buf_size;
49103 int value;
49104
49105 memset(buf, 0, sizeof(buf));
49106@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49107 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49108 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49109
49110-static const char *fmt_value = " %-30s %10u\n";
49111-static const char *fmt_hex = " %-30s 0x%02X\n";
49112-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49113-static const char *fmt_header =
49114+static const char fmt_value[] = " %-30s %10u\n";
49115+static const char fmt_hex[] = " %-30s 0x%02X\n";
49116+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49117+static const char fmt_header[] =
49118 "%-32s current cumulative delta max\n";
49119
49120 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49121@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49122 {
49123 struct iwl_priv *priv = file->private_data;
49124 char buf[8];
49125- int buf_size;
49126+ size_t buf_size;
49127 int clear;
49128
49129 memset(buf, 0, sizeof(buf));
49130@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49131 {
49132 struct iwl_priv *priv = file->private_data;
49133 char buf[8];
49134- int buf_size;
49135+ size_t buf_size;
49136 int trace;
49137
49138 memset(buf, 0, sizeof(buf));
49139@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49140 {
49141 struct iwl_priv *priv = file->private_data;
49142 char buf[8];
49143- int buf_size;
49144+ size_t buf_size;
49145 int missed;
49146
49147 memset(buf, 0, sizeof(buf));
49148@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49149
49150 struct iwl_priv *priv = file->private_data;
49151 char buf[8];
49152- int buf_size;
49153+ size_t buf_size;
49154 int plcp;
49155
49156 memset(buf, 0, sizeof(buf));
49157@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49158
49159 struct iwl_priv *priv = file->private_data;
49160 char buf[8];
49161- int buf_size;
49162+ size_t buf_size;
49163 int flush;
49164
49165 memset(buf, 0, sizeof(buf));
49166@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49167
49168 struct iwl_priv *priv = file->private_data;
49169 char buf[8];
49170- int buf_size;
49171+ size_t buf_size;
49172 int rts;
49173
49174 if (!priv->cfg->ht_params)
49175@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49176 {
49177 struct iwl_priv *priv = file->private_data;
49178 char buf[8];
49179- int buf_size;
49180+ size_t buf_size;
49181
49182 memset(buf, 0, sizeof(buf));
49183 buf_size = min(count, sizeof(buf) - 1);
49184@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49185 struct iwl_priv *priv = file->private_data;
49186 u32 event_log_flag;
49187 char buf[8];
49188- int buf_size;
49189+ size_t buf_size;
49190
49191 /* check that the interface is up */
49192 if (!iwl_is_ready(priv))
49193@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49194 struct iwl_priv *priv = file->private_data;
49195 char buf[8];
49196 u32 calib_disabled;
49197- int buf_size;
49198+ size_t buf_size;
49199
49200 memset(buf, 0, sizeof(buf));
49201 buf_size = min(count, sizeof(buf) - 1);
49202diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49203index 523fe0c..0d9473b 100644
49204--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49205+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49206@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49207 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49208
49209 char buf[8];
49210- int buf_size;
49211+ size_t buf_size;
49212 u32 reset_flag;
49213
49214 memset(buf, 0, sizeof(buf));
49215@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49216 {
49217 struct iwl_trans *trans = file->private_data;
49218 char buf[8];
49219- int buf_size;
49220+ size_t buf_size;
49221 int csr;
49222
49223 memset(buf, 0, sizeof(buf));
49224diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49225index ef58a88..fafa731 100644
49226--- a/drivers/net/wireless/mac80211_hwsim.c
49227+++ b/drivers/net/wireless/mac80211_hwsim.c
49228@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49229 if (channels < 1)
49230 return -EINVAL;
49231
49232- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49233- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49234- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49235- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49236- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49237- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49238- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49239- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49240- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49241- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49242- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49243- mac80211_hwsim_assign_vif_chanctx;
49244- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49245- mac80211_hwsim_unassign_vif_chanctx;
49246+ pax_open_kernel();
49247+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49248+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49249+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49250+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49251+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49252+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49253+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49254+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49255+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49256+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49257+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49258+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49259+ pax_close_kernel();
49260
49261 spin_lock_init(&hwsim_radio_lock);
49262 INIT_LIST_HEAD(&hwsim_radios);
49263diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49264index 1a4facd..a2ecbbd 100644
49265--- a/drivers/net/wireless/rndis_wlan.c
49266+++ b/drivers/net/wireless/rndis_wlan.c
49267@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49268
49269 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49270
49271- if (rts_threshold < 0 || rts_threshold > 2347)
49272+ if (rts_threshold > 2347)
49273 rts_threshold = 2347;
49274
49275 tmp = cpu_to_le32(rts_threshold);
49276diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49277index 9bb398b..b0cc047 100644
49278--- a/drivers/net/wireless/rt2x00/rt2x00.h
49279+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49280@@ -375,7 +375,7 @@ struct rt2x00_intf {
49281 * for hardware which doesn't support hardware
49282 * sequence counting.
49283 */
49284- atomic_t seqno;
49285+ atomic_unchecked_t seqno;
49286 };
49287
49288 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49289diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49290index 66ff364..3ce34f7 100644
49291--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49292+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49293@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49294 * sequence counter given by mac80211.
49295 */
49296 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49297- seqno = atomic_add_return(0x10, &intf->seqno);
49298+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49299 else
49300- seqno = atomic_read(&intf->seqno);
49301+ seqno = atomic_read_unchecked(&intf->seqno);
49302
49303 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49304 hdr->seq_ctrl |= cpu_to_le16(seqno);
49305diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49306index b661f896..ddf7d2b 100644
49307--- a/drivers/net/wireless/ti/wl1251/sdio.c
49308+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49309@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49310
49311 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49312
49313- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49314- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49315+ pax_open_kernel();
49316+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49317+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49318+ pax_close_kernel();
49319
49320 wl1251_info("using dedicated interrupt line");
49321 } else {
49322- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49323- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49324+ pax_open_kernel();
49325+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49326+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49327+ pax_close_kernel();
49328
49329 wl1251_info("using SDIO interrupt");
49330 }
49331diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49332index d6d0d6d..60c23a0 100644
49333--- a/drivers/net/wireless/ti/wl12xx/main.c
49334+++ b/drivers/net/wireless/ti/wl12xx/main.c
49335@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49336 sizeof(wl->conf.mem));
49337
49338 /* read data preparation is only needed by wl127x */
49339- wl->ops->prepare_read = wl127x_prepare_read;
49340+ pax_open_kernel();
49341+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49342+ pax_close_kernel();
49343
49344 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49345 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49346@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49347 sizeof(wl->conf.mem));
49348
49349 /* read data preparation is only needed by wl127x */
49350- wl->ops->prepare_read = wl127x_prepare_read;
49351+ pax_open_kernel();
49352+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49353+ pax_close_kernel();
49354
49355 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49356 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49357diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49358index 8e56261..9140678 100644
49359--- a/drivers/net/wireless/ti/wl18xx/main.c
49360+++ b/drivers/net/wireless/ti/wl18xx/main.c
49361@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49362 }
49363
49364 if (!checksum_param) {
49365- wl18xx_ops.set_rx_csum = NULL;
49366- wl18xx_ops.init_vif = NULL;
49367+ pax_open_kernel();
49368+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49369+ *(void **)&wl18xx_ops.init_vif = NULL;
49370+ pax_close_kernel();
49371 }
49372
49373 /* Enable 11a Band only if we have 5G antennas */
49374diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49375index a912dc0..a8225ba 100644
49376--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49377+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49378@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49379 {
49380 struct zd_usb *usb = urb->context;
49381 struct zd_usb_interrupt *intr = &usb->intr;
49382- int len;
49383+ unsigned int len;
49384 u16 int_num;
49385
49386 ZD_ASSERT(in_interrupt());
49387diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49388index ce2e2cf..f81e500 100644
49389--- a/drivers/nfc/nfcwilink.c
49390+++ b/drivers/nfc/nfcwilink.c
49391@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49392
49393 static int nfcwilink_probe(struct platform_device *pdev)
49394 {
49395- static struct nfcwilink *drv;
49396+ struct nfcwilink *drv;
49397 int rc;
49398 __u32 protocols;
49399
49400diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49401index f2596c8..50d53af 100644
49402--- a/drivers/nfc/st21nfca/st21nfca.c
49403+++ b/drivers/nfc/st21nfca/st21nfca.c
49404@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49405 goto exit;
49406 }
49407
49408- gate = uid_skb->data;
49409+ memcpy(gate, uid_skb->data, uid_skb->len);
49410 *len = uid_skb->len;
49411 exit:
49412 kfree_skb(uid_skb);
49413diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49414index 5100742..6ad4e6d 100644
49415--- a/drivers/of/fdt.c
49416+++ b/drivers/of/fdt.c
49417@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49418 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49419 return 0;
49420 }
49421- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49422+ pax_open_kernel();
49423+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49424+ pax_close_kernel();
49425 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49426 }
49427 late_initcall(of_fdt_raw_init);
49428diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49429index d93b2b6..ae50401 100644
49430--- a/drivers/oprofile/buffer_sync.c
49431+++ b/drivers/oprofile/buffer_sync.c
49432@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49433 if (cookie == NO_COOKIE)
49434 offset = pc;
49435 if (cookie == INVALID_COOKIE) {
49436- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49437+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49438 offset = pc;
49439 }
49440 if (cookie != last_cookie) {
49441@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49442 /* add userspace sample */
49443
49444 if (!mm) {
49445- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49446+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49447 return 0;
49448 }
49449
49450 cookie = lookup_dcookie(mm, s->eip, &offset);
49451
49452 if (cookie == INVALID_COOKIE) {
49453- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49454+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49455 return 0;
49456 }
49457
49458@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49459 /* ignore backtraces if failed to add a sample */
49460 if (state == sb_bt_start) {
49461 state = sb_bt_ignore;
49462- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49463+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49464 }
49465 }
49466 release_mm(mm);
49467diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49468index c0cc4e7..44d4e54 100644
49469--- a/drivers/oprofile/event_buffer.c
49470+++ b/drivers/oprofile/event_buffer.c
49471@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49472 }
49473
49474 if (buffer_pos == buffer_size) {
49475- atomic_inc(&oprofile_stats.event_lost_overflow);
49476+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49477 return;
49478 }
49479
49480diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49481index ed2c3ec..deda85a 100644
49482--- a/drivers/oprofile/oprof.c
49483+++ b/drivers/oprofile/oprof.c
49484@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49485 if (oprofile_ops.switch_events())
49486 return;
49487
49488- atomic_inc(&oprofile_stats.multiplex_counter);
49489+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49490 start_switch_worker();
49491 }
49492
49493diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49494index ee2cfce..7f8f699 100644
49495--- a/drivers/oprofile/oprofile_files.c
49496+++ b/drivers/oprofile/oprofile_files.c
49497@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49498
49499 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49500
49501-static ssize_t timeout_read(struct file *file, char __user *buf,
49502+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49503 size_t count, loff_t *offset)
49504 {
49505 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49506diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49507index 59659ce..6c860a0 100644
49508--- a/drivers/oprofile/oprofile_stats.c
49509+++ b/drivers/oprofile/oprofile_stats.c
49510@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49511 cpu_buf->sample_invalid_eip = 0;
49512 }
49513
49514- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49515- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49516- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49517- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49518- atomic_set(&oprofile_stats.multiplex_counter, 0);
49519+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49520+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49521+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49522+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49523+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49524 }
49525
49526
49527diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49528index 1fc622b..8c48fc3 100644
49529--- a/drivers/oprofile/oprofile_stats.h
49530+++ b/drivers/oprofile/oprofile_stats.h
49531@@ -13,11 +13,11 @@
49532 #include <linux/atomic.h>
49533
49534 struct oprofile_stat_struct {
49535- atomic_t sample_lost_no_mm;
49536- atomic_t sample_lost_no_mapping;
49537- atomic_t bt_lost_no_mapping;
49538- atomic_t event_lost_overflow;
49539- atomic_t multiplex_counter;
49540+ atomic_unchecked_t sample_lost_no_mm;
49541+ atomic_unchecked_t sample_lost_no_mapping;
49542+ atomic_unchecked_t bt_lost_no_mapping;
49543+ atomic_unchecked_t event_lost_overflow;
49544+ atomic_unchecked_t multiplex_counter;
49545 };
49546
49547 extern struct oprofile_stat_struct oprofile_stats;
49548diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49549index 3f49345..c750d0b 100644
49550--- a/drivers/oprofile/oprofilefs.c
49551+++ b/drivers/oprofile/oprofilefs.c
49552@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49553
49554 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49555 {
49556- atomic_t *val = file->private_data;
49557- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49558+ atomic_unchecked_t *val = file->private_data;
49559+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49560 }
49561
49562
49563@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49564
49565
49566 int oprofilefs_create_ro_atomic(struct dentry *root,
49567- char const *name, atomic_t *val)
49568+ char const *name, atomic_unchecked_t *val)
49569 {
49570 return __oprofilefs_create_file(root, name,
49571 &atomic_ro_fops, 0444, val);
49572diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49573index bdef916..88c7dee 100644
49574--- a/drivers/oprofile/timer_int.c
49575+++ b/drivers/oprofile/timer_int.c
49576@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49577 return NOTIFY_OK;
49578 }
49579
49580-static struct notifier_block __refdata oprofile_cpu_notifier = {
49581+static struct notifier_block oprofile_cpu_notifier = {
49582 .notifier_call = oprofile_cpu_notify,
49583 };
49584
49585diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49586index 3b47080..6cd05dd 100644
49587--- a/drivers/parport/procfs.c
49588+++ b/drivers/parport/procfs.c
49589@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49590
49591 *ppos += len;
49592
49593- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49594+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49595 }
49596
49597 #ifdef CONFIG_PARPORT_1284
49598@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49599
49600 *ppos += len;
49601
49602- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49603+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49604 }
49605 #endif /* IEEE1284.3 support. */
49606
49607diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49608index 6ca2399..68d866b 100644
49609--- a/drivers/pci/hotplug/acpiphp_ibm.c
49610+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49611@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49612 goto init_cleanup;
49613 }
49614
49615- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49616+ pax_open_kernel();
49617+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49618+ pax_close_kernel();
49619 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49620
49621 return retval;
49622diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49623index 66b7bbe..26bee78 100644
49624--- a/drivers/pci/hotplug/cpcihp_generic.c
49625+++ b/drivers/pci/hotplug/cpcihp_generic.c
49626@@ -73,7 +73,6 @@ static u16 port;
49627 static unsigned int enum_bit;
49628 static u8 enum_mask;
49629
49630-static struct cpci_hp_controller_ops generic_hpc_ops;
49631 static struct cpci_hp_controller generic_hpc;
49632
49633 static int __init validate_parameters(void)
49634@@ -139,6 +138,10 @@ static int query_enum(void)
49635 return ((value & enum_mask) == enum_mask);
49636 }
49637
49638+static struct cpci_hp_controller_ops generic_hpc_ops = {
49639+ .query_enum = query_enum,
49640+};
49641+
49642 static int __init cpcihp_generic_init(void)
49643 {
49644 int status;
49645@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49646 pci_dev_put(dev);
49647
49648 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49649- generic_hpc_ops.query_enum = query_enum;
49650 generic_hpc.ops = &generic_hpc_ops;
49651
49652 status = cpci_hp_register_controller(&generic_hpc);
49653diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49654index 7ecf34e..effed62 100644
49655--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49656+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49657@@ -59,7 +59,6 @@
49658 /* local variables */
49659 static bool debug;
49660 static bool poll;
49661-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49662 static struct cpci_hp_controller zt5550_hpc;
49663
49664 /* Primary cPCI bus bridge device */
49665@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49666 return 0;
49667 }
49668
49669+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49670+ .query_enum = zt5550_hc_query_enum,
49671+};
49672+
49673 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49674 {
49675 int status;
49676@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49677 dbg("returned from zt5550_hc_config");
49678
49679 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49680- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49681 zt5550_hpc.ops = &zt5550_hpc_ops;
49682 if (!poll) {
49683 zt5550_hpc.irq = hc_dev->irq;
49684 zt5550_hpc.irq_flags = IRQF_SHARED;
49685 zt5550_hpc.dev_id = hc_dev;
49686
49687- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49688- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49689- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49690+ pax_open_kernel();
49691+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49692+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49693+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49694+ pax_open_kernel();
49695 } else {
49696 info("using ENUM# polling mode");
49697 }
49698diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49699index 1e08ff8c..3cd145f 100644
49700--- a/drivers/pci/hotplug/cpqphp_nvram.c
49701+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49702@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49703
49704 void compaq_nvram_init (void __iomem *rom_start)
49705 {
49706+#ifndef CONFIG_PAX_KERNEXEC
49707 if (rom_start)
49708 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49709+#endif
49710
49711 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49712
49713diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49714index 56d8486..f26113f 100644
49715--- a/drivers/pci/hotplug/pci_hotplug_core.c
49716+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49717@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49718 return -EINVAL;
49719 }
49720
49721- slot->ops->owner = owner;
49722- slot->ops->mod_name = mod_name;
49723+ pax_open_kernel();
49724+ *(struct module **)&slot->ops->owner = owner;
49725+ *(const char **)&slot->ops->mod_name = mod_name;
49726+ pax_close_kernel();
49727
49728 mutex_lock(&pci_hp_mutex);
49729 /*
49730diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49731index 07aa722..84514b4 100644
49732--- a/drivers/pci/hotplug/pciehp_core.c
49733+++ b/drivers/pci/hotplug/pciehp_core.c
49734@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49735 struct slot *slot = ctrl->slot;
49736 struct hotplug_slot *hotplug = NULL;
49737 struct hotplug_slot_info *info = NULL;
49738- struct hotplug_slot_ops *ops = NULL;
49739+ hotplug_slot_ops_no_const *ops = NULL;
49740 char name[SLOT_NAME_SIZE];
49741 int retval = -ENOMEM;
49742
49743diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49744index fd60806..ab6c565 100644
49745--- a/drivers/pci/msi.c
49746+++ b/drivers/pci/msi.c
49747@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49748 {
49749 struct attribute **msi_attrs;
49750 struct attribute *msi_attr;
49751- struct device_attribute *msi_dev_attr;
49752- struct attribute_group *msi_irq_group;
49753+ device_attribute_no_const *msi_dev_attr;
49754+ attribute_group_no_const *msi_irq_group;
49755 const struct attribute_group **msi_irq_groups;
49756 struct msi_desc *entry;
49757 int ret = -ENOMEM;
49758@@ -573,7 +573,7 @@ error_attrs:
49759 count = 0;
49760 msi_attr = msi_attrs[count];
49761 while (msi_attr) {
49762- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49763+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49764 kfree(msi_attr->name);
49765 kfree(msi_dev_attr);
49766 ++count;
49767diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49768index 312f23a..d21181c 100644
49769--- a/drivers/pci/pci-sysfs.c
49770+++ b/drivers/pci/pci-sysfs.c
49771@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49772 {
49773 /* allocate attribute structure, piggyback attribute name */
49774 int name_len = write_combine ? 13 : 10;
49775- struct bin_attribute *res_attr;
49776+ bin_attribute_no_const *res_attr;
49777 int retval;
49778
49779 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49780@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49781 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49782 {
49783 int retval;
49784- struct bin_attribute *attr;
49785+ bin_attribute_no_const *attr;
49786
49787 /* If the device has VPD, try to expose it in sysfs. */
49788 if (dev->vpd) {
49789@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49790 {
49791 int retval;
49792 int rom_size = 0;
49793- struct bin_attribute *attr;
49794+ bin_attribute_no_const *attr;
49795
49796 if (!sysfs_initialized)
49797 return -EACCES;
49798diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49799index d54632a..198c84d 100644
49800--- a/drivers/pci/pci.h
49801+++ b/drivers/pci/pci.h
49802@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49803 struct pci_vpd {
49804 unsigned int len;
49805 const struct pci_vpd_ops *ops;
49806- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49807+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49808 };
49809
49810 int pci_vpd_pci22_init(struct pci_dev *dev);
49811diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49812index e1e7026..d28dd33 100644
49813--- a/drivers/pci/pcie/aspm.c
49814+++ b/drivers/pci/pcie/aspm.c
49815@@ -27,9 +27,9 @@
49816 #define MODULE_PARAM_PREFIX "pcie_aspm."
49817
49818 /* Note: those are not register definitions */
49819-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49820-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49821-#define ASPM_STATE_L1 (4) /* L1 state */
49822+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49823+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49824+#define ASPM_STATE_L1 (4U) /* L1 state */
49825 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49826 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49827
49828diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
49829index 23212f8..65e945b 100644
49830--- a/drivers/pci/probe.c
49831+++ b/drivers/pci/probe.c
49832@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
49833 u16 orig_cmd;
49834 struct pci_bus_region region, inverted_region;
49835
49836- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
49837+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
49838
49839 /* No printks while decoding is disabled! */
49840 if (!dev->mmio_always_on) {
49841diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
49842index 3f155e7..0f4b1f0 100644
49843--- a/drivers/pci/proc.c
49844+++ b/drivers/pci/proc.c
49845@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
49846 static int __init pci_proc_init(void)
49847 {
49848 struct pci_dev *dev = NULL;
49849+
49850+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49851+#ifdef CONFIG_GRKERNSEC_PROC_USER
49852+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
49853+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49854+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49855+#endif
49856+#else
49857 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
49858+#endif
49859 proc_create("devices", 0, proc_bus_pci_dir,
49860 &proc_bus_pci_dev_operations);
49861 proc_initialized = 1;
49862diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
49863index b84fdd6..b89d829 100644
49864--- a/drivers/platform/chrome/chromeos_laptop.c
49865+++ b/drivers/platform/chrome/chromeos_laptop.c
49866@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
49867 .callback = chromeos_laptop_dmi_matched, \
49868 .driver_data = (void *)&board_
49869
49870-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
49871+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
49872 {
49873 .ident = "Samsung Series 5 550",
49874 .matches = {
49875diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
49876index 1e1e594..8fe59c5 100644
49877--- a/drivers/platform/x86/alienware-wmi.c
49878+++ b/drivers/platform/x86/alienware-wmi.c
49879@@ -150,7 +150,7 @@ struct wmax_led_args {
49880 } __packed;
49881
49882 static struct platform_device *platform_device;
49883-static struct device_attribute *zone_dev_attrs;
49884+static device_attribute_no_const *zone_dev_attrs;
49885 static struct attribute **zone_attrs;
49886 static struct platform_zone *zone_data;
49887
49888@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
49889 }
49890 };
49891
49892-static struct attribute_group zone_attribute_group = {
49893+static attribute_group_no_const zone_attribute_group = {
49894 .name = "rgb_zones",
49895 };
49896
49897diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
49898index 7543a56..367ca8ed 100644
49899--- a/drivers/platform/x86/asus-wmi.c
49900+++ b/drivers/platform/x86/asus-wmi.c
49901@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
49902 int err;
49903 u32 retval = -1;
49904
49905+#ifdef CONFIG_GRKERNSEC_KMEM
49906+ return -EPERM;
49907+#endif
49908+
49909 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
49910
49911 if (err < 0)
49912@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
49913 int err;
49914 u32 retval = -1;
49915
49916+#ifdef CONFIG_GRKERNSEC_KMEM
49917+ return -EPERM;
49918+#endif
49919+
49920 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
49921 &retval);
49922
49923@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
49924 union acpi_object *obj;
49925 acpi_status status;
49926
49927+#ifdef CONFIG_GRKERNSEC_KMEM
49928+ return -EPERM;
49929+#endif
49930+
49931 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
49932 1, asus->debug.method_id,
49933 &input, &output);
49934diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
49935index 0859877..1cf7d08 100644
49936--- a/drivers/platform/x86/msi-laptop.c
49937+++ b/drivers/platform/x86/msi-laptop.c
49938@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
49939
49940 if (!quirks->ec_read_only) {
49941 /* allow userland write sysfs file */
49942- dev_attr_bluetooth.store = store_bluetooth;
49943- dev_attr_wlan.store = store_wlan;
49944- dev_attr_threeg.store = store_threeg;
49945- dev_attr_bluetooth.attr.mode |= S_IWUSR;
49946- dev_attr_wlan.attr.mode |= S_IWUSR;
49947- dev_attr_threeg.attr.mode |= S_IWUSR;
49948+ pax_open_kernel();
49949+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
49950+ *(void **)&dev_attr_wlan.store = store_wlan;
49951+ *(void **)&dev_attr_threeg.store = store_threeg;
49952+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
49953+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
49954+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
49955+ pax_close_kernel();
49956 }
49957
49958 /* disable hardware control by fn key */
49959diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
49960index 6d2bac0..ec2b029 100644
49961--- a/drivers/platform/x86/msi-wmi.c
49962+++ b/drivers/platform/x86/msi-wmi.c
49963@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
49964 static void msi_wmi_notify(u32 value, void *context)
49965 {
49966 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
49967- static struct key_entry *key;
49968+ struct key_entry *key;
49969 union acpi_object *obj;
49970 acpi_status status;
49971
49972diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
49973index 6dd1c0e..5d602c7 100644
49974--- a/drivers/platform/x86/sony-laptop.c
49975+++ b/drivers/platform/x86/sony-laptop.c
49976@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
49977 }
49978
49979 /* High speed charging function */
49980-static struct device_attribute *hsc_handle;
49981+static device_attribute_no_const *hsc_handle;
49982
49983 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
49984 struct device_attribute *attr,
49985@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
49986 }
49987
49988 /* low battery function */
49989-static struct device_attribute *lowbatt_handle;
49990+static device_attribute_no_const *lowbatt_handle;
49991
49992 static ssize_t sony_nc_lowbatt_store(struct device *dev,
49993 struct device_attribute *attr,
49994@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
49995 }
49996
49997 /* fan speed function */
49998-static struct device_attribute *fan_handle, *hsf_handle;
49999+static device_attribute_no_const *fan_handle, *hsf_handle;
50000
50001 static ssize_t sony_nc_hsfan_store(struct device *dev,
50002 struct device_attribute *attr,
50003@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50004 }
50005
50006 /* USB charge function */
50007-static struct device_attribute *uc_handle;
50008+static device_attribute_no_const *uc_handle;
50009
50010 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50011 struct device_attribute *attr,
50012@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50013 }
50014
50015 /* Panel ID function */
50016-static struct device_attribute *panel_handle;
50017+static device_attribute_no_const *panel_handle;
50018
50019 static ssize_t sony_nc_panelid_show(struct device *dev,
50020 struct device_attribute *attr, char *buffer)
50021@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50022 }
50023
50024 /* smart connect function */
50025-static struct device_attribute *sc_handle;
50026+static device_attribute_no_const *sc_handle;
50027
50028 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50029 struct device_attribute *attr,
50030diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50031index c3d11fa..f83cded 100644
50032--- a/drivers/platform/x86/thinkpad_acpi.c
50033+++ b/drivers/platform/x86/thinkpad_acpi.c
50034@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
50035 return 0;
50036 }
50037
50038-void static hotkey_mask_warn_incomplete_mask(void)
50039+static void hotkey_mask_warn_incomplete_mask(void)
50040 {
50041 /* log only what the user can fix... */
50042 const u32 wantedmask = hotkey_driver_mask &
50043@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50044 && !tp_features.bright_unkfw)
50045 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50046 }
50047+}
50048
50049 #undef TPACPI_COMPARE_KEY
50050 #undef TPACPI_MAY_SEND_KEY
50051-}
50052
50053 /*
50054 * Polling driver
50055diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50056index 438d4c7..ca8a2fb 100644
50057--- a/drivers/pnp/pnpbios/bioscalls.c
50058+++ b/drivers/pnp/pnpbios/bioscalls.c
50059@@ -59,7 +59,7 @@ do { \
50060 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50061 } while(0)
50062
50063-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50064+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50065 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50066
50067 /*
50068@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50069
50070 cpu = get_cpu();
50071 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50072+
50073+ pax_open_kernel();
50074 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50075+ pax_close_kernel();
50076
50077 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50078 spin_lock_irqsave(&pnp_bios_lock, flags);
50079@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50080 :"memory");
50081 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50082
50083+ pax_open_kernel();
50084 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50085+ pax_close_kernel();
50086+
50087 put_cpu();
50088
50089 /* If we get here and this is set then the PnP BIOS faulted on us. */
50090@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50091 return status;
50092 }
50093
50094-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50095+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50096 {
50097 int i;
50098
50099@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50100 pnp_bios_callpoint.offset = header->fields.pm16offset;
50101 pnp_bios_callpoint.segment = PNP_CS16;
50102
50103+ pax_open_kernel();
50104+
50105 for_each_possible_cpu(i) {
50106 struct desc_struct *gdt = get_cpu_gdt_table(i);
50107 if (!gdt)
50108@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50109 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50110 (unsigned long)__va(header->fields.pm16dseg));
50111 }
50112+
50113+ pax_close_kernel();
50114 }
50115diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50116index 0c52e2a..3421ab7 100644
50117--- a/drivers/power/pda_power.c
50118+++ b/drivers/power/pda_power.c
50119@@ -37,7 +37,11 @@ static int polling;
50120
50121 #if IS_ENABLED(CONFIG_USB_PHY)
50122 static struct usb_phy *transceiver;
50123-static struct notifier_block otg_nb;
50124+static int otg_handle_notification(struct notifier_block *nb,
50125+ unsigned long event, void *unused);
50126+static struct notifier_block otg_nb = {
50127+ .notifier_call = otg_handle_notification
50128+};
50129 #endif
50130
50131 static struct regulator *ac_draw;
50132@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50133
50134 #if IS_ENABLED(CONFIG_USB_PHY)
50135 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50136- otg_nb.notifier_call = otg_handle_notification;
50137 ret = usb_register_notifier(transceiver, &otg_nb);
50138 if (ret) {
50139 dev_err(dev, "failure to register otg notifier\n");
50140diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50141index cc439fd..8fa30df 100644
50142--- a/drivers/power/power_supply.h
50143+++ b/drivers/power/power_supply.h
50144@@ -16,12 +16,12 @@ struct power_supply;
50145
50146 #ifdef CONFIG_SYSFS
50147
50148-extern void power_supply_init_attrs(struct device_type *dev_type);
50149+extern void power_supply_init_attrs(void);
50150 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50151
50152 #else
50153
50154-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50155+static inline void power_supply_init_attrs(void) {}
50156 #define power_supply_uevent NULL
50157
50158 #endif /* CONFIG_SYSFS */
50159diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50160index 694e8cd..9f03483 100644
50161--- a/drivers/power/power_supply_core.c
50162+++ b/drivers/power/power_supply_core.c
50163@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50164 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50165 EXPORT_SYMBOL_GPL(power_supply_notifier);
50166
50167-static struct device_type power_supply_dev_type;
50168+extern const struct attribute_group *power_supply_attr_groups[];
50169+static struct device_type power_supply_dev_type = {
50170+ .groups = power_supply_attr_groups,
50171+};
50172
50173 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50174 struct power_supply *supply)
50175@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50176 return PTR_ERR(power_supply_class);
50177
50178 power_supply_class->dev_uevent = power_supply_uevent;
50179- power_supply_init_attrs(&power_supply_dev_type);
50180+ power_supply_init_attrs();
50181
50182 return 0;
50183 }
50184diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50185index 62653f5..d0bb485 100644
50186--- a/drivers/power/power_supply_sysfs.c
50187+++ b/drivers/power/power_supply_sysfs.c
50188@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50189 .is_visible = power_supply_attr_is_visible,
50190 };
50191
50192-static const struct attribute_group *power_supply_attr_groups[] = {
50193+const struct attribute_group *power_supply_attr_groups[] = {
50194 &power_supply_attr_group,
50195 NULL,
50196 };
50197
50198-void power_supply_init_attrs(struct device_type *dev_type)
50199+void power_supply_init_attrs(void)
50200 {
50201 int i;
50202
50203- dev_type->groups = power_supply_attr_groups;
50204-
50205 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50206 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50207 }
50208diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50209index 84419af..268ede8 100644
50210--- a/drivers/powercap/powercap_sys.c
50211+++ b/drivers/powercap/powercap_sys.c
50212@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50213 struct device_attribute name_attr;
50214 };
50215
50216+static ssize_t show_constraint_name(struct device *dev,
50217+ struct device_attribute *dev_attr,
50218+ char *buf);
50219+
50220 static struct powercap_constraint_attr
50221- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50222+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50223+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50224+ .power_limit_attr = {
50225+ .attr = {
50226+ .name = NULL,
50227+ .mode = S_IWUSR | S_IRUGO
50228+ },
50229+ .show = show_constraint_power_limit_uw,
50230+ .store = store_constraint_power_limit_uw
50231+ },
50232+
50233+ .time_window_attr = {
50234+ .attr = {
50235+ .name = NULL,
50236+ .mode = S_IWUSR | S_IRUGO
50237+ },
50238+ .show = show_constraint_time_window_us,
50239+ .store = store_constraint_time_window_us
50240+ },
50241+
50242+ .max_power_attr = {
50243+ .attr = {
50244+ .name = NULL,
50245+ .mode = S_IRUGO
50246+ },
50247+ .show = show_constraint_max_power_uw,
50248+ .store = NULL
50249+ },
50250+
50251+ .min_power_attr = {
50252+ .attr = {
50253+ .name = NULL,
50254+ .mode = S_IRUGO
50255+ },
50256+ .show = show_constraint_min_power_uw,
50257+ .store = NULL
50258+ },
50259+
50260+ .max_time_window_attr = {
50261+ .attr = {
50262+ .name = NULL,
50263+ .mode = S_IRUGO
50264+ },
50265+ .show = show_constraint_max_time_window_us,
50266+ .store = NULL
50267+ },
50268+
50269+ .min_time_window_attr = {
50270+ .attr = {
50271+ .name = NULL,
50272+ .mode = S_IRUGO
50273+ },
50274+ .show = show_constraint_min_time_window_us,
50275+ .store = NULL
50276+ },
50277+
50278+ .name_attr = {
50279+ .attr = {
50280+ .name = NULL,
50281+ .mode = S_IRUGO
50282+ },
50283+ .show = show_constraint_name,
50284+ .store = NULL
50285+ }
50286+ }
50287+};
50288
50289 /* A list of powercap control_types */
50290 static LIST_HEAD(powercap_cntrl_list);
50291@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50292 }
50293
50294 static int create_constraint_attribute(int id, const char *name,
50295- int mode,
50296- struct device_attribute *dev_attr,
50297- ssize_t (*show)(struct device *,
50298- struct device_attribute *, char *),
50299- ssize_t (*store)(struct device *,
50300- struct device_attribute *,
50301- const char *, size_t)
50302- )
50303+ struct device_attribute *dev_attr)
50304 {
50305+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50306
50307- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50308- id, name);
50309- if (!dev_attr->attr.name)
50310+ if (!name)
50311 return -ENOMEM;
50312- dev_attr->attr.mode = mode;
50313- dev_attr->show = show;
50314- dev_attr->store = store;
50315+
50316+ pax_open_kernel();
50317+ *(const char **)&dev_attr->attr.name = name;
50318+ pax_close_kernel();
50319
50320 return 0;
50321 }
50322@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50323
50324 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50325 ret = create_constraint_attribute(i, "power_limit_uw",
50326- S_IWUSR | S_IRUGO,
50327- &constraint_attrs[i].power_limit_attr,
50328- show_constraint_power_limit_uw,
50329- store_constraint_power_limit_uw);
50330+ &constraint_attrs[i].power_limit_attr);
50331 if (ret)
50332 goto err_alloc;
50333 ret = create_constraint_attribute(i, "time_window_us",
50334- S_IWUSR | S_IRUGO,
50335- &constraint_attrs[i].time_window_attr,
50336- show_constraint_time_window_us,
50337- store_constraint_time_window_us);
50338+ &constraint_attrs[i].time_window_attr);
50339 if (ret)
50340 goto err_alloc;
50341- ret = create_constraint_attribute(i, "name", S_IRUGO,
50342- &constraint_attrs[i].name_attr,
50343- show_constraint_name,
50344- NULL);
50345+ ret = create_constraint_attribute(i, "name",
50346+ &constraint_attrs[i].name_attr);
50347 if (ret)
50348 goto err_alloc;
50349- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50350- &constraint_attrs[i].max_power_attr,
50351- show_constraint_max_power_uw,
50352- NULL);
50353+ ret = create_constraint_attribute(i, "max_power_uw",
50354+ &constraint_attrs[i].max_power_attr);
50355 if (ret)
50356 goto err_alloc;
50357- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50358- &constraint_attrs[i].min_power_attr,
50359- show_constraint_min_power_uw,
50360- NULL);
50361+ ret = create_constraint_attribute(i, "min_power_uw",
50362+ &constraint_attrs[i].min_power_attr);
50363 if (ret)
50364 goto err_alloc;
50365 ret = create_constraint_attribute(i, "max_time_window_us",
50366- S_IRUGO,
50367- &constraint_attrs[i].max_time_window_attr,
50368- show_constraint_max_time_window_us,
50369- NULL);
50370+ &constraint_attrs[i].max_time_window_attr);
50371 if (ret)
50372 goto err_alloc;
50373 ret = create_constraint_attribute(i, "min_time_window_us",
50374- S_IRUGO,
50375- &constraint_attrs[i].min_time_window_attr,
50376- show_constraint_min_time_window_us,
50377- NULL);
50378+ &constraint_attrs[i].min_time_window_attr);
50379 if (ret)
50380 goto err_alloc;
50381
50382@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50383 power_zone->zone_dev_attrs[count++] =
50384 &dev_attr_max_energy_range_uj.attr;
50385 if (power_zone->ops->get_energy_uj) {
50386+ pax_open_kernel();
50387 if (power_zone->ops->reset_energy_uj)
50388- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50389+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50390 else
50391- dev_attr_energy_uj.attr.mode = S_IRUGO;
50392+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50393+ pax_close_kernel();
50394 power_zone->zone_dev_attrs[count++] =
50395 &dev_attr_energy_uj.attr;
50396 }
50397diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50398index 9c5d414..c7900ce 100644
50399--- a/drivers/ptp/ptp_private.h
50400+++ b/drivers/ptp/ptp_private.h
50401@@ -51,7 +51,7 @@ struct ptp_clock {
50402 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50403 wait_queue_head_t tsev_wq;
50404 int defunct; /* tells readers to go away when clock is being removed */
50405- struct device_attribute *pin_dev_attr;
50406+ device_attribute_no_const *pin_dev_attr;
50407 struct attribute **pin_attr;
50408 struct attribute_group pin_attr_group;
50409 };
50410diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50411index 302e626..12579af 100644
50412--- a/drivers/ptp/ptp_sysfs.c
50413+++ b/drivers/ptp/ptp_sysfs.c
50414@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50415 goto no_pin_attr;
50416
50417 for (i = 0; i < n_pins; i++) {
50418- struct device_attribute *da = &ptp->pin_dev_attr[i];
50419+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50420 sysfs_attr_init(&da->attr);
50421 da->attr.name = info->pin_config[i].name;
50422 da->attr.mode = 0644;
50423diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50424index a5761d0..a2a4540 100644
50425--- a/drivers/regulator/core.c
50426+++ b/drivers/regulator/core.c
50427@@ -3591,7 +3591,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50428 {
50429 const struct regulation_constraints *constraints = NULL;
50430 const struct regulator_init_data *init_data;
50431- static atomic_t regulator_no = ATOMIC_INIT(0);
50432+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50433 struct regulator_dev *rdev;
50434 struct device *dev;
50435 int ret, i;
50436@@ -3665,7 +3665,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50437 rdev->dev.class = &regulator_class;
50438 rdev->dev.parent = dev;
50439 dev_set_name(&rdev->dev, "regulator.%d",
50440- atomic_inc_return(&regulator_no) - 1);
50441+ atomic_inc_return_unchecked(&regulator_no) - 1);
50442 ret = device_register(&rdev->dev);
50443 if (ret != 0) {
50444 put_device(&rdev->dev);
50445diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50446index 7eee2ca..4024513 100644
50447--- a/drivers/regulator/max8660.c
50448+++ b/drivers/regulator/max8660.c
50449@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50450 max8660->shadow_regs[MAX8660_OVER1] = 5;
50451 } else {
50452 /* Otherwise devices can be toggled via software */
50453- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50454- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50455+ pax_open_kernel();
50456+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50457+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50458+ pax_close_kernel();
50459 }
50460
50461 /*
50462diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50463index c3d55c2..0dddfe6 100644
50464--- a/drivers/regulator/max8973-regulator.c
50465+++ b/drivers/regulator/max8973-regulator.c
50466@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50467 if (!pdata || !pdata->enable_ext_control) {
50468 max->desc.enable_reg = MAX8973_VOUT;
50469 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50470- max->ops.enable = regulator_enable_regmap;
50471- max->ops.disable = regulator_disable_regmap;
50472- max->ops.is_enabled = regulator_is_enabled_regmap;
50473+ pax_open_kernel();
50474+ *(void **)&max->ops.enable = regulator_enable_regmap;
50475+ *(void **)&max->ops.disable = regulator_disable_regmap;
50476+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50477+ pax_close_kernel();
50478 }
50479
50480 if (pdata) {
50481diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50482index 0d17c92..a29f627 100644
50483--- a/drivers/regulator/mc13892-regulator.c
50484+++ b/drivers/regulator/mc13892-regulator.c
50485@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50486 mc13xxx_unlock(mc13892);
50487
50488 /* update mc13892_vcam ops */
50489- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50490+ pax_open_kernel();
50491+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50492 sizeof(struct regulator_ops));
50493- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50494- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50495+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50496+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50497+ pax_close_kernel();
50498 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50499
50500 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50501diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50502index 5b2e761..c8c8a4a 100644
50503--- a/drivers/rtc/rtc-cmos.c
50504+++ b/drivers/rtc/rtc-cmos.c
50505@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50506 hpet_rtc_timer_init();
50507
50508 /* export at least the first block of NVRAM */
50509- nvram.size = address_space - NVRAM_OFFSET;
50510+ pax_open_kernel();
50511+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50512+ pax_close_kernel();
50513 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50514 if (retval < 0) {
50515 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50516diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50517index d049393..bb20be0 100644
50518--- a/drivers/rtc/rtc-dev.c
50519+++ b/drivers/rtc/rtc-dev.c
50520@@ -16,6 +16,7 @@
50521 #include <linux/module.h>
50522 #include <linux/rtc.h>
50523 #include <linux/sched.h>
50524+#include <linux/grsecurity.h>
50525 #include "rtc-core.h"
50526
50527 static dev_t rtc_devt;
50528@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50529 if (copy_from_user(&tm, uarg, sizeof(tm)))
50530 return -EFAULT;
50531
50532+ gr_log_timechange();
50533+
50534 return rtc_set_time(rtc, &tm);
50535
50536 case RTC_PIE_ON:
50537diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50538index 4ffabb3..1f87fca 100644
50539--- a/drivers/rtc/rtc-ds1307.c
50540+++ b/drivers/rtc/rtc-ds1307.c
50541@@ -107,7 +107,7 @@ struct ds1307 {
50542 u8 offset; /* register's offset */
50543 u8 regs[11];
50544 u16 nvram_offset;
50545- struct bin_attribute *nvram;
50546+ bin_attribute_no_const *nvram;
50547 enum ds_type type;
50548 unsigned long flags;
50549 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50550diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50551index 90abb5b..e0bf6dd 100644
50552--- a/drivers/rtc/rtc-m48t59.c
50553+++ b/drivers/rtc/rtc-m48t59.c
50554@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50555 if (IS_ERR(m48t59->rtc))
50556 return PTR_ERR(m48t59->rtc);
50557
50558- m48t59_nvram_attr.size = pdata->offset;
50559+ pax_open_kernel();
50560+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50561+ pax_close_kernel();
50562
50563 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50564 if (ret)
50565diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50566index e693af6..2e525b6 100644
50567--- a/drivers/scsi/bfa/bfa_fcpim.h
50568+++ b/drivers/scsi/bfa/bfa_fcpim.h
50569@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50570
50571 struct bfa_itn_s {
50572 bfa_isr_func_t isr;
50573-};
50574+} __no_const;
50575
50576 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50577 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50578diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50579index 0f19455..ef7adb5 100644
50580--- a/drivers/scsi/bfa/bfa_fcs.c
50581+++ b/drivers/scsi/bfa/bfa_fcs.c
50582@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50583 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50584
50585 static struct bfa_fcs_mod_s fcs_modules[] = {
50586- { bfa_fcs_port_attach, NULL, NULL },
50587- { bfa_fcs_uf_attach, NULL, NULL },
50588- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50589- bfa_fcs_fabric_modexit },
50590+ {
50591+ .attach = bfa_fcs_port_attach,
50592+ .modinit = NULL,
50593+ .modexit = NULL
50594+ },
50595+ {
50596+ .attach = bfa_fcs_uf_attach,
50597+ .modinit = NULL,
50598+ .modexit = NULL
50599+ },
50600+ {
50601+ .attach = bfa_fcs_fabric_attach,
50602+ .modinit = bfa_fcs_fabric_modinit,
50603+ .modexit = bfa_fcs_fabric_modexit
50604+ },
50605 };
50606
50607 /*
50608diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50609index ff75ef8..2dfe00a 100644
50610--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50611+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50612@@ -89,15 +89,26 @@ static struct {
50613 void (*offline) (struct bfa_fcs_lport_s *port);
50614 } __port_action[] = {
50615 {
50616- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50617- bfa_fcs_lport_unknown_offline}, {
50618- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50619- bfa_fcs_lport_fab_offline}, {
50620- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50621- bfa_fcs_lport_n2n_offline}, {
50622- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50623- bfa_fcs_lport_loop_offline},
50624- };
50625+ .init = bfa_fcs_lport_unknown_init,
50626+ .online = bfa_fcs_lport_unknown_online,
50627+ .offline = bfa_fcs_lport_unknown_offline
50628+ },
50629+ {
50630+ .init = bfa_fcs_lport_fab_init,
50631+ .online = bfa_fcs_lport_fab_online,
50632+ .offline = bfa_fcs_lport_fab_offline
50633+ },
50634+ {
50635+ .init = bfa_fcs_lport_n2n_init,
50636+ .online = bfa_fcs_lport_n2n_online,
50637+ .offline = bfa_fcs_lport_n2n_offline
50638+ },
50639+ {
50640+ .init = bfa_fcs_lport_loop_init,
50641+ .online = bfa_fcs_lport_loop_online,
50642+ .offline = bfa_fcs_lport_loop_offline
50643+ },
50644+};
50645
50646 /*
50647 * fcs_port_sm FCS logical port state machine
50648diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50649index a38aafa0..fe8f03b 100644
50650--- a/drivers/scsi/bfa/bfa_ioc.h
50651+++ b/drivers/scsi/bfa/bfa_ioc.h
50652@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50653 bfa_ioc_disable_cbfn_t disable_cbfn;
50654 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50655 bfa_ioc_reset_cbfn_t reset_cbfn;
50656-};
50657+} __no_const;
50658
50659 /*
50660 * IOC event notification mechanism.
50661@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50662 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50663 enum bfi_ioc_state fwstate);
50664 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50665-};
50666+} __no_const;
50667
50668 /*
50669 * Queue element to wait for room in request queue. FIFO order is
50670diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50671index a14c784..6de6790 100644
50672--- a/drivers/scsi/bfa/bfa_modules.h
50673+++ b/drivers/scsi/bfa/bfa_modules.h
50674@@ -78,12 +78,12 @@ enum {
50675 \
50676 extern struct bfa_module_s hal_mod_ ## __mod; \
50677 struct bfa_module_s hal_mod_ ## __mod = { \
50678- bfa_ ## __mod ## _meminfo, \
50679- bfa_ ## __mod ## _attach, \
50680- bfa_ ## __mod ## _detach, \
50681- bfa_ ## __mod ## _start, \
50682- bfa_ ## __mod ## _stop, \
50683- bfa_ ## __mod ## _iocdisable, \
50684+ .meminfo = bfa_ ## __mod ## _meminfo, \
50685+ .attach = bfa_ ## __mod ## _attach, \
50686+ .detach = bfa_ ## __mod ## _detach, \
50687+ .start = bfa_ ## __mod ## _start, \
50688+ .stop = bfa_ ## __mod ## _stop, \
50689+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50690 }
50691
50692 #define BFA_CACHELINE_SZ (256)
50693diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50694index 045c4e1..13de803 100644
50695--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50696+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50697@@ -33,8 +33,8 @@
50698 */
50699 #include "libfcoe.h"
50700
50701-static atomic_t ctlr_num;
50702-static atomic_t fcf_num;
50703+static atomic_unchecked_t ctlr_num;
50704+static atomic_unchecked_t fcf_num;
50705
50706 /*
50707 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50708@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50709 if (!ctlr)
50710 goto out;
50711
50712- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50713+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50714 ctlr->f = f;
50715 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50716 INIT_LIST_HEAD(&ctlr->fcfs);
50717@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50718 fcf->dev.parent = &ctlr->dev;
50719 fcf->dev.bus = &fcoe_bus_type;
50720 fcf->dev.type = &fcoe_fcf_device_type;
50721- fcf->id = atomic_inc_return(&fcf_num) - 1;
50722+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50723 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50724
50725 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50726@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50727 {
50728 int error;
50729
50730- atomic_set(&ctlr_num, 0);
50731- atomic_set(&fcf_num, 0);
50732+ atomic_set_unchecked(&ctlr_num, 0);
50733+ atomic_set_unchecked(&fcf_num, 0);
50734
50735 error = bus_register(&fcoe_bus_type);
50736 if (error)
50737diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50738index 8bb173e..20236b4 100644
50739--- a/drivers/scsi/hosts.c
50740+++ b/drivers/scsi/hosts.c
50741@@ -42,7 +42,7 @@
50742 #include "scsi_logging.h"
50743
50744
50745-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50746+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50747
50748
50749 static void scsi_host_cls_release(struct device *dev)
50750@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50751 * subtract one because we increment first then return, but we need to
50752 * know what the next host number was before increment
50753 */
50754- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50755+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50756 shost->dma_channel = 0xff;
50757
50758 /* These three are default values which can be overridden */
50759diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50760index 6bb4611..0203251 100644
50761--- a/drivers/scsi/hpsa.c
50762+++ b/drivers/scsi/hpsa.c
50763@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50764 struct reply_queue_buffer *rq = &h->reply_queue[q];
50765
50766 if (h->transMethod & CFGTBL_Trans_io_accel1)
50767- return h->access.command_completed(h, q);
50768+ return h->access->command_completed(h, q);
50769
50770 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50771- return h->access.command_completed(h, q);
50772+ return h->access->command_completed(h, q);
50773
50774 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50775 a = rq->head[rq->current_entry];
50776@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50777 while (!list_empty(&h->reqQ)) {
50778 c = list_entry(h->reqQ.next, struct CommandList, list);
50779 /* can't do anything if fifo is full */
50780- if ((h->access.fifo_full(h))) {
50781+ if ((h->access->fifo_full(h))) {
50782 h->fifo_recently_full = 1;
50783 dev_warn(&h->pdev->dev, "fifo full\n");
50784 break;
50785@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50786 atomic_inc(&h->commands_outstanding);
50787 spin_unlock_irqrestore(&h->lock, *flags);
50788 /* Tell the controller execute command */
50789- h->access.submit_command(h, c);
50790+ h->access->submit_command(h, c);
50791 spin_lock_irqsave(&h->lock, *flags);
50792 }
50793 }
50794@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50795
50796 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50797 {
50798- return h->access.command_completed(h, q);
50799+ return h->access->command_completed(h, q);
50800 }
50801
50802 static inline bool interrupt_pending(struct ctlr_info *h)
50803 {
50804- return h->access.intr_pending(h);
50805+ return h->access->intr_pending(h);
50806 }
50807
50808 static inline long interrupt_not_for_us(struct ctlr_info *h)
50809 {
50810- return (h->access.intr_pending(h) == 0) ||
50811+ return (h->access->intr_pending(h) == 0) ||
50812 (h->interrupts_enabled == 0);
50813 }
50814
50815@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50816 if (prod_index < 0)
50817 return -ENODEV;
50818 h->product_name = products[prod_index].product_name;
50819- h->access = *(products[prod_index].access);
50820+ h->access = products[prod_index].access;
50821
50822 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50823 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50824@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50825 unsigned long flags;
50826 u32 lockup_detected;
50827
50828- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50829+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50830 spin_lock_irqsave(&h->lock, flags);
50831 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
50832 if (!lockup_detected) {
50833@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
50834 }
50835
50836 /* make sure the board interrupts are off */
50837- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50838+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50839
50840 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
50841 goto clean2;
50842@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
50843 * fake ones to scoop up any residual completions.
50844 */
50845 spin_lock_irqsave(&h->lock, flags);
50846- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50847+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50848 spin_unlock_irqrestore(&h->lock, flags);
50849 free_irqs(h);
50850 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
50851@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
50852 dev_info(&h->pdev->dev, "Board READY.\n");
50853 dev_info(&h->pdev->dev,
50854 "Waiting for stale completions to drain.\n");
50855- h->access.set_intr_mask(h, HPSA_INTR_ON);
50856+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50857 msleep(10000);
50858- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50859+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50860
50861 rc = controller_reset_failed(h->cfgtable);
50862 if (rc)
50863@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
50864 h->drv_req_rescan = 0;
50865
50866 /* Turn the interrupts on so we can service requests */
50867- h->access.set_intr_mask(h, HPSA_INTR_ON);
50868+ h->access->set_intr_mask(h, HPSA_INTR_ON);
50869
50870 hpsa_hba_inquiry(h);
50871 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
50872@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
50873 * To write all data in the battery backed cache to disks
50874 */
50875 hpsa_flush_cache(h);
50876- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50877+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50878 hpsa_free_irqs_and_disable_msix(h);
50879 }
50880
50881@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50882 CFGTBL_Trans_enable_directed_msix |
50883 (trans_support & (CFGTBL_Trans_io_accel1 |
50884 CFGTBL_Trans_io_accel2));
50885- struct access_method access = SA5_performant_access;
50886+ struct access_method *access = &SA5_performant_access;
50887
50888 /* This is a bit complicated. There are 8 registers on
50889 * the controller which we write to to tell it 8 different
50890@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50891 * perform the superfluous readl() after each command submission.
50892 */
50893 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
50894- access = SA5_performant_access_no_read;
50895+ access = &SA5_performant_access_no_read;
50896
50897 /* Controller spec: zero out this buffer. */
50898 for (i = 0; i < h->nreply_queues; i++)
50899@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
50900 * enable outbound interrupt coalescing in accelerator mode;
50901 */
50902 if (trans_support & CFGTBL_Trans_io_accel1) {
50903- access = SA5_ioaccel_mode1_access;
50904+ access = &SA5_ioaccel_mode1_access;
50905 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50906 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50907 } else {
50908 if (trans_support & CFGTBL_Trans_io_accel2) {
50909- access = SA5_ioaccel_mode2_access;
50910+ access = &SA5_ioaccel_mode2_access;
50911 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
50912 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
50913 }
50914diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
50915index 8e06d9e..396e0a1 100644
50916--- a/drivers/scsi/hpsa.h
50917+++ b/drivers/scsi/hpsa.h
50918@@ -127,7 +127,7 @@ struct ctlr_info {
50919 unsigned int msix_vector;
50920 unsigned int msi_vector;
50921 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
50922- struct access_method access;
50923+ struct access_method *access;
50924 char hba_mode_enabled;
50925
50926 /* queue and queue Info */
50927@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
50928 }
50929
50930 static struct access_method SA5_access = {
50931- SA5_submit_command,
50932- SA5_intr_mask,
50933- SA5_fifo_full,
50934- SA5_intr_pending,
50935- SA5_completed,
50936+ .submit_command = SA5_submit_command,
50937+ .set_intr_mask = SA5_intr_mask,
50938+ .fifo_full = SA5_fifo_full,
50939+ .intr_pending = SA5_intr_pending,
50940+ .command_completed = SA5_completed,
50941 };
50942
50943 static struct access_method SA5_ioaccel_mode1_access = {
50944- SA5_submit_command,
50945- SA5_performant_intr_mask,
50946- SA5_fifo_full,
50947- SA5_ioaccel_mode1_intr_pending,
50948- SA5_ioaccel_mode1_completed,
50949+ .submit_command = SA5_submit_command,
50950+ .set_intr_mask = SA5_performant_intr_mask,
50951+ .fifo_full = SA5_fifo_full,
50952+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
50953+ .command_completed = SA5_ioaccel_mode1_completed,
50954 };
50955
50956 static struct access_method SA5_ioaccel_mode2_access = {
50957- SA5_submit_command_ioaccel2,
50958- SA5_performant_intr_mask,
50959- SA5_fifo_full,
50960- SA5_performant_intr_pending,
50961- SA5_performant_completed,
50962+ .submit_command = SA5_submit_command_ioaccel2,
50963+ .set_intr_mask = SA5_performant_intr_mask,
50964+ .fifo_full = SA5_fifo_full,
50965+ .intr_pending = SA5_performant_intr_pending,
50966+ .command_completed = SA5_performant_completed,
50967 };
50968
50969 static struct access_method SA5_performant_access = {
50970- SA5_submit_command,
50971- SA5_performant_intr_mask,
50972- SA5_fifo_full,
50973- SA5_performant_intr_pending,
50974- SA5_performant_completed,
50975+ .submit_command = SA5_submit_command,
50976+ .set_intr_mask = SA5_performant_intr_mask,
50977+ .fifo_full = SA5_fifo_full,
50978+ .intr_pending = SA5_performant_intr_pending,
50979+ .command_completed = SA5_performant_completed,
50980 };
50981
50982 static struct access_method SA5_performant_access_no_read = {
50983- SA5_submit_command_no_read,
50984- SA5_performant_intr_mask,
50985- SA5_fifo_full,
50986- SA5_performant_intr_pending,
50987- SA5_performant_completed,
50988+ .submit_command = SA5_submit_command_no_read,
50989+ .set_intr_mask = SA5_performant_intr_mask,
50990+ .fifo_full = SA5_fifo_full,
50991+ .intr_pending = SA5_performant_intr_pending,
50992+ .command_completed = SA5_performant_completed,
50993 };
50994
50995 struct board_type {
50996diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
50997index 1b3a094..068e683 100644
50998--- a/drivers/scsi/libfc/fc_exch.c
50999+++ b/drivers/scsi/libfc/fc_exch.c
51000@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51001 u16 pool_max_index;
51002
51003 struct {
51004- atomic_t no_free_exch;
51005- atomic_t no_free_exch_xid;
51006- atomic_t xid_not_found;
51007- atomic_t xid_busy;
51008- atomic_t seq_not_found;
51009- atomic_t non_bls_resp;
51010+ atomic_unchecked_t no_free_exch;
51011+ atomic_unchecked_t no_free_exch_xid;
51012+ atomic_unchecked_t xid_not_found;
51013+ atomic_unchecked_t xid_busy;
51014+ atomic_unchecked_t seq_not_found;
51015+ atomic_unchecked_t non_bls_resp;
51016 } stats;
51017 };
51018
51019@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51020 /* allocate memory for exchange */
51021 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51022 if (!ep) {
51023- atomic_inc(&mp->stats.no_free_exch);
51024+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51025 goto out;
51026 }
51027 memset(ep, 0, sizeof(*ep));
51028@@ -874,7 +874,7 @@ out:
51029 return ep;
51030 err:
51031 spin_unlock_bh(&pool->lock);
51032- atomic_inc(&mp->stats.no_free_exch_xid);
51033+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51034 mempool_free(ep, mp->ep_pool);
51035 return NULL;
51036 }
51037@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51038 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51039 ep = fc_exch_find(mp, xid);
51040 if (!ep) {
51041- atomic_inc(&mp->stats.xid_not_found);
51042+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51043 reject = FC_RJT_OX_ID;
51044 goto out;
51045 }
51046@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51047 ep = fc_exch_find(mp, xid);
51048 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51049 if (ep) {
51050- atomic_inc(&mp->stats.xid_busy);
51051+ atomic_inc_unchecked(&mp->stats.xid_busy);
51052 reject = FC_RJT_RX_ID;
51053 goto rel;
51054 }
51055@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51056 }
51057 xid = ep->xid; /* get our XID */
51058 } else if (!ep) {
51059- atomic_inc(&mp->stats.xid_not_found);
51060+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51061 reject = FC_RJT_RX_ID; /* XID not found */
51062 goto out;
51063 }
51064@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51065 } else {
51066 sp = &ep->seq;
51067 if (sp->id != fh->fh_seq_id) {
51068- atomic_inc(&mp->stats.seq_not_found);
51069+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51070 if (f_ctl & FC_FC_END_SEQ) {
51071 /*
51072 * Update sequence_id based on incoming last
51073@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51074
51075 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51076 if (!ep) {
51077- atomic_inc(&mp->stats.xid_not_found);
51078+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51079 goto out;
51080 }
51081 if (ep->esb_stat & ESB_ST_COMPLETE) {
51082- atomic_inc(&mp->stats.xid_not_found);
51083+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51084 goto rel;
51085 }
51086 if (ep->rxid == FC_XID_UNKNOWN)
51087 ep->rxid = ntohs(fh->fh_rx_id);
51088 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51089- atomic_inc(&mp->stats.xid_not_found);
51090+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51091 goto rel;
51092 }
51093 if (ep->did != ntoh24(fh->fh_s_id) &&
51094 ep->did != FC_FID_FLOGI) {
51095- atomic_inc(&mp->stats.xid_not_found);
51096+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51097 goto rel;
51098 }
51099 sof = fr_sof(fp);
51100@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51101 sp->ssb_stat |= SSB_ST_RESP;
51102 sp->id = fh->fh_seq_id;
51103 } else if (sp->id != fh->fh_seq_id) {
51104- atomic_inc(&mp->stats.seq_not_found);
51105+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51106 goto rel;
51107 }
51108
51109@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51110 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51111
51112 if (!sp)
51113- atomic_inc(&mp->stats.xid_not_found);
51114+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51115 else
51116- atomic_inc(&mp->stats.non_bls_resp);
51117+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51118
51119 fc_frame_free(fp);
51120 }
51121@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51122
51123 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51124 mp = ema->mp;
51125- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51126+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51127 st->fc_no_free_exch_xid +=
51128- atomic_read(&mp->stats.no_free_exch_xid);
51129- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51130- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51131- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51132- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51133+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51134+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51135+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51136+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51137+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51138 }
51139 }
51140 EXPORT_SYMBOL(fc_exch_update_stats);
51141diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51142index 932d9cc..50c7ee9 100644
51143--- a/drivers/scsi/libsas/sas_ata.c
51144+++ b/drivers/scsi/libsas/sas_ata.c
51145@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51146 .postreset = ata_std_postreset,
51147 .error_handler = ata_std_error_handler,
51148 .post_internal_cmd = sas_ata_post_internal,
51149- .qc_defer = ata_std_qc_defer,
51150+ .qc_defer = ata_std_qc_defer,
51151 .qc_prep = ata_noop_qc_prep,
51152 .qc_issue = sas_ata_qc_issue,
51153 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51154diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51155index 434e903..5a4a79b 100644
51156--- a/drivers/scsi/lpfc/lpfc.h
51157+++ b/drivers/scsi/lpfc/lpfc.h
51158@@ -430,7 +430,7 @@ struct lpfc_vport {
51159 struct dentry *debug_nodelist;
51160 struct dentry *vport_debugfs_root;
51161 struct lpfc_debugfs_trc *disc_trc;
51162- atomic_t disc_trc_cnt;
51163+ atomic_unchecked_t disc_trc_cnt;
51164 #endif
51165 uint8_t stat_data_enabled;
51166 uint8_t stat_data_blocked;
51167@@ -880,8 +880,8 @@ struct lpfc_hba {
51168 struct timer_list fabric_block_timer;
51169 unsigned long bit_flags;
51170 #define FABRIC_COMANDS_BLOCKED 0
51171- atomic_t num_rsrc_err;
51172- atomic_t num_cmd_success;
51173+ atomic_unchecked_t num_rsrc_err;
51174+ atomic_unchecked_t num_cmd_success;
51175 unsigned long last_rsrc_error_time;
51176 unsigned long last_ramp_down_time;
51177 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51178@@ -916,7 +916,7 @@ struct lpfc_hba {
51179
51180 struct dentry *debug_slow_ring_trc;
51181 struct lpfc_debugfs_trc *slow_ring_trc;
51182- atomic_t slow_ring_trc_cnt;
51183+ atomic_unchecked_t slow_ring_trc_cnt;
51184 /* iDiag debugfs sub-directory */
51185 struct dentry *idiag_root;
51186 struct dentry *idiag_pci_cfg;
51187diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51188index 5633e7d..8272114 100644
51189--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51190+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51191@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51192
51193 #include <linux/debugfs.h>
51194
51195-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51196+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51197 static unsigned long lpfc_debugfs_start_time = 0L;
51198
51199 /* iDiag */
51200@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51201 lpfc_debugfs_enable = 0;
51202
51203 len = 0;
51204- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51205+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51206 (lpfc_debugfs_max_disc_trc - 1);
51207 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51208 dtp = vport->disc_trc + i;
51209@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51210 lpfc_debugfs_enable = 0;
51211
51212 len = 0;
51213- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51214+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51215 (lpfc_debugfs_max_slow_ring_trc - 1);
51216 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51217 dtp = phba->slow_ring_trc + i;
51218@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51219 !vport || !vport->disc_trc)
51220 return;
51221
51222- index = atomic_inc_return(&vport->disc_trc_cnt) &
51223+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51224 (lpfc_debugfs_max_disc_trc - 1);
51225 dtp = vport->disc_trc + index;
51226 dtp->fmt = fmt;
51227 dtp->data1 = data1;
51228 dtp->data2 = data2;
51229 dtp->data3 = data3;
51230- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51231+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51232 dtp->jif = jiffies;
51233 #endif
51234 return;
51235@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51236 !phba || !phba->slow_ring_trc)
51237 return;
51238
51239- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51240+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51241 (lpfc_debugfs_max_slow_ring_trc - 1);
51242 dtp = phba->slow_ring_trc + index;
51243 dtp->fmt = fmt;
51244 dtp->data1 = data1;
51245 dtp->data2 = data2;
51246 dtp->data3 = data3;
51247- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51248+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51249 dtp->jif = jiffies;
51250 #endif
51251 return;
51252@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51253 "slow_ring buffer\n");
51254 goto debug_failed;
51255 }
51256- atomic_set(&phba->slow_ring_trc_cnt, 0);
51257+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51258 memset(phba->slow_ring_trc, 0,
51259 (sizeof(struct lpfc_debugfs_trc) *
51260 lpfc_debugfs_max_slow_ring_trc));
51261@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51262 "buffer\n");
51263 goto debug_failed;
51264 }
51265- atomic_set(&vport->disc_trc_cnt, 0);
51266+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51267
51268 snprintf(name, sizeof(name), "discovery_trace");
51269 vport->debug_disc_trc =
51270diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51271index 0b2c53a..aec2b45 100644
51272--- a/drivers/scsi/lpfc/lpfc_init.c
51273+++ b/drivers/scsi/lpfc/lpfc_init.c
51274@@ -11290,8 +11290,10 @@ lpfc_init(void)
51275 "misc_register returned with status %d", error);
51276
51277 if (lpfc_enable_npiv) {
51278- lpfc_transport_functions.vport_create = lpfc_vport_create;
51279- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51280+ pax_open_kernel();
51281+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51282+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51283+ pax_close_kernel();
51284 }
51285 lpfc_transport_template =
51286 fc_attach_transport(&lpfc_transport_functions);
51287diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51288index 4f9222e..f1850e3 100644
51289--- a/drivers/scsi/lpfc/lpfc_scsi.c
51290+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51291@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51292 unsigned long expires;
51293
51294 spin_lock_irqsave(&phba->hbalock, flags);
51295- atomic_inc(&phba->num_rsrc_err);
51296+ atomic_inc_unchecked(&phba->num_rsrc_err);
51297 phba->last_rsrc_error_time = jiffies;
51298
51299 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51300@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51301 unsigned long num_rsrc_err, num_cmd_success;
51302 int i;
51303
51304- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51305- num_cmd_success = atomic_read(&phba->num_cmd_success);
51306+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51307+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51308
51309 /*
51310 * The error and success command counters are global per
51311@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51312 }
51313 }
51314 lpfc_destroy_vport_work_array(phba, vports);
51315- atomic_set(&phba->num_rsrc_err, 0);
51316- atomic_set(&phba->num_cmd_success, 0);
51317+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51318+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51319 }
51320
51321 /**
51322diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51323index 6a1c036..38e0e8d 100644
51324--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51325+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51326@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51327 {
51328 struct scsi_device *sdev = to_scsi_device(dev);
51329 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51330- static struct _raid_device *raid_device;
51331+ struct _raid_device *raid_device;
51332 unsigned long flags;
51333 Mpi2RaidVolPage0_t vol_pg0;
51334 Mpi2ConfigReply_t mpi_reply;
51335@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51336 {
51337 struct scsi_device *sdev = to_scsi_device(dev);
51338 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51339- static struct _raid_device *raid_device;
51340+ struct _raid_device *raid_device;
51341 unsigned long flags;
51342 Mpi2RaidVolPage0_t vol_pg0;
51343 Mpi2ConfigReply_t mpi_reply;
51344@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51345 Mpi2EventDataIrOperationStatus_t *event_data =
51346 (Mpi2EventDataIrOperationStatus_t *)
51347 fw_event->event_data;
51348- static struct _raid_device *raid_device;
51349+ struct _raid_device *raid_device;
51350 unsigned long flags;
51351 u16 handle;
51352
51353@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51354 u64 sas_address;
51355 struct _sas_device *sas_device;
51356 struct _sas_node *expander_device;
51357- static struct _raid_device *raid_device;
51358+ struct _raid_device *raid_device;
51359 u8 retry_count;
51360 unsigned long flags;
51361
51362diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51363index 8c27b6a..607f56e 100644
51364--- a/drivers/scsi/pmcraid.c
51365+++ b/drivers/scsi/pmcraid.c
51366@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51367 res->scsi_dev = scsi_dev;
51368 scsi_dev->hostdata = res;
51369 res->change_detected = 0;
51370- atomic_set(&res->read_failures, 0);
51371- atomic_set(&res->write_failures, 0);
51372+ atomic_set_unchecked(&res->read_failures, 0);
51373+ atomic_set_unchecked(&res->write_failures, 0);
51374 rc = 0;
51375 }
51376 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51377@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51378
51379 /* If this was a SCSI read/write command keep count of errors */
51380 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51381- atomic_inc(&res->read_failures);
51382+ atomic_inc_unchecked(&res->read_failures);
51383 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51384- atomic_inc(&res->write_failures);
51385+ atomic_inc_unchecked(&res->write_failures);
51386
51387 if (!RES_IS_GSCSI(res->cfg_entry) &&
51388 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51389@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51390 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51391 * hrrq_id assigned here in queuecommand
51392 */
51393- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51394+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51395 pinstance->num_hrrq;
51396 cmd->cmd_done = pmcraid_io_done;
51397
51398@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51399 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51400 * hrrq_id assigned here in queuecommand
51401 */
51402- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51403+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51404 pinstance->num_hrrq;
51405
51406 if (request_size) {
51407@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51408
51409 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51410 /* add resources only after host is added into system */
51411- if (!atomic_read(&pinstance->expose_resources))
51412+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51413 return;
51414
51415 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51416@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51417 init_waitqueue_head(&pinstance->reset_wait_q);
51418
51419 atomic_set(&pinstance->outstanding_cmds, 0);
51420- atomic_set(&pinstance->last_message_id, 0);
51421- atomic_set(&pinstance->expose_resources, 0);
51422+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51423+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51424
51425 INIT_LIST_HEAD(&pinstance->free_res_q);
51426 INIT_LIST_HEAD(&pinstance->used_res_q);
51427@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51428 /* Schedule worker thread to handle CCN and take care of adding and
51429 * removing devices to OS
51430 */
51431- atomic_set(&pinstance->expose_resources, 1);
51432+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51433 schedule_work(&pinstance->worker_q);
51434 return rc;
51435
51436diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51437index e1d150f..6c6df44 100644
51438--- a/drivers/scsi/pmcraid.h
51439+++ b/drivers/scsi/pmcraid.h
51440@@ -748,7 +748,7 @@ struct pmcraid_instance {
51441 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51442
51443 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51444- atomic_t last_message_id;
51445+ atomic_unchecked_t last_message_id;
51446
51447 /* configuration table */
51448 struct pmcraid_config_table *cfg_table;
51449@@ -777,7 +777,7 @@ struct pmcraid_instance {
51450 atomic_t outstanding_cmds;
51451
51452 /* should add/delete resources to mid-layer now ?*/
51453- atomic_t expose_resources;
51454+ atomic_unchecked_t expose_resources;
51455
51456
51457
51458@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51459 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51460 };
51461 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51462- atomic_t read_failures; /* count of failed READ commands */
51463- atomic_t write_failures; /* count of failed WRITE commands */
51464+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51465+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51466
51467 /* To indicate add/delete/modify during CCN */
51468 u8 change_detected;
51469diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51470index 82b92c4..3178171 100644
51471--- a/drivers/scsi/qla2xxx/qla_attr.c
51472+++ b/drivers/scsi/qla2xxx/qla_attr.c
51473@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51474 return 0;
51475 }
51476
51477-struct fc_function_template qla2xxx_transport_functions = {
51478+fc_function_template_no_const qla2xxx_transport_functions = {
51479
51480 .show_host_node_name = 1,
51481 .show_host_port_name = 1,
51482@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51483 .bsg_timeout = qla24xx_bsg_timeout,
51484 };
51485
51486-struct fc_function_template qla2xxx_transport_vport_functions = {
51487+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51488
51489 .show_host_node_name = 1,
51490 .show_host_port_name = 1,
51491diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51492index 7686bfe..4710893 100644
51493--- a/drivers/scsi/qla2xxx/qla_gbl.h
51494+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51495@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51496 struct device_attribute;
51497 extern struct device_attribute *qla2x00_host_attrs[];
51498 struct fc_function_template;
51499-extern struct fc_function_template qla2xxx_transport_functions;
51500-extern struct fc_function_template qla2xxx_transport_vport_functions;
51501+extern fc_function_template_no_const qla2xxx_transport_functions;
51502+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51503 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51504 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51505 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51506diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51507index cce1cbc..5b9f0fe 100644
51508--- a/drivers/scsi/qla2xxx/qla_os.c
51509+++ b/drivers/scsi/qla2xxx/qla_os.c
51510@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51511 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51512 /* Ok, a 64bit DMA mask is applicable. */
51513 ha->flags.enable_64bit_addressing = 1;
51514- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51515- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51516+ pax_open_kernel();
51517+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51518+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51519+ pax_close_kernel();
51520 return;
51521 }
51522 }
51523diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51524index 8f6d0fb..1b21097 100644
51525--- a/drivers/scsi/qla4xxx/ql4_def.h
51526+++ b/drivers/scsi/qla4xxx/ql4_def.h
51527@@ -305,7 +305,7 @@ struct ddb_entry {
51528 * (4000 only) */
51529 atomic_t relogin_timer; /* Max Time to wait for
51530 * relogin to complete */
51531- atomic_t relogin_retry_count; /* Num of times relogin has been
51532+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51533 * retried */
51534 uint32_t default_time2wait; /* Default Min time between
51535 * relogins (+aens) */
51536diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51537index 6d25879..3031a9f 100644
51538--- a/drivers/scsi/qla4xxx/ql4_os.c
51539+++ b/drivers/scsi/qla4xxx/ql4_os.c
51540@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51541 */
51542 if (!iscsi_is_session_online(cls_sess)) {
51543 /* Reset retry relogin timer */
51544- atomic_inc(&ddb_entry->relogin_retry_count);
51545+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51546 DEBUG2(ql4_printk(KERN_INFO, ha,
51547 "%s: index[%d] relogin timed out-retrying"
51548 " relogin (%d), retry (%d)\n", __func__,
51549 ddb_entry->fw_ddb_index,
51550- atomic_read(&ddb_entry->relogin_retry_count),
51551+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51552 ddb_entry->default_time2wait + 4));
51553 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51554 atomic_set(&ddb_entry->retry_relogin_timer,
51555@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51556
51557 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51558 atomic_set(&ddb_entry->relogin_timer, 0);
51559- atomic_set(&ddb_entry->relogin_retry_count, 0);
51560+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51561 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51562 ddb_entry->default_relogin_timeout =
51563 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51564diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51565index 17bb541..85f4508 100644
51566--- a/drivers/scsi/scsi_lib.c
51567+++ b/drivers/scsi/scsi_lib.c
51568@@ -1595,7 +1595,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51569 shost = sdev->host;
51570 scsi_init_cmd_errh(cmd);
51571 cmd->result = DID_NO_CONNECT << 16;
51572- atomic_inc(&cmd->device->iorequest_cnt);
51573+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51574
51575 /*
51576 * SCSI request completion path will do scsi_device_unbusy(),
51577@@ -1618,9 +1618,9 @@ static void scsi_softirq_done(struct request *rq)
51578
51579 INIT_LIST_HEAD(&cmd->eh_entry);
51580
51581- atomic_inc(&cmd->device->iodone_cnt);
51582+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51583 if (cmd->result)
51584- atomic_inc(&cmd->device->ioerr_cnt);
51585+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51586
51587 disposition = scsi_decide_disposition(cmd);
51588 if (disposition != SUCCESS &&
51589@@ -1661,7 +1661,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51590 struct Scsi_Host *host = cmd->device->host;
51591 int rtn = 0;
51592
51593- atomic_inc(&cmd->device->iorequest_cnt);
51594+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51595
51596 /* check if the device is still usable */
51597 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51598diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51599index 1ac38e7..6acc656 100644
51600--- a/drivers/scsi/scsi_sysfs.c
51601+++ b/drivers/scsi/scsi_sysfs.c
51602@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51603 char *buf) \
51604 { \
51605 struct scsi_device *sdev = to_scsi_device(dev); \
51606- unsigned long long count = atomic_read(&sdev->field); \
51607+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51608 return snprintf(buf, 20, "0x%llx\n", count); \
51609 } \
51610 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51611diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51612index 5d6f348..18778a6b 100644
51613--- a/drivers/scsi/scsi_transport_fc.c
51614+++ b/drivers/scsi/scsi_transport_fc.c
51615@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51616 * Netlink Infrastructure
51617 */
51618
51619-static atomic_t fc_event_seq;
51620+static atomic_unchecked_t fc_event_seq;
51621
51622 /**
51623 * fc_get_event_number - Obtain the next sequential FC event number
51624@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51625 u32
51626 fc_get_event_number(void)
51627 {
51628- return atomic_add_return(1, &fc_event_seq);
51629+ return atomic_add_return_unchecked(1, &fc_event_seq);
51630 }
51631 EXPORT_SYMBOL(fc_get_event_number);
51632
51633@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51634 {
51635 int error;
51636
51637- atomic_set(&fc_event_seq, 0);
51638+ atomic_set_unchecked(&fc_event_seq, 0);
51639
51640 error = transport_class_register(&fc_host_class);
51641 if (error)
51642@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51643 char *cp;
51644
51645 *val = simple_strtoul(buf, &cp, 0);
51646- if ((*cp && (*cp != '\n')) || (*val < 0))
51647+ if (*cp && (*cp != '\n'))
51648 return -EINVAL;
51649 /*
51650 * Check for overflow; dev_loss_tmo is u32
51651diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51652index 67d43e3..8cee73c 100644
51653--- a/drivers/scsi/scsi_transport_iscsi.c
51654+++ b/drivers/scsi/scsi_transport_iscsi.c
51655@@ -79,7 +79,7 @@ struct iscsi_internal {
51656 struct transport_container session_cont;
51657 };
51658
51659-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51660+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51661 static struct workqueue_struct *iscsi_eh_timer_workq;
51662
51663 static DEFINE_IDA(iscsi_sess_ida);
51664@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51665 int err;
51666
51667 ihost = shost->shost_data;
51668- session->sid = atomic_add_return(1, &iscsi_session_nr);
51669+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51670
51671 if (target_id == ISCSI_MAX_TARGET) {
51672 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51673@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51674 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51675 ISCSI_TRANSPORT_VERSION);
51676
51677- atomic_set(&iscsi_session_nr, 0);
51678+ atomic_set_unchecked(&iscsi_session_nr, 0);
51679
51680 err = class_register(&iscsi_transport_class);
51681 if (err)
51682diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51683index ae45bd9..c32a586 100644
51684--- a/drivers/scsi/scsi_transport_srp.c
51685+++ b/drivers/scsi/scsi_transport_srp.c
51686@@ -35,7 +35,7 @@
51687 #include "scsi_priv.h"
51688
51689 struct srp_host_attrs {
51690- atomic_t next_port_id;
51691+ atomic_unchecked_t next_port_id;
51692 };
51693 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51694
51695@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51696 struct Scsi_Host *shost = dev_to_shost(dev);
51697 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51698
51699- atomic_set(&srp_host->next_port_id, 0);
51700+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51701 return 0;
51702 }
51703
51704@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51705 rport_fast_io_fail_timedout);
51706 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51707
51708- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51709+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51710 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51711
51712 transport_setup_device(&rport->dev);
51713diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51714index 05ea0d4..5af8049 100644
51715--- a/drivers/scsi/sd.c
51716+++ b/drivers/scsi/sd.c
51717@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51718 sdkp->disk = gd;
51719 sdkp->index = index;
51720 atomic_set(&sdkp->openers, 0);
51721- atomic_set(&sdkp->device->ioerr_cnt, 0);
51722+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51723
51724 if (!sdp->request_queue->rq_timeout) {
51725 if (sdp->type != TYPE_MOD)
51726diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51727index dbf8e77..0d565c7 100644
51728--- a/drivers/scsi/sg.c
51729+++ b/drivers/scsi/sg.c
51730@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51731 sdp->disk->disk_name,
51732 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51733 NULL,
51734- (char *)arg);
51735+ (char __user *)arg);
51736 case BLKTRACESTART:
51737 return blk_trace_startstop(sdp->device->request_queue, 1);
51738 case BLKTRACESTOP:
51739diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51740index 011a336..fb2b7a0 100644
51741--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51742+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51743@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51744 return i;
51745 }
51746
51747-static struct bin_attribute fuse_bin_attr = {
51748+static bin_attribute_no_const fuse_bin_attr = {
51749 .attr = { .name = "fuse", .mode = S_IRUGO, },
51750 .read = fuse_read,
51751 };
51752diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51753index 66a70e9..f82cea4 100644
51754--- a/drivers/spi/spi.c
51755+++ b/drivers/spi/spi.c
51756@@ -2238,7 +2238,7 @@ int spi_bus_unlock(struct spi_master *master)
51757 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51758
51759 /* portable code must never pass more than 32 bytes */
51760-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51761+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51762
51763 static u8 *buf;
51764
51765diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51766index b41429f..2de5373 100644
51767--- a/drivers/staging/android/timed_output.c
51768+++ b/drivers/staging/android/timed_output.c
51769@@ -25,7 +25,7 @@
51770 #include "timed_output.h"
51771
51772 static struct class *timed_output_class;
51773-static atomic_t device_count;
51774+static atomic_unchecked_t device_count;
51775
51776 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51777 char *buf)
51778@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51779 timed_output_class = class_create(THIS_MODULE, "timed_output");
51780 if (IS_ERR(timed_output_class))
51781 return PTR_ERR(timed_output_class);
51782- atomic_set(&device_count, 0);
51783+ atomic_set_unchecked(&device_count, 0);
51784 timed_output_class->dev_groups = timed_output_groups;
51785 }
51786
51787@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51788 if (ret < 0)
51789 return ret;
51790
51791- tdev->index = atomic_inc_return(&device_count);
51792+ tdev->index = atomic_inc_return_unchecked(&device_count);
51793 tdev->dev = device_create(timed_output_class, NULL,
51794 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51795 if (IS_ERR(tdev->dev))
51796diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51797index f143cb6..6fb8255 100644
51798--- a/drivers/staging/comedi/comedi_fops.c
51799+++ b/drivers/staging/comedi/comedi_fops.c
51800@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51801 }
51802 cfp->last_attached = dev->attached;
51803 cfp->last_detach_count = dev->detach_count;
51804- ACCESS_ONCE(cfp->read_subdev) = read_s;
51805- ACCESS_ONCE(cfp->write_subdev) = write_s;
51806+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51807+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51808 }
51809
51810 static void comedi_file_check(struct file *file)
51811@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51812 !(s_old->async->cmd.flags & CMDF_WRITE))
51813 return -EBUSY;
51814
51815- ACCESS_ONCE(cfp->read_subdev) = s_new;
51816+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51817 return 0;
51818 }
51819
51820@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51821 (s_old->async->cmd.flags & CMDF_WRITE))
51822 return -EBUSY;
51823
51824- ACCESS_ONCE(cfp->write_subdev) = s_new;
51825+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51826 return 0;
51827 }
51828
51829diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
51830index 001348c..cfaac8a 100644
51831--- a/drivers/staging/gdm724x/gdm_tty.c
51832+++ b/drivers/staging/gdm724x/gdm_tty.c
51833@@ -44,7 +44,7 @@
51834 #define gdm_tty_send_control(n, r, v, d, l) (\
51835 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
51836
51837-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
51838+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
51839
51840 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
51841 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
51842diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
51843index 503b2d7..c904931 100644
51844--- a/drivers/staging/line6/driver.c
51845+++ b/drivers/staging/line6/driver.c
51846@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51847 {
51848 struct usb_device *usbdev = line6->usbdev;
51849 int ret;
51850- unsigned char len;
51851+ unsigned char *plen;
51852
51853 /* query the serial number: */
51854 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51855@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
51856 return ret;
51857 }
51858
51859+ plen = kmalloc(1, GFP_KERNEL);
51860+ if (plen == NULL)
51861+ return -ENOMEM;
51862+
51863 /* Wait for data length. We'll get 0xff until length arrives. */
51864 do {
51865 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51866 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51867 USB_DIR_IN,
51868- 0x0012, 0x0000, &len, 1,
51869+ 0x0012, 0x0000, plen, 1,
51870 LINE6_TIMEOUT * HZ);
51871 if (ret < 0) {
51872 dev_err(line6->ifcdev,
51873 "receive length failed (error %d)\n", ret);
51874+ kfree(plen);
51875 return ret;
51876 }
51877- } while (len == 0xff);
51878+ } while (*plen == 0xff);
51879
51880- if (len != datalen) {
51881+ if (*plen != datalen) {
51882 /* should be equal or something went wrong */
51883 dev_err(line6->ifcdev,
51884 "length mismatch (expected %d, got %d)\n",
51885- (int)datalen, (int)len);
51886+ (int)datalen, (int)*plen);
51887+ kfree(plen);
51888 return -EINVAL;
51889 }
51890+ kfree(plen);
51891
51892 /* receive the result: */
51893 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
51894@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51895 {
51896 struct usb_device *usbdev = line6->usbdev;
51897 int ret;
51898- unsigned char status;
51899+ unsigned char *status;
51900
51901 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
51902 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
51903@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
51904 return ret;
51905 }
51906
51907+ status = kmalloc(1, GFP_KERNEL);
51908+ if (status == NULL)
51909+ return -ENOMEM;
51910+
51911 do {
51912 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
51913 0x67,
51914 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
51915 USB_DIR_IN,
51916 0x0012, 0x0000,
51917- &status, 1, LINE6_TIMEOUT * HZ);
51918+ status, 1, LINE6_TIMEOUT * HZ);
51919
51920 if (ret < 0) {
51921 dev_err(line6->ifcdev,
51922 "receiving status failed (error %d)\n", ret);
51923+ kfree(status);
51924 return ret;
51925 }
51926- } while (status == 0xff);
51927+ } while (*status == 0xff);
51928
51929- if (status != 0) {
51930+ if (*status != 0) {
51931 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
51932+ kfree(status);
51933 return -EINVAL;
51934 }
51935
51936+ kfree(status);
51937+
51938 return 0;
51939 }
51940
51941diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
51942index 6943715..0a93632 100644
51943--- a/drivers/staging/line6/toneport.c
51944+++ b/drivers/staging/line6/toneport.c
51945@@ -11,6 +11,7 @@
51946 */
51947
51948 #include <linux/wait.h>
51949+#include <linux/slab.h>
51950 #include <sound/control.h>
51951
51952 #include "audio.h"
51953@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
51954 */
51955 static void toneport_setup(struct usb_line6_toneport *toneport)
51956 {
51957- int ticks;
51958+ int *ticks;
51959 struct usb_line6 *line6 = &toneport->line6;
51960 struct usb_device *usbdev = line6->usbdev;
51961 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
51962
51963+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
51964+ if (ticks == NULL)
51965+ return;
51966+
51967 /* sync time on device with host: */
51968- ticks = (int)get_seconds();
51969- line6_write_data(line6, 0x80c6, &ticks, 4);
51970+ *ticks = (int)get_seconds();
51971+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
51972+
51973+ kfree(ticks);
51974
51975 /* enable device: */
51976 toneport_send_cmd(usbdev, 0x0301, 0x0000);
51977diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
51978index 463da07..e791ce9 100644
51979--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
51980+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
51981@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
51982 return 0;
51983 }
51984
51985-sfw_test_client_ops_t brw_test_client;
51986-void brw_init_test_client(void)
51987-{
51988- brw_test_client.tso_init = brw_client_init;
51989- brw_test_client.tso_fini = brw_client_fini;
51990- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
51991- brw_test_client.tso_done_rpc = brw_client_done_rpc;
51992+sfw_test_client_ops_t brw_test_client = {
51993+ .tso_init = brw_client_init,
51994+ .tso_fini = brw_client_fini,
51995+ .tso_prep_rpc = brw_client_prep_rpc,
51996+ .tso_done_rpc = brw_client_done_rpc,
51997 };
51998
51999 srpc_service_t brw_test_service;
52000diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52001index cc9d182..8fabce3 100644
52002--- a/drivers/staging/lustre/lnet/selftest/framework.c
52003+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52004@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52005
52006 extern sfw_test_client_ops_t ping_test_client;
52007 extern srpc_service_t ping_test_service;
52008-extern void ping_init_test_client(void);
52009 extern void ping_init_test_service(void);
52010
52011 extern sfw_test_client_ops_t brw_test_client;
52012 extern srpc_service_t brw_test_service;
52013-extern void brw_init_test_client(void);
52014 extern void brw_init_test_service(void);
52015
52016
52017@@ -1675,12 +1673,10 @@ sfw_startup (void)
52018 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52019 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52020
52021- brw_init_test_client();
52022 brw_init_test_service();
52023 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52024 LASSERT (rc == 0);
52025
52026- ping_init_test_client();
52027 ping_init_test_service();
52028 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52029 LASSERT (rc == 0);
52030diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52031index d8c0df6..5041cbb 100644
52032--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52033+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52034@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52035 return 0;
52036 }
52037
52038-sfw_test_client_ops_t ping_test_client;
52039-void ping_init_test_client(void)
52040-{
52041- ping_test_client.tso_init = ping_client_init;
52042- ping_test_client.tso_fini = ping_client_fini;
52043- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52044- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52045-}
52046+sfw_test_client_ops_t ping_test_client = {
52047+ .tso_init = ping_client_init,
52048+ .tso_fini = ping_client_fini,
52049+ .tso_prep_rpc = ping_client_prep_rpc,
52050+ .tso_done_rpc = ping_client_done_rpc,
52051+};
52052
52053 srpc_service_t ping_test_service;
52054 void ping_init_test_service(void)
52055diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52056index 83bc0a9..12ba00a 100644
52057--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52058+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52059@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52060 ldlm_completion_callback lcs_completion;
52061 ldlm_blocking_callback lcs_blocking;
52062 ldlm_glimpse_callback lcs_glimpse;
52063-};
52064+} __no_const;
52065
52066 /* ldlm_lockd.c */
52067 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52068diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52069index 2a88b80..62e7e5f 100644
52070--- a/drivers/staging/lustre/lustre/include/obd.h
52071+++ b/drivers/staging/lustre/lustre/include/obd.h
52072@@ -1362,7 +1362,7 @@ struct md_ops {
52073 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52074 * wrapper function in include/linux/obd_class.h.
52075 */
52076-};
52077+} __no_const;
52078
52079 struct lsm_operations {
52080 void (*lsm_free)(struct lov_stripe_md *);
52081diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52082index a4c252f..b21acac 100644
52083--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52084+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52085@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52086 int added = (mode == LCK_NL);
52087 int overlaps = 0;
52088 int splitted = 0;
52089- const struct ldlm_callback_suite null_cbs = { NULL };
52090+ const struct ldlm_callback_suite null_cbs = { };
52091
52092 CDEBUG(D_DLMTRACE,
52093 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52094diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52095index 83d3f08..b03adad 100644
52096--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52097+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52098@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52099 void __user *buffer, size_t *lenp, loff_t *ppos)
52100 {
52101 int rc, max_delay_cs;
52102- struct ctl_table dummy = *table;
52103+ ctl_table_no_const dummy = *table;
52104 long d;
52105
52106 dummy.data = &max_delay_cs;
52107@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52108 void __user *buffer, size_t *lenp, loff_t *ppos)
52109 {
52110 int rc, min_delay_cs;
52111- struct ctl_table dummy = *table;
52112+ ctl_table_no_const dummy = *table;
52113 long d;
52114
52115 dummy.data = &min_delay_cs;
52116@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52117 void __user *buffer, size_t *lenp, loff_t *ppos)
52118 {
52119 int rc, backoff;
52120- struct ctl_table dummy = *table;
52121+ ctl_table_no_const dummy = *table;
52122
52123 dummy.data = &backoff;
52124 dummy.proc_handler = &proc_dointvec;
52125diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52126index 2c4fc74..b04ca79 100644
52127--- a/drivers/staging/lustre/lustre/libcfs/module.c
52128+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52129@@ -315,11 +315,11 @@ out:
52130
52131
52132 struct cfs_psdev_ops libcfs_psdev_ops = {
52133- libcfs_psdev_open,
52134- libcfs_psdev_release,
52135- NULL,
52136- NULL,
52137- libcfs_ioctl
52138+ .p_open = libcfs_psdev_open,
52139+ .p_close = libcfs_psdev_release,
52140+ .p_read = NULL,
52141+ .p_write = NULL,
52142+ .p_ioctl = libcfs_ioctl
52143 };
52144
52145 extern int insert_proc(void);
52146diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52147index fcbe836..8a7ada4 100644
52148--- a/drivers/staging/octeon/ethernet-rx.c
52149+++ b/drivers/staging/octeon/ethernet-rx.c
52150@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52151 /* Increment RX stats for virtual ports */
52152 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52153 #ifdef CONFIG_64BIT
52154- atomic64_add(1,
52155+ atomic64_add_unchecked(1,
52156 (atomic64_t *)&priv->stats.rx_packets);
52157- atomic64_add(skb->len,
52158+ atomic64_add_unchecked(skb->len,
52159 (atomic64_t *)&priv->stats.rx_bytes);
52160 #else
52161- atomic_add(1,
52162+ atomic_add_unchecked(1,
52163 (atomic_t *)&priv->stats.rx_packets);
52164- atomic_add(skb->len,
52165+ atomic_add_unchecked(skb->len,
52166 (atomic_t *)&priv->stats.rx_bytes);
52167 #endif
52168 }
52169@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52170 dev->name);
52171 */
52172 #ifdef CONFIG_64BIT
52173- atomic64_add(1,
52174+ atomic64_add_unchecked(1,
52175 (atomic64_t *)&priv->stats.rx_dropped);
52176 #else
52177- atomic_add(1,
52178+ atomic_add_unchecked(1,
52179 (atomic_t *)&priv->stats.rx_dropped);
52180 #endif
52181 dev_kfree_skb_irq(skb);
52182diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52183index ee32149..052d1836 100644
52184--- a/drivers/staging/octeon/ethernet.c
52185+++ b/drivers/staging/octeon/ethernet.c
52186@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52187 * since the RX tasklet also increments it.
52188 */
52189 #ifdef CONFIG_64BIT
52190- atomic64_add(rx_status.dropped_packets,
52191- (atomic64_t *)&priv->stats.rx_dropped);
52192+ atomic64_add_unchecked(rx_status.dropped_packets,
52193+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52194 #else
52195- atomic_add(rx_status.dropped_packets,
52196- (atomic_t *)&priv->stats.rx_dropped);
52197+ atomic_add_unchecked(rx_status.dropped_packets,
52198+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52199 #endif
52200 }
52201
52202diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52203index 3b476d8..f522d68 100644
52204--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52205+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52206@@ -225,7 +225,7 @@ struct hal_ops {
52207
52208 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52209 void (*hal_reset_security_engine)(struct adapter *adapter);
52210-};
52211+} __no_const;
52212
52213 enum rt_eeprom_type {
52214 EEPROM_93C46,
52215diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52216index 070cc03..6806e37 100644
52217--- a/drivers/staging/rtl8712/rtl871x_io.h
52218+++ b/drivers/staging/rtl8712/rtl871x_io.h
52219@@ -108,7 +108,7 @@ struct _io_ops {
52220 u8 *pmem);
52221 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52222 u8 *pmem);
52223-};
52224+} __no_const;
52225
52226 struct io_req {
52227 struct list_head list;
52228diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52229index 46dad63..fe4acdc 100644
52230--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52231+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52232@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52233 void (*device_resume)(ulong bus_no, ulong dev_no);
52234 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52235 ulong *max_size);
52236-};
52237+} __no_const;
52238
52239 /* These functions live inside visorchipset, and will be called to indicate
52240 * responses to specific events (by code outside of visorchipset).
52241@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52242 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52243 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52244 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52245-};
52246+} __no_const;
52247
52248 /** Register functions (in the bus driver) to get called by visorchipset
52249 * whenever a bus or device appears for which this service partition is
52250diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52251index 9512af6..045bf5a 100644
52252--- a/drivers/target/sbp/sbp_target.c
52253+++ b/drivers/target/sbp/sbp_target.c
52254@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52255
52256 #define SESSION_MAINTENANCE_INTERVAL HZ
52257
52258-static atomic_t login_id = ATOMIC_INIT(0);
52259+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52260
52261 static void session_maintenance_work(struct work_struct *);
52262 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52263@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52264 login->lun = se_lun;
52265 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52266 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52267- login->login_id = atomic_inc_return(&login_id);
52268+ login->login_id = atomic_inc_return_unchecked(&login_id);
52269
52270 login->tgt_agt = sbp_target_agent_register(login);
52271 if (IS_ERR(login->tgt_agt)) {
52272diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52273index 54da2a4..3dd6f57 100644
52274--- a/drivers/target/target_core_device.c
52275+++ b/drivers/target/target_core_device.c
52276@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52277 spin_lock_init(&dev->se_tmr_lock);
52278 spin_lock_init(&dev->qf_cmd_lock);
52279 sema_init(&dev->caw_sem, 1);
52280- atomic_set(&dev->dev_ordered_id, 0);
52281+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52282 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52283 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52284 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52285diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52286index ac3cbab..f0d1dd2 100644
52287--- a/drivers/target/target_core_transport.c
52288+++ b/drivers/target/target_core_transport.c
52289@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52290 * Used to determine when ORDERED commands should go from
52291 * Dormant to Active status.
52292 */
52293- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52294+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52295 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52296 cmd->se_ordered_id, cmd->sam_task_attr,
52297 dev->transport->name);
52298diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52299index 65a98a9..d93d3a8 100644
52300--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52301+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52302@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52303 platform_set_drvdata(pdev, priv);
52304
52305 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52306- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52307- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52308+ pax_open_kernel();
52309+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52310+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52311+ pax_close_kernel();
52312 }
52313 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52314 priv, &int3400_thermal_ops,
52315diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52316index d717f3d..cae1cc3e 100644
52317--- a/drivers/thermal/of-thermal.c
52318+++ b/drivers/thermal/of-thermal.c
52319@@ -31,6 +31,7 @@
52320 #include <linux/export.h>
52321 #include <linux/string.h>
52322 #include <linux/thermal.h>
52323+#include <linux/mm.h>
52324
52325 #include "thermal_core.h"
52326
52327@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52328 tz->ops = ops;
52329 tz->sensor_data = data;
52330
52331- tzd->ops->get_temp = of_thermal_get_temp;
52332- tzd->ops->get_trend = of_thermal_get_trend;
52333- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52334+ pax_open_kernel();
52335+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52336+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52337+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52338+ pax_close_kernel();
52339 mutex_unlock(&tzd->lock);
52340
52341 return tzd;
52342@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52343 return;
52344
52345 mutex_lock(&tzd->lock);
52346- tzd->ops->get_temp = NULL;
52347- tzd->ops->get_trend = NULL;
52348- tzd->ops->set_emul_temp = NULL;
52349+ pax_open_kernel();
52350+ *(void **)&tzd->ops->get_temp = NULL;
52351+ *(void **)&tzd->ops->get_trend = NULL;
52352+ *(void **)&tzd->ops->set_emul_temp = NULL;
52353+ pax_close_kernel();
52354
52355 tz->ops = NULL;
52356 tz->sensor_data = NULL;
52357diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52358index fd66f57..48e6376 100644
52359--- a/drivers/tty/cyclades.c
52360+++ b/drivers/tty/cyclades.c
52361@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52362 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52363 info->port.count);
52364 #endif
52365- info->port.count++;
52366+ atomic_inc(&info->port.count);
52367 #ifdef CY_DEBUG_COUNT
52368 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52369- current->pid, info->port.count);
52370+ current->pid, atomic_read(&info->port.count));
52371 #endif
52372
52373 /*
52374@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52375 for (j = 0; j < cy_card[i].nports; j++) {
52376 info = &cy_card[i].ports[j];
52377
52378- if (info->port.count) {
52379+ if (atomic_read(&info->port.count)) {
52380 /* XXX is the ldisc num worth this? */
52381 struct tty_struct *tty;
52382 struct tty_ldisc *ld;
52383diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52384index 4fcec1d..5a036f7 100644
52385--- a/drivers/tty/hvc/hvc_console.c
52386+++ b/drivers/tty/hvc/hvc_console.c
52387@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52388
52389 spin_lock_irqsave(&hp->port.lock, flags);
52390 /* Check and then increment for fast path open. */
52391- if (hp->port.count++ > 0) {
52392+ if (atomic_inc_return(&hp->port.count) > 1) {
52393 spin_unlock_irqrestore(&hp->port.lock, flags);
52394 hvc_kick();
52395 return 0;
52396@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52397
52398 spin_lock_irqsave(&hp->port.lock, flags);
52399
52400- if (--hp->port.count == 0) {
52401+ if (atomic_dec_return(&hp->port.count) == 0) {
52402 spin_unlock_irqrestore(&hp->port.lock, flags);
52403 /* We are done with the tty pointer now. */
52404 tty_port_tty_set(&hp->port, NULL);
52405@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52406 */
52407 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52408 } else {
52409- if (hp->port.count < 0)
52410+ if (atomic_read(&hp->port.count) < 0)
52411 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52412- hp->vtermno, hp->port.count);
52413+ hp->vtermno, atomic_read(&hp->port.count));
52414 spin_unlock_irqrestore(&hp->port.lock, flags);
52415 }
52416 }
52417@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52418 * open->hangup case this can be called after the final close so prevent
52419 * that from happening for now.
52420 */
52421- if (hp->port.count <= 0) {
52422+ if (atomic_read(&hp->port.count) <= 0) {
52423 spin_unlock_irqrestore(&hp->port.lock, flags);
52424 return;
52425 }
52426
52427- hp->port.count = 0;
52428+ atomic_set(&hp->port.count, 0);
52429 spin_unlock_irqrestore(&hp->port.lock, flags);
52430 tty_port_tty_set(&hp->port, NULL);
52431
52432@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52433 return -EPIPE;
52434
52435 /* FIXME what's this (unprotected) check for? */
52436- if (hp->port.count <= 0)
52437+ if (atomic_read(&hp->port.count) <= 0)
52438 return -EIO;
52439
52440 spin_lock_irqsave(&hp->lock, flags);
52441diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52442index 81ff7e1..dfb7b71 100644
52443--- a/drivers/tty/hvc/hvcs.c
52444+++ b/drivers/tty/hvc/hvcs.c
52445@@ -83,6 +83,7 @@
52446 #include <asm/hvcserver.h>
52447 #include <asm/uaccess.h>
52448 #include <asm/vio.h>
52449+#include <asm/local.h>
52450
52451 /*
52452 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52453@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52454
52455 spin_lock_irqsave(&hvcsd->lock, flags);
52456
52457- if (hvcsd->port.count > 0) {
52458+ if (atomic_read(&hvcsd->port.count) > 0) {
52459 spin_unlock_irqrestore(&hvcsd->lock, flags);
52460 printk(KERN_INFO "HVCS: vterm state unchanged. "
52461 "The hvcs device node is still in use.\n");
52462@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52463 }
52464 }
52465
52466- hvcsd->port.count = 0;
52467+ atomic_set(&hvcsd->port.count, 0);
52468 hvcsd->port.tty = tty;
52469 tty->driver_data = hvcsd;
52470
52471@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52472 unsigned long flags;
52473
52474 spin_lock_irqsave(&hvcsd->lock, flags);
52475- hvcsd->port.count++;
52476+ atomic_inc(&hvcsd->port.count);
52477 hvcsd->todo_mask |= HVCS_SCHED_READ;
52478 spin_unlock_irqrestore(&hvcsd->lock, flags);
52479
52480@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52481 hvcsd = tty->driver_data;
52482
52483 spin_lock_irqsave(&hvcsd->lock, flags);
52484- if (--hvcsd->port.count == 0) {
52485+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52486
52487 vio_disable_interrupts(hvcsd->vdev);
52488
52489@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52490
52491 free_irq(irq, hvcsd);
52492 return;
52493- } else if (hvcsd->port.count < 0) {
52494+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52495 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52496 " is missmanaged.\n",
52497- hvcsd->vdev->unit_address, hvcsd->port.count);
52498+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52499 }
52500
52501 spin_unlock_irqrestore(&hvcsd->lock, flags);
52502@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52503
52504 spin_lock_irqsave(&hvcsd->lock, flags);
52505 /* Preserve this so that we know how many kref refs to put */
52506- temp_open_count = hvcsd->port.count;
52507+ temp_open_count = atomic_read(&hvcsd->port.count);
52508
52509 /*
52510 * Don't kref put inside the spinlock because the destruction
52511@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52512 tty->driver_data = NULL;
52513 hvcsd->port.tty = NULL;
52514
52515- hvcsd->port.count = 0;
52516+ atomic_set(&hvcsd->port.count, 0);
52517
52518 /* This will drop any buffered data on the floor which is OK in a hangup
52519 * scenario. */
52520@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52521 * the middle of a write operation? This is a crummy place to do this
52522 * but we want to keep it all in the spinlock.
52523 */
52524- if (hvcsd->port.count <= 0) {
52525+ if (atomic_read(&hvcsd->port.count) <= 0) {
52526 spin_unlock_irqrestore(&hvcsd->lock, flags);
52527 return -ENODEV;
52528 }
52529@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52530 {
52531 struct hvcs_struct *hvcsd = tty->driver_data;
52532
52533- if (!hvcsd || hvcsd->port.count <= 0)
52534+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52535 return 0;
52536
52537 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52538diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52539index 4190199..06d5bfa 100644
52540--- a/drivers/tty/hvc/hvsi.c
52541+++ b/drivers/tty/hvc/hvsi.c
52542@@ -85,7 +85,7 @@ struct hvsi_struct {
52543 int n_outbuf;
52544 uint32_t vtermno;
52545 uint32_t virq;
52546- atomic_t seqno; /* HVSI packet sequence number */
52547+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52548 uint16_t mctrl;
52549 uint8_t state; /* HVSI protocol state */
52550 uint8_t flags;
52551@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52552
52553 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52554 packet.hdr.len = sizeof(struct hvsi_query_response);
52555- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52556+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52557 packet.verb = VSV_SEND_VERSION_NUMBER;
52558 packet.u.version = HVSI_VERSION;
52559 packet.query_seqno = query_seqno+1;
52560@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52561
52562 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52563 packet.hdr.len = sizeof(struct hvsi_query);
52564- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52565+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52566 packet.verb = verb;
52567
52568 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52569@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52570 int wrote;
52571
52572 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52573- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52574+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52575 packet.hdr.len = sizeof(struct hvsi_control);
52576 packet.verb = VSV_SET_MODEM_CTL;
52577 packet.mask = HVSI_TSDTR;
52578@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52579 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52580
52581 packet.hdr.type = VS_DATA_PACKET_HEADER;
52582- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52583+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52584 packet.hdr.len = count + sizeof(struct hvsi_header);
52585 memcpy(&packet.data, buf, count);
52586
52587@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52588 struct hvsi_control packet __ALIGNED__;
52589
52590 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52591- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52592+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52593 packet.hdr.len = 6;
52594 packet.verb = VSV_CLOSE_PROTOCOL;
52595
52596@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52597
52598 tty_port_tty_set(&hp->port, tty);
52599 spin_lock_irqsave(&hp->lock, flags);
52600- hp->port.count++;
52601+ atomic_inc(&hp->port.count);
52602 atomic_set(&hp->seqno, 0);
52603 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52604 spin_unlock_irqrestore(&hp->lock, flags);
52605@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52606
52607 spin_lock_irqsave(&hp->lock, flags);
52608
52609- if (--hp->port.count == 0) {
52610+ if (atomic_dec_return(&hp->port.count) == 0) {
52611 tty_port_tty_set(&hp->port, NULL);
52612 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52613
52614@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52615
52616 spin_lock_irqsave(&hp->lock, flags);
52617 }
52618- } else if (hp->port.count < 0)
52619+ } else if (atomic_read(&hp->port.count) < 0)
52620 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52621- hp - hvsi_ports, hp->port.count);
52622+ hp - hvsi_ports, atomic_read(&hp->port.count));
52623
52624 spin_unlock_irqrestore(&hp->lock, flags);
52625 }
52626@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52627 tty_port_tty_set(&hp->port, NULL);
52628
52629 spin_lock_irqsave(&hp->lock, flags);
52630- hp->port.count = 0;
52631+ atomic_set(&hp->port.count, 0);
52632 hp->n_outbuf = 0;
52633 spin_unlock_irqrestore(&hp->lock, flags);
52634 }
52635diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52636index a270f04..7c77b5d 100644
52637--- a/drivers/tty/hvc/hvsi_lib.c
52638+++ b/drivers/tty/hvc/hvsi_lib.c
52639@@ -8,7 +8,7 @@
52640
52641 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52642 {
52643- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52644+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52645
52646 /* Assumes that always succeeds, works in practice */
52647 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52648@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52649
52650 /* Reset state */
52651 pv->established = 0;
52652- atomic_set(&pv->seqno, 0);
52653+ atomic_set_unchecked(&pv->seqno, 0);
52654
52655 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52656
52657diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52658index 345cebb..d5a1e9e 100644
52659--- a/drivers/tty/ipwireless/tty.c
52660+++ b/drivers/tty/ipwireless/tty.c
52661@@ -28,6 +28,7 @@
52662 #include <linux/tty_driver.h>
52663 #include <linux/tty_flip.h>
52664 #include <linux/uaccess.h>
52665+#include <asm/local.h>
52666
52667 #include "tty.h"
52668 #include "network.h"
52669@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52670 return -ENODEV;
52671
52672 mutex_lock(&tty->ipw_tty_mutex);
52673- if (tty->port.count == 0)
52674+ if (atomic_read(&tty->port.count) == 0)
52675 tty->tx_bytes_queued = 0;
52676
52677- tty->port.count++;
52678+ atomic_inc(&tty->port.count);
52679
52680 tty->port.tty = linux_tty;
52681 linux_tty->driver_data = tty;
52682@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52683
52684 static void do_ipw_close(struct ipw_tty *tty)
52685 {
52686- tty->port.count--;
52687-
52688- if (tty->port.count == 0) {
52689+ if (atomic_dec_return(&tty->port.count) == 0) {
52690 struct tty_struct *linux_tty = tty->port.tty;
52691
52692 if (linux_tty != NULL) {
52693@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52694 return;
52695
52696 mutex_lock(&tty->ipw_tty_mutex);
52697- if (tty->port.count == 0) {
52698+ if (atomic_read(&tty->port.count) == 0) {
52699 mutex_unlock(&tty->ipw_tty_mutex);
52700 return;
52701 }
52702@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52703
52704 mutex_lock(&tty->ipw_tty_mutex);
52705
52706- if (!tty->port.count) {
52707+ if (!atomic_read(&tty->port.count)) {
52708 mutex_unlock(&tty->ipw_tty_mutex);
52709 return;
52710 }
52711@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52712 return -ENODEV;
52713
52714 mutex_lock(&tty->ipw_tty_mutex);
52715- if (!tty->port.count) {
52716+ if (!atomic_read(&tty->port.count)) {
52717 mutex_unlock(&tty->ipw_tty_mutex);
52718 return -EINVAL;
52719 }
52720@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52721 if (!tty)
52722 return -ENODEV;
52723
52724- if (!tty->port.count)
52725+ if (!atomic_read(&tty->port.count))
52726 return -EINVAL;
52727
52728 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52729@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52730 if (!tty)
52731 return 0;
52732
52733- if (!tty->port.count)
52734+ if (!atomic_read(&tty->port.count))
52735 return 0;
52736
52737 return tty->tx_bytes_queued;
52738@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52739 if (!tty)
52740 return -ENODEV;
52741
52742- if (!tty->port.count)
52743+ if (!atomic_read(&tty->port.count))
52744 return -EINVAL;
52745
52746 return get_control_lines(tty);
52747@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52748 if (!tty)
52749 return -ENODEV;
52750
52751- if (!tty->port.count)
52752+ if (!atomic_read(&tty->port.count))
52753 return -EINVAL;
52754
52755 return set_control_lines(tty, set, clear);
52756@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52757 if (!tty)
52758 return -ENODEV;
52759
52760- if (!tty->port.count)
52761+ if (!atomic_read(&tty->port.count))
52762 return -EINVAL;
52763
52764 /* FIXME: Exactly how is the tty object locked here .. */
52765@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52766 * are gone */
52767 mutex_lock(&ttyj->ipw_tty_mutex);
52768 }
52769- while (ttyj->port.count)
52770+ while (atomic_read(&ttyj->port.count))
52771 do_ipw_close(ttyj);
52772 ipwireless_disassociate_network_ttys(network,
52773 ttyj->channel_idx);
52774diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52775index 14c54e0..1efd4f2 100644
52776--- a/drivers/tty/moxa.c
52777+++ b/drivers/tty/moxa.c
52778@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52779 }
52780
52781 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52782- ch->port.count++;
52783+ atomic_inc(&ch->port.count);
52784 tty->driver_data = ch;
52785 tty_port_tty_set(&ch->port, tty);
52786 mutex_lock(&ch->port.mutex);
52787diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52788index c434376..114ce13 100644
52789--- a/drivers/tty/n_gsm.c
52790+++ b/drivers/tty/n_gsm.c
52791@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52792 spin_lock_init(&dlci->lock);
52793 mutex_init(&dlci->mutex);
52794 dlci->fifo = &dlci->_fifo;
52795- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52796+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52797 kfree(dlci);
52798 return NULL;
52799 }
52800@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52801 struct gsm_dlci *dlci = tty->driver_data;
52802 struct tty_port *port = &dlci->port;
52803
52804- port->count++;
52805+ atomic_inc(&port->count);
52806 tty_port_tty_set(port, tty);
52807
52808 dlci->modem_rx = 0;
52809diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52810index 4ddfa60..1b7e112 100644
52811--- a/drivers/tty/n_tty.c
52812+++ b/drivers/tty/n_tty.c
52813@@ -115,7 +115,7 @@ struct n_tty_data {
52814 int minimum_to_wake;
52815
52816 /* consumer-published */
52817- size_t read_tail;
52818+ size_t read_tail __intentional_overflow(-1);
52819 size_t line_start;
52820
52821 /* protected by output lock */
52822@@ -2503,6 +2503,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52823 {
52824 *ops = tty_ldisc_N_TTY;
52825 ops->owner = NULL;
52826- ops->refcount = ops->flags = 0;
52827+ atomic_set(&ops->refcount, 0);
52828+ ops->flags = 0;
52829 }
52830 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
52831diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
52832index 6e1f150..c3ba598 100644
52833--- a/drivers/tty/pty.c
52834+++ b/drivers/tty/pty.c
52835@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
52836 panic("Couldn't register Unix98 pts driver");
52837
52838 /* Now create the /dev/ptmx special device */
52839+ pax_open_kernel();
52840 tty_default_fops(&ptmx_fops);
52841- ptmx_fops.open = ptmx_open;
52842+ *(void **)&ptmx_fops.open = ptmx_open;
52843+ pax_close_kernel();
52844
52845 cdev_init(&ptmx_cdev, &ptmx_fops);
52846 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
52847diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
52848index 383c4c7..d408e21 100644
52849--- a/drivers/tty/rocket.c
52850+++ b/drivers/tty/rocket.c
52851@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52852 tty->driver_data = info;
52853 tty_port_tty_set(port, tty);
52854
52855- if (port->count++ == 0) {
52856+ if (atomic_inc_return(&port->count) == 1) {
52857 atomic_inc(&rp_num_ports_open);
52858
52859 #ifdef ROCKET_DEBUG_OPEN
52860@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
52861 #endif
52862 }
52863 #ifdef ROCKET_DEBUG_OPEN
52864- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
52865+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
52866 #endif
52867
52868 /*
52869@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
52870 spin_unlock_irqrestore(&info->port.lock, flags);
52871 return;
52872 }
52873- if (info->port.count)
52874+ if (atomic_read(&info->port.count))
52875 atomic_dec(&rp_num_ports_open);
52876 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
52877 spin_unlock_irqrestore(&info->port.lock, flags);
52878diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
52879index aa28209..e08fb85 100644
52880--- a/drivers/tty/serial/ioc4_serial.c
52881+++ b/drivers/tty/serial/ioc4_serial.c
52882@@ -437,7 +437,7 @@ struct ioc4_soft {
52883 } is_intr_info[MAX_IOC4_INTR_ENTS];
52884
52885 /* Number of entries active in the above array */
52886- atomic_t is_num_intrs;
52887+ atomic_unchecked_t is_num_intrs;
52888 } is_intr_type[IOC4_NUM_INTR_TYPES];
52889
52890 /* is_ir_lock must be held while
52891@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
52892 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
52893 || (type == IOC4_OTHER_INTR_TYPE)));
52894
52895- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
52896+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
52897 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
52898
52899 /* Save off the lower level interrupt handler */
52900@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
52901
52902 soft = arg;
52903 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
52904- num_intrs = (int)atomic_read(
52905+ num_intrs = (int)atomic_read_unchecked(
52906 &soft->is_intr_type[intr_type].is_num_intrs);
52907
52908 this_mir = this_ir = pending_intrs(soft, intr_type);
52909diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
52910index 129dc5b..1da5bb8 100644
52911--- a/drivers/tty/serial/kgdb_nmi.c
52912+++ b/drivers/tty/serial/kgdb_nmi.c
52913@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
52914 * I/O utilities that messages sent to the console will automatically
52915 * be displayed on the dbg_io.
52916 */
52917- dbg_io_ops->is_console = true;
52918+ pax_open_kernel();
52919+ *(int *)&dbg_io_ops->is_console = true;
52920+ pax_close_kernel();
52921
52922 return 0;
52923 }
52924diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
52925index a260cde..6b2b5ce 100644
52926--- a/drivers/tty/serial/kgdboc.c
52927+++ b/drivers/tty/serial/kgdboc.c
52928@@ -24,8 +24,9 @@
52929 #define MAX_CONFIG_LEN 40
52930
52931 static struct kgdb_io kgdboc_io_ops;
52932+static struct kgdb_io kgdboc_io_ops_console;
52933
52934-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
52935+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
52936 static int configured = -1;
52937
52938 static char config[MAX_CONFIG_LEN];
52939@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
52940 kgdboc_unregister_kbd();
52941 if (configured == 1)
52942 kgdb_unregister_io_module(&kgdboc_io_ops);
52943+ else if (configured == 2)
52944+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
52945 }
52946
52947 static int configure_kgdboc(void)
52948@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
52949 int err;
52950 char *cptr = config;
52951 struct console *cons;
52952+ int is_console = 0;
52953
52954 err = kgdboc_option_setup(config);
52955 if (err || !strlen(config) || isspace(config[0]))
52956 goto noconfig;
52957
52958 err = -ENODEV;
52959- kgdboc_io_ops.is_console = 0;
52960 kgdb_tty_driver = NULL;
52961
52962 kgdboc_use_kms = 0;
52963@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
52964 int idx;
52965 if (cons->device && cons->device(cons, &idx) == p &&
52966 idx == tty_line) {
52967- kgdboc_io_ops.is_console = 1;
52968+ is_console = 1;
52969 break;
52970 }
52971 cons = cons->next;
52972@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
52973 kgdb_tty_line = tty_line;
52974
52975 do_register:
52976- err = kgdb_register_io_module(&kgdboc_io_ops);
52977+ if (is_console) {
52978+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
52979+ configured = 2;
52980+ } else {
52981+ err = kgdb_register_io_module(&kgdboc_io_ops);
52982+ configured = 1;
52983+ }
52984 if (err)
52985 goto noconfig;
52986
52987@@ -205,8 +214,6 @@ do_register:
52988 if (err)
52989 goto nmi_con_failed;
52990
52991- configured = 1;
52992-
52993 return 0;
52994
52995 nmi_con_failed:
52996@@ -223,7 +230,7 @@ noconfig:
52997 static int __init init_kgdboc(void)
52998 {
52999 /* Already configured? */
53000- if (configured == 1)
53001+ if (configured >= 1)
53002 return 0;
53003
53004 return configure_kgdboc();
53005@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53006 if (config[len - 1] == '\n')
53007 config[len - 1] = '\0';
53008
53009- if (configured == 1)
53010+ if (configured >= 1)
53011 cleanup_kgdboc();
53012
53013 /* Go and configure with the new params. */
53014@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53015 .post_exception = kgdboc_post_exp_handler,
53016 };
53017
53018+static struct kgdb_io kgdboc_io_ops_console = {
53019+ .name = "kgdboc",
53020+ .read_char = kgdboc_get_char,
53021+ .write_char = kgdboc_put_char,
53022+ .pre_exception = kgdboc_pre_exp_handler,
53023+ .post_exception = kgdboc_post_exp_handler,
53024+ .is_console = 1
53025+};
53026+
53027 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53028 /* This is only available if kgdboc is a built in for early debugging */
53029 static int __init kgdboc_early_init(char *opt)
53030diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53031index c88b522..e763029 100644
53032--- a/drivers/tty/serial/msm_serial.c
53033+++ b/drivers/tty/serial/msm_serial.c
53034@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
53035 .cons = MSM_CONSOLE,
53036 };
53037
53038-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53039+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53040
53041 static const struct of_device_id msm_uartdm_table[] = {
53042 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53043@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53044 line = pdev->id;
53045
53046 if (line < 0)
53047- line = atomic_inc_return(&msm_uart_next_id) - 1;
53048+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53049
53050 if (unlikely(line < 0 || line >= UART_NR))
53051 return -ENXIO;
53052diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53053index 107e807..d4a02fa 100644
53054--- a/drivers/tty/serial/samsung.c
53055+++ b/drivers/tty/serial/samsung.c
53056@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53057 }
53058 }
53059
53060+static int s3c64xx_serial_startup(struct uart_port *port);
53061 static int s3c24xx_serial_startup(struct uart_port *port)
53062 {
53063 struct s3c24xx_uart_port *ourport = to_ourport(port);
53064 int ret;
53065
53066+ /* Startup sequence is different for s3c64xx and higher SoC's */
53067+ if (s3c24xx_serial_has_interrupt_mask(port))
53068+ return s3c64xx_serial_startup(port);
53069+
53070 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53071 port, (unsigned long long)port->mapbase, port->membase);
53072
53073@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53074 /* setup info for port */
53075 port->dev = &platdev->dev;
53076
53077- /* Startup sequence is different for s3c64xx and higher SoC's */
53078- if (s3c24xx_serial_has_interrupt_mask(port))
53079- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53080-
53081 port->uartclk = 1;
53082
53083 if (cfg->uart_flags & UPF_CONS_FLOW) {
53084diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53085index 984605b..e538330 100644
53086--- a/drivers/tty/serial/serial_core.c
53087+++ b/drivers/tty/serial/serial_core.c
53088@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53089 state = drv->state + tty->index;
53090 port = &state->port;
53091 spin_lock_irq(&port->lock);
53092- --port->count;
53093+ atomic_dec(&port->count);
53094 spin_unlock_irq(&port->lock);
53095 return;
53096 }
53097@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53098
53099 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53100
53101- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53102+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53103 return;
53104
53105 /*
53106@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53107 uart_flush_buffer(tty);
53108 uart_shutdown(tty, state);
53109 spin_lock_irqsave(&port->lock, flags);
53110- port->count = 0;
53111+ atomic_set(&port->count, 0);
53112 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53113 spin_unlock_irqrestore(&port->lock, flags);
53114 tty_port_tty_set(port, NULL);
53115@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53116 pr_debug("uart_open(%d) called\n", line);
53117
53118 spin_lock_irq(&port->lock);
53119- ++port->count;
53120+ atomic_inc(&port->count);
53121 spin_unlock_irq(&port->lock);
53122
53123 /*
53124diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53125index b799170..87dafd5 100644
53126--- a/drivers/tty/synclink.c
53127+++ b/drivers/tty/synclink.c
53128@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53129
53130 if (debug_level >= DEBUG_LEVEL_INFO)
53131 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53132- __FILE__,__LINE__, info->device_name, info->port.count);
53133+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53134
53135 if (tty_port_close_start(&info->port, tty, filp) == 0)
53136 goto cleanup;
53137@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53138 cleanup:
53139 if (debug_level >= DEBUG_LEVEL_INFO)
53140 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53141- tty->driver->name, info->port.count);
53142+ tty->driver->name, atomic_read(&info->port.count));
53143
53144 } /* end of mgsl_close() */
53145
53146@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53147
53148 mgsl_flush_buffer(tty);
53149 shutdown(info);
53150-
53151- info->port.count = 0;
53152+
53153+ atomic_set(&info->port.count, 0);
53154 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53155 info->port.tty = NULL;
53156
53157@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53158
53159 if (debug_level >= DEBUG_LEVEL_INFO)
53160 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53161- __FILE__,__LINE__, tty->driver->name, port->count );
53162+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53163
53164 spin_lock_irqsave(&info->irq_spinlock, flags);
53165- port->count--;
53166+ atomic_dec(&port->count);
53167 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53168 port->blocked_open++;
53169
53170@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53171
53172 if (debug_level >= DEBUG_LEVEL_INFO)
53173 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53174- __FILE__,__LINE__, tty->driver->name, port->count );
53175+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53176
53177 tty_unlock(tty);
53178 schedule();
53179@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53180
53181 /* FIXME: Racy on hangup during close wait */
53182 if (!tty_hung_up_p(filp))
53183- port->count++;
53184+ atomic_inc(&port->count);
53185 port->blocked_open--;
53186
53187 if (debug_level >= DEBUG_LEVEL_INFO)
53188 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53189- __FILE__,__LINE__, tty->driver->name, port->count );
53190+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53191
53192 if (!retval)
53193 port->flags |= ASYNC_NORMAL_ACTIVE;
53194@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53195
53196 if (debug_level >= DEBUG_LEVEL_INFO)
53197 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53198- __FILE__,__LINE__,tty->driver->name, info->port.count);
53199+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53200
53201 /* If port is closing, signal caller to try again */
53202 if (info->port.flags & ASYNC_CLOSING){
53203@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53204 spin_unlock_irqrestore(&info->netlock, flags);
53205 goto cleanup;
53206 }
53207- info->port.count++;
53208+ atomic_inc(&info->port.count);
53209 spin_unlock_irqrestore(&info->netlock, flags);
53210
53211- if (info->port.count == 1) {
53212+ if (atomic_read(&info->port.count) == 1) {
53213 /* 1st open on this device, init hardware */
53214 retval = startup(info);
53215 if (retval < 0)
53216@@ -3442,8 +3442,8 @@ cleanup:
53217 if (retval) {
53218 if (tty->count == 1)
53219 info->port.tty = NULL; /* tty layer will release tty struct */
53220- if(info->port.count)
53221- info->port.count--;
53222+ if (atomic_read(&info->port.count))
53223+ atomic_dec(&info->port.count);
53224 }
53225
53226 return retval;
53227@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53228 unsigned short new_crctype;
53229
53230 /* return error if TTY interface open */
53231- if (info->port.count)
53232+ if (atomic_read(&info->port.count))
53233 return -EBUSY;
53234
53235 switch (encoding)
53236@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53237
53238 /* arbitrate between network and tty opens */
53239 spin_lock_irqsave(&info->netlock, flags);
53240- if (info->port.count != 0 || info->netcount != 0) {
53241+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53242 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53243 spin_unlock_irqrestore(&info->netlock, flags);
53244 return -EBUSY;
53245@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53246 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53247
53248 /* return error if TTY interface open */
53249- if (info->port.count)
53250+ if (atomic_read(&info->port.count))
53251 return -EBUSY;
53252
53253 if (cmd != SIOCWANDEV)
53254diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53255index 0e8c39b..e0cb171 100644
53256--- a/drivers/tty/synclink_gt.c
53257+++ b/drivers/tty/synclink_gt.c
53258@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53259 tty->driver_data = info;
53260 info->port.tty = tty;
53261
53262- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53263+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53264
53265 /* If port is closing, signal caller to try again */
53266 if (info->port.flags & ASYNC_CLOSING){
53267@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53268 mutex_unlock(&info->port.mutex);
53269 goto cleanup;
53270 }
53271- info->port.count++;
53272+ atomic_inc(&info->port.count);
53273 spin_unlock_irqrestore(&info->netlock, flags);
53274
53275- if (info->port.count == 1) {
53276+ if (atomic_read(&info->port.count) == 1) {
53277 /* 1st open on this device, init hardware */
53278 retval = startup(info);
53279 if (retval < 0) {
53280@@ -715,8 +715,8 @@ cleanup:
53281 if (retval) {
53282 if (tty->count == 1)
53283 info->port.tty = NULL; /* tty layer will release tty struct */
53284- if(info->port.count)
53285- info->port.count--;
53286+ if(atomic_read(&info->port.count))
53287+ atomic_dec(&info->port.count);
53288 }
53289
53290 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53291@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53292
53293 if (sanity_check(info, tty->name, "close"))
53294 return;
53295- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53296+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53297
53298 if (tty_port_close_start(&info->port, tty, filp) == 0)
53299 goto cleanup;
53300@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53301 tty_port_close_end(&info->port, tty);
53302 info->port.tty = NULL;
53303 cleanup:
53304- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53305+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53306 }
53307
53308 static void hangup(struct tty_struct *tty)
53309@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53310 shutdown(info);
53311
53312 spin_lock_irqsave(&info->port.lock, flags);
53313- info->port.count = 0;
53314+ atomic_set(&info->port.count, 0);
53315 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53316 info->port.tty = NULL;
53317 spin_unlock_irqrestore(&info->port.lock, flags);
53318@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53319 unsigned short new_crctype;
53320
53321 /* return error if TTY interface open */
53322- if (info->port.count)
53323+ if (atomic_read(&info->port.count))
53324 return -EBUSY;
53325
53326 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53327@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53328
53329 /* arbitrate between network and tty opens */
53330 spin_lock_irqsave(&info->netlock, flags);
53331- if (info->port.count != 0 || info->netcount != 0) {
53332+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53333 DBGINFO(("%s hdlc_open busy\n", dev->name));
53334 spin_unlock_irqrestore(&info->netlock, flags);
53335 return -EBUSY;
53336@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53337 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53338
53339 /* return error if TTY interface open */
53340- if (info->port.count)
53341+ if (atomic_read(&info->port.count))
53342 return -EBUSY;
53343
53344 if (cmd != SIOCWANDEV)
53345@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53346 if (port == NULL)
53347 continue;
53348 spin_lock(&port->lock);
53349- if ((port->port.count || port->netcount) &&
53350+ if ((atomic_read(&port->port.count) || port->netcount) &&
53351 port->pending_bh && !port->bh_running &&
53352 !port->bh_requested) {
53353 DBGISR(("%s bh queued\n", port->device_name));
53354@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53355 add_wait_queue(&port->open_wait, &wait);
53356
53357 spin_lock_irqsave(&info->lock, flags);
53358- port->count--;
53359+ atomic_dec(&port->count);
53360 spin_unlock_irqrestore(&info->lock, flags);
53361 port->blocked_open++;
53362
53363@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53364 remove_wait_queue(&port->open_wait, &wait);
53365
53366 if (!tty_hung_up_p(filp))
53367- port->count++;
53368+ atomic_inc(&port->count);
53369 port->blocked_open--;
53370
53371 if (!retval)
53372diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53373index c3f9091..abe4601 100644
53374--- a/drivers/tty/synclinkmp.c
53375+++ b/drivers/tty/synclinkmp.c
53376@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53377
53378 if (debug_level >= DEBUG_LEVEL_INFO)
53379 printk("%s(%d):%s open(), old ref count = %d\n",
53380- __FILE__,__LINE__,tty->driver->name, info->port.count);
53381+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53382
53383 /* If port is closing, signal caller to try again */
53384 if (info->port.flags & ASYNC_CLOSING){
53385@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53386 spin_unlock_irqrestore(&info->netlock, flags);
53387 goto cleanup;
53388 }
53389- info->port.count++;
53390+ atomic_inc(&info->port.count);
53391 spin_unlock_irqrestore(&info->netlock, flags);
53392
53393- if (info->port.count == 1) {
53394+ if (atomic_read(&info->port.count) == 1) {
53395 /* 1st open on this device, init hardware */
53396 retval = startup(info);
53397 if (retval < 0)
53398@@ -796,8 +796,8 @@ cleanup:
53399 if (retval) {
53400 if (tty->count == 1)
53401 info->port.tty = NULL; /* tty layer will release tty struct */
53402- if(info->port.count)
53403- info->port.count--;
53404+ if(atomic_read(&info->port.count))
53405+ atomic_dec(&info->port.count);
53406 }
53407
53408 return retval;
53409@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53410
53411 if (debug_level >= DEBUG_LEVEL_INFO)
53412 printk("%s(%d):%s close() entry, count=%d\n",
53413- __FILE__,__LINE__, info->device_name, info->port.count);
53414+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53415
53416 if (tty_port_close_start(&info->port, tty, filp) == 0)
53417 goto cleanup;
53418@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53419 cleanup:
53420 if (debug_level >= DEBUG_LEVEL_INFO)
53421 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53422- tty->driver->name, info->port.count);
53423+ tty->driver->name, atomic_read(&info->port.count));
53424 }
53425
53426 /* Called by tty_hangup() when a hangup is signaled.
53427@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53428 shutdown(info);
53429
53430 spin_lock_irqsave(&info->port.lock, flags);
53431- info->port.count = 0;
53432+ atomic_set(&info->port.count, 0);
53433 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53434 info->port.tty = NULL;
53435 spin_unlock_irqrestore(&info->port.lock, flags);
53436@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53437 unsigned short new_crctype;
53438
53439 /* return error if TTY interface open */
53440- if (info->port.count)
53441+ if (atomic_read(&info->port.count))
53442 return -EBUSY;
53443
53444 switch (encoding)
53445@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53446
53447 /* arbitrate between network and tty opens */
53448 spin_lock_irqsave(&info->netlock, flags);
53449- if (info->port.count != 0 || info->netcount != 0) {
53450+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53451 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53452 spin_unlock_irqrestore(&info->netlock, flags);
53453 return -EBUSY;
53454@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53455 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53456
53457 /* return error if TTY interface open */
53458- if (info->port.count)
53459+ if (atomic_read(&info->port.count))
53460 return -EBUSY;
53461
53462 if (cmd != SIOCWANDEV)
53463@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53464 * do not request bottom half processing if the
53465 * device is not open in a normal mode.
53466 */
53467- if ( port && (port->port.count || port->netcount) &&
53468+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53469 port->pending_bh && !port->bh_running &&
53470 !port->bh_requested ) {
53471 if ( debug_level >= DEBUG_LEVEL_ISR )
53472@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53473
53474 if (debug_level >= DEBUG_LEVEL_INFO)
53475 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53476- __FILE__,__LINE__, tty->driver->name, port->count );
53477+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53478
53479 spin_lock_irqsave(&info->lock, flags);
53480- port->count--;
53481+ atomic_dec(&port->count);
53482 spin_unlock_irqrestore(&info->lock, flags);
53483 port->blocked_open++;
53484
53485@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53486
53487 if (debug_level >= DEBUG_LEVEL_INFO)
53488 printk("%s(%d):%s block_til_ready() count=%d\n",
53489- __FILE__,__LINE__, tty->driver->name, port->count );
53490+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53491
53492 tty_unlock(tty);
53493 schedule();
53494@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53495 set_current_state(TASK_RUNNING);
53496 remove_wait_queue(&port->open_wait, &wait);
53497 if (!tty_hung_up_p(filp))
53498- port->count++;
53499+ atomic_inc(&port->count);
53500 port->blocked_open--;
53501
53502 if (debug_level >= DEBUG_LEVEL_INFO)
53503 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53504- __FILE__,__LINE__, tty->driver->name, port->count );
53505+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53506
53507 if (!retval)
53508 port->flags |= ASYNC_NORMAL_ACTIVE;
53509diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53510index 42bad18..447d7a2 100644
53511--- a/drivers/tty/sysrq.c
53512+++ b/drivers/tty/sysrq.c
53513@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53514 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53515 size_t count, loff_t *ppos)
53516 {
53517- if (count) {
53518+ if (count && capable(CAP_SYS_ADMIN)) {
53519 char c;
53520
53521 if (get_user(c, buf))
53522diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53523index 2bb4dfc..a7f6e86 100644
53524--- a/drivers/tty/tty_io.c
53525+++ b/drivers/tty/tty_io.c
53526@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53527
53528 void tty_default_fops(struct file_operations *fops)
53529 {
53530- *fops = tty_fops;
53531+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53532 }
53533
53534 /*
53535diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53536index 3737f55..7cef448 100644
53537--- a/drivers/tty/tty_ldisc.c
53538+++ b/drivers/tty/tty_ldisc.c
53539@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53540 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53541 tty_ldiscs[disc] = new_ldisc;
53542 new_ldisc->num = disc;
53543- new_ldisc->refcount = 0;
53544+ atomic_set(&new_ldisc->refcount, 0);
53545 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53546
53547 return ret;
53548@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53549 return -EINVAL;
53550
53551 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53552- if (tty_ldiscs[disc]->refcount)
53553+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53554 ret = -EBUSY;
53555 else
53556 tty_ldiscs[disc] = NULL;
53557@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53558 if (ldops) {
53559 ret = ERR_PTR(-EAGAIN);
53560 if (try_module_get(ldops->owner)) {
53561- ldops->refcount++;
53562+ atomic_inc(&ldops->refcount);
53563 ret = ldops;
53564 }
53565 }
53566@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53567 unsigned long flags;
53568
53569 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53570- ldops->refcount--;
53571+ atomic_dec(&ldops->refcount);
53572 module_put(ldops->owner);
53573 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53574 }
53575diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53576index 40b31835..94d92ae 100644
53577--- a/drivers/tty/tty_port.c
53578+++ b/drivers/tty/tty_port.c
53579@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53580 unsigned long flags;
53581
53582 spin_lock_irqsave(&port->lock, flags);
53583- port->count = 0;
53584+ atomic_set(&port->count, 0);
53585 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53586 tty = port->tty;
53587 if (tty)
53588@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53589
53590 /* The port lock protects the port counts */
53591 spin_lock_irqsave(&port->lock, flags);
53592- port->count--;
53593+ atomic_dec(&port->count);
53594 port->blocked_open++;
53595 spin_unlock_irqrestore(&port->lock, flags);
53596
53597@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53598 we must not mess that up further */
53599 spin_lock_irqsave(&port->lock, flags);
53600 if (!tty_hung_up_p(filp))
53601- port->count++;
53602+ atomic_inc(&port->count);
53603 port->blocked_open--;
53604 if (retval == 0)
53605 port->flags |= ASYNC_NORMAL_ACTIVE;
53606@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53607 return 0;
53608
53609 spin_lock_irqsave(&port->lock, flags);
53610- if (tty->count == 1 && port->count != 1) {
53611+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53612 printk(KERN_WARNING
53613 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53614- port->count);
53615- port->count = 1;
53616+ atomic_read(&port->count));
53617+ atomic_set(&port->count, 1);
53618 }
53619- if (--port->count < 0) {
53620+ if (atomic_dec_return(&port->count) < 0) {
53621 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53622- port->count);
53623- port->count = 0;
53624+ atomic_read(&port->count));
53625+ atomic_set(&port->count, 0);
53626 }
53627
53628- if (port->count) {
53629+ if (atomic_read(&port->count)) {
53630 spin_unlock_irqrestore(&port->lock, flags);
53631 return 0;
53632 }
53633@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53634 struct file *filp)
53635 {
53636 spin_lock_irq(&port->lock);
53637- ++port->count;
53638+ atomic_inc(&port->count);
53639 spin_unlock_irq(&port->lock);
53640 tty_port_tty_set(port, tty);
53641
53642diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53643index 8a89f6e..50b32af 100644
53644--- a/drivers/tty/vt/keyboard.c
53645+++ b/drivers/tty/vt/keyboard.c
53646@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53647 kbd->kbdmode == VC_OFF) &&
53648 value != KVAL(K_SAK))
53649 return; /* SAK is allowed even in raw mode */
53650+
53651+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53652+ {
53653+ void *func = fn_handler[value];
53654+ if (func == fn_show_state || func == fn_show_ptregs ||
53655+ func == fn_show_mem)
53656+ return;
53657+ }
53658+#endif
53659+
53660 fn_handler[value](vc);
53661 }
53662
53663@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53664 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53665 return -EFAULT;
53666
53667- if (!capable(CAP_SYS_TTY_CONFIG))
53668- perm = 0;
53669-
53670 switch (cmd) {
53671 case KDGKBENT:
53672 /* Ensure another thread doesn't free it under us */
53673@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53674 spin_unlock_irqrestore(&kbd_event_lock, flags);
53675 return put_user(val, &user_kbe->kb_value);
53676 case KDSKBENT:
53677+ if (!capable(CAP_SYS_TTY_CONFIG))
53678+ perm = 0;
53679+
53680 if (!perm)
53681 return -EPERM;
53682 if (!i && v == K_NOSUCHMAP) {
53683@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53684 int i, j, k;
53685 int ret;
53686
53687- if (!capable(CAP_SYS_TTY_CONFIG))
53688- perm = 0;
53689-
53690 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53691 if (!kbs) {
53692 ret = -ENOMEM;
53693@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53694 kfree(kbs);
53695 return ((p && *p) ? -EOVERFLOW : 0);
53696 case KDSKBSENT:
53697+ if (!capable(CAP_SYS_TTY_CONFIG))
53698+ perm = 0;
53699+
53700 if (!perm) {
53701 ret = -EPERM;
53702 goto reterr;
53703diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53704index 6276f13..84f2449 100644
53705--- a/drivers/uio/uio.c
53706+++ b/drivers/uio/uio.c
53707@@ -25,6 +25,7 @@
53708 #include <linux/kobject.h>
53709 #include <linux/cdev.h>
53710 #include <linux/uio_driver.h>
53711+#include <asm/local.h>
53712
53713 #define UIO_MAX_DEVICES (1U << MINORBITS)
53714
53715@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53716 struct device_attribute *attr, char *buf)
53717 {
53718 struct uio_device *idev = dev_get_drvdata(dev);
53719- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53720+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53721 }
53722 static DEVICE_ATTR_RO(event);
53723
53724@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53725 {
53726 struct uio_device *idev = info->uio_dev;
53727
53728- atomic_inc(&idev->event);
53729+ atomic_inc_unchecked(&idev->event);
53730 wake_up_interruptible(&idev->wait);
53731 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53732 }
53733@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53734 }
53735
53736 listener->dev = idev;
53737- listener->event_count = atomic_read(&idev->event);
53738+ listener->event_count = atomic_read_unchecked(&idev->event);
53739 filep->private_data = listener;
53740
53741 if (idev->info->open) {
53742@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53743 return -EIO;
53744
53745 poll_wait(filep, &idev->wait, wait);
53746- if (listener->event_count != atomic_read(&idev->event))
53747+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53748 return POLLIN | POLLRDNORM;
53749 return 0;
53750 }
53751@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53752 do {
53753 set_current_state(TASK_INTERRUPTIBLE);
53754
53755- event_count = atomic_read(&idev->event);
53756+ event_count = atomic_read_unchecked(&idev->event);
53757 if (event_count != listener->event_count) {
53758 if (copy_to_user(buf, &event_count, count))
53759 retval = -EFAULT;
53760@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53761 static int uio_find_mem_index(struct vm_area_struct *vma)
53762 {
53763 struct uio_device *idev = vma->vm_private_data;
53764+ unsigned long size;
53765
53766 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53767- if (idev->info->mem[vma->vm_pgoff].size == 0)
53768+ size = idev->info->mem[vma->vm_pgoff].size;
53769+ if (size == 0)
53770+ return -1;
53771+ if (vma->vm_end - vma->vm_start > size)
53772 return -1;
53773 return (int)vma->vm_pgoff;
53774 }
53775@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53776 idev->owner = owner;
53777 idev->info = info;
53778 init_waitqueue_head(&idev->wait);
53779- atomic_set(&idev->event, 0);
53780+ atomic_set_unchecked(&idev->event, 0);
53781
53782 ret = uio_get_minor(idev);
53783 if (ret)
53784diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53785index 813d4d3..a71934f 100644
53786--- a/drivers/usb/atm/cxacru.c
53787+++ b/drivers/usb/atm/cxacru.c
53788@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53789 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53790 if (ret < 2)
53791 return -EINVAL;
53792- if (index < 0 || index > 0x7f)
53793+ if (index > 0x7f)
53794 return -EINVAL;
53795 pos += tmp;
53796
53797diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53798index dada014..1d0d517 100644
53799--- a/drivers/usb/atm/usbatm.c
53800+++ b/drivers/usb/atm/usbatm.c
53801@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53802 if (printk_ratelimit())
53803 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53804 __func__, vpi, vci);
53805- atomic_inc(&vcc->stats->rx_err);
53806+ atomic_inc_unchecked(&vcc->stats->rx_err);
53807 return;
53808 }
53809
53810@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53811 if (length > ATM_MAX_AAL5_PDU) {
53812 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53813 __func__, length, vcc);
53814- atomic_inc(&vcc->stats->rx_err);
53815+ atomic_inc_unchecked(&vcc->stats->rx_err);
53816 goto out;
53817 }
53818
53819@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53820 if (sarb->len < pdu_length) {
53821 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53822 __func__, pdu_length, sarb->len, vcc);
53823- atomic_inc(&vcc->stats->rx_err);
53824+ atomic_inc_unchecked(&vcc->stats->rx_err);
53825 goto out;
53826 }
53827
53828 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
53829 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
53830 __func__, vcc);
53831- atomic_inc(&vcc->stats->rx_err);
53832+ atomic_inc_unchecked(&vcc->stats->rx_err);
53833 goto out;
53834 }
53835
53836@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53837 if (printk_ratelimit())
53838 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
53839 __func__, length);
53840- atomic_inc(&vcc->stats->rx_drop);
53841+ atomic_inc_unchecked(&vcc->stats->rx_drop);
53842 goto out;
53843 }
53844
53845@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53846
53847 vcc->push(vcc, skb);
53848
53849- atomic_inc(&vcc->stats->rx);
53850+ atomic_inc_unchecked(&vcc->stats->rx);
53851 out:
53852 skb_trim(sarb, 0);
53853 }
53854@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
53855 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
53856
53857 usbatm_pop(vcc, skb);
53858- atomic_inc(&vcc->stats->tx);
53859+ atomic_inc_unchecked(&vcc->stats->tx);
53860
53861 skb = skb_dequeue(&instance->sndqueue);
53862 }
53863@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
53864 if (!left--)
53865 return sprintf(page,
53866 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
53867- atomic_read(&atm_dev->stats.aal5.tx),
53868- atomic_read(&atm_dev->stats.aal5.tx_err),
53869- atomic_read(&atm_dev->stats.aal5.rx),
53870- atomic_read(&atm_dev->stats.aal5.rx_err),
53871- atomic_read(&atm_dev->stats.aal5.rx_drop));
53872+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
53873+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
53874+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
53875+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
53876+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
53877
53878 if (!left--) {
53879 if (instance->disconnected)
53880diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
53881index 2a3bbdf..91d72cf 100644
53882--- a/drivers/usb/core/devices.c
53883+++ b/drivers/usb/core/devices.c
53884@@ -126,7 +126,7 @@ static const char format_endpt[] =
53885 * time it gets called.
53886 */
53887 static struct device_connect_event {
53888- atomic_t count;
53889+ atomic_unchecked_t count;
53890 wait_queue_head_t wait;
53891 } device_event = {
53892 .count = ATOMIC_INIT(1),
53893@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
53894
53895 void usbfs_conn_disc_event(void)
53896 {
53897- atomic_add(2, &device_event.count);
53898+ atomic_add_unchecked(2, &device_event.count);
53899 wake_up(&device_event.wait);
53900 }
53901
53902@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
53903
53904 poll_wait(file, &device_event.wait, wait);
53905
53906- event_count = atomic_read(&device_event.count);
53907+ event_count = atomic_read_unchecked(&device_event.count);
53908 if (file->f_version != event_count) {
53909 file->f_version = event_count;
53910 return POLLIN | POLLRDNORM;
53911diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
53912index e500243..401300f 100644
53913--- a/drivers/usb/core/devio.c
53914+++ b/drivers/usb/core/devio.c
53915@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53916 struct usb_dev_state *ps = file->private_data;
53917 struct usb_device *dev = ps->dev;
53918 ssize_t ret = 0;
53919- unsigned len;
53920+ size_t len;
53921 loff_t pos;
53922 int i;
53923
53924@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
53925 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
53926 struct usb_config_descriptor *config =
53927 (struct usb_config_descriptor *)dev->rawdescriptors[i];
53928- unsigned int length = le16_to_cpu(config->wTotalLength);
53929+ size_t length = le16_to_cpu(config->wTotalLength);
53930
53931 if (*ppos < pos + length) {
53932
53933 /* The descriptor may claim to be longer than it
53934 * really is. Here is the actual allocated length. */
53935- unsigned alloclen =
53936+ size_t alloclen =
53937 le16_to_cpu(dev->config[i].desc.wTotalLength);
53938
53939- len = length - (*ppos - pos);
53940+ len = length + pos - *ppos;
53941 if (len > nbytes)
53942 len = nbytes;
53943
53944 /* Simply don't write (skip over) unallocated parts */
53945 if (alloclen > (*ppos - pos)) {
53946- alloclen -= (*ppos - pos);
53947+ alloclen = alloclen + pos - *ppos;
53948 if (copy_to_user(buf,
53949 dev->rawdescriptors[i] + (*ppos - pos),
53950 min(len, alloclen))) {
53951diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
53952index 45a915c..09f9735 100644
53953--- a/drivers/usb/core/hcd.c
53954+++ b/drivers/usb/core/hcd.c
53955@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53956 */
53957 usb_get_urb(urb);
53958 atomic_inc(&urb->use_count);
53959- atomic_inc(&urb->dev->urbnum);
53960+ atomic_inc_unchecked(&urb->dev->urbnum);
53961 usbmon_urb_submit(&hcd->self, urb);
53962
53963 /* NOTE requirements on root-hub callers (usbfs and the hub
53964@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
53965 urb->hcpriv = NULL;
53966 INIT_LIST_HEAD(&urb->urb_list);
53967 atomic_dec(&urb->use_count);
53968- atomic_dec(&urb->dev->urbnum);
53969+ atomic_dec_unchecked(&urb->dev->urbnum);
53970 if (atomic_read(&urb->reject))
53971 wake_up(&usb_kill_urb_queue);
53972 usb_put_urb(urb);
53973diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
53974index b4bfa3a..008f926 100644
53975--- a/drivers/usb/core/hub.c
53976+++ b/drivers/usb/core/hub.c
53977@@ -26,6 +26,7 @@
53978 #include <linux/mutex.h>
53979 #include <linux/random.h>
53980 #include <linux/pm_qos.h>
53981+#include <linux/grsecurity.h>
53982
53983 #include <asm/uaccess.h>
53984 #include <asm/byteorder.h>
53985@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
53986 goto done;
53987 return;
53988 }
53989+
53990+ if (gr_handle_new_usb())
53991+ goto done;
53992+
53993 if (hub_is_superspeed(hub->hdev))
53994 unit_load = 150;
53995 else
53996diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
53997index f368d20..0c30ac5 100644
53998--- a/drivers/usb/core/message.c
53999+++ b/drivers/usb/core/message.c
54000@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54001 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54002 * error number.
54003 */
54004-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54005+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54006 __u8 requesttype, __u16 value, __u16 index, void *data,
54007 __u16 size, int timeout)
54008 {
54009@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54010 * If successful, 0. Otherwise a negative error number. The number of actual
54011 * bytes transferred will be stored in the @actual_length parameter.
54012 */
54013-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54014+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54015 void *data, int len, int *actual_length, int timeout)
54016 {
54017 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54018@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54019 * bytes transferred will be stored in the @actual_length parameter.
54020 *
54021 */
54022-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54023+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54024 void *data, int len, int *actual_length, int timeout)
54025 {
54026 struct urb *urb;
54027diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54028index d269738..7340cd7 100644
54029--- a/drivers/usb/core/sysfs.c
54030+++ b/drivers/usb/core/sysfs.c
54031@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54032 struct usb_device *udev;
54033
54034 udev = to_usb_device(dev);
54035- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54036+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54037 }
54038 static DEVICE_ATTR_RO(urbnum);
54039
54040diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54041index b1fb9ae..4224885 100644
54042--- a/drivers/usb/core/usb.c
54043+++ b/drivers/usb/core/usb.c
54044@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54045 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54046 dev->state = USB_STATE_ATTACHED;
54047 dev->lpm_disable_count = 1;
54048- atomic_set(&dev->urbnum, 0);
54049+ atomic_set_unchecked(&dev->urbnum, 0);
54050
54051 INIT_LIST_HEAD(&dev->ep0.urb_list);
54052 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54053diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54054index 8cfc319..4868255 100644
54055--- a/drivers/usb/early/ehci-dbgp.c
54056+++ b/drivers/usb/early/ehci-dbgp.c
54057@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54058
54059 #ifdef CONFIG_KGDB
54060 static struct kgdb_io kgdbdbgp_io_ops;
54061-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54062+static struct kgdb_io kgdbdbgp_io_ops_console;
54063+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54064 #else
54065 #define dbgp_kgdb_mode (0)
54066 #endif
54067@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54068 .write_char = kgdbdbgp_write_char,
54069 };
54070
54071+static struct kgdb_io kgdbdbgp_io_ops_console = {
54072+ .name = "kgdbdbgp",
54073+ .read_char = kgdbdbgp_read_char,
54074+ .write_char = kgdbdbgp_write_char,
54075+ .is_console = 1
54076+};
54077+
54078 static int kgdbdbgp_wait_time;
54079
54080 static int __init kgdbdbgp_parse_config(char *str)
54081@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54082 ptr++;
54083 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54084 }
54085- kgdb_register_io_module(&kgdbdbgp_io_ops);
54086- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54087+ if (early_dbgp_console.index != -1)
54088+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54089+ else
54090+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54091
54092 return 0;
54093 }
54094diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54095index e971584..03495ab 100644
54096--- a/drivers/usb/gadget/function/f_uac1.c
54097+++ b/drivers/usb/gadget/function/f_uac1.c
54098@@ -14,6 +14,7 @@
54099 #include <linux/module.h>
54100 #include <linux/device.h>
54101 #include <linux/atomic.h>
54102+#include <linux/module.h>
54103
54104 #include "u_uac1.h"
54105
54106diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54107index 491082a..dfd7d17 100644
54108--- a/drivers/usb/gadget/function/u_serial.c
54109+++ b/drivers/usb/gadget/function/u_serial.c
54110@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54111 spin_lock_irq(&port->port_lock);
54112
54113 /* already open? Great. */
54114- if (port->port.count) {
54115+ if (atomic_read(&port->port.count)) {
54116 status = 0;
54117- port->port.count++;
54118+ atomic_inc(&port->port.count);
54119
54120 /* currently opening/closing? wait ... */
54121 } else if (port->openclose) {
54122@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54123 tty->driver_data = port;
54124 port->port.tty = tty;
54125
54126- port->port.count = 1;
54127+ atomic_set(&port->port.count, 1);
54128 port->openclose = false;
54129
54130 /* if connected, start the I/O stream */
54131@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54132
54133 spin_lock_irq(&port->port_lock);
54134
54135- if (port->port.count != 1) {
54136- if (port->port.count == 0)
54137+ if (atomic_read(&port->port.count) != 1) {
54138+ if (atomic_read(&port->port.count) == 0)
54139 WARN_ON(1);
54140 else
54141- --port->port.count;
54142+ atomic_dec(&port->port.count);
54143 goto exit;
54144 }
54145
54146@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54147 * and sleep if necessary
54148 */
54149 port->openclose = true;
54150- port->port.count = 0;
54151+ atomic_set(&port->port.count, 0);
54152
54153 gser = port->port_usb;
54154 if (gser && gser->disconnect)
54155@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54156 int cond;
54157
54158 spin_lock_irq(&port->port_lock);
54159- cond = (port->port.count == 0) && !port->openclose;
54160+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54161 spin_unlock_irq(&port->port_lock);
54162 return cond;
54163 }
54164@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54165 /* if it's already open, start I/O ... and notify the serial
54166 * protocol about open/close status (connect/disconnect).
54167 */
54168- if (port->port.count) {
54169+ if (atomic_read(&port->port.count)) {
54170 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54171 gs_start_io(port);
54172 if (gser->connect)
54173@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54174
54175 port->port_usb = NULL;
54176 gser->ioport = NULL;
54177- if (port->port.count > 0 || port->openclose) {
54178+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54179 wake_up_interruptible(&port->drain_wait);
54180 if (port->port.tty)
54181 tty_hangup(port->port.tty);
54182@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54183
54184 /* finally, free any unused/unusable I/O buffers */
54185 spin_lock_irqsave(&port->port_lock, flags);
54186- if (port->port.count == 0 && !port->openclose)
54187+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54188 gs_buf_free(&port->port_write_buf);
54189 gs_free_requests(gser->out, &port->read_pool, NULL);
54190 gs_free_requests(gser->out, &port->read_queue, NULL);
54191diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54192index 53842a1..2bef3b6 100644
54193--- a/drivers/usb/gadget/function/u_uac1.c
54194+++ b/drivers/usb/gadget/function/u_uac1.c
54195@@ -17,6 +17,7 @@
54196 #include <linux/ctype.h>
54197 #include <linux/random.h>
54198 #include <linux/syscalls.h>
54199+#include <linux/module.h>
54200
54201 #include "u_uac1.h"
54202
54203diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54204index 118edb7..7a6415f 100644
54205--- a/drivers/usb/host/ehci-hub.c
54206+++ b/drivers/usb/host/ehci-hub.c
54207@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54208 urb->transfer_flags = URB_DIR_IN;
54209 usb_get_urb(urb);
54210 atomic_inc(&urb->use_count);
54211- atomic_inc(&urb->dev->urbnum);
54212+ atomic_inc_unchecked(&urb->dev->urbnum);
54213 urb->setup_dma = dma_map_single(
54214 hcd->self.controller,
54215 urb->setup_packet,
54216@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54217 urb->status = -EINPROGRESS;
54218 usb_get_urb(urb);
54219 atomic_inc(&urb->use_count);
54220- atomic_inc(&urb->dev->urbnum);
54221+ atomic_inc_unchecked(&urb->dev->urbnum);
54222 retval = submit_single_step_set_feature(hcd, urb, 0);
54223 if (!retval && !wait_for_completion_timeout(&done,
54224 msecs_to_jiffies(2000))) {
54225diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54226index 1db0626..4948782 100644
54227--- a/drivers/usb/host/hwa-hc.c
54228+++ b/drivers/usb/host/hwa-hc.c
54229@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54230 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54231 struct wahc *wa = &hwahc->wa;
54232 struct device *dev = &wa->usb_iface->dev;
54233- u8 mas_le[UWB_NUM_MAS/8];
54234+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54235+
54236+ if (mas_le == NULL)
54237+ return -ENOMEM;
54238
54239 /* Set the stream index */
54240 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54241@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54242 WUSB_REQ_SET_WUSB_MAS,
54243 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54244 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54245- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54246+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54247 if (result < 0)
54248 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54249 out:
54250+ kfree(mas_le);
54251+
54252 return result;
54253 }
54254
54255diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54256index b3d245e..99549ed 100644
54257--- a/drivers/usb/misc/appledisplay.c
54258+++ b/drivers/usb/misc/appledisplay.c
54259@@ -84,7 +84,7 @@ struct appledisplay {
54260 struct mutex sysfslock; /* concurrent read and write */
54261 };
54262
54263-static atomic_t count_displays = ATOMIC_INIT(0);
54264+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54265 static struct workqueue_struct *wq;
54266
54267 static void appledisplay_complete(struct urb *urb)
54268@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54269
54270 /* Register backlight device */
54271 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54272- atomic_inc_return(&count_displays) - 1);
54273+ atomic_inc_return_unchecked(&count_displays) - 1);
54274 memset(&props, 0, sizeof(struct backlight_properties));
54275 props.type = BACKLIGHT_RAW;
54276 props.max_brightness = 0xff;
54277diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54278index 29fa1c3..a57b08e 100644
54279--- a/drivers/usb/serial/console.c
54280+++ b/drivers/usb/serial/console.c
54281@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54282
54283 info->port = port;
54284
54285- ++port->port.count;
54286+ atomic_inc(&port->port.count);
54287 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54288 if (serial->type->set_termios) {
54289 /*
54290@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54291 }
54292 /* Now that any required fake tty operations are completed restore
54293 * the tty port count */
54294- --port->port.count;
54295+ atomic_dec(&port->port.count);
54296 /* The console is special in terms of closing the device so
54297 * indicate this port is now acting as a system console. */
54298 port->port.console = 1;
54299@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54300 put_tty:
54301 tty_kref_put(tty);
54302 reset_open_count:
54303- port->port.count = 0;
54304+ atomic_set(&port->port.count, 0);
54305 usb_autopm_put_interface(serial->interface);
54306 error_get_interface:
54307 usb_serial_put(serial);
54308@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54309 static void usb_console_write(struct console *co,
54310 const char *buf, unsigned count)
54311 {
54312- static struct usbcons_info *info = &usbcons_info;
54313+ struct usbcons_info *info = &usbcons_info;
54314 struct usb_serial_port *port = info->port;
54315 struct usb_serial *serial;
54316 int retval = -ENODEV;
54317diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54318index 307e339..6aa97cb 100644
54319--- a/drivers/usb/storage/usb.h
54320+++ b/drivers/usb/storage/usb.h
54321@@ -63,7 +63,7 @@ struct us_unusual_dev {
54322 __u8 useProtocol;
54323 __u8 useTransport;
54324 int (*initFunction)(struct us_data *);
54325-};
54326+} __do_const;
54327
54328
54329 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54330diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54331index a863a98..d272795 100644
54332--- a/drivers/usb/usbip/vhci.h
54333+++ b/drivers/usb/usbip/vhci.h
54334@@ -83,7 +83,7 @@ struct vhci_hcd {
54335 unsigned resuming:1;
54336 unsigned long re_timeout;
54337
54338- atomic_t seqnum;
54339+ atomic_unchecked_t seqnum;
54340
54341 /*
54342 * NOTE:
54343diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54344index 1ae9d40..c62604b 100644
54345--- a/drivers/usb/usbip/vhci_hcd.c
54346+++ b/drivers/usb/usbip/vhci_hcd.c
54347@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54348
54349 spin_lock(&vdev->priv_lock);
54350
54351- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54352+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54353 if (priv->seqnum == 0xffff)
54354 dev_info(&urb->dev->dev, "seqnum max\n");
54355
54356@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54357 return -ENOMEM;
54358 }
54359
54360- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54361+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54362 if (unlink->seqnum == 0xffff)
54363 pr_info("seqnum max\n");
54364
54365@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54366 vdev->rhport = rhport;
54367 }
54368
54369- atomic_set(&vhci->seqnum, 0);
54370+ atomic_set_unchecked(&vhci->seqnum, 0);
54371 spin_lock_init(&vhci->lock);
54372
54373 hcd->power_budget = 0; /* no limit */
54374diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54375index 00e4a54..d676f85 100644
54376--- a/drivers/usb/usbip/vhci_rx.c
54377+++ b/drivers/usb/usbip/vhci_rx.c
54378@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54379 if (!urb) {
54380 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54381 pr_info("max seqnum %d\n",
54382- atomic_read(&the_controller->seqnum));
54383+ atomic_read_unchecked(&the_controller->seqnum));
54384 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54385 return;
54386 }
54387diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54388index edc7267..9f65ce2 100644
54389--- a/drivers/usb/wusbcore/wa-hc.h
54390+++ b/drivers/usb/wusbcore/wa-hc.h
54391@@ -240,7 +240,7 @@ struct wahc {
54392 spinlock_t xfer_list_lock;
54393 struct work_struct xfer_enqueue_work;
54394 struct work_struct xfer_error_work;
54395- atomic_t xfer_id_count;
54396+ atomic_unchecked_t xfer_id_count;
54397
54398 kernel_ulong_t quirks;
54399 };
54400@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54401 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54402 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54403 wa->dto_in_use = 0;
54404- atomic_set(&wa->xfer_id_count, 1);
54405+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54406 /* init the buf in URBs */
54407 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54408 usb_init_urb(&(wa->buf_in_urbs[index]));
54409diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54410index 69af4fd..da390d7 100644
54411--- a/drivers/usb/wusbcore/wa-xfer.c
54412+++ b/drivers/usb/wusbcore/wa-xfer.c
54413@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54414 */
54415 static void wa_xfer_id_init(struct wa_xfer *xfer)
54416 {
54417- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54418+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54419 }
54420
54421 /* Return the xfer's ID. */
54422diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54423index f018d8d..ccab63f 100644
54424--- a/drivers/vfio/vfio.c
54425+++ b/drivers/vfio/vfio.c
54426@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54427 return 0;
54428
54429 /* TODO Prevent device auto probing */
54430- WARN("Device %s added to live group %d!\n", dev_name(dev),
54431+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54432 iommu_group_id(group->iommu_group));
54433
54434 return 0;
54435diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54436index 9484d56..d415d69 100644
54437--- a/drivers/vhost/net.c
54438+++ b/drivers/vhost/net.c
54439@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54440 break;
54441 }
54442 /* TODO: Should check and handle checksum. */
54443-
54444- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54445 if (likely(mergeable) &&
54446- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54447+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54448 offsetof(typeof(hdr), num_buffers),
54449 sizeof hdr.num_buffers)) {
54450 vq_err(vq, "Failed num_buffers write");
54451diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54452index 3bb02c6..a01ff38 100644
54453--- a/drivers/vhost/vringh.c
54454+++ b/drivers/vhost/vringh.c
54455@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54456 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54457 {
54458 __virtio16 v = 0;
54459- int rc = get_user(v, (__force __virtio16 __user *)p);
54460+ int rc = get_user(v, (__force_user __virtio16 *)p);
54461 *val = vringh16_to_cpu(vrh, v);
54462 return rc;
54463 }
54464@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54465 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54466 {
54467 __virtio16 v = cpu_to_vringh16(vrh, val);
54468- return put_user(v, (__force __virtio16 __user *)p);
54469+ return put_user(v, (__force_user __virtio16 *)p);
54470 }
54471
54472 static inline int copydesc_user(void *dst, const void *src, size_t len)
54473 {
54474- return copy_from_user(dst, (__force void __user *)src, len) ?
54475+ return copy_from_user(dst, (void __force_user *)src, len) ?
54476 -EFAULT : 0;
54477 }
54478
54479@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54480 const struct vring_used_elem *src,
54481 unsigned int num)
54482 {
54483- return copy_to_user((__force void __user *)dst, src,
54484+ return copy_to_user((void __force_user *)dst, src,
54485 sizeof(*dst) * num) ? -EFAULT : 0;
54486 }
54487
54488 static inline int xfer_from_user(void *src, void *dst, size_t len)
54489 {
54490- return copy_from_user(dst, (__force void __user *)src, len) ?
54491+ return copy_from_user(dst, (void __force_user *)src, len) ?
54492 -EFAULT : 0;
54493 }
54494
54495 static inline int xfer_to_user(void *dst, void *src, size_t len)
54496 {
54497- return copy_to_user((__force void __user *)dst, src, len) ?
54498+ return copy_to_user((void __force_user *)dst, src, len) ?
54499 -EFAULT : 0;
54500 }
54501
54502@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54503 vrh->last_used_idx = 0;
54504 vrh->vring.num = num;
54505 /* vring expects kernel addresses, but only used via accessors. */
54506- vrh->vring.desc = (__force struct vring_desc *)desc;
54507- vrh->vring.avail = (__force struct vring_avail *)avail;
54508- vrh->vring.used = (__force struct vring_used *)used;
54509+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54510+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54511+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54512 return 0;
54513 }
54514 EXPORT_SYMBOL(vringh_init_user);
54515@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54516
54517 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54518 {
54519- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54520+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54521 return 0;
54522 }
54523
54524diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54525index 84a110a..96312c3 100644
54526--- a/drivers/video/backlight/kb3886_bl.c
54527+++ b/drivers/video/backlight/kb3886_bl.c
54528@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54529 static unsigned long kb3886bl_flags;
54530 #define KB3886BL_SUSPENDED 0x01
54531
54532-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54533+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54534 {
54535 .ident = "Sahara Touch-iT",
54536 .matches = {
54537diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54538index 1b0b233..6f34c2c 100644
54539--- a/drivers/video/fbdev/arcfb.c
54540+++ b/drivers/video/fbdev/arcfb.c
54541@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54542 return -ENOSPC;
54543
54544 err = 0;
54545- if ((count + p) > fbmemlength) {
54546+ if (count > (fbmemlength - p)) {
54547 count = fbmemlength - p;
54548 err = -ENOSPC;
54549 }
54550diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54551index aedf2fb..47c9aca 100644
54552--- a/drivers/video/fbdev/aty/aty128fb.c
54553+++ b/drivers/video/fbdev/aty/aty128fb.c
54554@@ -149,7 +149,7 @@ enum {
54555 };
54556
54557 /* Must match above enum */
54558-static char * const r128_family[] = {
54559+static const char * const r128_family[] = {
54560 "AGP",
54561 "PCI",
54562 "PRO AGP",
54563diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54564index 37ec09b..98f8862 100644
54565--- a/drivers/video/fbdev/aty/atyfb_base.c
54566+++ b/drivers/video/fbdev/aty/atyfb_base.c
54567@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54568 par->accel_flags = var->accel_flags; /* hack */
54569
54570 if (var->accel_flags) {
54571- info->fbops->fb_sync = atyfb_sync;
54572+ pax_open_kernel();
54573+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54574+ pax_close_kernel();
54575 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54576 } else {
54577- info->fbops->fb_sync = NULL;
54578+ pax_open_kernel();
54579+ *(void **)&info->fbops->fb_sync = NULL;
54580+ pax_close_kernel();
54581 info->flags |= FBINFO_HWACCEL_DISABLED;
54582 }
54583
54584diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54585index 2fa0317..4983f2a 100644
54586--- a/drivers/video/fbdev/aty/mach64_cursor.c
54587+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54588@@ -8,6 +8,7 @@
54589 #include "../core/fb_draw.h"
54590
54591 #include <asm/io.h>
54592+#include <asm/pgtable.h>
54593
54594 #ifdef __sparc__
54595 #include <asm/fbio.h>
54596@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54597 info->sprite.buf_align = 16; /* and 64 lines tall. */
54598 info->sprite.flags = FB_PIXMAP_IO;
54599
54600- info->fbops->fb_cursor = atyfb_cursor;
54601+ pax_open_kernel();
54602+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54603+ pax_close_kernel();
54604
54605 return 0;
54606 }
54607diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54608index d6cab1f..112f680 100644
54609--- a/drivers/video/fbdev/core/fb_defio.c
54610+++ b/drivers/video/fbdev/core/fb_defio.c
54611@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54612
54613 BUG_ON(!fbdefio);
54614 mutex_init(&fbdefio->lock);
54615- info->fbops->fb_mmap = fb_deferred_io_mmap;
54616+ pax_open_kernel();
54617+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54618+ pax_close_kernel();
54619 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54620 INIT_LIST_HEAD(&fbdefio->pagelist);
54621 if (fbdefio->delay == 0) /* set a default of 1 s */
54622@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54623 page->mapping = NULL;
54624 }
54625
54626- info->fbops->fb_mmap = NULL;
54627+ *(void **)&info->fbops->fb_mmap = NULL;
54628 mutex_destroy(&fbdefio->lock);
54629 }
54630 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54631diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54632index 0705d88..d9429bf 100644
54633--- a/drivers/video/fbdev/core/fbmem.c
54634+++ b/drivers/video/fbdev/core/fbmem.c
54635@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54636 __u32 data;
54637 int err;
54638
54639- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54640+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54641
54642 data = (__u32) (unsigned long) fix->smem_start;
54643 err |= put_user(data, &fix32->smem_start);
54644diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54645index 4254336..282567e 100644
54646--- a/drivers/video/fbdev/hyperv_fb.c
54647+++ b/drivers/video/fbdev/hyperv_fb.c
54648@@ -240,7 +240,7 @@ static uint screen_fb_size;
54649 static inline int synthvid_send(struct hv_device *hdev,
54650 struct synthvid_msg *msg)
54651 {
54652- static atomic64_t request_id = ATOMIC64_INIT(0);
54653+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54654 int ret;
54655
54656 msg->pipe_hdr.type = PIPE_MSG_DATA;
54657@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54658
54659 ret = vmbus_sendpacket(hdev->channel, msg,
54660 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54661- atomic64_inc_return(&request_id),
54662+ atomic64_inc_return_unchecked(&request_id),
54663 VM_PKT_DATA_INBAND, 0);
54664
54665 if (ret)
54666diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54667index 7672d2e..b56437f 100644
54668--- a/drivers/video/fbdev/i810/i810_accel.c
54669+++ b/drivers/video/fbdev/i810/i810_accel.c
54670@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54671 }
54672 }
54673 printk("ringbuffer lockup!!!\n");
54674+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54675 i810_report_error(mmio);
54676 par->dev_flags |= LOCKUP;
54677 info->pixmap.scan_align = 1;
54678diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54679index a01147f..5d896f8 100644
54680--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54681+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54682@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54683
54684 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54685 struct matrox_switch matrox_mystique = {
54686- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54687+ .preinit = MGA1064_preinit,
54688+ .reset = MGA1064_reset,
54689+ .init = MGA1064_init,
54690+ .restore = MGA1064_restore,
54691 };
54692 EXPORT_SYMBOL(matrox_mystique);
54693 #endif
54694
54695 #ifdef CONFIG_FB_MATROX_G
54696 struct matrox_switch matrox_G100 = {
54697- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54698+ .preinit = MGAG100_preinit,
54699+ .reset = MGAG100_reset,
54700+ .init = MGAG100_init,
54701+ .restore = MGAG100_restore,
54702 };
54703 EXPORT_SYMBOL(matrox_G100);
54704 #endif
54705diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54706index 195ad7c..09743fc 100644
54707--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54708+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54709@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54710 }
54711
54712 struct matrox_switch matrox_millennium = {
54713- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54714+ .preinit = Ti3026_preinit,
54715+ .reset = Ti3026_reset,
54716+ .init = Ti3026_init,
54717+ .restore = Ti3026_restore
54718 };
54719 EXPORT_SYMBOL(matrox_millennium);
54720 #endif
54721diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54722index fe92eed..106e085 100644
54723--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54724+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54725@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54726 struct mb862xxfb_par *par = info->par;
54727
54728 if (info->var.bits_per_pixel == 32) {
54729- info->fbops->fb_fillrect = cfb_fillrect;
54730- info->fbops->fb_copyarea = cfb_copyarea;
54731- info->fbops->fb_imageblit = cfb_imageblit;
54732+ pax_open_kernel();
54733+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54734+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54735+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54736+ pax_close_kernel();
54737 } else {
54738 outreg(disp, GC_L0EM, 3);
54739- info->fbops->fb_fillrect = mb86290fb_fillrect;
54740- info->fbops->fb_copyarea = mb86290fb_copyarea;
54741- info->fbops->fb_imageblit = mb86290fb_imageblit;
54742+ pax_open_kernel();
54743+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54744+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54745+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54746+ pax_close_kernel();
54747 }
54748 outreg(draw, GDC_REG_DRAW_BASE, 0);
54749 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54750diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54751index def0412..fed6529 100644
54752--- a/drivers/video/fbdev/nvidia/nvidia.c
54753+++ b/drivers/video/fbdev/nvidia/nvidia.c
54754@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54755 info->fix.line_length = (info->var.xres_virtual *
54756 info->var.bits_per_pixel) >> 3;
54757 if (info->var.accel_flags) {
54758- info->fbops->fb_imageblit = nvidiafb_imageblit;
54759- info->fbops->fb_fillrect = nvidiafb_fillrect;
54760- info->fbops->fb_copyarea = nvidiafb_copyarea;
54761- info->fbops->fb_sync = nvidiafb_sync;
54762+ pax_open_kernel();
54763+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54764+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54765+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54766+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54767+ pax_close_kernel();
54768 info->pixmap.scan_align = 4;
54769 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54770 info->flags |= FBINFO_READS_FAST;
54771 NVResetGraphics(info);
54772 } else {
54773- info->fbops->fb_imageblit = cfb_imageblit;
54774- info->fbops->fb_fillrect = cfb_fillrect;
54775- info->fbops->fb_copyarea = cfb_copyarea;
54776- info->fbops->fb_sync = NULL;
54777+ pax_open_kernel();
54778+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54779+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54780+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54781+ *(void **)&info->fbops->fb_sync = NULL;
54782+ pax_close_kernel();
54783 info->pixmap.scan_align = 1;
54784 info->flags |= FBINFO_HWACCEL_DISABLED;
54785 info->flags &= ~FBINFO_READS_FAST;
54786@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54787 info->pixmap.size = 8 * 1024;
54788 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54789
54790- if (!hwcur)
54791- info->fbops->fb_cursor = NULL;
54792+ if (!hwcur) {
54793+ pax_open_kernel();
54794+ *(void **)&info->fbops->fb_cursor = NULL;
54795+ pax_close_kernel();
54796+ }
54797
54798 info->var.accel_flags = (!noaccel);
54799
54800diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54801index 2412a0d..294215b 100644
54802--- a/drivers/video/fbdev/omap2/dss/display.c
54803+++ b/drivers/video/fbdev/omap2/dss/display.c
54804@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54805 if (dssdev->name == NULL)
54806 dssdev->name = dssdev->alias;
54807
54808+ pax_open_kernel();
54809 if (drv && drv->get_resolution == NULL)
54810- drv->get_resolution = omapdss_default_get_resolution;
54811+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54812 if (drv && drv->get_recommended_bpp == NULL)
54813- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54814+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54815 if (drv && drv->get_timings == NULL)
54816- drv->get_timings = omapdss_default_get_timings;
54817+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54818+ pax_close_kernel();
54819
54820 mutex_lock(&panel_list_mutex);
54821 list_add_tail(&dssdev->panel_list, &panel_list);
54822diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54823index 83433cb..71e9b98 100644
54824--- a/drivers/video/fbdev/s1d13xxxfb.c
54825+++ b/drivers/video/fbdev/s1d13xxxfb.c
54826@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54827
54828 switch(prod_id) {
54829 case S1D13506_PROD_ID: /* activate acceleration */
54830- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54831- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54832+ pax_open_kernel();
54833+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54834+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54835+ pax_close_kernel();
54836 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54837 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54838 break;
54839diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54840index d3013cd..95b8285 100644
54841--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
54842+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
54843@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
54844 }
54845
54846 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
54847- lcdc_sys_write_index,
54848- lcdc_sys_write_data,
54849- lcdc_sys_read_data,
54850+ .write_index = lcdc_sys_write_index,
54851+ .write_data = lcdc_sys_write_data,
54852+ .read_data = lcdc_sys_read_data,
54853 };
54854
54855 static int sh_mobile_lcdc_sginit(struct fb_info *info,
54856diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
54857index 9279e5f..d5f5276 100644
54858--- a/drivers/video/fbdev/smscufx.c
54859+++ b/drivers/video/fbdev/smscufx.c
54860@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54861 fb_deferred_io_cleanup(info);
54862 kfree(info->fbdefio);
54863 info->fbdefio = NULL;
54864- info->fbops->fb_mmap = ufx_ops_mmap;
54865+ pax_open_kernel();
54866+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54867+ pax_close_kernel();
54868 }
54869
54870 pr_debug("released /dev/fb%d user=%d count=%d",
54871diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
54872index ff2b873..626a8d5 100644
54873--- a/drivers/video/fbdev/udlfb.c
54874+++ b/drivers/video/fbdev/udlfb.c
54875@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54876 dlfb_urb_completion(urb);
54877
54878 error:
54879- atomic_add(bytes_sent, &dev->bytes_sent);
54880- atomic_add(bytes_identical, &dev->bytes_identical);
54881- atomic_add(width*height*2, &dev->bytes_rendered);
54882+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54883+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54884+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54885 end_cycles = get_cycles();
54886- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54887+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54888 >> 10)), /* Kcycles */
54889 &dev->cpu_kcycles_used);
54890
54891@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54892 dlfb_urb_completion(urb);
54893
54894 error:
54895- atomic_add(bytes_sent, &dev->bytes_sent);
54896- atomic_add(bytes_identical, &dev->bytes_identical);
54897- atomic_add(bytes_rendered, &dev->bytes_rendered);
54898+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54899+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54900+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54901 end_cycles = get_cycles();
54902- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54903+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54904 >> 10)), /* Kcycles */
54905 &dev->cpu_kcycles_used);
54906 }
54907@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54908 fb_deferred_io_cleanup(info);
54909 kfree(info->fbdefio);
54910 info->fbdefio = NULL;
54911- info->fbops->fb_mmap = dlfb_ops_mmap;
54912+ pax_open_kernel();
54913+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54914+ pax_close_kernel();
54915 }
54916
54917 pr_warn("released /dev/fb%d user=%d count=%d\n",
54918@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
54919 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54920 struct dlfb_data *dev = fb_info->par;
54921 return snprintf(buf, PAGE_SIZE, "%u\n",
54922- atomic_read(&dev->bytes_rendered));
54923+ atomic_read_unchecked(&dev->bytes_rendered));
54924 }
54925
54926 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54927@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54928 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54929 struct dlfb_data *dev = fb_info->par;
54930 return snprintf(buf, PAGE_SIZE, "%u\n",
54931- atomic_read(&dev->bytes_identical));
54932+ atomic_read_unchecked(&dev->bytes_identical));
54933 }
54934
54935 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54936@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54937 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54938 struct dlfb_data *dev = fb_info->par;
54939 return snprintf(buf, PAGE_SIZE, "%u\n",
54940- atomic_read(&dev->bytes_sent));
54941+ atomic_read_unchecked(&dev->bytes_sent));
54942 }
54943
54944 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54945@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54946 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54947 struct dlfb_data *dev = fb_info->par;
54948 return snprintf(buf, PAGE_SIZE, "%u\n",
54949- atomic_read(&dev->cpu_kcycles_used));
54950+ atomic_read_unchecked(&dev->cpu_kcycles_used));
54951 }
54952
54953 static ssize_t edid_show(
54954@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
54955 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54956 struct dlfb_data *dev = fb_info->par;
54957
54958- atomic_set(&dev->bytes_rendered, 0);
54959- atomic_set(&dev->bytes_identical, 0);
54960- atomic_set(&dev->bytes_sent, 0);
54961- atomic_set(&dev->cpu_kcycles_used, 0);
54962+ atomic_set_unchecked(&dev->bytes_rendered, 0);
54963+ atomic_set_unchecked(&dev->bytes_identical, 0);
54964+ atomic_set_unchecked(&dev->bytes_sent, 0);
54965+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
54966
54967 return count;
54968 }
54969diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
54970index d32d1c4..46722e6 100644
54971--- a/drivers/video/fbdev/uvesafb.c
54972+++ b/drivers/video/fbdev/uvesafb.c
54973@@ -19,6 +19,7 @@
54974 #include <linux/io.h>
54975 #include <linux/mutex.h>
54976 #include <linux/slab.h>
54977+#include <linux/moduleloader.h>
54978 #include <video/edid.h>
54979 #include <video/uvesafb.h>
54980 #ifdef CONFIG_X86
54981@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
54982 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
54983 par->pmi_setpal = par->ypan = 0;
54984 } else {
54985+
54986+#ifdef CONFIG_PAX_KERNEXEC
54987+#ifdef CONFIG_MODULES
54988+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
54989+#endif
54990+ if (!par->pmi_code) {
54991+ par->pmi_setpal = par->ypan = 0;
54992+ return 0;
54993+ }
54994+#endif
54995+
54996 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
54997 + task->t.regs.edi);
54998+
54999+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55000+ pax_open_kernel();
55001+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55002+ pax_close_kernel();
55003+
55004+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55005+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55006+#else
55007 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55008 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55009+#endif
55010+
55011 printk(KERN_INFO "uvesafb: protected mode interface info at "
55012 "%04x:%04x\n",
55013 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55014@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55015 par->ypan = ypan;
55016
55017 if (par->pmi_setpal || par->ypan) {
55018+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55019 if (__supported_pte_mask & _PAGE_NX) {
55020 par->pmi_setpal = par->ypan = 0;
55021 printk(KERN_WARNING "uvesafb: NX protection is active, "
55022 "better not use the PMI.\n");
55023- } else {
55024+ } else
55025+#endif
55026 uvesafb_vbe_getpmi(task, par);
55027- }
55028 }
55029 #else
55030 /* The protected mode interface is not available on non-x86. */
55031@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55032 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55033
55034 /* Disable blanking if the user requested so. */
55035- if (!blank)
55036- info->fbops->fb_blank = NULL;
55037+ if (!blank) {
55038+ pax_open_kernel();
55039+ *(void **)&info->fbops->fb_blank = NULL;
55040+ pax_close_kernel();
55041+ }
55042
55043 /*
55044 * Find out how much IO memory is required for the mode with
55045@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55046 info->flags = FBINFO_FLAG_DEFAULT |
55047 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55048
55049- if (!par->ypan)
55050- info->fbops->fb_pan_display = NULL;
55051+ if (!par->ypan) {
55052+ pax_open_kernel();
55053+ *(void **)&info->fbops->fb_pan_display = NULL;
55054+ pax_close_kernel();
55055+ }
55056 }
55057
55058 static void uvesafb_init_mtrr(struct fb_info *info)
55059@@ -1786,6 +1816,11 @@ out_mode:
55060 out:
55061 kfree(par->vbe_modes);
55062
55063+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55064+ if (par->pmi_code)
55065+ module_memfree_exec(par->pmi_code);
55066+#endif
55067+
55068 framebuffer_release(info);
55069 return err;
55070 }
55071@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55072 kfree(par->vbe_state_orig);
55073 kfree(par->vbe_state_saved);
55074
55075+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55076+ if (par->pmi_code)
55077+ module_memfree_exec(par->pmi_code);
55078+#endif
55079+
55080 framebuffer_release(info);
55081 }
55082 return 0;
55083diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55084index d79a0ac..2d0c3d4 100644
55085--- a/drivers/video/fbdev/vesafb.c
55086+++ b/drivers/video/fbdev/vesafb.c
55087@@ -9,6 +9,7 @@
55088 */
55089
55090 #include <linux/module.h>
55091+#include <linux/moduleloader.h>
55092 #include <linux/kernel.h>
55093 #include <linux/errno.h>
55094 #include <linux/string.h>
55095@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55096 static int vram_total; /* Set total amount of memory */
55097 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55098 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55099-static void (*pmi_start)(void) __read_mostly;
55100-static void (*pmi_pal) (void) __read_mostly;
55101+static void (*pmi_start)(void) __read_only;
55102+static void (*pmi_pal) (void) __read_only;
55103 static int depth __read_mostly;
55104 static int vga_compat __read_mostly;
55105 /* --------------------------------------------------------------------- */
55106@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55107 unsigned int size_remap;
55108 unsigned int size_total;
55109 char *option = NULL;
55110+ void *pmi_code = NULL;
55111
55112 /* ignore error return of fb_get_options */
55113 fb_get_options("vesafb", &option);
55114@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55115 size_remap = size_total;
55116 vesafb_fix.smem_len = size_remap;
55117
55118-#ifndef __i386__
55119- screen_info.vesapm_seg = 0;
55120-#endif
55121-
55122 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55123 printk(KERN_WARNING
55124 "vesafb: cannot reserve video memory at 0x%lx\n",
55125@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55126 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55127 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55128
55129+#ifdef __i386__
55130+
55131+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55132+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55133+ if (!pmi_code)
55134+#elif !defined(CONFIG_PAX_KERNEXEC)
55135+ if (0)
55136+#endif
55137+
55138+#endif
55139+ screen_info.vesapm_seg = 0;
55140+
55141 if (screen_info.vesapm_seg) {
55142- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55143- screen_info.vesapm_seg,screen_info.vesapm_off);
55144+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55145+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55146 }
55147
55148 if (screen_info.vesapm_seg < 0xc000)
55149@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55150
55151 if (ypan || pmi_setpal) {
55152 unsigned short *pmi_base;
55153+
55154 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55155- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55156- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55157+
55158+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55159+ pax_open_kernel();
55160+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55161+#else
55162+ pmi_code = pmi_base;
55163+#endif
55164+
55165+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55166+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55167+
55168+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55169+ pmi_start = ktva_ktla(pmi_start);
55170+ pmi_pal = ktva_ktla(pmi_pal);
55171+ pax_close_kernel();
55172+#endif
55173+
55174 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55175 if (pmi_base[3]) {
55176 printk(KERN_INFO "vesafb: pmi: ports = ");
55177@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55178 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55179 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55180
55181- if (!ypan)
55182- info->fbops->fb_pan_display = NULL;
55183+ if (!ypan) {
55184+ pax_open_kernel();
55185+ *(void **)&info->fbops->fb_pan_display = NULL;
55186+ pax_close_kernel();
55187+ }
55188
55189 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55190 err = -ENOMEM;
55191@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55192 fb_info(info, "%s frame buffer device\n", info->fix.id);
55193 return 0;
55194 err:
55195+
55196+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55197+ module_memfree_exec(pmi_code);
55198+#endif
55199+
55200 if (info->screen_base)
55201 iounmap(info->screen_base);
55202 framebuffer_release(info);
55203diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55204index 88714ae..16c2e11 100644
55205--- a/drivers/video/fbdev/via/via_clock.h
55206+++ b/drivers/video/fbdev/via/via_clock.h
55207@@ -56,7 +56,7 @@ struct via_clock {
55208
55209 void (*set_engine_pll_state)(u8 state);
55210 void (*set_engine_pll)(struct via_pll_config config);
55211-};
55212+} __no_const;
55213
55214
55215 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55216diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55217index 3c14e43..2630570 100644
55218--- a/drivers/video/logo/logo_linux_clut224.ppm
55219+++ b/drivers/video/logo/logo_linux_clut224.ppm
55220@@ -2,1603 +2,1123 @@ P3
55221 # Standard 224-color Linux logo
55222 80 80
55223 255
55224- 0 0 0 0 0 0 0 0 0 0 0 0
55225- 0 0 0 0 0 0 0 0 0 0 0 0
55226- 0 0 0 0 0 0 0 0 0 0 0 0
55227- 0 0 0 0 0 0 0 0 0 0 0 0
55228- 0 0 0 0 0 0 0 0 0 0 0 0
55229- 0 0 0 0 0 0 0 0 0 0 0 0
55230- 0 0 0 0 0 0 0 0 0 0 0 0
55231- 0 0 0 0 0 0 0 0 0 0 0 0
55232- 0 0 0 0 0 0 0 0 0 0 0 0
55233- 6 6 6 6 6 6 10 10 10 10 10 10
55234- 10 10 10 6 6 6 6 6 6 6 6 6
55235- 0 0 0 0 0 0 0 0 0 0 0 0
55236- 0 0 0 0 0 0 0 0 0 0 0 0
55237- 0 0 0 0 0 0 0 0 0 0 0 0
55238- 0 0 0 0 0 0 0 0 0 0 0 0
55239- 0 0 0 0 0 0 0 0 0 0 0 0
55240- 0 0 0 0 0 0 0 0 0 0 0 0
55241- 0 0 0 0 0 0 0 0 0 0 0 0
55242- 0 0 0 0 0 0 0 0 0 0 0 0
55243- 0 0 0 0 0 0 0 0 0 0 0 0
55244- 0 0 0 0 0 0 0 0 0 0 0 0
55245- 0 0 0 0 0 0 0 0 0 0 0 0
55246- 0 0 0 0 0 0 0 0 0 0 0 0
55247- 0 0 0 0 0 0 0 0 0 0 0 0
55248- 0 0 0 0 0 0 0 0 0 0 0 0
55249- 0 0 0 0 0 0 0 0 0 0 0 0
55250- 0 0 0 0 0 0 0 0 0 0 0 0
55251- 0 0 0 0 0 0 0 0 0 0 0 0
55252- 0 0 0 6 6 6 10 10 10 14 14 14
55253- 22 22 22 26 26 26 30 30 30 34 34 34
55254- 30 30 30 30 30 30 26 26 26 18 18 18
55255- 14 14 14 10 10 10 6 6 6 0 0 0
55256- 0 0 0 0 0 0 0 0 0 0 0 0
55257- 0 0 0 0 0 0 0 0 0 0 0 0
55258- 0 0 0 0 0 0 0 0 0 0 0 0
55259- 0 0 0 0 0 0 0 0 0 0 0 0
55260- 0 0 0 0 0 0 0 0 0 0 0 0
55261- 0 0 0 0 0 0 0 0 0 0 0 0
55262- 0 0 0 0 0 0 0 0 0 0 0 0
55263- 0 0 0 0 0 0 0 0 0 0 0 0
55264- 0 0 0 0 0 0 0 0 0 0 0 0
55265- 0 0 0 0 0 1 0 0 1 0 0 0
55266- 0 0 0 0 0 0 0 0 0 0 0 0
55267- 0 0 0 0 0 0 0 0 0 0 0 0
55268- 0 0 0 0 0 0 0 0 0 0 0 0
55269- 0 0 0 0 0 0 0 0 0 0 0 0
55270- 0 0 0 0 0 0 0 0 0 0 0 0
55271- 0 0 0 0 0 0 0 0 0 0 0 0
55272- 6 6 6 14 14 14 26 26 26 42 42 42
55273- 54 54 54 66 66 66 78 78 78 78 78 78
55274- 78 78 78 74 74 74 66 66 66 54 54 54
55275- 42 42 42 26 26 26 18 18 18 10 10 10
55276- 6 6 6 0 0 0 0 0 0 0 0 0
55277- 0 0 0 0 0 0 0 0 0 0 0 0
55278- 0 0 0 0 0 0 0 0 0 0 0 0
55279- 0 0 0 0 0 0 0 0 0 0 0 0
55280- 0 0 0 0 0 0 0 0 0 0 0 0
55281- 0 0 0 0 0 0 0 0 0 0 0 0
55282- 0 0 0 0 0 0 0 0 0 0 0 0
55283- 0 0 0 0 0 0 0 0 0 0 0 0
55284- 0 0 0 0 0 0 0 0 0 0 0 0
55285- 0 0 1 0 0 0 0 0 0 0 0 0
55286- 0 0 0 0 0 0 0 0 0 0 0 0
55287- 0 0 0 0 0 0 0 0 0 0 0 0
55288- 0 0 0 0 0 0 0 0 0 0 0 0
55289- 0 0 0 0 0 0 0 0 0 0 0 0
55290- 0 0 0 0 0 0 0 0 0 0 0 0
55291- 0 0 0 0 0 0 0 0 0 10 10 10
55292- 22 22 22 42 42 42 66 66 66 86 86 86
55293- 66 66 66 38 38 38 38 38 38 22 22 22
55294- 26 26 26 34 34 34 54 54 54 66 66 66
55295- 86 86 86 70 70 70 46 46 46 26 26 26
55296- 14 14 14 6 6 6 0 0 0 0 0 0
55297- 0 0 0 0 0 0 0 0 0 0 0 0
55298- 0 0 0 0 0 0 0 0 0 0 0 0
55299- 0 0 0 0 0 0 0 0 0 0 0 0
55300- 0 0 0 0 0 0 0 0 0 0 0 0
55301- 0 0 0 0 0 0 0 0 0 0 0 0
55302- 0 0 0 0 0 0 0 0 0 0 0 0
55303- 0 0 0 0 0 0 0 0 0 0 0 0
55304- 0 0 0 0 0 0 0 0 0 0 0 0
55305- 0 0 1 0 0 1 0 0 1 0 0 0
55306- 0 0 0 0 0 0 0 0 0 0 0 0
55307- 0 0 0 0 0 0 0 0 0 0 0 0
55308- 0 0 0 0 0 0 0 0 0 0 0 0
55309- 0 0 0 0 0 0 0 0 0 0 0 0
55310- 0 0 0 0 0 0 0 0 0 0 0 0
55311- 0 0 0 0 0 0 10 10 10 26 26 26
55312- 50 50 50 82 82 82 58 58 58 6 6 6
55313- 2 2 6 2 2 6 2 2 6 2 2 6
55314- 2 2 6 2 2 6 2 2 6 2 2 6
55315- 6 6 6 54 54 54 86 86 86 66 66 66
55316- 38 38 38 18 18 18 6 6 6 0 0 0
55317- 0 0 0 0 0 0 0 0 0 0 0 0
55318- 0 0 0 0 0 0 0 0 0 0 0 0
55319- 0 0 0 0 0 0 0 0 0 0 0 0
55320- 0 0 0 0 0 0 0 0 0 0 0 0
55321- 0 0 0 0 0 0 0 0 0 0 0 0
55322- 0 0 0 0 0 0 0 0 0 0 0 0
55323- 0 0 0 0 0 0 0 0 0 0 0 0
55324- 0 0 0 0 0 0 0 0 0 0 0 0
55325- 0 0 0 0 0 0 0 0 0 0 0 0
55326- 0 0 0 0 0 0 0 0 0 0 0 0
55327- 0 0 0 0 0 0 0 0 0 0 0 0
55328- 0 0 0 0 0 0 0 0 0 0 0 0
55329- 0 0 0 0 0 0 0 0 0 0 0 0
55330- 0 0 0 0 0 0 0 0 0 0 0 0
55331- 0 0 0 6 6 6 22 22 22 50 50 50
55332- 78 78 78 34 34 34 2 2 6 2 2 6
55333- 2 2 6 2 2 6 2 2 6 2 2 6
55334- 2 2 6 2 2 6 2 2 6 2 2 6
55335- 2 2 6 2 2 6 6 6 6 70 70 70
55336- 78 78 78 46 46 46 22 22 22 6 6 6
55337- 0 0 0 0 0 0 0 0 0 0 0 0
55338- 0 0 0 0 0 0 0 0 0 0 0 0
55339- 0 0 0 0 0 0 0 0 0 0 0 0
55340- 0 0 0 0 0 0 0 0 0 0 0 0
55341- 0 0 0 0 0 0 0 0 0 0 0 0
55342- 0 0 0 0 0 0 0 0 0 0 0 0
55343- 0 0 0 0 0 0 0 0 0 0 0 0
55344- 0 0 0 0 0 0 0 0 0 0 0 0
55345- 0 0 1 0 0 1 0 0 1 0 0 0
55346- 0 0 0 0 0 0 0 0 0 0 0 0
55347- 0 0 0 0 0 0 0 0 0 0 0 0
55348- 0 0 0 0 0 0 0 0 0 0 0 0
55349- 0 0 0 0 0 0 0 0 0 0 0 0
55350- 0 0 0 0 0 0 0 0 0 0 0 0
55351- 6 6 6 18 18 18 42 42 42 82 82 82
55352- 26 26 26 2 2 6 2 2 6 2 2 6
55353- 2 2 6 2 2 6 2 2 6 2 2 6
55354- 2 2 6 2 2 6 2 2 6 14 14 14
55355- 46 46 46 34 34 34 6 6 6 2 2 6
55356- 42 42 42 78 78 78 42 42 42 18 18 18
55357- 6 6 6 0 0 0 0 0 0 0 0 0
55358- 0 0 0 0 0 0 0 0 0 0 0 0
55359- 0 0 0 0 0 0 0 0 0 0 0 0
55360- 0 0 0 0 0 0 0 0 0 0 0 0
55361- 0 0 0 0 0 0 0 0 0 0 0 0
55362- 0 0 0 0 0 0 0 0 0 0 0 0
55363- 0 0 0 0 0 0 0 0 0 0 0 0
55364- 0 0 0 0 0 0 0 0 0 0 0 0
55365- 0 0 1 0 0 0 0 0 1 0 0 0
55366- 0 0 0 0 0 0 0 0 0 0 0 0
55367- 0 0 0 0 0 0 0 0 0 0 0 0
55368- 0 0 0 0 0 0 0 0 0 0 0 0
55369- 0 0 0 0 0 0 0 0 0 0 0 0
55370- 0 0 0 0 0 0 0 0 0 0 0 0
55371- 10 10 10 30 30 30 66 66 66 58 58 58
55372- 2 2 6 2 2 6 2 2 6 2 2 6
55373- 2 2 6 2 2 6 2 2 6 2 2 6
55374- 2 2 6 2 2 6 2 2 6 26 26 26
55375- 86 86 86 101 101 101 46 46 46 10 10 10
55376- 2 2 6 58 58 58 70 70 70 34 34 34
55377- 10 10 10 0 0 0 0 0 0 0 0 0
55378- 0 0 0 0 0 0 0 0 0 0 0 0
55379- 0 0 0 0 0 0 0 0 0 0 0 0
55380- 0 0 0 0 0 0 0 0 0 0 0 0
55381- 0 0 0 0 0 0 0 0 0 0 0 0
55382- 0 0 0 0 0 0 0 0 0 0 0 0
55383- 0 0 0 0 0 0 0 0 0 0 0 0
55384- 0 0 0 0 0 0 0 0 0 0 0 0
55385- 0 0 1 0 0 1 0 0 1 0 0 0
55386- 0 0 0 0 0 0 0 0 0 0 0 0
55387- 0 0 0 0 0 0 0 0 0 0 0 0
55388- 0 0 0 0 0 0 0 0 0 0 0 0
55389- 0 0 0 0 0 0 0 0 0 0 0 0
55390- 0 0 0 0 0 0 0 0 0 0 0 0
55391- 14 14 14 42 42 42 86 86 86 10 10 10
55392- 2 2 6 2 2 6 2 2 6 2 2 6
55393- 2 2 6 2 2 6 2 2 6 2 2 6
55394- 2 2 6 2 2 6 2 2 6 30 30 30
55395- 94 94 94 94 94 94 58 58 58 26 26 26
55396- 2 2 6 6 6 6 78 78 78 54 54 54
55397- 22 22 22 6 6 6 0 0 0 0 0 0
55398- 0 0 0 0 0 0 0 0 0 0 0 0
55399- 0 0 0 0 0 0 0 0 0 0 0 0
55400- 0 0 0 0 0 0 0 0 0 0 0 0
55401- 0 0 0 0 0 0 0 0 0 0 0 0
55402- 0 0 0 0 0 0 0 0 0 0 0 0
55403- 0 0 0 0 0 0 0 0 0 0 0 0
55404- 0 0 0 0 0 0 0 0 0 0 0 0
55405- 0 0 0 0 0 0 0 0 0 0 0 0
55406- 0 0 0 0 0 0 0 0 0 0 0 0
55407- 0 0 0 0 0 0 0 0 0 0 0 0
55408- 0 0 0 0 0 0 0 0 0 0 0 0
55409- 0 0 0 0 0 0 0 0 0 0 0 0
55410- 0 0 0 0 0 0 0 0 0 6 6 6
55411- 22 22 22 62 62 62 62 62 62 2 2 6
55412- 2 2 6 2 2 6 2 2 6 2 2 6
55413- 2 2 6 2 2 6 2 2 6 2 2 6
55414- 2 2 6 2 2 6 2 2 6 26 26 26
55415- 54 54 54 38 38 38 18 18 18 10 10 10
55416- 2 2 6 2 2 6 34 34 34 82 82 82
55417- 38 38 38 14 14 14 0 0 0 0 0 0
55418- 0 0 0 0 0 0 0 0 0 0 0 0
55419- 0 0 0 0 0 0 0 0 0 0 0 0
55420- 0 0 0 0 0 0 0 0 0 0 0 0
55421- 0 0 0 0 0 0 0 0 0 0 0 0
55422- 0 0 0 0 0 0 0 0 0 0 0 0
55423- 0 0 0 0 0 0 0 0 0 0 0 0
55424- 0 0 0 0 0 0 0 0 0 0 0 0
55425- 0 0 0 0 0 1 0 0 1 0 0 0
55426- 0 0 0 0 0 0 0 0 0 0 0 0
55427- 0 0 0 0 0 0 0 0 0 0 0 0
55428- 0 0 0 0 0 0 0 0 0 0 0 0
55429- 0 0 0 0 0 0 0 0 0 0 0 0
55430- 0 0 0 0 0 0 0 0 0 6 6 6
55431- 30 30 30 78 78 78 30 30 30 2 2 6
55432- 2 2 6 2 2 6 2 2 6 2 2 6
55433- 2 2 6 2 2 6 2 2 6 2 2 6
55434- 2 2 6 2 2 6 2 2 6 10 10 10
55435- 10 10 10 2 2 6 2 2 6 2 2 6
55436- 2 2 6 2 2 6 2 2 6 78 78 78
55437- 50 50 50 18 18 18 6 6 6 0 0 0
55438- 0 0 0 0 0 0 0 0 0 0 0 0
55439- 0 0 0 0 0 0 0 0 0 0 0 0
55440- 0 0 0 0 0 0 0 0 0 0 0 0
55441- 0 0 0 0 0 0 0 0 0 0 0 0
55442- 0 0 0 0 0 0 0 0 0 0 0 0
55443- 0 0 0 0 0 0 0 0 0 0 0 0
55444- 0 0 0 0 0 0 0 0 0 0 0 0
55445- 0 0 1 0 0 0 0 0 0 0 0 0
55446- 0 0 0 0 0 0 0 0 0 0 0 0
55447- 0 0 0 0 0 0 0 0 0 0 0 0
55448- 0 0 0 0 0 0 0 0 0 0 0 0
55449- 0 0 0 0 0 0 0 0 0 0 0 0
55450- 0 0 0 0 0 0 0 0 0 10 10 10
55451- 38 38 38 86 86 86 14 14 14 2 2 6
55452- 2 2 6 2 2 6 2 2 6 2 2 6
55453- 2 2 6 2 2 6 2 2 6 2 2 6
55454- 2 2 6 2 2 6 2 2 6 2 2 6
55455- 2 2 6 2 2 6 2 2 6 2 2 6
55456- 2 2 6 2 2 6 2 2 6 54 54 54
55457- 66 66 66 26 26 26 6 6 6 0 0 0
55458- 0 0 0 0 0 0 0 0 0 0 0 0
55459- 0 0 0 0 0 0 0 0 0 0 0 0
55460- 0 0 0 0 0 0 0 0 0 0 0 0
55461- 0 0 0 0 0 0 0 0 0 0 0 0
55462- 0 0 0 0 0 0 0 0 0 0 0 0
55463- 0 0 0 0 0 0 0 0 0 0 0 0
55464- 0 0 0 0 0 0 0 0 0 0 0 0
55465- 0 0 0 0 0 1 0 0 1 0 0 0
55466- 0 0 0 0 0 0 0 0 0 0 0 0
55467- 0 0 0 0 0 0 0 0 0 0 0 0
55468- 0 0 0 0 0 0 0 0 0 0 0 0
55469- 0 0 0 0 0 0 0 0 0 0 0 0
55470- 0 0 0 0 0 0 0 0 0 14 14 14
55471- 42 42 42 82 82 82 2 2 6 2 2 6
55472- 2 2 6 6 6 6 10 10 10 2 2 6
55473- 2 2 6 2 2 6 2 2 6 2 2 6
55474- 2 2 6 2 2 6 2 2 6 6 6 6
55475- 14 14 14 10 10 10 2 2 6 2 2 6
55476- 2 2 6 2 2 6 2 2 6 18 18 18
55477- 82 82 82 34 34 34 10 10 10 0 0 0
55478- 0 0 0 0 0 0 0 0 0 0 0 0
55479- 0 0 0 0 0 0 0 0 0 0 0 0
55480- 0 0 0 0 0 0 0 0 0 0 0 0
55481- 0 0 0 0 0 0 0 0 0 0 0 0
55482- 0 0 0 0 0 0 0 0 0 0 0 0
55483- 0 0 0 0 0 0 0 0 0 0 0 0
55484- 0 0 0 0 0 0 0 0 0 0 0 0
55485- 0 0 1 0 0 0 0 0 0 0 0 0
55486- 0 0 0 0 0 0 0 0 0 0 0 0
55487- 0 0 0 0 0 0 0 0 0 0 0 0
55488- 0 0 0 0 0 0 0 0 0 0 0 0
55489- 0 0 0 0 0 0 0 0 0 0 0 0
55490- 0 0 0 0 0 0 0 0 0 14 14 14
55491- 46 46 46 86 86 86 2 2 6 2 2 6
55492- 6 6 6 6 6 6 22 22 22 34 34 34
55493- 6 6 6 2 2 6 2 2 6 2 2 6
55494- 2 2 6 2 2 6 18 18 18 34 34 34
55495- 10 10 10 50 50 50 22 22 22 2 2 6
55496- 2 2 6 2 2 6 2 2 6 10 10 10
55497- 86 86 86 42 42 42 14 14 14 0 0 0
55498- 0 0 0 0 0 0 0 0 0 0 0 0
55499- 0 0 0 0 0 0 0 0 0 0 0 0
55500- 0 0 0 0 0 0 0 0 0 0 0 0
55501- 0 0 0 0 0 0 0 0 0 0 0 0
55502- 0 0 0 0 0 0 0 0 0 0 0 0
55503- 0 0 0 0 0 0 0 0 0 0 0 0
55504- 0 0 0 0 0 0 0 0 0 0 0 0
55505- 0 0 1 0 0 1 0 0 1 0 0 0
55506- 0 0 0 0 0 0 0 0 0 0 0 0
55507- 0 0 0 0 0 0 0 0 0 0 0 0
55508- 0 0 0 0 0 0 0 0 0 0 0 0
55509- 0 0 0 0 0 0 0 0 0 0 0 0
55510- 0 0 0 0 0 0 0 0 0 14 14 14
55511- 46 46 46 86 86 86 2 2 6 2 2 6
55512- 38 38 38 116 116 116 94 94 94 22 22 22
55513- 22 22 22 2 2 6 2 2 6 2 2 6
55514- 14 14 14 86 86 86 138 138 138 162 162 162
55515-154 154 154 38 38 38 26 26 26 6 6 6
55516- 2 2 6 2 2 6 2 2 6 2 2 6
55517- 86 86 86 46 46 46 14 14 14 0 0 0
55518- 0 0 0 0 0 0 0 0 0 0 0 0
55519- 0 0 0 0 0 0 0 0 0 0 0 0
55520- 0 0 0 0 0 0 0 0 0 0 0 0
55521- 0 0 0 0 0 0 0 0 0 0 0 0
55522- 0 0 0 0 0 0 0 0 0 0 0 0
55523- 0 0 0 0 0 0 0 0 0 0 0 0
55524- 0 0 0 0 0 0 0 0 0 0 0 0
55525- 0 0 0 0 0 0 0 0 0 0 0 0
55526- 0 0 0 0 0 0 0 0 0 0 0 0
55527- 0 0 0 0 0 0 0 0 0 0 0 0
55528- 0 0 0 0 0 0 0 0 0 0 0 0
55529- 0 0 0 0 0 0 0 0 0 0 0 0
55530- 0 0 0 0 0 0 0 0 0 14 14 14
55531- 46 46 46 86 86 86 2 2 6 14 14 14
55532-134 134 134 198 198 198 195 195 195 116 116 116
55533- 10 10 10 2 2 6 2 2 6 6 6 6
55534-101 98 89 187 187 187 210 210 210 218 218 218
55535-214 214 214 134 134 134 14 14 14 6 6 6
55536- 2 2 6 2 2 6 2 2 6 2 2 6
55537- 86 86 86 50 50 50 18 18 18 6 6 6
55538- 0 0 0 0 0 0 0 0 0 0 0 0
55539- 0 0 0 0 0 0 0 0 0 0 0 0
55540- 0 0 0 0 0 0 0 0 0 0 0 0
55541- 0 0 0 0 0 0 0 0 0 0 0 0
55542- 0 0 0 0 0 0 0 0 0 0 0 0
55543- 0 0 0 0 0 0 0 0 0 0 0 0
55544- 0 0 0 0 0 0 0 0 1 0 0 0
55545- 0 0 1 0 0 1 0 0 1 0 0 0
55546- 0 0 0 0 0 0 0 0 0 0 0 0
55547- 0 0 0 0 0 0 0 0 0 0 0 0
55548- 0 0 0 0 0 0 0 0 0 0 0 0
55549- 0 0 0 0 0 0 0 0 0 0 0 0
55550- 0 0 0 0 0 0 0 0 0 14 14 14
55551- 46 46 46 86 86 86 2 2 6 54 54 54
55552-218 218 218 195 195 195 226 226 226 246 246 246
55553- 58 58 58 2 2 6 2 2 6 30 30 30
55554-210 210 210 253 253 253 174 174 174 123 123 123
55555-221 221 221 234 234 234 74 74 74 2 2 6
55556- 2 2 6 2 2 6 2 2 6 2 2 6
55557- 70 70 70 58 58 58 22 22 22 6 6 6
55558- 0 0 0 0 0 0 0 0 0 0 0 0
55559- 0 0 0 0 0 0 0 0 0 0 0 0
55560- 0 0 0 0 0 0 0 0 0 0 0 0
55561- 0 0 0 0 0 0 0 0 0 0 0 0
55562- 0 0 0 0 0 0 0 0 0 0 0 0
55563- 0 0 0 0 0 0 0 0 0 0 0 0
55564- 0 0 0 0 0 0 0 0 0 0 0 0
55565- 0 0 0 0 0 0 0 0 0 0 0 0
55566- 0 0 0 0 0 0 0 0 0 0 0 0
55567- 0 0 0 0 0 0 0 0 0 0 0 0
55568- 0 0 0 0 0 0 0 0 0 0 0 0
55569- 0 0 0 0 0 0 0 0 0 0 0 0
55570- 0 0 0 0 0 0 0 0 0 14 14 14
55571- 46 46 46 82 82 82 2 2 6 106 106 106
55572-170 170 170 26 26 26 86 86 86 226 226 226
55573-123 123 123 10 10 10 14 14 14 46 46 46
55574-231 231 231 190 190 190 6 6 6 70 70 70
55575- 90 90 90 238 238 238 158 158 158 2 2 6
55576- 2 2 6 2 2 6 2 2 6 2 2 6
55577- 70 70 70 58 58 58 22 22 22 6 6 6
55578- 0 0 0 0 0 0 0 0 0 0 0 0
55579- 0 0 0 0 0 0 0 0 0 0 0 0
55580- 0 0 0 0 0 0 0 0 0 0 0 0
55581- 0 0 0 0 0 0 0 0 0 0 0 0
55582- 0 0 0 0 0 0 0 0 0 0 0 0
55583- 0 0 0 0 0 0 0 0 0 0 0 0
55584- 0 0 0 0 0 0 0 0 1 0 0 0
55585- 0 0 1 0 0 1 0 0 1 0 0 0
55586- 0 0 0 0 0 0 0 0 0 0 0 0
55587- 0 0 0 0 0 0 0 0 0 0 0 0
55588- 0 0 0 0 0 0 0 0 0 0 0 0
55589- 0 0 0 0 0 0 0 0 0 0 0 0
55590- 0 0 0 0 0 0 0 0 0 14 14 14
55591- 42 42 42 86 86 86 6 6 6 116 116 116
55592-106 106 106 6 6 6 70 70 70 149 149 149
55593-128 128 128 18 18 18 38 38 38 54 54 54
55594-221 221 221 106 106 106 2 2 6 14 14 14
55595- 46 46 46 190 190 190 198 198 198 2 2 6
55596- 2 2 6 2 2 6 2 2 6 2 2 6
55597- 74 74 74 62 62 62 22 22 22 6 6 6
55598- 0 0 0 0 0 0 0 0 0 0 0 0
55599- 0 0 0 0 0 0 0 0 0 0 0 0
55600- 0 0 0 0 0 0 0 0 0 0 0 0
55601- 0 0 0 0 0 0 0 0 0 0 0 0
55602- 0 0 0 0 0 0 0 0 0 0 0 0
55603- 0 0 0 0 0 0 0 0 0 0 0 0
55604- 0 0 0 0 0 0 0 0 1 0 0 0
55605- 0 0 1 0 0 0 0 0 1 0 0 0
55606- 0 0 0 0 0 0 0 0 0 0 0 0
55607- 0 0 0 0 0 0 0 0 0 0 0 0
55608- 0 0 0 0 0 0 0 0 0 0 0 0
55609- 0 0 0 0 0 0 0 0 0 0 0 0
55610- 0 0 0 0 0 0 0 0 0 14 14 14
55611- 42 42 42 94 94 94 14 14 14 101 101 101
55612-128 128 128 2 2 6 18 18 18 116 116 116
55613-118 98 46 121 92 8 121 92 8 98 78 10
55614-162 162 162 106 106 106 2 2 6 2 2 6
55615- 2 2 6 195 195 195 195 195 195 6 6 6
55616- 2 2 6 2 2 6 2 2 6 2 2 6
55617- 74 74 74 62 62 62 22 22 22 6 6 6
55618- 0 0 0 0 0 0 0 0 0 0 0 0
55619- 0 0 0 0 0 0 0 0 0 0 0 0
55620- 0 0 0 0 0 0 0 0 0 0 0 0
55621- 0 0 0 0 0 0 0 0 0 0 0 0
55622- 0 0 0 0 0 0 0 0 0 0 0 0
55623- 0 0 0 0 0 0 0 0 0 0 0 0
55624- 0 0 0 0 0 0 0 0 1 0 0 1
55625- 0 0 1 0 0 0 0 0 1 0 0 0
55626- 0 0 0 0 0 0 0 0 0 0 0 0
55627- 0 0 0 0 0 0 0 0 0 0 0 0
55628- 0 0 0 0 0 0 0 0 0 0 0 0
55629- 0 0 0 0 0 0 0 0 0 0 0 0
55630- 0 0 0 0 0 0 0 0 0 10 10 10
55631- 38 38 38 90 90 90 14 14 14 58 58 58
55632-210 210 210 26 26 26 54 38 6 154 114 10
55633-226 170 11 236 186 11 225 175 15 184 144 12
55634-215 174 15 175 146 61 37 26 9 2 2 6
55635- 70 70 70 246 246 246 138 138 138 2 2 6
55636- 2 2 6 2 2 6 2 2 6 2 2 6
55637- 70 70 70 66 66 66 26 26 26 6 6 6
55638- 0 0 0 0 0 0 0 0 0 0 0 0
55639- 0 0 0 0 0 0 0 0 0 0 0 0
55640- 0 0 0 0 0 0 0 0 0 0 0 0
55641- 0 0 0 0 0 0 0 0 0 0 0 0
55642- 0 0 0 0 0 0 0 0 0 0 0 0
55643- 0 0 0 0 0 0 0 0 0 0 0 0
55644- 0 0 0 0 0 0 0 0 0 0 0 0
55645- 0 0 0 0 0 0 0 0 0 0 0 0
55646- 0 0 0 0 0 0 0 0 0 0 0 0
55647- 0 0 0 0 0 0 0 0 0 0 0 0
55648- 0 0 0 0 0 0 0 0 0 0 0 0
55649- 0 0 0 0 0 0 0 0 0 0 0 0
55650- 0 0 0 0 0 0 0 0 0 10 10 10
55651- 38 38 38 86 86 86 14 14 14 10 10 10
55652-195 195 195 188 164 115 192 133 9 225 175 15
55653-239 182 13 234 190 10 232 195 16 232 200 30
55654-245 207 45 241 208 19 232 195 16 184 144 12
55655-218 194 134 211 206 186 42 42 42 2 2 6
55656- 2 2 6 2 2 6 2 2 6 2 2 6
55657- 50 50 50 74 74 74 30 30 30 6 6 6
55658- 0 0 0 0 0 0 0 0 0 0 0 0
55659- 0 0 0 0 0 0 0 0 0 0 0 0
55660- 0 0 0 0 0 0 0 0 0 0 0 0
55661- 0 0 0 0 0 0 0 0 0 0 0 0
55662- 0 0 0 0 0 0 0 0 0 0 0 0
55663- 0 0 0 0 0 0 0 0 0 0 0 0
55664- 0 0 0 0 0 0 0 0 0 0 0 0
55665- 0 0 0 0 0 0 0 0 0 0 0 0
55666- 0 0 0 0 0 0 0 0 0 0 0 0
55667- 0 0 0 0 0 0 0 0 0 0 0 0
55668- 0 0 0 0 0 0 0 0 0 0 0 0
55669- 0 0 0 0 0 0 0 0 0 0 0 0
55670- 0 0 0 0 0 0 0 0 0 10 10 10
55671- 34 34 34 86 86 86 14 14 14 2 2 6
55672-121 87 25 192 133 9 219 162 10 239 182 13
55673-236 186 11 232 195 16 241 208 19 244 214 54
55674-246 218 60 246 218 38 246 215 20 241 208 19
55675-241 208 19 226 184 13 121 87 25 2 2 6
55676- 2 2 6 2 2 6 2 2 6 2 2 6
55677- 50 50 50 82 82 82 34 34 34 10 10 10
55678- 0 0 0 0 0 0 0 0 0 0 0 0
55679- 0 0 0 0 0 0 0 0 0 0 0 0
55680- 0 0 0 0 0 0 0 0 0 0 0 0
55681- 0 0 0 0 0 0 0 0 0 0 0 0
55682- 0 0 0 0 0 0 0 0 0 0 0 0
55683- 0 0 0 0 0 0 0 0 0 0 0 0
55684- 0 0 0 0 0 0 0 0 0 0 0 0
55685- 0 0 0 0 0 0 0 0 0 0 0 0
55686- 0 0 0 0 0 0 0 0 0 0 0 0
55687- 0 0 0 0 0 0 0 0 0 0 0 0
55688- 0 0 0 0 0 0 0 0 0 0 0 0
55689- 0 0 0 0 0 0 0 0 0 0 0 0
55690- 0 0 0 0 0 0 0 0 0 10 10 10
55691- 34 34 34 82 82 82 30 30 30 61 42 6
55692-180 123 7 206 145 10 230 174 11 239 182 13
55693-234 190 10 238 202 15 241 208 19 246 218 74
55694-246 218 38 246 215 20 246 215 20 246 215 20
55695-226 184 13 215 174 15 184 144 12 6 6 6
55696- 2 2 6 2 2 6 2 2 6 2 2 6
55697- 26 26 26 94 94 94 42 42 42 14 14 14
55698- 0 0 0 0 0 0 0 0 0 0 0 0
55699- 0 0 0 0 0 0 0 0 0 0 0 0
55700- 0 0 0 0 0 0 0 0 0 0 0 0
55701- 0 0 0 0 0 0 0 0 0 0 0 0
55702- 0 0 0 0 0 0 0 0 0 0 0 0
55703- 0 0 0 0 0 0 0 0 0 0 0 0
55704- 0 0 0 0 0 0 0 0 0 0 0 0
55705- 0 0 0 0 0 0 0 0 0 0 0 0
55706- 0 0 0 0 0 0 0 0 0 0 0 0
55707- 0 0 0 0 0 0 0 0 0 0 0 0
55708- 0 0 0 0 0 0 0 0 0 0 0 0
55709- 0 0 0 0 0 0 0 0 0 0 0 0
55710- 0 0 0 0 0 0 0 0 0 10 10 10
55711- 30 30 30 78 78 78 50 50 50 104 69 6
55712-192 133 9 216 158 10 236 178 12 236 186 11
55713-232 195 16 241 208 19 244 214 54 245 215 43
55714-246 215 20 246 215 20 241 208 19 198 155 10
55715-200 144 11 216 158 10 156 118 10 2 2 6
55716- 2 2 6 2 2 6 2 2 6 2 2 6
55717- 6 6 6 90 90 90 54 54 54 18 18 18
55718- 6 6 6 0 0 0 0 0 0 0 0 0
55719- 0 0 0 0 0 0 0 0 0 0 0 0
55720- 0 0 0 0 0 0 0 0 0 0 0 0
55721- 0 0 0 0 0 0 0 0 0 0 0 0
55722- 0 0 0 0 0 0 0 0 0 0 0 0
55723- 0 0 0 0 0 0 0 0 0 0 0 0
55724- 0 0 0 0 0 0 0 0 0 0 0 0
55725- 0 0 0 0 0 0 0 0 0 0 0 0
55726- 0 0 0 0 0 0 0 0 0 0 0 0
55727- 0 0 0 0 0 0 0 0 0 0 0 0
55728- 0 0 0 0 0 0 0 0 0 0 0 0
55729- 0 0 0 0 0 0 0 0 0 0 0 0
55730- 0 0 0 0 0 0 0 0 0 10 10 10
55731- 30 30 30 78 78 78 46 46 46 22 22 22
55732-137 92 6 210 162 10 239 182 13 238 190 10
55733-238 202 15 241 208 19 246 215 20 246 215 20
55734-241 208 19 203 166 17 185 133 11 210 150 10
55735-216 158 10 210 150 10 102 78 10 2 2 6
55736- 6 6 6 54 54 54 14 14 14 2 2 6
55737- 2 2 6 62 62 62 74 74 74 30 30 30
55738- 10 10 10 0 0 0 0 0 0 0 0 0
55739- 0 0 0 0 0 0 0 0 0 0 0 0
55740- 0 0 0 0 0 0 0 0 0 0 0 0
55741- 0 0 0 0 0 0 0 0 0 0 0 0
55742- 0 0 0 0 0 0 0 0 0 0 0 0
55743- 0 0 0 0 0 0 0 0 0 0 0 0
55744- 0 0 0 0 0 0 0 0 0 0 0 0
55745- 0 0 0 0 0 0 0 0 0 0 0 0
55746- 0 0 0 0 0 0 0 0 0 0 0 0
55747- 0 0 0 0 0 0 0 0 0 0 0 0
55748- 0 0 0 0 0 0 0 0 0 0 0 0
55749- 0 0 0 0 0 0 0 0 0 0 0 0
55750- 0 0 0 0 0 0 0 0 0 10 10 10
55751- 34 34 34 78 78 78 50 50 50 6 6 6
55752- 94 70 30 139 102 15 190 146 13 226 184 13
55753-232 200 30 232 195 16 215 174 15 190 146 13
55754-168 122 10 192 133 9 210 150 10 213 154 11
55755-202 150 34 182 157 106 101 98 89 2 2 6
55756- 2 2 6 78 78 78 116 116 116 58 58 58
55757- 2 2 6 22 22 22 90 90 90 46 46 46
55758- 18 18 18 6 6 6 0 0 0 0 0 0
55759- 0 0 0 0 0 0 0 0 0 0 0 0
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 0 0 0
55764- 0 0 0 0 0 0 0 0 0 0 0 0
55765- 0 0 0 0 0 0 0 0 0 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 0 0 0 0 0 0
55768- 0 0 0 0 0 0 0 0 0 0 0 0
55769- 0 0 0 0 0 0 0 0 0 0 0 0
55770- 0 0 0 0 0 0 0 0 0 10 10 10
55771- 38 38 38 86 86 86 50 50 50 6 6 6
55772-128 128 128 174 154 114 156 107 11 168 122 10
55773-198 155 10 184 144 12 197 138 11 200 144 11
55774-206 145 10 206 145 10 197 138 11 188 164 115
55775-195 195 195 198 198 198 174 174 174 14 14 14
55776- 2 2 6 22 22 22 116 116 116 116 116 116
55777- 22 22 22 2 2 6 74 74 74 70 70 70
55778- 30 30 30 10 10 10 0 0 0 0 0 0
55779- 0 0 0 0 0 0 0 0 0 0 0 0
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 0 0 0
55784- 0 0 0 0 0 0 0 0 0 0 0 0
55785- 0 0 0 0 0 0 0 0 0 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 0 0 0 0 0 0 0 0 0
55788- 0 0 0 0 0 0 0 0 0 0 0 0
55789- 0 0 0 0 0 0 0 0 0 0 0 0
55790- 0 0 0 0 0 0 6 6 6 18 18 18
55791- 50 50 50 101 101 101 26 26 26 10 10 10
55792-138 138 138 190 190 190 174 154 114 156 107 11
55793-197 138 11 200 144 11 197 138 11 192 133 9
55794-180 123 7 190 142 34 190 178 144 187 187 187
55795-202 202 202 221 221 221 214 214 214 66 66 66
55796- 2 2 6 2 2 6 50 50 50 62 62 62
55797- 6 6 6 2 2 6 10 10 10 90 90 90
55798- 50 50 50 18 18 18 6 6 6 0 0 0
55799- 0 0 0 0 0 0 0 0 0 0 0 0
55800- 0 0 0 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 0 0 0
55804- 0 0 0 0 0 0 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 0 0 0 0 0 0 0 0 0 0 0 0
55808- 0 0 0 0 0 0 0 0 0 0 0 0
55809- 0 0 0 0 0 0 0 0 0 0 0 0
55810- 0 0 0 0 0 0 10 10 10 34 34 34
55811- 74 74 74 74 74 74 2 2 6 6 6 6
55812-144 144 144 198 198 198 190 190 190 178 166 146
55813-154 121 60 156 107 11 156 107 11 168 124 44
55814-174 154 114 187 187 187 190 190 190 210 210 210
55815-246 246 246 253 253 253 253 253 253 182 182 182
55816- 6 6 6 2 2 6 2 2 6 2 2 6
55817- 2 2 6 2 2 6 2 2 6 62 62 62
55818- 74 74 74 34 34 34 14 14 14 0 0 0
55819- 0 0 0 0 0 0 0 0 0 0 0 0
55820- 0 0 0 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 0 0 0
55824- 0 0 0 0 0 0 0 0 0 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 0 0 0
55827- 0 0 0 0 0 0 0 0 0 0 0 0
55828- 0 0 0 0 0 0 0 0 0 0 0 0
55829- 0 0 0 0 0 0 0 0 0 0 0 0
55830- 0 0 0 10 10 10 22 22 22 54 54 54
55831- 94 94 94 18 18 18 2 2 6 46 46 46
55832-234 234 234 221 221 221 190 190 190 190 190 190
55833-190 190 190 187 187 187 187 187 187 190 190 190
55834-190 190 190 195 195 195 214 214 214 242 242 242
55835-253 253 253 253 253 253 253 253 253 253 253 253
55836- 82 82 82 2 2 6 2 2 6 2 2 6
55837- 2 2 6 2 2 6 2 2 6 14 14 14
55838- 86 86 86 54 54 54 22 22 22 6 6 6
55839- 0 0 0 0 0 0 0 0 0 0 0 0
55840- 0 0 0 0 0 0 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 0 0 0
55844- 0 0 0 0 0 0 0 0 0 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 0 0 0
55847- 0 0 0 0 0 0 0 0 0 0 0 0
55848- 0 0 0 0 0 0 0 0 0 0 0 0
55849- 0 0 0 0 0 0 0 0 0 0 0 0
55850- 6 6 6 18 18 18 46 46 46 90 90 90
55851- 46 46 46 18 18 18 6 6 6 182 182 182
55852-253 253 253 246 246 246 206 206 206 190 190 190
55853-190 190 190 190 190 190 190 190 190 190 190 190
55854-206 206 206 231 231 231 250 250 250 253 253 253
55855-253 253 253 253 253 253 253 253 253 253 253 253
55856-202 202 202 14 14 14 2 2 6 2 2 6
55857- 2 2 6 2 2 6 2 2 6 2 2 6
55858- 42 42 42 86 86 86 42 42 42 18 18 18
55859- 6 6 6 0 0 0 0 0 0 0 0 0
55860- 0 0 0 0 0 0 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 0 0 0
55864- 0 0 0 0 0 0 0 0 0 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 0 0 0 0 0 0
55867- 0 0 0 0 0 0 0 0 0 0 0 0
55868- 0 0 0 0 0 0 0 0 0 0 0 0
55869- 0 0 0 0 0 0 0 0 0 6 6 6
55870- 14 14 14 38 38 38 74 74 74 66 66 66
55871- 2 2 6 6 6 6 90 90 90 250 250 250
55872-253 253 253 253 253 253 238 238 238 198 198 198
55873-190 190 190 190 190 190 195 195 195 221 221 221
55874-246 246 246 253 253 253 253 253 253 253 253 253
55875-253 253 253 253 253 253 253 253 253 253 253 253
55876-253 253 253 82 82 82 2 2 6 2 2 6
55877- 2 2 6 2 2 6 2 2 6 2 2 6
55878- 2 2 6 78 78 78 70 70 70 34 34 34
55879- 14 14 14 6 6 6 0 0 0 0 0 0
55880- 0 0 0 0 0 0 0 0 0 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 0 0 0
55884- 0 0 0 0 0 0 0 0 0 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 0 0 0 0 0 0 0 0 0
55887- 0 0 0 0 0 0 0 0 0 0 0 0
55888- 0 0 0 0 0 0 0 0 0 0 0 0
55889- 0 0 0 0 0 0 0 0 0 14 14 14
55890- 34 34 34 66 66 66 78 78 78 6 6 6
55891- 2 2 6 18 18 18 218 218 218 253 253 253
55892-253 253 253 253 253 253 253 253 253 246 246 246
55893-226 226 226 231 231 231 246 246 246 253 253 253
55894-253 253 253 253 253 253 253 253 253 253 253 253
55895-253 253 253 253 253 253 253 253 253 253 253 253
55896-253 253 253 178 178 178 2 2 6 2 2 6
55897- 2 2 6 2 2 6 2 2 6 2 2 6
55898- 2 2 6 18 18 18 90 90 90 62 62 62
55899- 30 30 30 10 10 10 0 0 0 0 0 0
55900- 0 0 0 0 0 0 0 0 0 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 0 0 0
55904- 0 0 0 0 0 0 0 0 0 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 0 0 0 0 0 0 0 0 0 0 0 0
55907- 0 0 0 0 0 0 0 0 0 0 0 0
55908- 0 0 0 0 0 0 0 0 0 0 0 0
55909- 0 0 0 0 0 0 10 10 10 26 26 26
55910- 58 58 58 90 90 90 18 18 18 2 2 6
55911- 2 2 6 110 110 110 253 253 253 253 253 253
55912-253 253 253 253 253 253 253 253 253 253 253 253
55913-250 250 250 253 253 253 253 253 253 253 253 253
55914-253 253 253 253 253 253 253 253 253 253 253 253
55915-253 253 253 253 253 253 253 253 253 253 253 253
55916-253 253 253 231 231 231 18 18 18 2 2 6
55917- 2 2 6 2 2 6 2 2 6 2 2 6
55918- 2 2 6 2 2 6 18 18 18 94 94 94
55919- 54 54 54 26 26 26 10 10 10 0 0 0
55920- 0 0 0 0 0 0 0 0 0 0 0 0
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 0 0 0
55924- 0 0 0 0 0 0 0 0 0 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 0 0 0 0 0 0 0 0 0 0 0 0
55927- 0 0 0 0 0 0 0 0 0 0 0 0
55928- 0 0 0 0 0 0 0 0 0 0 0 0
55929- 0 0 0 6 6 6 22 22 22 50 50 50
55930- 90 90 90 26 26 26 2 2 6 2 2 6
55931- 14 14 14 195 195 195 250 250 250 253 253 253
55932-253 253 253 253 253 253 253 253 253 253 253 253
55933-253 253 253 253 253 253 253 253 253 253 253 253
55934-253 253 253 253 253 253 253 253 253 253 253 253
55935-253 253 253 253 253 253 253 253 253 253 253 253
55936-250 250 250 242 242 242 54 54 54 2 2 6
55937- 2 2 6 2 2 6 2 2 6 2 2 6
55938- 2 2 6 2 2 6 2 2 6 38 38 38
55939- 86 86 86 50 50 50 22 22 22 6 6 6
55940- 0 0 0 0 0 0 0 0 0 0 0 0
55941- 0 0 0 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 0 0 0 0
55944- 0 0 0 0 0 0 0 0 0 0 0 0
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 0 0 0 0 0 0 0 0 0 0 0 0
55947- 0 0 0 0 0 0 0 0 0 0 0 0
55948- 0 0 0 0 0 0 0 0 0 0 0 0
55949- 6 6 6 14 14 14 38 38 38 82 82 82
55950- 34 34 34 2 2 6 2 2 6 2 2 6
55951- 42 42 42 195 195 195 246 246 246 253 253 253
55952-253 253 253 253 253 253 253 253 253 250 250 250
55953-242 242 242 242 242 242 250 250 250 253 253 253
55954-253 253 253 253 253 253 253 253 253 253 253 253
55955-253 253 253 250 250 250 246 246 246 238 238 238
55956-226 226 226 231 231 231 101 101 101 6 6 6
55957- 2 2 6 2 2 6 2 2 6 2 2 6
55958- 2 2 6 2 2 6 2 2 6 2 2 6
55959- 38 38 38 82 82 82 42 42 42 14 14 14
55960- 6 6 6 0 0 0 0 0 0 0 0 0
55961- 0 0 0 0 0 0 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 0 0 0 0 0 0
55964- 0 0 0 0 0 0 0 0 0 0 0 0
55965- 0 0 0 0 0 0 0 0 0 0 0 0
55966- 0 0 0 0 0 0 0 0 0 0 0 0
55967- 0 0 0 0 0 0 0 0 0 0 0 0
55968- 0 0 0 0 0 0 0 0 0 0 0 0
55969- 10 10 10 26 26 26 62 62 62 66 66 66
55970- 2 2 6 2 2 6 2 2 6 6 6 6
55971- 70 70 70 170 170 170 206 206 206 234 234 234
55972-246 246 246 250 250 250 250 250 250 238 238 238
55973-226 226 226 231 231 231 238 238 238 250 250 250
55974-250 250 250 250 250 250 246 246 246 231 231 231
55975-214 214 214 206 206 206 202 202 202 202 202 202
55976-198 198 198 202 202 202 182 182 182 18 18 18
55977- 2 2 6 2 2 6 2 2 6 2 2 6
55978- 2 2 6 2 2 6 2 2 6 2 2 6
55979- 2 2 6 62 62 62 66 66 66 30 30 30
55980- 10 10 10 0 0 0 0 0 0 0 0 0
55981- 0 0 0 0 0 0 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 0 0 0 0 0 0
55984- 0 0 0 0 0 0 0 0 0 0 0 0
55985- 0 0 0 0 0 0 0 0 0 0 0 0
55986- 0 0 0 0 0 0 0 0 0 0 0 0
55987- 0 0 0 0 0 0 0 0 0 0 0 0
55988- 0 0 0 0 0 0 0 0 0 0 0 0
55989- 14 14 14 42 42 42 82 82 82 18 18 18
55990- 2 2 6 2 2 6 2 2 6 10 10 10
55991- 94 94 94 182 182 182 218 218 218 242 242 242
55992-250 250 250 253 253 253 253 253 253 250 250 250
55993-234 234 234 253 253 253 253 253 253 253 253 253
55994-253 253 253 253 253 253 253 253 253 246 246 246
55995-238 238 238 226 226 226 210 210 210 202 202 202
55996-195 195 195 195 195 195 210 210 210 158 158 158
55997- 6 6 6 14 14 14 50 50 50 14 14 14
55998- 2 2 6 2 2 6 2 2 6 2 2 6
55999- 2 2 6 6 6 6 86 86 86 46 46 46
56000- 18 18 18 6 6 6 0 0 0 0 0 0
56001- 0 0 0 0 0 0 0 0 0 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 0 0 0 0 0 0 0 0 0
56004- 0 0 0 0 0 0 0 0 0 0 0 0
56005- 0 0 0 0 0 0 0 0 0 0 0 0
56006- 0 0 0 0 0 0 0 0 0 0 0 0
56007- 0 0 0 0 0 0 0 0 0 0 0 0
56008- 0 0 0 0 0 0 0 0 0 6 6 6
56009- 22 22 22 54 54 54 70 70 70 2 2 6
56010- 2 2 6 10 10 10 2 2 6 22 22 22
56011-166 166 166 231 231 231 250 250 250 253 253 253
56012-253 253 253 253 253 253 253 253 253 250 250 250
56013-242 242 242 253 253 253 253 253 253 253 253 253
56014-253 253 253 253 253 253 253 253 253 253 253 253
56015-253 253 253 253 253 253 253 253 253 246 246 246
56016-231 231 231 206 206 206 198 198 198 226 226 226
56017- 94 94 94 2 2 6 6 6 6 38 38 38
56018- 30 30 30 2 2 6 2 2 6 2 2 6
56019- 2 2 6 2 2 6 62 62 62 66 66 66
56020- 26 26 26 10 10 10 0 0 0 0 0 0
56021- 0 0 0 0 0 0 0 0 0 0 0 0
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 0 0 0 0 0 0 0 0 0 0 0 0
56024- 0 0 0 0 0 0 0 0 0 0 0 0
56025- 0 0 0 0 0 0 0 0 0 0 0 0
56026- 0 0 0 0 0 0 0 0 0 0 0 0
56027- 0 0 0 0 0 0 0 0 0 0 0 0
56028- 0 0 0 0 0 0 0 0 0 10 10 10
56029- 30 30 30 74 74 74 50 50 50 2 2 6
56030- 26 26 26 26 26 26 2 2 6 106 106 106
56031-238 238 238 253 253 253 253 253 253 253 253 253
56032-253 253 253 253 253 253 253 253 253 253 253 253
56033-253 253 253 253 253 253 253 253 253 253 253 253
56034-253 253 253 253 253 253 253 253 253 253 253 253
56035-253 253 253 253 253 253 253 253 253 253 253 253
56036-253 253 253 246 246 246 218 218 218 202 202 202
56037-210 210 210 14 14 14 2 2 6 2 2 6
56038- 30 30 30 22 22 22 2 2 6 2 2 6
56039- 2 2 6 2 2 6 18 18 18 86 86 86
56040- 42 42 42 14 14 14 0 0 0 0 0 0
56041- 0 0 0 0 0 0 0 0 0 0 0 0
56042- 0 0 0 0 0 0 0 0 0 0 0 0
56043- 0 0 0 0 0 0 0 0 0 0 0 0
56044- 0 0 0 0 0 0 0 0 0 0 0 0
56045- 0 0 0 0 0 0 0 0 0 0 0 0
56046- 0 0 0 0 0 0 0 0 0 0 0 0
56047- 0 0 0 0 0 0 0 0 0 0 0 0
56048- 0 0 0 0 0 0 0 0 0 14 14 14
56049- 42 42 42 90 90 90 22 22 22 2 2 6
56050- 42 42 42 2 2 6 18 18 18 218 218 218
56051-253 253 253 253 253 253 253 253 253 253 253 253
56052-253 253 253 253 253 253 253 253 253 253 253 253
56053-253 253 253 253 253 253 253 253 253 253 253 253
56054-253 253 253 253 253 253 253 253 253 253 253 253
56055-253 253 253 253 253 253 253 253 253 253 253 253
56056-253 253 253 253 253 253 250 250 250 221 221 221
56057-218 218 218 101 101 101 2 2 6 14 14 14
56058- 18 18 18 38 38 38 10 10 10 2 2 6
56059- 2 2 6 2 2 6 2 2 6 78 78 78
56060- 58 58 58 22 22 22 6 6 6 0 0 0
56061- 0 0 0 0 0 0 0 0 0 0 0 0
56062- 0 0 0 0 0 0 0 0 0 0 0 0
56063- 0 0 0 0 0 0 0 0 0 0 0 0
56064- 0 0 0 0 0 0 0 0 0 0 0 0
56065- 0 0 0 0 0 0 0 0 0 0 0 0
56066- 0 0 0 0 0 0 0 0 0 0 0 0
56067- 0 0 0 0 0 0 0 0 0 0 0 0
56068- 0 0 0 0 0 0 6 6 6 18 18 18
56069- 54 54 54 82 82 82 2 2 6 26 26 26
56070- 22 22 22 2 2 6 123 123 123 253 253 253
56071-253 253 253 253 253 253 253 253 253 253 253 253
56072-253 253 253 253 253 253 253 253 253 253 253 253
56073-253 253 253 253 253 253 253 253 253 253 253 253
56074-253 253 253 253 253 253 253 253 253 253 253 253
56075-253 253 253 253 253 253 253 253 253 253 253 253
56076-253 253 253 253 253 253 253 253 253 250 250 250
56077-238 238 238 198 198 198 6 6 6 38 38 38
56078- 58 58 58 26 26 26 38 38 38 2 2 6
56079- 2 2 6 2 2 6 2 2 6 46 46 46
56080- 78 78 78 30 30 30 10 10 10 0 0 0
56081- 0 0 0 0 0 0 0 0 0 0 0 0
56082- 0 0 0 0 0 0 0 0 0 0 0 0
56083- 0 0 0 0 0 0 0 0 0 0 0 0
56084- 0 0 0 0 0 0 0 0 0 0 0 0
56085- 0 0 0 0 0 0 0 0 0 0 0 0
56086- 0 0 0 0 0 0 0 0 0 0 0 0
56087- 0 0 0 0 0 0 0 0 0 0 0 0
56088- 0 0 0 0 0 0 10 10 10 30 30 30
56089- 74 74 74 58 58 58 2 2 6 42 42 42
56090- 2 2 6 22 22 22 231 231 231 253 253 253
56091-253 253 253 253 253 253 253 253 253 253 253 253
56092-253 253 253 253 253 253 253 253 253 250 250 250
56093-253 253 253 253 253 253 253 253 253 253 253 253
56094-253 253 253 253 253 253 253 253 253 253 253 253
56095-253 253 253 253 253 253 253 253 253 253 253 253
56096-253 253 253 253 253 253 253 253 253 253 253 253
56097-253 253 253 246 246 246 46 46 46 38 38 38
56098- 42 42 42 14 14 14 38 38 38 14 14 14
56099- 2 2 6 2 2 6 2 2 6 6 6 6
56100- 86 86 86 46 46 46 14 14 14 0 0 0
56101- 0 0 0 0 0 0 0 0 0 0 0 0
56102- 0 0 0 0 0 0 0 0 0 0 0 0
56103- 0 0 0 0 0 0 0 0 0 0 0 0
56104- 0 0 0 0 0 0 0 0 0 0 0 0
56105- 0 0 0 0 0 0 0 0 0 0 0 0
56106- 0 0 0 0 0 0 0 0 0 0 0 0
56107- 0 0 0 0 0 0 0 0 0 0 0 0
56108- 0 0 0 6 6 6 14 14 14 42 42 42
56109- 90 90 90 18 18 18 18 18 18 26 26 26
56110- 2 2 6 116 116 116 253 253 253 253 253 253
56111-253 253 253 253 253 253 253 253 253 253 253 253
56112-253 253 253 253 253 253 250 250 250 238 238 238
56113-253 253 253 253 253 253 253 253 253 253 253 253
56114-253 253 253 253 253 253 253 253 253 253 253 253
56115-253 253 253 253 253 253 253 253 253 253 253 253
56116-253 253 253 253 253 253 253 253 253 253 253 253
56117-253 253 253 253 253 253 94 94 94 6 6 6
56118- 2 2 6 2 2 6 10 10 10 34 34 34
56119- 2 2 6 2 2 6 2 2 6 2 2 6
56120- 74 74 74 58 58 58 22 22 22 6 6 6
56121- 0 0 0 0 0 0 0 0 0 0 0 0
56122- 0 0 0 0 0 0 0 0 0 0 0 0
56123- 0 0 0 0 0 0 0 0 0 0 0 0
56124- 0 0 0 0 0 0 0 0 0 0 0 0
56125- 0 0 0 0 0 0 0 0 0 0 0 0
56126- 0 0 0 0 0 0 0 0 0 0 0 0
56127- 0 0 0 0 0 0 0 0 0 0 0 0
56128- 0 0 0 10 10 10 26 26 26 66 66 66
56129- 82 82 82 2 2 6 38 38 38 6 6 6
56130- 14 14 14 210 210 210 253 253 253 253 253 253
56131-253 253 253 253 253 253 253 253 253 253 253 253
56132-253 253 253 253 253 253 246 246 246 242 242 242
56133-253 253 253 253 253 253 253 253 253 253 253 253
56134-253 253 253 253 253 253 253 253 253 253 253 253
56135-253 253 253 253 253 253 253 253 253 253 253 253
56136-253 253 253 253 253 253 253 253 253 253 253 253
56137-253 253 253 253 253 253 144 144 144 2 2 6
56138- 2 2 6 2 2 6 2 2 6 46 46 46
56139- 2 2 6 2 2 6 2 2 6 2 2 6
56140- 42 42 42 74 74 74 30 30 30 10 10 10
56141- 0 0 0 0 0 0 0 0 0 0 0 0
56142- 0 0 0 0 0 0 0 0 0 0 0 0
56143- 0 0 0 0 0 0 0 0 0 0 0 0
56144- 0 0 0 0 0 0 0 0 0 0 0 0
56145- 0 0 0 0 0 0 0 0 0 0 0 0
56146- 0 0 0 0 0 0 0 0 0 0 0 0
56147- 0 0 0 0 0 0 0 0 0 0 0 0
56148- 6 6 6 14 14 14 42 42 42 90 90 90
56149- 26 26 26 6 6 6 42 42 42 2 2 6
56150- 74 74 74 250 250 250 253 253 253 253 253 253
56151-253 253 253 253 253 253 253 253 253 253 253 253
56152-253 253 253 253 253 253 242 242 242 242 242 242
56153-253 253 253 253 253 253 253 253 253 253 253 253
56154-253 253 253 253 253 253 253 253 253 253 253 253
56155-253 253 253 253 253 253 253 253 253 253 253 253
56156-253 253 253 253 253 253 253 253 253 253 253 253
56157-253 253 253 253 253 253 182 182 182 2 2 6
56158- 2 2 6 2 2 6 2 2 6 46 46 46
56159- 2 2 6 2 2 6 2 2 6 2 2 6
56160- 10 10 10 86 86 86 38 38 38 10 10 10
56161- 0 0 0 0 0 0 0 0 0 0 0 0
56162- 0 0 0 0 0 0 0 0 0 0 0 0
56163- 0 0 0 0 0 0 0 0 0 0 0 0
56164- 0 0 0 0 0 0 0 0 0 0 0 0
56165- 0 0 0 0 0 0 0 0 0 0 0 0
56166- 0 0 0 0 0 0 0 0 0 0 0 0
56167- 0 0 0 0 0 0 0 0 0 0 0 0
56168- 10 10 10 26 26 26 66 66 66 82 82 82
56169- 2 2 6 22 22 22 18 18 18 2 2 6
56170-149 149 149 253 253 253 253 253 253 253 253 253
56171-253 253 253 253 253 253 253 253 253 253 253 253
56172-253 253 253 253 253 253 234 234 234 242 242 242
56173-253 253 253 253 253 253 253 253 253 253 253 253
56174-253 253 253 253 253 253 253 253 253 253 253 253
56175-253 253 253 253 253 253 253 253 253 253 253 253
56176-253 253 253 253 253 253 253 253 253 253 253 253
56177-253 253 253 253 253 253 206 206 206 2 2 6
56178- 2 2 6 2 2 6 2 2 6 38 38 38
56179- 2 2 6 2 2 6 2 2 6 2 2 6
56180- 6 6 6 86 86 86 46 46 46 14 14 14
56181- 0 0 0 0 0 0 0 0 0 0 0 0
56182- 0 0 0 0 0 0 0 0 0 0 0 0
56183- 0 0 0 0 0 0 0 0 0 0 0 0
56184- 0 0 0 0 0 0 0 0 0 0 0 0
56185- 0 0 0 0 0 0 0 0 0 0 0 0
56186- 0 0 0 0 0 0 0 0 0 0 0 0
56187- 0 0 0 0 0 0 0 0 0 6 6 6
56188- 18 18 18 46 46 46 86 86 86 18 18 18
56189- 2 2 6 34 34 34 10 10 10 6 6 6
56190-210 210 210 253 253 253 253 253 253 253 253 253
56191-253 253 253 253 253 253 253 253 253 253 253 253
56192-253 253 253 253 253 253 234 234 234 242 242 242
56193-253 253 253 253 253 253 253 253 253 253 253 253
56194-253 253 253 253 253 253 253 253 253 253 253 253
56195-253 253 253 253 253 253 253 253 253 253 253 253
56196-253 253 253 253 253 253 253 253 253 253 253 253
56197-253 253 253 253 253 253 221 221 221 6 6 6
56198- 2 2 6 2 2 6 6 6 6 30 30 30
56199- 2 2 6 2 2 6 2 2 6 2 2 6
56200- 2 2 6 82 82 82 54 54 54 18 18 18
56201- 6 6 6 0 0 0 0 0 0 0 0 0
56202- 0 0 0 0 0 0 0 0 0 0 0 0
56203- 0 0 0 0 0 0 0 0 0 0 0 0
56204- 0 0 0 0 0 0 0 0 0 0 0 0
56205- 0 0 0 0 0 0 0 0 0 0 0 0
56206- 0 0 0 0 0 0 0 0 0 0 0 0
56207- 0 0 0 0 0 0 0 0 0 10 10 10
56208- 26 26 26 66 66 66 62 62 62 2 2 6
56209- 2 2 6 38 38 38 10 10 10 26 26 26
56210-238 238 238 253 253 253 253 253 253 253 253 253
56211-253 253 253 253 253 253 253 253 253 253 253 253
56212-253 253 253 253 253 253 231 231 231 238 238 238
56213-253 253 253 253 253 253 253 253 253 253 253 253
56214-253 253 253 253 253 253 253 253 253 253 253 253
56215-253 253 253 253 253 253 253 253 253 253 253 253
56216-253 253 253 253 253 253 253 253 253 253 253 253
56217-253 253 253 253 253 253 231 231 231 6 6 6
56218- 2 2 6 2 2 6 10 10 10 30 30 30
56219- 2 2 6 2 2 6 2 2 6 2 2 6
56220- 2 2 6 66 66 66 58 58 58 22 22 22
56221- 6 6 6 0 0 0 0 0 0 0 0 0
56222- 0 0 0 0 0 0 0 0 0 0 0 0
56223- 0 0 0 0 0 0 0 0 0 0 0 0
56224- 0 0 0 0 0 0 0 0 0 0 0 0
56225- 0 0 0 0 0 0 0 0 0 0 0 0
56226- 0 0 0 0 0 0 0 0 0 0 0 0
56227- 0 0 0 0 0 0 0 0 0 10 10 10
56228- 38 38 38 78 78 78 6 6 6 2 2 6
56229- 2 2 6 46 46 46 14 14 14 42 42 42
56230-246 246 246 253 253 253 253 253 253 253 253 253
56231-253 253 253 253 253 253 253 253 253 253 253 253
56232-253 253 253 253 253 253 231 231 231 242 242 242
56233-253 253 253 253 253 253 253 253 253 253 253 253
56234-253 253 253 253 253 253 253 253 253 253 253 253
56235-253 253 253 253 253 253 253 253 253 253 253 253
56236-253 253 253 253 253 253 253 253 253 253 253 253
56237-253 253 253 253 253 253 234 234 234 10 10 10
56238- 2 2 6 2 2 6 22 22 22 14 14 14
56239- 2 2 6 2 2 6 2 2 6 2 2 6
56240- 2 2 6 66 66 66 62 62 62 22 22 22
56241- 6 6 6 0 0 0 0 0 0 0 0 0
56242- 0 0 0 0 0 0 0 0 0 0 0 0
56243- 0 0 0 0 0 0 0 0 0 0 0 0
56244- 0 0 0 0 0 0 0 0 0 0 0 0
56245- 0 0 0 0 0 0 0 0 0 0 0 0
56246- 0 0 0 0 0 0 0 0 0 0 0 0
56247- 0 0 0 0 0 0 6 6 6 18 18 18
56248- 50 50 50 74 74 74 2 2 6 2 2 6
56249- 14 14 14 70 70 70 34 34 34 62 62 62
56250-250 250 250 253 253 253 253 253 253 253 253 253
56251-253 253 253 253 253 253 253 253 253 253 253 253
56252-253 253 253 253 253 253 231 231 231 246 246 246
56253-253 253 253 253 253 253 253 253 253 253 253 253
56254-253 253 253 253 253 253 253 253 253 253 253 253
56255-253 253 253 253 253 253 253 253 253 253 253 253
56256-253 253 253 253 253 253 253 253 253 253 253 253
56257-253 253 253 253 253 253 234 234 234 14 14 14
56258- 2 2 6 2 2 6 30 30 30 2 2 6
56259- 2 2 6 2 2 6 2 2 6 2 2 6
56260- 2 2 6 66 66 66 62 62 62 22 22 22
56261- 6 6 6 0 0 0 0 0 0 0 0 0
56262- 0 0 0 0 0 0 0 0 0 0 0 0
56263- 0 0 0 0 0 0 0 0 0 0 0 0
56264- 0 0 0 0 0 0 0 0 0 0 0 0
56265- 0 0 0 0 0 0 0 0 0 0 0 0
56266- 0 0 0 0 0 0 0 0 0 0 0 0
56267- 0 0 0 0 0 0 6 6 6 18 18 18
56268- 54 54 54 62 62 62 2 2 6 2 2 6
56269- 2 2 6 30 30 30 46 46 46 70 70 70
56270-250 250 250 253 253 253 253 253 253 253 253 253
56271-253 253 253 253 253 253 253 253 253 253 253 253
56272-253 253 253 253 253 253 231 231 231 246 246 246
56273-253 253 253 253 253 253 253 253 253 253 253 253
56274-253 253 253 253 253 253 253 253 253 253 253 253
56275-253 253 253 253 253 253 253 253 253 253 253 253
56276-253 253 253 253 253 253 253 253 253 253 253 253
56277-253 253 253 253 253 253 226 226 226 10 10 10
56278- 2 2 6 6 6 6 30 30 30 2 2 6
56279- 2 2 6 2 2 6 2 2 6 2 2 6
56280- 2 2 6 66 66 66 58 58 58 22 22 22
56281- 6 6 6 0 0 0 0 0 0 0 0 0
56282- 0 0 0 0 0 0 0 0 0 0 0 0
56283- 0 0 0 0 0 0 0 0 0 0 0 0
56284- 0 0 0 0 0 0 0 0 0 0 0 0
56285- 0 0 0 0 0 0 0 0 0 0 0 0
56286- 0 0 0 0 0 0 0 0 0 0 0 0
56287- 0 0 0 0 0 0 6 6 6 22 22 22
56288- 58 58 58 62 62 62 2 2 6 2 2 6
56289- 2 2 6 2 2 6 30 30 30 78 78 78
56290-250 250 250 253 253 253 253 253 253 253 253 253
56291-253 253 253 253 253 253 253 253 253 253 253 253
56292-253 253 253 253 253 253 231 231 231 246 246 246
56293-253 253 253 253 253 253 253 253 253 253 253 253
56294-253 253 253 253 253 253 253 253 253 253 253 253
56295-253 253 253 253 253 253 253 253 253 253 253 253
56296-253 253 253 253 253 253 253 253 253 253 253 253
56297-253 253 253 253 253 253 206 206 206 2 2 6
56298- 22 22 22 34 34 34 18 14 6 22 22 22
56299- 26 26 26 18 18 18 6 6 6 2 2 6
56300- 2 2 6 82 82 82 54 54 54 18 18 18
56301- 6 6 6 0 0 0 0 0 0 0 0 0
56302- 0 0 0 0 0 0 0 0 0 0 0 0
56303- 0 0 0 0 0 0 0 0 0 0 0 0
56304- 0 0 0 0 0 0 0 0 0 0 0 0
56305- 0 0 0 0 0 0 0 0 0 0 0 0
56306- 0 0 0 0 0 0 0 0 0 0 0 0
56307- 0 0 0 0 0 0 6 6 6 26 26 26
56308- 62 62 62 106 106 106 74 54 14 185 133 11
56309-210 162 10 121 92 8 6 6 6 62 62 62
56310-238 238 238 253 253 253 253 253 253 253 253 253
56311-253 253 253 253 253 253 253 253 253 253 253 253
56312-253 253 253 253 253 253 231 231 231 246 246 246
56313-253 253 253 253 253 253 253 253 253 253 253 253
56314-253 253 253 253 253 253 253 253 253 253 253 253
56315-253 253 253 253 253 253 253 253 253 253 253 253
56316-253 253 253 253 253 253 253 253 253 253 253 253
56317-253 253 253 253 253 253 158 158 158 18 18 18
56318- 14 14 14 2 2 6 2 2 6 2 2 6
56319- 6 6 6 18 18 18 66 66 66 38 38 38
56320- 6 6 6 94 94 94 50 50 50 18 18 18
56321- 6 6 6 0 0 0 0 0 0 0 0 0
56322- 0 0 0 0 0 0 0 0 0 0 0 0
56323- 0 0 0 0 0 0 0 0 0 0 0 0
56324- 0 0 0 0 0 0 0 0 0 0 0 0
56325- 0 0 0 0 0 0 0 0 0 0 0 0
56326- 0 0 0 0 0 0 0 0 0 6 6 6
56327- 10 10 10 10 10 10 18 18 18 38 38 38
56328- 78 78 78 142 134 106 216 158 10 242 186 14
56329-246 190 14 246 190 14 156 118 10 10 10 10
56330- 90 90 90 238 238 238 253 253 253 253 253 253
56331-253 253 253 253 253 253 253 253 253 253 253 253
56332-253 253 253 253 253 253 231 231 231 250 250 250
56333-253 253 253 253 253 253 253 253 253 253 253 253
56334-253 253 253 253 253 253 253 253 253 253 253 253
56335-253 253 253 253 253 253 253 253 253 253 253 253
56336-253 253 253 253 253 253 253 253 253 246 230 190
56337-238 204 91 238 204 91 181 142 44 37 26 9
56338- 2 2 6 2 2 6 2 2 6 2 2 6
56339- 2 2 6 2 2 6 38 38 38 46 46 46
56340- 26 26 26 106 106 106 54 54 54 18 18 18
56341- 6 6 6 0 0 0 0 0 0 0 0 0
56342- 0 0 0 0 0 0 0 0 0 0 0 0
56343- 0 0 0 0 0 0 0 0 0 0 0 0
56344- 0 0 0 0 0 0 0 0 0 0 0 0
56345- 0 0 0 0 0 0 0 0 0 0 0 0
56346- 0 0 0 6 6 6 14 14 14 22 22 22
56347- 30 30 30 38 38 38 50 50 50 70 70 70
56348-106 106 106 190 142 34 226 170 11 242 186 14
56349-246 190 14 246 190 14 246 190 14 154 114 10
56350- 6 6 6 74 74 74 226 226 226 253 253 253
56351-253 253 253 253 253 253 253 253 253 253 253 253
56352-253 253 253 253 253 253 231 231 231 250 250 250
56353-253 253 253 253 253 253 253 253 253 253 253 253
56354-253 253 253 253 253 253 253 253 253 253 253 253
56355-253 253 253 253 253 253 253 253 253 253 253 253
56356-253 253 253 253 253 253 253 253 253 228 184 62
56357-241 196 14 241 208 19 232 195 16 38 30 10
56358- 2 2 6 2 2 6 2 2 6 2 2 6
56359- 2 2 6 6 6 6 30 30 30 26 26 26
56360-203 166 17 154 142 90 66 66 66 26 26 26
56361- 6 6 6 0 0 0 0 0 0 0 0 0
56362- 0 0 0 0 0 0 0 0 0 0 0 0
56363- 0 0 0 0 0 0 0 0 0 0 0 0
56364- 0 0 0 0 0 0 0 0 0 0 0 0
56365- 0 0 0 0 0 0 0 0 0 0 0 0
56366- 6 6 6 18 18 18 38 38 38 58 58 58
56367- 78 78 78 86 86 86 101 101 101 123 123 123
56368-175 146 61 210 150 10 234 174 13 246 186 14
56369-246 190 14 246 190 14 246 190 14 238 190 10
56370-102 78 10 2 2 6 46 46 46 198 198 198
56371-253 253 253 253 253 253 253 253 253 253 253 253
56372-253 253 253 253 253 253 234 234 234 242 242 242
56373-253 253 253 253 253 253 253 253 253 253 253 253
56374-253 253 253 253 253 253 253 253 253 253 253 253
56375-253 253 253 253 253 253 253 253 253 253 253 253
56376-253 253 253 253 253 253 253 253 253 224 178 62
56377-242 186 14 241 196 14 210 166 10 22 18 6
56378- 2 2 6 2 2 6 2 2 6 2 2 6
56379- 2 2 6 2 2 6 6 6 6 121 92 8
56380-238 202 15 232 195 16 82 82 82 34 34 34
56381- 10 10 10 0 0 0 0 0 0 0 0 0
56382- 0 0 0 0 0 0 0 0 0 0 0 0
56383- 0 0 0 0 0 0 0 0 0 0 0 0
56384- 0 0 0 0 0 0 0 0 0 0 0 0
56385- 0 0 0 0 0 0 0 0 0 0 0 0
56386- 14 14 14 38 38 38 70 70 70 154 122 46
56387-190 142 34 200 144 11 197 138 11 197 138 11
56388-213 154 11 226 170 11 242 186 14 246 190 14
56389-246 190 14 246 190 14 246 190 14 246 190 14
56390-225 175 15 46 32 6 2 2 6 22 22 22
56391-158 158 158 250 250 250 253 253 253 253 253 253
56392-253 253 253 253 253 253 253 253 253 253 253 253
56393-253 253 253 253 253 253 253 253 253 253 253 253
56394-253 253 253 253 253 253 253 253 253 253 253 253
56395-253 253 253 253 253 253 253 253 253 253 253 253
56396-253 253 253 250 250 250 242 242 242 224 178 62
56397-239 182 13 236 186 11 213 154 11 46 32 6
56398- 2 2 6 2 2 6 2 2 6 2 2 6
56399- 2 2 6 2 2 6 61 42 6 225 175 15
56400-238 190 10 236 186 11 112 100 78 42 42 42
56401- 14 14 14 0 0 0 0 0 0 0 0 0
56402- 0 0 0 0 0 0 0 0 0 0 0 0
56403- 0 0 0 0 0 0 0 0 0 0 0 0
56404- 0 0 0 0 0 0 0 0 0 0 0 0
56405- 0 0 0 0 0 0 0 0 0 6 6 6
56406- 22 22 22 54 54 54 154 122 46 213 154 11
56407-226 170 11 230 174 11 226 170 11 226 170 11
56408-236 178 12 242 186 14 246 190 14 246 190 14
56409-246 190 14 246 190 14 246 190 14 246 190 14
56410-241 196 14 184 144 12 10 10 10 2 2 6
56411- 6 6 6 116 116 116 242 242 242 253 253 253
56412-253 253 253 253 253 253 253 253 253 253 253 253
56413-253 253 253 253 253 253 253 253 253 253 253 253
56414-253 253 253 253 253 253 253 253 253 253 253 253
56415-253 253 253 253 253 253 253 253 253 253 253 253
56416-253 253 253 231 231 231 198 198 198 214 170 54
56417-236 178 12 236 178 12 210 150 10 137 92 6
56418- 18 14 6 2 2 6 2 2 6 2 2 6
56419- 6 6 6 70 47 6 200 144 11 236 178 12
56420-239 182 13 239 182 13 124 112 88 58 58 58
56421- 22 22 22 6 6 6 0 0 0 0 0 0
56422- 0 0 0 0 0 0 0 0 0 0 0 0
56423- 0 0 0 0 0 0 0 0 0 0 0 0
56424- 0 0 0 0 0 0 0 0 0 0 0 0
56425- 0 0 0 0 0 0 0 0 0 10 10 10
56426- 30 30 30 70 70 70 180 133 36 226 170 11
56427-239 182 13 242 186 14 242 186 14 246 186 14
56428-246 190 14 246 190 14 246 190 14 246 190 14
56429-246 190 14 246 190 14 246 190 14 246 190 14
56430-246 190 14 232 195 16 98 70 6 2 2 6
56431- 2 2 6 2 2 6 66 66 66 221 221 221
56432-253 253 253 253 253 253 253 253 253 253 253 253
56433-253 253 253 253 253 253 253 253 253 253 253 253
56434-253 253 253 253 253 253 253 253 253 253 253 253
56435-253 253 253 253 253 253 253 253 253 253 253 253
56436-253 253 253 206 206 206 198 198 198 214 166 58
56437-230 174 11 230 174 11 216 158 10 192 133 9
56438-163 110 8 116 81 8 102 78 10 116 81 8
56439-167 114 7 197 138 11 226 170 11 239 182 13
56440-242 186 14 242 186 14 162 146 94 78 78 78
56441- 34 34 34 14 14 14 6 6 6 0 0 0
56442- 0 0 0 0 0 0 0 0 0 0 0 0
56443- 0 0 0 0 0 0 0 0 0 0 0 0
56444- 0 0 0 0 0 0 0 0 0 0 0 0
56445- 0 0 0 0 0 0 0 0 0 6 6 6
56446- 30 30 30 78 78 78 190 142 34 226 170 11
56447-239 182 13 246 190 14 246 190 14 246 190 14
56448-246 190 14 246 190 14 246 190 14 246 190 14
56449-246 190 14 246 190 14 246 190 14 246 190 14
56450-246 190 14 241 196 14 203 166 17 22 18 6
56451- 2 2 6 2 2 6 2 2 6 38 38 38
56452-218 218 218 253 253 253 253 253 253 253 253 253
56453-253 253 253 253 253 253 253 253 253 253 253 253
56454-253 253 253 253 253 253 253 253 253 253 253 253
56455-253 253 253 253 253 253 253 253 253 253 253 253
56456-250 250 250 206 206 206 198 198 198 202 162 69
56457-226 170 11 236 178 12 224 166 10 210 150 10
56458-200 144 11 197 138 11 192 133 9 197 138 11
56459-210 150 10 226 170 11 242 186 14 246 190 14
56460-246 190 14 246 186 14 225 175 15 124 112 88
56461- 62 62 62 30 30 30 14 14 14 6 6 6
56462- 0 0 0 0 0 0 0 0 0 0 0 0
56463- 0 0 0 0 0 0 0 0 0 0 0 0
56464- 0 0 0 0 0 0 0 0 0 0 0 0
56465- 0 0 0 0 0 0 0 0 0 10 10 10
56466- 30 30 30 78 78 78 174 135 50 224 166 10
56467-239 182 13 246 190 14 246 190 14 246 190 14
56468-246 190 14 246 190 14 246 190 14 246 190 14
56469-246 190 14 246 190 14 246 190 14 246 190 14
56470-246 190 14 246 190 14 241 196 14 139 102 15
56471- 2 2 6 2 2 6 2 2 6 2 2 6
56472- 78 78 78 250 250 250 253 253 253 253 253 253
56473-253 253 253 253 253 253 253 253 253 253 253 253
56474-253 253 253 253 253 253 253 253 253 253 253 253
56475-253 253 253 253 253 253 253 253 253 253 253 253
56476-250 250 250 214 214 214 198 198 198 190 150 46
56477-219 162 10 236 178 12 234 174 13 224 166 10
56478-216 158 10 213 154 11 213 154 11 216 158 10
56479-226 170 11 239 182 13 246 190 14 246 190 14
56480-246 190 14 246 190 14 242 186 14 206 162 42
56481-101 101 101 58 58 58 30 30 30 14 14 14
56482- 6 6 6 0 0 0 0 0 0 0 0 0
56483- 0 0 0 0 0 0 0 0 0 0 0 0
56484- 0 0 0 0 0 0 0 0 0 0 0 0
56485- 0 0 0 0 0 0 0 0 0 10 10 10
56486- 30 30 30 74 74 74 174 135 50 216 158 10
56487-236 178 12 246 190 14 246 190 14 246 190 14
56488-246 190 14 246 190 14 246 190 14 246 190 14
56489-246 190 14 246 190 14 246 190 14 246 190 14
56490-246 190 14 246 190 14 241 196 14 226 184 13
56491- 61 42 6 2 2 6 2 2 6 2 2 6
56492- 22 22 22 238 238 238 253 253 253 253 253 253
56493-253 253 253 253 253 253 253 253 253 253 253 253
56494-253 253 253 253 253 253 253 253 253 253 253 253
56495-253 253 253 253 253 253 253 253 253 253 253 253
56496-253 253 253 226 226 226 187 187 187 180 133 36
56497-216 158 10 236 178 12 239 182 13 236 178 12
56498-230 174 11 226 170 11 226 170 11 230 174 11
56499-236 178 12 242 186 14 246 190 14 246 190 14
56500-246 190 14 246 190 14 246 186 14 239 182 13
56501-206 162 42 106 106 106 66 66 66 34 34 34
56502- 14 14 14 6 6 6 0 0 0 0 0 0
56503- 0 0 0 0 0 0 0 0 0 0 0 0
56504- 0 0 0 0 0 0 0 0 0 0 0 0
56505- 0 0 0 0 0 0 0 0 0 6 6 6
56506- 26 26 26 70 70 70 163 133 67 213 154 11
56507-236 178 12 246 190 14 246 190 14 246 190 14
56508-246 190 14 246 190 14 246 190 14 246 190 14
56509-246 190 14 246 190 14 246 190 14 246 190 14
56510-246 190 14 246 190 14 246 190 14 241 196 14
56511-190 146 13 18 14 6 2 2 6 2 2 6
56512- 46 46 46 246 246 246 253 253 253 253 253 253
56513-253 253 253 253 253 253 253 253 253 253 253 253
56514-253 253 253 253 253 253 253 253 253 253 253 253
56515-253 253 253 253 253 253 253 253 253 253 253 253
56516-253 253 253 221 221 221 86 86 86 156 107 11
56517-216 158 10 236 178 12 242 186 14 246 186 14
56518-242 186 14 239 182 13 239 182 13 242 186 14
56519-242 186 14 246 186 14 246 190 14 246 190 14
56520-246 190 14 246 190 14 246 190 14 246 190 14
56521-242 186 14 225 175 15 142 122 72 66 66 66
56522- 30 30 30 10 10 10 0 0 0 0 0 0
56523- 0 0 0 0 0 0 0 0 0 0 0 0
56524- 0 0 0 0 0 0 0 0 0 0 0 0
56525- 0 0 0 0 0 0 0 0 0 6 6 6
56526- 26 26 26 70 70 70 163 133 67 210 150 10
56527-236 178 12 246 190 14 246 190 14 246 190 14
56528-246 190 14 246 190 14 246 190 14 246 190 14
56529-246 190 14 246 190 14 246 190 14 246 190 14
56530-246 190 14 246 190 14 246 190 14 246 190 14
56531-232 195 16 121 92 8 34 34 34 106 106 106
56532-221 221 221 253 253 253 253 253 253 253 253 253
56533-253 253 253 253 253 253 253 253 253 253 253 253
56534-253 253 253 253 253 253 253 253 253 253 253 253
56535-253 253 253 253 253 253 253 253 253 253 253 253
56536-242 242 242 82 82 82 18 14 6 163 110 8
56537-216 158 10 236 178 12 242 186 14 246 190 14
56538-246 190 14 246 190 14 246 190 14 246 190 14
56539-246 190 14 246 190 14 246 190 14 246 190 14
56540-246 190 14 246 190 14 246 190 14 246 190 14
56541-246 190 14 246 190 14 242 186 14 163 133 67
56542- 46 46 46 18 18 18 6 6 6 0 0 0
56543- 0 0 0 0 0 0 0 0 0 0 0 0
56544- 0 0 0 0 0 0 0 0 0 0 0 0
56545- 0 0 0 0 0 0 0 0 0 10 10 10
56546- 30 30 30 78 78 78 163 133 67 210 150 10
56547-236 178 12 246 186 14 246 190 14 246 190 14
56548-246 190 14 246 190 14 246 190 14 246 190 14
56549-246 190 14 246 190 14 246 190 14 246 190 14
56550-246 190 14 246 190 14 246 190 14 246 190 14
56551-241 196 14 215 174 15 190 178 144 253 253 253
56552-253 253 253 253 253 253 253 253 253 253 253 253
56553-253 253 253 253 253 253 253 253 253 253 253 253
56554-253 253 253 253 253 253 253 253 253 253 253 253
56555-253 253 253 253 253 253 253 253 253 218 218 218
56556- 58 58 58 2 2 6 22 18 6 167 114 7
56557-216 158 10 236 178 12 246 186 14 246 190 14
56558-246 190 14 246 190 14 246 190 14 246 190 14
56559-246 190 14 246 190 14 246 190 14 246 190 14
56560-246 190 14 246 190 14 246 190 14 246 190 14
56561-246 190 14 246 186 14 242 186 14 190 150 46
56562- 54 54 54 22 22 22 6 6 6 0 0 0
56563- 0 0 0 0 0 0 0 0 0 0 0 0
56564- 0 0 0 0 0 0 0 0 0 0 0 0
56565- 0 0 0 0 0 0 0 0 0 14 14 14
56566- 38 38 38 86 86 86 180 133 36 213 154 11
56567-236 178 12 246 186 14 246 190 14 246 190 14
56568-246 190 14 246 190 14 246 190 14 246 190 14
56569-246 190 14 246 190 14 246 190 14 246 190 14
56570-246 190 14 246 190 14 246 190 14 246 190 14
56571-246 190 14 232 195 16 190 146 13 214 214 214
56572-253 253 253 253 253 253 253 253 253 253 253 253
56573-253 253 253 253 253 253 253 253 253 253 253 253
56574-253 253 253 253 253 253 253 253 253 253 253 253
56575-253 253 253 250 250 250 170 170 170 26 26 26
56576- 2 2 6 2 2 6 37 26 9 163 110 8
56577-219 162 10 239 182 13 246 186 14 246 190 14
56578-246 190 14 246 190 14 246 190 14 246 190 14
56579-246 190 14 246 190 14 246 190 14 246 190 14
56580-246 190 14 246 190 14 246 190 14 246 190 14
56581-246 186 14 236 178 12 224 166 10 142 122 72
56582- 46 46 46 18 18 18 6 6 6 0 0 0
56583- 0 0 0 0 0 0 0 0 0 0 0 0
56584- 0 0 0 0 0 0 0 0 0 0 0 0
56585- 0 0 0 0 0 0 6 6 6 18 18 18
56586- 50 50 50 109 106 95 192 133 9 224 166 10
56587-242 186 14 246 190 14 246 190 14 246 190 14
56588-246 190 14 246 190 14 246 190 14 246 190 14
56589-246 190 14 246 190 14 246 190 14 246 190 14
56590-246 190 14 246 190 14 246 190 14 246 190 14
56591-242 186 14 226 184 13 210 162 10 142 110 46
56592-226 226 226 253 253 253 253 253 253 253 253 253
56593-253 253 253 253 253 253 253 253 253 253 253 253
56594-253 253 253 253 253 253 253 253 253 253 253 253
56595-198 198 198 66 66 66 2 2 6 2 2 6
56596- 2 2 6 2 2 6 50 34 6 156 107 11
56597-219 162 10 239 182 13 246 186 14 246 190 14
56598-246 190 14 246 190 14 246 190 14 246 190 14
56599-246 190 14 246 190 14 246 190 14 246 190 14
56600-246 190 14 246 190 14 246 190 14 242 186 14
56601-234 174 13 213 154 11 154 122 46 66 66 66
56602- 30 30 30 10 10 10 0 0 0 0 0 0
56603- 0 0 0 0 0 0 0 0 0 0 0 0
56604- 0 0 0 0 0 0 0 0 0 0 0 0
56605- 0 0 0 0 0 0 6 6 6 22 22 22
56606- 58 58 58 154 121 60 206 145 10 234 174 13
56607-242 186 14 246 186 14 246 190 14 246 190 14
56608-246 190 14 246 190 14 246 190 14 246 190 14
56609-246 190 14 246 190 14 246 190 14 246 190 14
56610-246 190 14 246 190 14 246 190 14 246 190 14
56611-246 186 14 236 178 12 210 162 10 163 110 8
56612- 61 42 6 138 138 138 218 218 218 250 250 250
56613-253 253 253 253 253 253 253 253 253 250 250 250
56614-242 242 242 210 210 210 144 144 144 66 66 66
56615- 6 6 6 2 2 6 2 2 6 2 2 6
56616- 2 2 6 2 2 6 61 42 6 163 110 8
56617-216 158 10 236 178 12 246 190 14 246 190 14
56618-246 190 14 246 190 14 246 190 14 246 190 14
56619-246 190 14 246 190 14 246 190 14 246 190 14
56620-246 190 14 239 182 13 230 174 11 216 158 10
56621-190 142 34 124 112 88 70 70 70 38 38 38
56622- 18 18 18 6 6 6 0 0 0 0 0 0
56623- 0 0 0 0 0 0 0 0 0 0 0 0
56624- 0 0 0 0 0 0 0 0 0 0 0 0
56625- 0 0 0 0 0 0 6 6 6 22 22 22
56626- 62 62 62 168 124 44 206 145 10 224 166 10
56627-236 178 12 239 182 13 242 186 14 242 186 14
56628-246 186 14 246 190 14 246 190 14 246 190 14
56629-246 190 14 246 190 14 246 190 14 246 190 14
56630-246 190 14 246 190 14 246 190 14 246 190 14
56631-246 190 14 236 178 12 216 158 10 175 118 6
56632- 80 54 7 2 2 6 6 6 6 30 30 30
56633- 54 54 54 62 62 62 50 50 50 38 38 38
56634- 14 14 14 2 2 6 2 2 6 2 2 6
56635- 2 2 6 2 2 6 2 2 6 2 2 6
56636- 2 2 6 6 6 6 80 54 7 167 114 7
56637-213 154 11 236 178 12 246 190 14 246 190 14
56638-246 190 14 246 190 14 246 190 14 246 190 14
56639-246 190 14 242 186 14 239 182 13 239 182 13
56640-230 174 11 210 150 10 174 135 50 124 112 88
56641- 82 82 82 54 54 54 34 34 34 18 18 18
56642- 6 6 6 0 0 0 0 0 0 0 0 0
56643- 0 0 0 0 0 0 0 0 0 0 0 0
56644- 0 0 0 0 0 0 0 0 0 0 0 0
56645- 0 0 0 0 0 0 6 6 6 18 18 18
56646- 50 50 50 158 118 36 192 133 9 200 144 11
56647-216 158 10 219 162 10 224 166 10 226 170 11
56648-230 174 11 236 178 12 239 182 13 239 182 13
56649-242 186 14 246 186 14 246 190 14 246 190 14
56650-246 190 14 246 190 14 246 190 14 246 190 14
56651-246 186 14 230 174 11 210 150 10 163 110 8
56652-104 69 6 10 10 10 2 2 6 2 2 6
56653- 2 2 6 2 2 6 2 2 6 2 2 6
56654- 2 2 6 2 2 6 2 2 6 2 2 6
56655- 2 2 6 2 2 6 2 2 6 2 2 6
56656- 2 2 6 6 6 6 91 60 6 167 114 7
56657-206 145 10 230 174 11 242 186 14 246 190 14
56658-246 190 14 246 190 14 246 186 14 242 186 14
56659-239 182 13 230 174 11 224 166 10 213 154 11
56660-180 133 36 124 112 88 86 86 86 58 58 58
56661- 38 38 38 22 22 22 10 10 10 6 6 6
56662- 0 0 0 0 0 0 0 0 0 0 0 0
56663- 0 0 0 0 0 0 0 0 0 0 0 0
56664- 0 0 0 0 0 0 0 0 0 0 0 0
56665- 0 0 0 0 0 0 0 0 0 14 14 14
56666- 34 34 34 70 70 70 138 110 50 158 118 36
56667-167 114 7 180 123 7 192 133 9 197 138 11
56668-200 144 11 206 145 10 213 154 11 219 162 10
56669-224 166 10 230 174 11 239 182 13 242 186 14
56670-246 186 14 246 186 14 246 186 14 246 186 14
56671-239 182 13 216 158 10 185 133 11 152 99 6
56672-104 69 6 18 14 6 2 2 6 2 2 6
56673- 2 2 6 2 2 6 2 2 6 2 2 6
56674- 2 2 6 2 2 6 2 2 6 2 2 6
56675- 2 2 6 2 2 6 2 2 6 2 2 6
56676- 2 2 6 6 6 6 80 54 7 152 99 6
56677-192 133 9 219 162 10 236 178 12 239 182 13
56678-246 186 14 242 186 14 239 182 13 236 178 12
56679-224 166 10 206 145 10 192 133 9 154 121 60
56680- 94 94 94 62 62 62 42 42 42 22 22 22
56681- 14 14 14 6 6 6 0 0 0 0 0 0
56682- 0 0 0 0 0 0 0 0 0 0 0 0
56683- 0 0 0 0 0 0 0 0 0 0 0 0
56684- 0 0 0 0 0 0 0 0 0 0 0 0
56685- 0 0 0 0 0 0 0 0 0 6 6 6
56686- 18 18 18 34 34 34 58 58 58 78 78 78
56687-101 98 89 124 112 88 142 110 46 156 107 11
56688-163 110 8 167 114 7 175 118 6 180 123 7
56689-185 133 11 197 138 11 210 150 10 219 162 10
56690-226 170 11 236 178 12 236 178 12 234 174 13
56691-219 162 10 197 138 11 163 110 8 130 83 6
56692- 91 60 6 10 10 10 2 2 6 2 2 6
56693- 18 18 18 38 38 38 38 38 38 38 38 38
56694- 38 38 38 38 38 38 38 38 38 38 38 38
56695- 38 38 38 38 38 38 26 26 26 2 2 6
56696- 2 2 6 6 6 6 70 47 6 137 92 6
56697-175 118 6 200 144 11 219 162 10 230 174 11
56698-234 174 13 230 174 11 219 162 10 210 150 10
56699-192 133 9 163 110 8 124 112 88 82 82 82
56700- 50 50 50 30 30 30 14 14 14 6 6 6
56701- 0 0 0 0 0 0 0 0 0 0 0 0
56702- 0 0 0 0 0 0 0 0 0 0 0 0
56703- 0 0 0 0 0 0 0 0 0 0 0 0
56704- 0 0 0 0 0 0 0 0 0 0 0 0
56705- 0 0 0 0 0 0 0 0 0 0 0 0
56706- 6 6 6 14 14 14 22 22 22 34 34 34
56707- 42 42 42 58 58 58 74 74 74 86 86 86
56708-101 98 89 122 102 70 130 98 46 121 87 25
56709-137 92 6 152 99 6 163 110 8 180 123 7
56710-185 133 11 197 138 11 206 145 10 200 144 11
56711-180 123 7 156 107 11 130 83 6 104 69 6
56712- 50 34 6 54 54 54 110 110 110 101 98 89
56713- 86 86 86 82 82 82 78 78 78 78 78 78
56714- 78 78 78 78 78 78 78 78 78 78 78 78
56715- 78 78 78 82 82 82 86 86 86 94 94 94
56716-106 106 106 101 101 101 86 66 34 124 80 6
56717-156 107 11 180 123 7 192 133 9 200 144 11
56718-206 145 10 200 144 11 192 133 9 175 118 6
56719-139 102 15 109 106 95 70 70 70 42 42 42
56720- 22 22 22 10 10 10 0 0 0 0 0 0
56721- 0 0 0 0 0 0 0 0 0 0 0 0
56722- 0 0 0 0 0 0 0 0 0 0 0 0
56723- 0 0 0 0 0 0 0 0 0 0 0 0
56724- 0 0 0 0 0 0 0 0 0 0 0 0
56725- 0 0 0 0 0 0 0 0 0 0 0 0
56726- 0 0 0 0 0 0 6 6 6 10 10 10
56727- 14 14 14 22 22 22 30 30 30 38 38 38
56728- 50 50 50 62 62 62 74 74 74 90 90 90
56729-101 98 89 112 100 78 121 87 25 124 80 6
56730-137 92 6 152 99 6 152 99 6 152 99 6
56731-138 86 6 124 80 6 98 70 6 86 66 30
56732-101 98 89 82 82 82 58 58 58 46 46 46
56733- 38 38 38 34 34 34 34 34 34 34 34 34
56734- 34 34 34 34 34 34 34 34 34 34 34 34
56735- 34 34 34 34 34 34 38 38 38 42 42 42
56736- 54 54 54 82 82 82 94 86 76 91 60 6
56737-134 86 6 156 107 11 167 114 7 175 118 6
56738-175 118 6 167 114 7 152 99 6 121 87 25
56739-101 98 89 62 62 62 34 34 34 18 18 18
56740- 6 6 6 0 0 0 0 0 0 0 0 0
56741- 0 0 0 0 0 0 0 0 0 0 0 0
56742- 0 0 0 0 0 0 0 0 0 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 0 0 0 0 0 0 0 0 0
56746- 0 0 0 0 0 0 0 0 0 0 0 0
56747- 0 0 0 6 6 6 6 6 6 10 10 10
56748- 18 18 18 22 22 22 30 30 30 42 42 42
56749- 50 50 50 66 66 66 86 86 86 101 98 89
56750-106 86 58 98 70 6 104 69 6 104 69 6
56751-104 69 6 91 60 6 82 62 34 90 90 90
56752- 62 62 62 38 38 38 22 22 22 14 14 14
56753- 10 10 10 10 10 10 10 10 10 10 10 10
56754- 10 10 10 10 10 10 6 6 6 10 10 10
56755- 10 10 10 10 10 10 10 10 10 14 14 14
56756- 22 22 22 42 42 42 70 70 70 89 81 66
56757- 80 54 7 104 69 6 124 80 6 137 92 6
56758-134 86 6 116 81 8 100 82 52 86 86 86
56759- 58 58 58 30 30 30 14 14 14 6 6 6
56760- 0 0 0 0 0 0 0 0 0 0 0 0
56761- 0 0 0 0 0 0 0 0 0 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 0 0 0 0 0 0 0 0 0
56766- 0 0 0 0 0 0 0 0 0 0 0 0
56767- 0 0 0 0 0 0 0 0 0 0 0 0
56768- 0 0 0 6 6 6 10 10 10 14 14 14
56769- 18 18 18 26 26 26 38 38 38 54 54 54
56770- 70 70 70 86 86 86 94 86 76 89 81 66
56771- 89 81 66 86 86 86 74 74 74 50 50 50
56772- 30 30 30 14 14 14 6 6 6 0 0 0
56773- 0 0 0 0 0 0 0 0 0 0 0 0
56774- 0 0 0 0 0 0 0 0 0 0 0 0
56775- 0 0 0 0 0 0 0 0 0 0 0 0
56776- 6 6 6 18 18 18 34 34 34 58 58 58
56777- 82 82 82 89 81 66 89 81 66 89 81 66
56778- 94 86 66 94 86 76 74 74 74 50 50 50
56779- 26 26 26 14 14 14 6 6 6 0 0 0
56780- 0 0 0 0 0 0 0 0 0 0 0 0
56781- 0 0 0 0 0 0 0 0 0 0 0 0
56782- 0 0 0 0 0 0 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 0 0 0 0 0 0 0 0 0 0 0 0
56786- 0 0 0 0 0 0 0 0 0 0 0 0
56787- 0 0 0 0 0 0 0 0 0 0 0 0
56788- 0 0 0 0 0 0 0 0 0 0 0 0
56789- 6 6 6 6 6 6 14 14 14 18 18 18
56790- 30 30 30 38 38 38 46 46 46 54 54 54
56791- 50 50 50 42 42 42 30 30 30 18 18 18
56792- 10 10 10 0 0 0 0 0 0 0 0 0
56793- 0 0 0 0 0 0 0 0 0 0 0 0
56794- 0 0 0 0 0 0 0 0 0 0 0 0
56795- 0 0 0 0 0 0 0 0 0 0 0 0
56796- 0 0 0 6 6 6 14 14 14 26 26 26
56797- 38 38 38 50 50 50 58 58 58 58 58 58
56798- 54 54 54 42 42 42 30 30 30 18 18 18
56799- 10 10 10 0 0 0 0 0 0 0 0 0
56800- 0 0 0 0 0 0 0 0 0 0 0 0
56801- 0 0 0 0 0 0 0 0 0 0 0 0
56802- 0 0 0 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 0 0 0
56805- 0 0 0 0 0 0 0 0 0 0 0 0
56806- 0 0 0 0 0 0 0 0 0 0 0 0
56807- 0 0 0 0 0 0 0 0 0 0 0 0
56808- 0 0 0 0 0 0 0 0 0 0 0 0
56809- 0 0 0 0 0 0 0 0 0 6 6 6
56810- 6 6 6 10 10 10 14 14 14 18 18 18
56811- 18 18 18 14 14 14 10 10 10 6 6 6
56812- 0 0 0 0 0 0 0 0 0 0 0 0
56813- 0 0 0 0 0 0 0 0 0 0 0 0
56814- 0 0 0 0 0 0 0 0 0 0 0 0
56815- 0 0 0 0 0 0 0 0 0 0 0 0
56816- 0 0 0 0 0 0 0 0 0 6 6 6
56817- 14 14 14 18 18 18 22 22 22 22 22 22
56818- 18 18 18 14 14 14 10 10 10 6 6 6
56819- 0 0 0 0 0 0 0 0 0 0 0 0
56820- 0 0 0 0 0 0 0 0 0 0 0 0
56821- 0 0 0 0 0 0 0 0 0 0 0 0
56822- 0 0 0 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56837+4 4 4 4 4 4
56838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56851+4 4 4 4 4 4
56852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56865+4 4 4 4 4 4
56866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56879+4 4 4 4 4 4
56880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56893+4 4 4 4 4 4
56894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56907+4 4 4 4 4 4
56908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56912+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
56913+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
56914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56917+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
56918+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56919+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
56920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56921+4 4 4 4 4 4
56922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56926+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
56927+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
56928+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56931+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
56932+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
56933+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
56934+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56935+4 4 4 4 4 4
56936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56940+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
56941+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
56942+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
56943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56945+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
56946+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
56947+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
56948+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
56949+4 4 4 4 4 4
56950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56953+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
56954+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
56955+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
56956+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
56957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56958+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
56959+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
56960+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
56961+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
56962+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
56963+4 4 4 4 4 4
56964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56967+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
56968+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
56969+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
56970+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
56971+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
56972+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
56973+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
56974+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
56975+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
56976+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
56977+4 4 4 4 4 4
56978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
56981+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
56982+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
56983+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
56984+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
56985+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
56986+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
56987+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
56988+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
56989+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
56990+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
56991+4 4 4 4 4 4
56992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56994+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
56995+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
56996+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
56997+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
56998+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
56999+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
57000+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
57001+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
57002+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
57003+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
57004+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
57005+4 4 4 4 4 4
57006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57008+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
57009+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
57010+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
57011+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
57012+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
57013+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
57014+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
57015+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
57016+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
57017+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
57018+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
57019+4 4 4 4 4 4
57020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57022+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
57023+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
57024+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
57025+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
57026+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
57027+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
57028+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
57029+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
57030+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
57031+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
57032+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57033+4 4 4 4 4 4
57034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57036+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
57037+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
57038+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
57039+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
57040+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
57041+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
57042+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
57043+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
57044+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
57045+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
57046+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
57047+4 4 4 4 4 4
57048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57049+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
57050+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
57051+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
57052+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
57053+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
57054+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
57055+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
57056+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
57057+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
57058+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
57059+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
57060+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
57061+4 4 4 4 4 4
57062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57063+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
57064+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
57065+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
57066+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57067+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
57068+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
57069+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
57070+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
57071+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
57072+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
57073+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
57074+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
57075+0 0 0 4 4 4
57076+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57077+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
57078+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
57079+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
57080+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
57081+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
57082+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
57083+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
57084+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
57085+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
57086+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
57087+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
57088+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
57089+2 0 0 0 0 0
57090+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
57091+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
57092+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
57093+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
57094+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
57095+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
57096+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
57097+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
57098+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
57099+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
57100+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
57101+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
57102+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
57103+37 38 37 0 0 0
57104+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57105+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
57106+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
57107+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
57108+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
57109+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
57110+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
57111+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
57112+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
57113+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
57114+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
57115+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
57116+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
57117+85 115 134 4 0 0
57118+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
57119+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
57120+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
57121+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
57122+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
57123+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
57124+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
57125+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
57126+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
57127+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
57128+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
57129+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
57130+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
57131+60 73 81 4 0 0
57132+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
57133+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
57134+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
57135+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
57136+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
57137+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
57138+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
57139+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
57140+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
57141+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
57142+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
57143+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
57144+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
57145+16 19 21 4 0 0
57146+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
57147+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
57148+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
57149+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
57150+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
57151+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
57152+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
57153+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
57154+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
57155+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
57156+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
57157+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
57158+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
57159+4 0 0 4 3 3
57160+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
57161+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
57162+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
57163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
57164+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
57165+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
57166+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
57167+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
57168+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
57169+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
57170+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
57171+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
57172+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
57173+3 2 2 4 4 4
57174+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
57175+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
57176+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
57177+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57178+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
57179+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
57180+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
57181+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
57182+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
57183+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
57184+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
57185+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
57186+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
57187+4 4 4 4 4 4
57188+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
57189+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
57190+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
57191+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
57192+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
57193+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
57194+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
57195+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
57196+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
57197+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
57198+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
57199+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
57200+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
57201+4 4 4 4 4 4
57202+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
57203+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
57204+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
57205+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
57206+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
57207+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57208+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
57209+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
57210+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
57211+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
57212+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
57213+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
57214+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
57215+5 5 5 5 5 5
57216+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
57217+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
57218+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
57219+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
57220+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
57221+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57222+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
57223+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
57224+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
57225+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
57226+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
57227+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
57228+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57229+5 5 5 4 4 4
57230+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
57231+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
57232+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
57233+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
57234+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57235+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
57236+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
57237+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
57238+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
57239+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
57240+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
57241+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57243+4 4 4 4 4 4
57244+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
57245+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
57246+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
57247+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
57248+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
57249+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57250+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57251+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
57252+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
57253+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
57254+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
57255+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
57256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57257+4 4 4 4 4 4
57258+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
57259+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
57260+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
57261+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
57262+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57263+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
57264+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
57265+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
57266+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
57267+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
57268+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
57269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57271+4 4 4 4 4 4
57272+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
57273+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
57274+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
57275+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
57276+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57277+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57278+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57279+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
57280+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
57281+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
57282+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
57283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57285+4 4 4 4 4 4
57286+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
57287+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
57288+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
57289+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
57290+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57291+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
57292+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57293+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
57294+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
57295+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
57296+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57299+4 4 4 4 4 4
57300+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
57301+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
57302+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
57303+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
57304+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57305+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
57306+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
57307+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
57308+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
57309+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
57310+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
57311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57313+4 4 4 4 4 4
57314+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
57315+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
57316+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
57317+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
57318+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57319+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
57320+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
57321+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
57322+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
57323+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
57324+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
57325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57327+4 4 4 4 4 4
57328+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
57329+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
57330+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
57331+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57332+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
57333+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
57334+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
57335+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
57336+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
57337+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
57338+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57341+4 4 4 4 4 4
57342+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
57343+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
57344+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
57345+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57346+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57347+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
57348+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
57349+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
57350+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
57351+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
57352+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57355+4 4 4 4 4 4
57356+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
57357+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
57358+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57359+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57360+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57361+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
57362+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
57363+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
57364+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
57365+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
57366+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57369+4 4 4 4 4 4
57370+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
57371+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
57372+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57373+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57374+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57375+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
57376+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
57377+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
57378+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57379+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57380+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57383+4 4 4 4 4 4
57384+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57385+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
57386+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57387+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
57388+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
57389+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
57390+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
57391+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
57392+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57393+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57394+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57397+4 4 4 4 4 4
57398+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57399+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
57400+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57401+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
57402+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57403+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
57404+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
57405+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
57406+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57407+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57408+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57411+4 4 4 4 4 4
57412+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
57413+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
57414+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57415+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
57416+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
57417+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
57418+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
57419+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
57420+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57421+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57422+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57425+4 4 4 4 4 4
57426+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
57427+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
57428+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57429+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
57430+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
57431+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
57432+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
57433+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
57434+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57435+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57436+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57439+4 4 4 4 4 4
57440+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57441+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
57442+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57443+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
57444+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
57445+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
57446+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
57447+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
57448+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57449+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57450+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57453+4 4 4 4 4 4
57454+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
57455+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
57456+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57457+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
57458+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
57459+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
57460+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
57461+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
57462+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
57463+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57464+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57467+4 4 4 4 4 4
57468+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57469+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
57470+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
57471+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
57472+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
57473+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
57474+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
57475+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
57476+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57477+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57478+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57481+4 4 4 4 4 4
57482+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57483+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
57484+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57485+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
57486+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
57487+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
57488+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
57489+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
57490+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57491+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57492+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57495+4 4 4 4 4 4
57496+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57497+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
57498+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
57499+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
57500+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
57501+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
57502+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57503+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
57504+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57505+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57506+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57509+4 4 4 4 4 4
57510+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57511+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
57512+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
57513+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57514+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
57515+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
57516+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57517+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
57518+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57519+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57520+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57523+4 4 4 4 4 4
57524+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57525+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
57526+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
57527+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
57528+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
57529+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
57530+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
57531+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
57532+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
57533+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57534+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57537+4 4 4 4 4 4
57538+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57539+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
57540+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
57541+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
57542+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
57543+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
57544+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
57545+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
57546+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
57547+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57548+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57551+4 4 4 4 4 4
57552+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
57553+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
57554+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
57555+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
57556+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57557+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
57558+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
57559+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
57560+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
57561+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57562+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57565+4 4 4 4 4 4
57566+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57567+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
57568+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
57569+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
57570+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
57571+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
57572+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
57573+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
57574+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
57575+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57576+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57579+4 4 4 4 4 4
57580+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
57581+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
57582+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
57583+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
57584+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
57585+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
57586+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
57587+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
57588+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
57589+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
57590+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57593+4 4 4 4 4 4
57594+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
57595+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57596+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
57597+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
57598+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
57599+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
57600+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
57601+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
57602+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
57603+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
57604+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57607+4 4 4 4 4 4
57608+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
57609+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57610+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
57611+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
57612+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
57613+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
57614+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57615+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
57616+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
57617+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
57618+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57621+4 4 4 4 4 4
57622+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
57623+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
57624+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
57625+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
57626+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
57627+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
57628+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
57629+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
57630+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
57631+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
57632+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57635+4 4 4 4 4 4
57636+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
57637+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
57638+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57639+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
57640+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
57641+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
57642+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
57643+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
57644+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
57645+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
57646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57649+4 4 4 4 4 4
57650+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57651+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
57652+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
57653+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
57654+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
57655+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
57656+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
57657+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
57658+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
57659+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57663+4 4 4 4 4 4
57664+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
57665+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
57666+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
57667+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
57668+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
57669+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
57670+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
57671+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
57672+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
57673+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57677+4 4 4 4 4 4
57678+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
57679+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
57680+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
57681+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
57682+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
57683+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
57684+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
57685+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
57686+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57687+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57691+4 4 4 4 4 4
57692+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
57693+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57694+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
57695+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57696+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
57697+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
57698+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
57699+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
57700+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
57701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57705+4 4 4 4 4 4
57706+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
57707+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
57708+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
57709+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
57710+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
57711+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
57712+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
57713+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
57714+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
57715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57719+4 4 4 4 4 4
57720+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
57721+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
57722+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
57723+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
57724+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
57725+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
57726+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
57727+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
57728+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57733+4 4 4 4 4 4
57734+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
57735+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
57736+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
57737+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
57738+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
57739+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
57740+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
57741+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
57742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57747+4 4 4 4 4 4
57748+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
57749+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
57750+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
57751+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
57752+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
57753+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
57754+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
57755+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57761+4 4 4 4 4 4
57762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57763+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
57764+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57765+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
57766+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
57767+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
57768+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
57769+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
57770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57775+4 4 4 4 4 4
57776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57777+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
57778+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
57779+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
57780+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
57781+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
57782+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
57783+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
57784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57789+4 4 4 4 4 4
57790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57791+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57792+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
57793+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
57794+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
57795+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
57796+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
57797+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57803+4 4 4 4 4 4
57804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57806+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
57807+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
57808+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
57809+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
57810+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
57811+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57817+4 4 4 4 4 4
57818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57821+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57822+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
57823+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
57824+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
57825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57831+4 4 4 4 4 4
57832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57835+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
57836+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
57837+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
57838+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
57839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57845+4 4 4 4 4 4
57846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57849+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
57850+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57851+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
57852+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
57853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57859+4 4 4 4 4 4
57860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57863+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
57864+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
57865+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
57866+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
57867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57873+4 4 4 4 4 4
57874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57878+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
57879+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57880+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57887+4 4 4 4 4 4
57888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57892+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
57893+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
57894+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
57895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57901+4 4 4 4 4 4
57902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57906+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
57907+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
57908+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57915+4 4 4 4 4 4
57916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57920+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
57921+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
57922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57929+4 4 4 4 4 4
57930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57934+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
57935+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
57936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57943+4 4 4 4 4 4
57944diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
57945index fef20db..d28b1ab 100644
57946--- a/drivers/xen/xenfs/xenstored.c
57947+++ b/drivers/xen/xenfs/xenstored.c
57948@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
57949 static int xsd_kva_open(struct inode *inode, struct file *file)
57950 {
57951 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
57952+#ifdef CONFIG_GRKERNSEC_HIDESYM
57953+ NULL);
57954+#else
57955 xen_store_interface);
57956+#endif
57957+
57958 if (!file->private_data)
57959 return -ENOMEM;
57960 return 0;
57961diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
57962index eb14e05..5156de7 100644
57963--- a/fs/9p/vfs_addr.c
57964+++ b/fs/9p/vfs_addr.c
57965@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
57966
57967 retval = v9fs_file_write_internal(inode,
57968 v9inode->writeback_fid,
57969- (__force const char __user *)buffer,
57970+ (const char __force_user *)buffer,
57971 len, &offset, 0);
57972 if (retval > 0)
57973 retval = 0;
57974diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
57975index 9ee5343..5165e3c 100644
57976--- a/fs/9p/vfs_inode.c
57977+++ b/fs/9p/vfs_inode.c
57978@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
57979 void
57980 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
57981 {
57982- char *s = nd_get_link(nd);
57983+ const char *s = nd_get_link(nd);
57984
57985 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
57986 dentry, IS_ERR(s) ? "<error>" : s);
57987diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
57988index c055d56e..a46f4f5 100644
57989--- a/fs/Kconfig.binfmt
57990+++ b/fs/Kconfig.binfmt
57991@@ -106,7 +106,7 @@ config HAVE_AOUT
57992
57993 config BINFMT_AOUT
57994 tristate "Kernel support for a.out and ECOFF binaries"
57995- depends on HAVE_AOUT
57996+ depends on HAVE_AOUT && BROKEN
57997 ---help---
57998 A.out (Assembler.OUTput) is a set of formats for libraries and
57999 executables used in the earliest versions of UNIX. Linux used
58000diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58001index 8a1d38e..300a14e 100644
58002--- a/fs/afs/inode.c
58003+++ b/fs/afs/inode.c
58004@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58005 struct afs_vnode *vnode;
58006 struct super_block *sb;
58007 struct inode *inode;
58008- static atomic_t afs_autocell_ino;
58009+ static atomic_unchecked_t afs_autocell_ino;
58010
58011 _enter("{%x:%u},%*.*s,",
58012 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58013@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58014 data.fid.unique = 0;
58015 data.fid.vnode = 0;
58016
58017- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58018+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58019 afs_iget5_autocell_test, afs_iget5_set,
58020 &data);
58021 if (!inode) {
58022diff --git a/fs/aio.c b/fs/aio.c
58023index c428871..3f3041b 100644
58024--- a/fs/aio.c
58025+++ b/fs/aio.c
58026@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58027 size += sizeof(struct io_event) * nr_events;
58028
58029 nr_pages = PFN_UP(size);
58030- if (nr_pages < 0)
58031+ if (nr_pages <= 0)
58032 return -EINVAL;
58033
58034 file = aio_private_file(ctx, nr_pages);
58035diff --git a/fs/attr.c b/fs/attr.c
58036index 6530ced..4a827e2 100644
58037--- a/fs/attr.c
58038+++ b/fs/attr.c
58039@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58040 unsigned long limit;
58041
58042 limit = rlimit(RLIMIT_FSIZE);
58043+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58044 if (limit != RLIM_INFINITY && offset > limit)
58045 goto out_sig;
58046 if (offset > inode->i_sb->s_maxbytes)
58047diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58048index 116fd38..c04182da 100644
58049--- a/fs/autofs4/waitq.c
58050+++ b/fs/autofs4/waitq.c
58051@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58052 {
58053 unsigned long sigpipe, flags;
58054 mm_segment_t fs;
58055- const char *data = (const char *)addr;
58056+ const char __user *data = (const char __force_user *)addr;
58057 ssize_t wr = 0;
58058
58059 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58060@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58061 return 1;
58062 }
58063
58064+#ifdef CONFIG_GRKERNSEC_HIDESYM
58065+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58066+#endif
58067+
58068 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58069 enum autofs_notify notify)
58070 {
58071@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58072
58073 /* If this is a direct mount request create a dummy name */
58074 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58075+#ifdef CONFIG_GRKERNSEC_HIDESYM
58076+ /* this name does get written to userland via autofs4_write() */
58077+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58078+#else
58079 qstr.len = sprintf(name, "%p", dentry);
58080+#endif
58081 else {
58082 qstr.len = autofs4_getpath(sbi, dentry, &name);
58083 if (!qstr.len) {
58084diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58085index 2722387..56059b5 100644
58086--- a/fs/befs/endian.h
58087+++ b/fs/befs/endian.h
58088@@ -11,7 +11,7 @@
58089
58090 #include <asm/byteorder.h>
58091
58092-static inline u64
58093+static inline u64 __intentional_overflow(-1)
58094 fs64_to_cpu(const struct super_block *sb, fs64 n)
58095 {
58096 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58097@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58098 return (__force fs64)cpu_to_be64(n);
58099 }
58100
58101-static inline u32
58102+static inline u32 __intentional_overflow(-1)
58103 fs32_to_cpu(const struct super_block *sb, fs32 n)
58104 {
58105 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58106@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58107 return (__force fs32)cpu_to_be32(n);
58108 }
58109
58110-static inline u16
58111+static inline u16 __intentional_overflow(-1)
58112 fs16_to_cpu(const struct super_block *sb, fs16 n)
58113 {
58114 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58115diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58116index 4c55668..eeae150 100644
58117--- a/fs/binfmt_aout.c
58118+++ b/fs/binfmt_aout.c
58119@@ -16,6 +16,7 @@
58120 #include <linux/string.h>
58121 #include <linux/fs.h>
58122 #include <linux/file.h>
58123+#include <linux/security.h>
58124 #include <linux/stat.h>
58125 #include <linux/fcntl.h>
58126 #include <linux/ptrace.h>
58127@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58128 #endif
58129 # define START_STACK(u) ((void __user *)u.start_stack)
58130
58131+ memset(&dump, 0, sizeof(dump));
58132+
58133 fs = get_fs();
58134 set_fs(KERNEL_DS);
58135 has_dumped = 1;
58136@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58137
58138 /* If the size of the dump file exceeds the rlimit, then see what would happen
58139 if we wrote the stack, but not the data area. */
58140+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58141 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58142 dump.u_dsize = 0;
58143
58144 /* Make sure we have enough room to write the stack and data areas. */
58145+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58146 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58147 dump.u_ssize = 0;
58148
58149@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58150 rlim = rlimit(RLIMIT_DATA);
58151 if (rlim >= RLIM_INFINITY)
58152 rlim = ~0;
58153+
58154+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58155 if (ex.a_data + ex.a_bss > rlim)
58156 return -ENOMEM;
58157
58158@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58159
58160 install_exec_creds(bprm);
58161
58162+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58163+ current->mm->pax_flags = 0UL;
58164+#endif
58165+
58166+#ifdef CONFIG_PAX_PAGEEXEC
58167+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58168+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58169+
58170+#ifdef CONFIG_PAX_EMUTRAMP
58171+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58172+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58173+#endif
58174+
58175+#ifdef CONFIG_PAX_MPROTECT
58176+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58177+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58178+#endif
58179+
58180+ }
58181+#endif
58182+
58183 if (N_MAGIC(ex) == OMAGIC) {
58184 unsigned long text_addr, map_size;
58185 loff_t pos;
58186@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58187 return error;
58188
58189 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58190- PROT_READ | PROT_WRITE | PROT_EXEC,
58191+ PROT_READ | PROT_WRITE,
58192 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58193 fd_offset + ex.a_text);
58194 if (error != N_DATADDR(ex))
58195diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58196index 995986b..dcc4ef2 100644
58197--- a/fs/binfmt_elf.c
58198+++ b/fs/binfmt_elf.c
58199@@ -34,6 +34,7 @@
58200 #include <linux/utsname.h>
58201 #include <linux/coredump.h>
58202 #include <linux/sched.h>
58203+#include <linux/xattr.h>
58204 #include <asm/uaccess.h>
58205 #include <asm/param.h>
58206 #include <asm/page.h>
58207@@ -47,7 +48,7 @@
58208
58209 static int load_elf_binary(struct linux_binprm *bprm);
58210 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58211- int, int, unsigned long);
58212+ int, int, unsigned long) __intentional_overflow(-1);
58213
58214 #ifdef CONFIG_USELIB
58215 static int load_elf_library(struct file *);
58216@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58217 #define elf_core_dump NULL
58218 #endif
58219
58220+#ifdef CONFIG_PAX_MPROTECT
58221+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58222+#endif
58223+
58224+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58225+static void elf_handle_mmap(struct file *file);
58226+#endif
58227+
58228 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58229 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58230 #else
58231@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58232 .load_binary = load_elf_binary,
58233 .load_shlib = load_elf_library,
58234 .core_dump = elf_core_dump,
58235+
58236+#ifdef CONFIG_PAX_MPROTECT
58237+ .handle_mprotect= elf_handle_mprotect,
58238+#endif
58239+
58240+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58241+ .handle_mmap = elf_handle_mmap,
58242+#endif
58243+
58244 .min_coredump = ELF_EXEC_PAGESIZE,
58245 };
58246
58247@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58248
58249 static int set_brk(unsigned long start, unsigned long end)
58250 {
58251+ unsigned long e = end;
58252+
58253 start = ELF_PAGEALIGN(start);
58254 end = ELF_PAGEALIGN(end);
58255 if (end > start) {
58256@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58257 if (BAD_ADDR(addr))
58258 return addr;
58259 }
58260- current->mm->start_brk = current->mm->brk = end;
58261+ current->mm->start_brk = current->mm->brk = e;
58262 return 0;
58263 }
58264
58265@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58266 elf_addr_t __user *u_rand_bytes;
58267 const char *k_platform = ELF_PLATFORM;
58268 const char *k_base_platform = ELF_BASE_PLATFORM;
58269- unsigned char k_rand_bytes[16];
58270+ u32 k_rand_bytes[4];
58271 int items;
58272 elf_addr_t *elf_info;
58273 int ei_index = 0;
58274 const struct cred *cred = current_cred();
58275 struct vm_area_struct *vma;
58276+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58277
58278 /*
58279 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58280@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58281 * Generate 16 random bytes for userspace PRNG seeding.
58282 */
58283 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58284- u_rand_bytes = (elf_addr_t __user *)
58285- STACK_ALLOC(p, sizeof(k_rand_bytes));
58286+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58287+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58288+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58289+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58290+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58291+ u_rand_bytes = (elf_addr_t __user *) p;
58292 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58293 return -EFAULT;
58294
58295@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58296 return -EFAULT;
58297 current->mm->env_end = p;
58298
58299+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58300+
58301 /* Put the elf_info on the stack in the right place. */
58302 sp = (elf_addr_t __user *)envp + 1;
58303- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58304+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58305 return -EFAULT;
58306 return 0;
58307 }
58308@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58309 an ELF header */
58310
58311 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58312- struct file *interpreter, unsigned long *interp_map_addr,
58313+ struct file *interpreter,
58314 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58315 {
58316 struct elf_phdr *eppnt;
58317- unsigned long load_addr = 0;
58318+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58319 int load_addr_set = 0;
58320 unsigned long last_bss = 0, elf_bss = 0;
58321- unsigned long error = ~0UL;
58322+ unsigned long error = -EINVAL;
58323 unsigned long total_size;
58324 int i;
58325
58326@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58327 goto out;
58328 }
58329
58330+#ifdef CONFIG_PAX_SEGMEXEC
58331+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58332+ pax_task_size = SEGMEXEC_TASK_SIZE;
58333+#endif
58334+
58335 eppnt = interp_elf_phdata;
58336 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58337 if (eppnt->p_type == PT_LOAD) {
58338@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58339 map_addr = elf_map(interpreter, load_addr + vaddr,
58340 eppnt, elf_prot, elf_type, total_size);
58341 total_size = 0;
58342- if (!*interp_map_addr)
58343- *interp_map_addr = map_addr;
58344 error = map_addr;
58345 if (BAD_ADDR(map_addr))
58346 goto out;
58347@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58348 k = load_addr + eppnt->p_vaddr;
58349 if (BAD_ADDR(k) ||
58350 eppnt->p_filesz > eppnt->p_memsz ||
58351- eppnt->p_memsz > TASK_SIZE ||
58352- TASK_SIZE - eppnt->p_memsz < k) {
58353+ eppnt->p_memsz > pax_task_size ||
58354+ pax_task_size - eppnt->p_memsz < k) {
58355 error = -ENOMEM;
58356 goto out;
58357 }
58358@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58359 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58360
58361 /* Map the last of the bss segment */
58362- error = vm_brk(elf_bss, last_bss - elf_bss);
58363- if (BAD_ADDR(error))
58364- goto out;
58365+ if (last_bss > elf_bss) {
58366+ error = vm_brk(elf_bss, last_bss - elf_bss);
58367+ if (BAD_ADDR(error))
58368+ goto out;
58369+ }
58370 }
58371
58372 error = load_addr;
58373@@ -634,6 +666,336 @@ out:
58374 return error;
58375 }
58376
58377+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58378+#ifdef CONFIG_PAX_SOFTMODE
58379+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58380+{
58381+ unsigned long pax_flags = 0UL;
58382+
58383+#ifdef CONFIG_PAX_PAGEEXEC
58384+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58385+ pax_flags |= MF_PAX_PAGEEXEC;
58386+#endif
58387+
58388+#ifdef CONFIG_PAX_SEGMEXEC
58389+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58390+ pax_flags |= MF_PAX_SEGMEXEC;
58391+#endif
58392+
58393+#ifdef CONFIG_PAX_EMUTRAMP
58394+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58395+ pax_flags |= MF_PAX_EMUTRAMP;
58396+#endif
58397+
58398+#ifdef CONFIG_PAX_MPROTECT
58399+ if (elf_phdata->p_flags & PF_MPROTECT)
58400+ pax_flags |= MF_PAX_MPROTECT;
58401+#endif
58402+
58403+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58404+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58405+ pax_flags |= MF_PAX_RANDMMAP;
58406+#endif
58407+
58408+ return pax_flags;
58409+}
58410+#endif
58411+
58412+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58413+{
58414+ unsigned long pax_flags = 0UL;
58415+
58416+#ifdef CONFIG_PAX_PAGEEXEC
58417+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58418+ pax_flags |= MF_PAX_PAGEEXEC;
58419+#endif
58420+
58421+#ifdef CONFIG_PAX_SEGMEXEC
58422+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58423+ pax_flags |= MF_PAX_SEGMEXEC;
58424+#endif
58425+
58426+#ifdef CONFIG_PAX_EMUTRAMP
58427+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58428+ pax_flags |= MF_PAX_EMUTRAMP;
58429+#endif
58430+
58431+#ifdef CONFIG_PAX_MPROTECT
58432+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58433+ pax_flags |= MF_PAX_MPROTECT;
58434+#endif
58435+
58436+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58437+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58438+ pax_flags |= MF_PAX_RANDMMAP;
58439+#endif
58440+
58441+ return pax_flags;
58442+}
58443+#endif
58444+
58445+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58446+#ifdef CONFIG_PAX_SOFTMODE
58447+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58448+{
58449+ unsigned long pax_flags = 0UL;
58450+
58451+#ifdef CONFIG_PAX_PAGEEXEC
58452+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58453+ pax_flags |= MF_PAX_PAGEEXEC;
58454+#endif
58455+
58456+#ifdef CONFIG_PAX_SEGMEXEC
58457+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58458+ pax_flags |= MF_PAX_SEGMEXEC;
58459+#endif
58460+
58461+#ifdef CONFIG_PAX_EMUTRAMP
58462+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58463+ pax_flags |= MF_PAX_EMUTRAMP;
58464+#endif
58465+
58466+#ifdef CONFIG_PAX_MPROTECT
58467+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58468+ pax_flags |= MF_PAX_MPROTECT;
58469+#endif
58470+
58471+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58472+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58473+ pax_flags |= MF_PAX_RANDMMAP;
58474+#endif
58475+
58476+ return pax_flags;
58477+}
58478+#endif
58479+
58480+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58481+{
58482+ unsigned long pax_flags = 0UL;
58483+
58484+#ifdef CONFIG_PAX_PAGEEXEC
58485+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58486+ pax_flags |= MF_PAX_PAGEEXEC;
58487+#endif
58488+
58489+#ifdef CONFIG_PAX_SEGMEXEC
58490+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58491+ pax_flags |= MF_PAX_SEGMEXEC;
58492+#endif
58493+
58494+#ifdef CONFIG_PAX_EMUTRAMP
58495+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58496+ pax_flags |= MF_PAX_EMUTRAMP;
58497+#endif
58498+
58499+#ifdef CONFIG_PAX_MPROTECT
58500+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58501+ pax_flags |= MF_PAX_MPROTECT;
58502+#endif
58503+
58504+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58505+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58506+ pax_flags |= MF_PAX_RANDMMAP;
58507+#endif
58508+
58509+ return pax_flags;
58510+}
58511+#endif
58512+
58513+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58514+static unsigned long pax_parse_defaults(void)
58515+{
58516+ unsigned long pax_flags = 0UL;
58517+
58518+#ifdef CONFIG_PAX_SOFTMODE
58519+ if (pax_softmode)
58520+ return pax_flags;
58521+#endif
58522+
58523+#ifdef CONFIG_PAX_PAGEEXEC
58524+ pax_flags |= MF_PAX_PAGEEXEC;
58525+#endif
58526+
58527+#ifdef CONFIG_PAX_SEGMEXEC
58528+ pax_flags |= MF_PAX_SEGMEXEC;
58529+#endif
58530+
58531+#ifdef CONFIG_PAX_MPROTECT
58532+ pax_flags |= MF_PAX_MPROTECT;
58533+#endif
58534+
58535+#ifdef CONFIG_PAX_RANDMMAP
58536+ if (randomize_va_space)
58537+ pax_flags |= MF_PAX_RANDMMAP;
58538+#endif
58539+
58540+ return pax_flags;
58541+}
58542+
58543+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58544+{
58545+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58546+
58547+#ifdef CONFIG_PAX_EI_PAX
58548+
58549+#ifdef CONFIG_PAX_SOFTMODE
58550+ if (pax_softmode)
58551+ return pax_flags;
58552+#endif
58553+
58554+ pax_flags = 0UL;
58555+
58556+#ifdef CONFIG_PAX_PAGEEXEC
58557+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58558+ pax_flags |= MF_PAX_PAGEEXEC;
58559+#endif
58560+
58561+#ifdef CONFIG_PAX_SEGMEXEC
58562+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58563+ pax_flags |= MF_PAX_SEGMEXEC;
58564+#endif
58565+
58566+#ifdef CONFIG_PAX_EMUTRAMP
58567+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58568+ pax_flags |= MF_PAX_EMUTRAMP;
58569+#endif
58570+
58571+#ifdef CONFIG_PAX_MPROTECT
58572+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58573+ pax_flags |= MF_PAX_MPROTECT;
58574+#endif
58575+
58576+#ifdef CONFIG_PAX_ASLR
58577+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58578+ pax_flags |= MF_PAX_RANDMMAP;
58579+#endif
58580+
58581+#endif
58582+
58583+ return pax_flags;
58584+
58585+}
58586+
58587+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58588+{
58589+
58590+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58591+ unsigned long i;
58592+
58593+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58594+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58595+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58596+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58597+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58598+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58599+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58600+ return PAX_PARSE_FLAGS_FALLBACK;
58601+
58602+#ifdef CONFIG_PAX_SOFTMODE
58603+ if (pax_softmode)
58604+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58605+ else
58606+#endif
58607+
58608+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58609+ break;
58610+ }
58611+#endif
58612+
58613+ return PAX_PARSE_FLAGS_FALLBACK;
58614+}
58615+
58616+static unsigned long pax_parse_xattr_pax(struct file * const file)
58617+{
58618+
58619+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58620+ ssize_t xattr_size, i;
58621+ unsigned char xattr_value[sizeof("pemrs") - 1];
58622+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58623+
58624+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58625+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58626+ return PAX_PARSE_FLAGS_FALLBACK;
58627+
58628+ for (i = 0; i < xattr_size; i++)
58629+ switch (xattr_value[i]) {
58630+ default:
58631+ return PAX_PARSE_FLAGS_FALLBACK;
58632+
58633+#define parse_flag(option1, option2, flag) \
58634+ case option1: \
58635+ if (pax_flags_hardmode & MF_PAX_##flag) \
58636+ return PAX_PARSE_FLAGS_FALLBACK;\
58637+ pax_flags_hardmode |= MF_PAX_##flag; \
58638+ break; \
58639+ case option2: \
58640+ if (pax_flags_softmode & MF_PAX_##flag) \
58641+ return PAX_PARSE_FLAGS_FALLBACK;\
58642+ pax_flags_softmode |= MF_PAX_##flag; \
58643+ break;
58644+
58645+ parse_flag('p', 'P', PAGEEXEC);
58646+ parse_flag('e', 'E', EMUTRAMP);
58647+ parse_flag('m', 'M', MPROTECT);
58648+ parse_flag('r', 'R', RANDMMAP);
58649+ parse_flag('s', 'S', SEGMEXEC);
58650+
58651+#undef parse_flag
58652+ }
58653+
58654+ if (pax_flags_hardmode & pax_flags_softmode)
58655+ return PAX_PARSE_FLAGS_FALLBACK;
58656+
58657+#ifdef CONFIG_PAX_SOFTMODE
58658+ if (pax_softmode)
58659+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58660+ else
58661+#endif
58662+
58663+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58664+#else
58665+ return PAX_PARSE_FLAGS_FALLBACK;
58666+#endif
58667+
58668+}
58669+
58670+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58671+{
58672+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58673+
58674+ pax_flags = pax_parse_defaults();
58675+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58676+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58677+ xattr_pax_flags = pax_parse_xattr_pax(file);
58678+
58679+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58680+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58681+ pt_pax_flags != xattr_pax_flags)
58682+ return -EINVAL;
58683+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58684+ pax_flags = xattr_pax_flags;
58685+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58686+ pax_flags = pt_pax_flags;
58687+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58688+ pax_flags = ei_pax_flags;
58689+
58690+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58691+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58692+ if ((__supported_pte_mask & _PAGE_NX))
58693+ pax_flags &= ~MF_PAX_SEGMEXEC;
58694+ else
58695+ pax_flags &= ~MF_PAX_PAGEEXEC;
58696+ }
58697+#endif
58698+
58699+ if (0 > pax_check_flags(&pax_flags))
58700+ return -EINVAL;
58701+
58702+ current->mm->pax_flags = pax_flags;
58703+ return 0;
58704+}
58705+#endif
58706+
58707 /*
58708 * These are the functions used to load ELF style executables and shared
58709 * libraries. There is no binary dependent code anywhere else.
58710@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58711 {
58712 unsigned long random_variable = 0;
58713
58714+#ifdef CONFIG_PAX_RANDUSTACK
58715+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58716+ return stack_top - current->mm->delta_stack;
58717+#endif
58718+
58719 if ((current->flags & PF_RANDOMIZE) &&
58720 !(current->personality & ADDR_NO_RANDOMIZE)) {
58721 random_variable = (unsigned long) get_random_int();
58722@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58723 unsigned long load_addr = 0, load_bias = 0;
58724 int load_addr_set = 0;
58725 char * elf_interpreter = NULL;
58726- unsigned long error;
58727+ unsigned long error = 0;
58728 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58729 unsigned long elf_bss, elf_brk;
58730 int retval, i;
58731@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58732 struct elfhdr interp_elf_ex;
58733 } *loc;
58734 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58735+ unsigned long pax_task_size;
58736
58737 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58738 if (!loc) {
58739@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58740 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58741 may depend on the personality. */
58742 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58743+
58744+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58745+ current->mm->pax_flags = 0UL;
58746+#endif
58747+
58748+#ifdef CONFIG_PAX_DLRESOLVE
58749+ current->mm->call_dl_resolve = 0UL;
58750+#endif
58751+
58752+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58753+ current->mm->call_syscall = 0UL;
58754+#endif
58755+
58756+#ifdef CONFIG_PAX_ASLR
58757+ current->mm->delta_mmap = 0UL;
58758+ current->mm->delta_stack = 0UL;
58759+#endif
58760+
58761+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58762+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58763+ send_sig(SIGKILL, current, 0);
58764+ goto out_free_dentry;
58765+ }
58766+#endif
58767+
58768+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58769+ pax_set_initial_flags(bprm);
58770+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58771+ if (pax_set_initial_flags_func)
58772+ (pax_set_initial_flags_func)(bprm);
58773+#endif
58774+
58775+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58776+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58777+ current->mm->context.user_cs_limit = PAGE_SIZE;
58778+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58779+ }
58780+#endif
58781+
58782+#ifdef CONFIG_PAX_SEGMEXEC
58783+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58784+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58785+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58786+ pax_task_size = SEGMEXEC_TASK_SIZE;
58787+ current->mm->def_flags |= VM_NOHUGEPAGE;
58788+ } else
58789+#endif
58790+
58791+ pax_task_size = TASK_SIZE;
58792+
58793+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58794+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58795+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58796+ put_cpu();
58797+ }
58798+#endif
58799+
58800+#ifdef CONFIG_PAX_ASLR
58801+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58802+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58803+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58804+ }
58805+#endif
58806+
58807+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58808+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58809+ executable_stack = EXSTACK_DISABLE_X;
58810+ current->personality &= ~READ_IMPLIES_EXEC;
58811+ } else
58812+#endif
58813+
58814 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58815 current->personality |= READ_IMPLIES_EXEC;
58816
58817@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58818 #else
58819 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58820 #endif
58821+
58822+#ifdef CONFIG_PAX_RANDMMAP
58823+ /* PaX: randomize base address at the default exe base if requested */
58824+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58825+#ifdef CONFIG_SPARC64
58826+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
58827+#else
58828+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
58829+#endif
58830+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
58831+ elf_flags |= MAP_FIXED;
58832+ }
58833+#endif
58834+
58835 }
58836
58837 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
58838@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
58839 * allowed task size. Note that p_filesz must always be
58840 * <= p_memsz so it is only necessary to check p_memsz.
58841 */
58842- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58843- elf_ppnt->p_memsz > TASK_SIZE ||
58844- TASK_SIZE - elf_ppnt->p_memsz < k) {
58845+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
58846+ elf_ppnt->p_memsz > pax_task_size ||
58847+ pax_task_size - elf_ppnt->p_memsz < k) {
58848 /* set_brk can never work. Avoid overflows. */
58849 retval = -EINVAL;
58850 goto out_free_dentry;
58851@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
58852 if (retval)
58853 goto out_free_dentry;
58854 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
58855- retval = -EFAULT; /* Nobody gets to see this, but.. */
58856- goto out_free_dentry;
58857+ /*
58858+ * This bss-zeroing can fail if the ELF
58859+ * file specifies odd protections. So
58860+ * we don't check the return value
58861+ */
58862 }
58863
58864+#ifdef CONFIG_PAX_RANDMMAP
58865+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58866+ unsigned long start, size, flags;
58867+ vm_flags_t vm_flags;
58868+
58869+ start = ELF_PAGEALIGN(elf_brk);
58870+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
58871+ flags = MAP_FIXED | MAP_PRIVATE;
58872+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
58873+
58874+ down_write(&current->mm->mmap_sem);
58875+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
58876+ retval = -ENOMEM;
58877+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
58878+// if (current->personality & ADDR_NO_RANDOMIZE)
58879+// vm_flags |= VM_READ | VM_MAYREAD;
58880+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
58881+ retval = IS_ERR_VALUE(start) ? start : 0;
58882+ }
58883+ up_write(&current->mm->mmap_sem);
58884+ if (retval == 0)
58885+ retval = set_brk(start + size, start + size + PAGE_SIZE);
58886+ if (retval < 0)
58887+ goto out_free_dentry;
58888+ }
58889+#endif
58890+
58891 if (elf_interpreter) {
58892- unsigned long interp_map_addr = 0;
58893-
58894 elf_entry = load_elf_interp(&loc->interp_elf_ex,
58895 interpreter,
58896- &interp_map_addr,
58897 load_bias, interp_elf_phdata);
58898 if (!IS_ERR((void *)elf_entry)) {
58899 /*
58900@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
58901 * Decide what to dump of a segment, part, all or none.
58902 */
58903 static unsigned long vma_dump_size(struct vm_area_struct *vma,
58904- unsigned long mm_flags)
58905+ unsigned long mm_flags, long signr)
58906 {
58907 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
58908
58909@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
58910 if (vma->vm_file == NULL)
58911 return 0;
58912
58913- if (FILTER(MAPPED_PRIVATE))
58914+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
58915 goto whole;
58916
58917 /*
58918@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
58919 {
58920 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
58921 int i = 0;
58922- do
58923+ do {
58924 i += 2;
58925- while (auxv[i - 2] != AT_NULL);
58926+ } while (auxv[i - 2] != AT_NULL);
58927 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
58928 }
58929
58930@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
58931 {
58932 mm_segment_t old_fs = get_fs();
58933 set_fs(KERNEL_DS);
58934- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
58935+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
58936 set_fs(old_fs);
58937 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
58938 }
58939@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
58940 vma = next_vma(vma, gate_vma)) {
58941 unsigned long dump_size;
58942
58943- dump_size = vma_dump_size(vma, cprm->mm_flags);
58944+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
58945 vma_filesz[i++] = dump_size;
58946 vma_data_size += dump_size;
58947 }
58948@@ -2314,6 +2794,167 @@ out:
58949
58950 #endif /* CONFIG_ELF_CORE */
58951
58952+#ifdef CONFIG_PAX_MPROTECT
58953+/* PaX: non-PIC ELF libraries need relocations on their executable segments
58954+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
58955+ * we'll remove VM_MAYWRITE for good on RELRO segments.
58956+ *
58957+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
58958+ * basis because we want to allow the common case and not the special ones.
58959+ */
58960+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
58961+{
58962+ struct elfhdr elf_h;
58963+ struct elf_phdr elf_p;
58964+ unsigned long i;
58965+ unsigned long oldflags;
58966+ bool is_textrel_rw, is_textrel_rx, is_relro;
58967+
58968+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
58969+ return;
58970+
58971+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
58972+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
58973+
58974+#ifdef CONFIG_PAX_ELFRELOCS
58975+ /* possible TEXTREL */
58976+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
58977+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
58978+#else
58979+ is_textrel_rw = false;
58980+ is_textrel_rx = false;
58981+#endif
58982+
58983+ /* possible RELRO */
58984+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
58985+
58986+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
58987+ return;
58988+
58989+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
58990+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
58991+
58992+#ifdef CONFIG_PAX_ETEXECRELOCS
58993+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
58994+#else
58995+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
58996+#endif
58997+
58998+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
58999+ !elf_check_arch(&elf_h) ||
59000+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59001+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59002+ return;
59003+
59004+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59005+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59006+ return;
59007+ switch (elf_p.p_type) {
59008+ case PT_DYNAMIC:
59009+ if (!is_textrel_rw && !is_textrel_rx)
59010+ continue;
59011+ i = 0UL;
59012+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59013+ elf_dyn dyn;
59014+
59015+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59016+ break;
59017+ if (dyn.d_tag == DT_NULL)
59018+ break;
59019+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59020+ gr_log_textrel(vma);
59021+ if (is_textrel_rw)
59022+ vma->vm_flags |= VM_MAYWRITE;
59023+ else
59024+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
59025+ vma->vm_flags &= ~VM_MAYWRITE;
59026+ break;
59027+ }
59028+ i++;
59029+ }
59030+ is_textrel_rw = false;
59031+ is_textrel_rx = false;
59032+ continue;
59033+
59034+ case PT_GNU_RELRO:
59035+ if (!is_relro)
59036+ continue;
59037+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59038+ vma->vm_flags &= ~VM_MAYWRITE;
59039+ is_relro = false;
59040+ continue;
59041+
59042+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59043+ case PT_PAX_FLAGS: {
59044+ const char *msg_mprotect = "", *msg_emutramp = "";
59045+ char *buffer_lib, *buffer_exe;
59046+
59047+ if (elf_p.p_flags & PF_NOMPROTECT)
59048+ msg_mprotect = "MPROTECT disabled";
59049+
59050+#ifdef CONFIG_PAX_EMUTRAMP
59051+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59052+ msg_emutramp = "EMUTRAMP enabled";
59053+#endif
59054+
59055+ if (!msg_mprotect[0] && !msg_emutramp[0])
59056+ continue;
59057+
59058+ if (!printk_ratelimit())
59059+ continue;
59060+
59061+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59062+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59063+ if (buffer_lib && buffer_exe) {
59064+ char *path_lib, *path_exe;
59065+
59066+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59067+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59068+
59069+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59070+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59071+
59072+ }
59073+ free_page((unsigned long)buffer_exe);
59074+ free_page((unsigned long)buffer_lib);
59075+ continue;
59076+ }
59077+#endif
59078+
59079+ }
59080+ }
59081+}
59082+#endif
59083+
59084+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59085+
59086+extern int grsec_enable_log_rwxmaps;
59087+
59088+static void elf_handle_mmap(struct file *file)
59089+{
59090+ struct elfhdr elf_h;
59091+ struct elf_phdr elf_p;
59092+ unsigned long i;
59093+
59094+ if (!grsec_enable_log_rwxmaps)
59095+ return;
59096+
59097+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59098+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59099+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59100+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59101+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59102+ return;
59103+
59104+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59105+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59106+ return;
59107+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59108+ gr_log_ptgnustack(file);
59109+ }
59110+}
59111+#endif
59112+
59113 static int __init init_elf_binfmt(void)
59114 {
59115 register_binfmt(&elf_format);
59116diff --git a/fs/block_dev.c b/fs/block_dev.c
59117index b48c41b..e070416 100644
59118--- a/fs/block_dev.c
59119+++ b/fs/block_dev.c
59120@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59121 else if (bdev->bd_contains == bdev)
59122 return true; /* is a whole device which isn't held */
59123
59124- else if (whole->bd_holder == bd_may_claim)
59125+ else if (whole->bd_holder == (void *)bd_may_claim)
59126 return true; /* is a partition of a device that is being partitioned */
59127 else if (whole->bd_holder != NULL)
59128 return false; /* is a partition of a held device */
59129diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59130index f54511d..58acdec 100644
59131--- a/fs/btrfs/ctree.c
59132+++ b/fs/btrfs/ctree.c
59133@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59134 free_extent_buffer(buf);
59135 add_root_to_dirty_list(root);
59136 } else {
59137- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59138- parent_start = parent->start;
59139- else
59140+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59141+ if (parent)
59142+ parent_start = parent->start;
59143+ else
59144+ parent_start = 0;
59145+ } else
59146 parent_start = 0;
59147
59148 WARN_ON(trans->transid != btrfs_header_generation(parent));
59149diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59150index de4e70f..b41dc45 100644
59151--- a/fs/btrfs/delayed-inode.c
59152+++ b/fs/btrfs/delayed-inode.c
59153@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59154
59155 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59156 {
59157- int seq = atomic_inc_return(&delayed_root->items_seq);
59158+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59159 if ((atomic_dec_return(&delayed_root->items) <
59160 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59161 waitqueue_active(&delayed_root->wait))
59162@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59163
59164 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59165 {
59166- int val = atomic_read(&delayed_root->items_seq);
59167+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59168
59169 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59170 return 1;
59171@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59172 int seq;
59173 int ret;
59174
59175- seq = atomic_read(&delayed_root->items_seq);
59176+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59177
59178 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59179 if (ret)
59180diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59181index f70119f..ab5894d 100644
59182--- a/fs/btrfs/delayed-inode.h
59183+++ b/fs/btrfs/delayed-inode.h
59184@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59185 */
59186 struct list_head prepare_list;
59187 atomic_t items; /* for delayed items */
59188- atomic_t items_seq; /* for delayed items */
59189+ atomic_unchecked_t items_seq; /* for delayed items */
59190 int nodes; /* for delayed nodes */
59191 wait_queue_head_t wait;
59192 };
59193@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59194 struct btrfs_delayed_root *delayed_root)
59195 {
59196 atomic_set(&delayed_root->items, 0);
59197- atomic_set(&delayed_root->items_seq, 0);
59198+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59199 delayed_root->nodes = 0;
59200 spin_lock_init(&delayed_root->lock);
59201 init_waitqueue_head(&delayed_root->wait);
59202diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59203index 6f49b28..483410f 100644
59204--- a/fs/btrfs/super.c
59205+++ b/fs/btrfs/super.c
59206@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59207 function, line, errstr);
59208 return;
59209 }
59210- ACCESS_ONCE(trans->transaction->aborted) = errno;
59211+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59212 /* Wake up anybody who may be waiting on this transaction */
59213 wake_up(&root->fs_info->transaction_wait);
59214 wake_up(&root->fs_info->transaction_blocked_wait);
59215diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59216index 92db3f6..898a561 100644
59217--- a/fs/btrfs/sysfs.c
59218+++ b/fs/btrfs/sysfs.c
59219@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59220 for (set = 0; set < FEAT_MAX; set++) {
59221 int i;
59222 struct attribute *attrs[2];
59223- struct attribute_group agroup = {
59224+ attribute_group_no_const agroup = {
59225 .name = "features",
59226 .attrs = attrs,
59227 };
59228diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59229index 2299bfd..4098e72 100644
59230--- a/fs/btrfs/tests/free-space-tests.c
59231+++ b/fs/btrfs/tests/free-space-tests.c
59232@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59233 * extent entry.
59234 */
59235 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59236- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59237+ pax_open_kernel();
59238+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59239+ pax_close_kernel();
59240
59241 /*
59242 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59243@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59244 if (ret)
59245 return ret;
59246
59247- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59248+ pax_open_kernel();
59249+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59250+ pax_close_kernel();
59251 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59252
59253 return 0;
59254diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59255index 154990c..d0cf699 100644
59256--- a/fs/btrfs/tree-log.h
59257+++ b/fs/btrfs/tree-log.h
59258@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59259 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59260 struct btrfs_trans_handle *trans)
59261 {
59262- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59263+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59264 }
59265
59266 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59267diff --git a/fs/buffer.c b/fs/buffer.c
59268index 20805db..2e8fc69 100644
59269--- a/fs/buffer.c
59270+++ b/fs/buffer.c
59271@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59272 bh_cachep = kmem_cache_create("buffer_head",
59273 sizeof(struct buffer_head), 0,
59274 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59275- SLAB_MEM_SPREAD),
59276+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59277 NULL);
59278
59279 /*
59280diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59281index fbb08e9..0fda764 100644
59282--- a/fs/cachefiles/bind.c
59283+++ b/fs/cachefiles/bind.c
59284@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59285 args);
59286
59287 /* start by checking things over */
59288- ASSERT(cache->fstop_percent >= 0 &&
59289- cache->fstop_percent < cache->fcull_percent &&
59290+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59291 cache->fcull_percent < cache->frun_percent &&
59292 cache->frun_percent < 100);
59293
59294- ASSERT(cache->bstop_percent >= 0 &&
59295- cache->bstop_percent < cache->bcull_percent &&
59296+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59297 cache->bcull_percent < cache->brun_percent &&
59298 cache->brun_percent < 100);
59299
59300diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59301index ce1b115..4a6852c 100644
59302--- a/fs/cachefiles/daemon.c
59303+++ b/fs/cachefiles/daemon.c
59304@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59305 if (n > buflen)
59306 return -EMSGSIZE;
59307
59308- if (copy_to_user(_buffer, buffer, n) != 0)
59309+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59310 return -EFAULT;
59311
59312 return n;
59313@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59314 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59315 return -EIO;
59316
59317- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59318+ if (datalen > PAGE_SIZE - 1)
59319 return -EOPNOTSUPP;
59320
59321 /* drag the command string into the kernel so we can parse it */
59322@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59323 if (args[0] != '%' || args[1] != '\0')
59324 return -EINVAL;
59325
59326- if (fstop < 0 || fstop >= cache->fcull_percent)
59327+ if (fstop >= cache->fcull_percent)
59328 return cachefiles_daemon_range_error(cache, args);
59329
59330 cache->fstop_percent = fstop;
59331@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59332 if (args[0] != '%' || args[1] != '\0')
59333 return -EINVAL;
59334
59335- if (bstop < 0 || bstop >= cache->bcull_percent)
59336+ if (bstop >= cache->bcull_percent)
59337 return cachefiles_daemon_range_error(cache, args);
59338
59339 cache->bstop_percent = bstop;
59340diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59341index 8c52472..c4e3a69 100644
59342--- a/fs/cachefiles/internal.h
59343+++ b/fs/cachefiles/internal.h
59344@@ -66,7 +66,7 @@ struct cachefiles_cache {
59345 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59346 struct rb_root active_nodes; /* active nodes (can't be culled) */
59347 rwlock_t active_lock; /* lock for active_nodes */
59348- atomic_t gravecounter; /* graveyard uniquifier */
59349+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59350 unsigned frun_percent; /* when to stop culling (% files) */
59351 unsigned fcull_percent; /* when to start culling (% files) */
59352 unsigned fstop_percent; /* when to stop allocating (% files) */
59353@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59354 * proc.c
59355 */
59356 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59357-extern atomic_t cachefiles_lookup_histogram[HZ];
59358-extern atomic_t cachefiles_mkdir_histogram[HZ];
59359-extern atomic_t cachefiles_create_histogram[HZ];
59360+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59361+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59362+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59363
59364 extern int __init cachefiles_proc_init(void);
59365 extern void cachefiles_proc_cleanup(void);
59366 static inline
59367-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59368+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59369 {
59370 unsigned long jif = jiffies - start_jif;
59371 if (jif >= HZ)
59372 jif = HZ - 1;
59373- atomic_inc(&histogram[jif]);
59374+ atomic_inc_unchecked(&histogram[jif]);
59375 }
59376
59377 #else
59378diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59379index 7f8e83f..8951aa4 100644
59380--- a/fs/cachefiles/namei.c
59381+++ b/fs/cachefiles/namei.c
59382@@ -309,7 +309,7 @@ try_again:
59383 /* first step is to make up a grave dentry in the graveyard */
59384 sprintf(nbuffer, "%08x%08x",
59385 (uint32_t) get_seconds(),
59386- (uint32_t) atomic_inc_return(&cache->gravecounter));
59387+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59388
59389 /* do the multiway lock magic */
59390 trap = lock_rename(cache->graveyard, dir);
59391diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59392index eccd339..4c1d995 100644
59393--- a/fs/cachefiles/proc.c
59394+++ b/fs/cachefiles/proc.c
59395@@ -14,9 +14,9 @@
59396 #include <linux/seq_file.h>
59397 #include "internal.h"
59398
59399-atomic_t cachefiles_lookup_histogram[HZ];
59400-atomic_t cachefiles_mkdir_histogram[HZ];
59401-atomic_t cachefiles_create_histogram[HZ];
59402+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59403+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59404+atomic_unchecked_t cachefiles_create_histogram[HZ];
59405
59406 /*
59407 * display the latency histogram
59408@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59409 return 0;
59410 default:
59411 index = (unsigned long) v - 3;
59412- x = atomic_read(&cachefiles_lookup_histogram[index]);
59413- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59414- z = atomic_read(&cachefiles_create_histogram[index]);
59415+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59416+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59417+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59418 if (x == 0 && y == 0 && z == 0)
59419 return 0;
59420
59421diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59422index c241603..56bae60 100644
59423--- a/fs/ceph/dir.c
59424+++ b/fs/ceph/dir.c
59425@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59426 struct dentry *dentry, *last;
59427 struct ceph_dentry_info *di;
59428 int err = 0;
59429+ char d_name[DNAME_INLINE_LEN];
59430+ const unsigned char *name;
59431
59432 /* claim ref on last dentry we returned */
59433 last = fi->dentry;
59434@@ -192,7 +194,12 @@ more:
59435
59436 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59437 dentry, dentry, dentry->d_inode);
59438- if (!dir_emit(ctx, dentry->d_name.name,
59439+ name = dentry->d_name.name;
59440+ if (name == dentry->d_iname) {
59441+ memcpy(d_name, name, dentry->d_name.len);
59442+ name = d_name;
59443+ }
59444+ if (!dir_emit(ctx, name,
59445 dentry->d_name.len,
59446 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59447 dentry->d_inode->i_mode >> 12)) {
59448@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59449 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59450 struct ceph_mds_client *mdsc = fsc->mdsc;
59451 unsigned frag = fpos_frag(ctx->pos);
59452- int off = fpos_off(ctx->pos);
59453+ unsigned int off = fpos_off(ctx->pos);
59454 int err;
59455 u32 ftype;
59456 struct ceph_mds_reply_info_parsed *rinfo;
59457diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59458index 50f06cd..c7eba3e 100644
59459--- a/fs/ceph/super.c
59460+++ b/fs/ceph/super.c
59461@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59462 /*
59463 * construct our own bdi so we can control readahead, etc.
59464 */
59465-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59466+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59467
59468 static int ceph_register_bdi(struct super_block *sb,
59469 struct ceph_fs_client *fsc)
59470@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59471 default_backing_dev_info.ra_pages;
59472
59473 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59474- atomic_long_inc_return(&bdi_seq));
59475+ atomic_long_inc_return_unchecked(&bdi_seq));
59476 if (!err)
59477 sb->s_bdi = &fsc->backing_dev_info;
59478 return err;
59479diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59480index 7febcf2..62a5721 100644
59481--- a/fs/cifs/cifs_debug.c
59482+++ b/fs/cifs/cifs_debug.c
59483@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59484
59485 if (strtobool(&c, &bv) == 0) {
59486 #ifdef CONFIG_CIFS_STATS2
59487- atomic_set(&totBufAllocCount, 0);
59488- atomic_set(&totSmBufAllocCount, 0);
59489+ atomic_set_unchecked(&totBufAllocCount, 0);
59490+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59491 #endif /* CONFIG_CIFS_STATS2 */
59492 spin_lock(&cifs_tcp_ses_lock);
59493 list_for_each(tmp1, &cifs_tcp_ses_list) {
59494@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59495 tcon = list_entry(tmp3,
59496 struct cifs_tcon,
59497 tcon_list);
59498- atomic_set(&tcon->num_smbs_sent, 0);
59499+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59500 if (server->ops->clear_stats)
59501 server->ops->clear_stats(tcon);
59502 }
59503@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59504 smBufAllocCount.counter, cifs_min_small);
59505 #ifdef CONFIG_CIFS_STATS2
59506 seq_printf(m, "Total Large %d Small %d Allocations\n",
59507- atomic_read(&totBufAllocCount),
59508- atomic_read(&totSmBufAllocCount));
59509+ atomic_read_unchecked(&totBufAllocCount),
59510+ atomic_read_unchecked(&totSmBufAllocCount));
59511 #endif /* CONFIG_CIFS_STATS2 */
59512
59513 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59514@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59515 if (tcon->need_reconnect)
59516 seq_puts(m, "\tDISCONNECTED ");
59517 seq_printf(m, "\nSMBs: %d",
59518- atomic_read(&tcon->num_smbs_sent));
59519+ atomic_read_unchecked(&tcon->num_smbs_sent));
59520 if (server->ops->print_stats)
59521 server->ops->print_stats(m, tcon);
59522 }
59523diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59524index d72fe37..ded5511 100644
59525--- a/fs/cifs/cifsfs.c
59526+++ b/fs/cifs/cifsfs.c
59527@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59528 */
59529 cifs_req_cachep = kmem_cache_create("cifs_request",
59530 CIFSMaxBufSize + max_hdr_size, 0,
59531- SLAB_HWCACHE_ALIGN, NULL);
59532+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59533 if (cifs_req_cachep == NULL)
59534 return -ENOMEM;
59535
59536@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59537 efficient to alloc 1 per page off the slab compared to 17K (5page)
59538 alloc of large cifs buffers even when page debugging is on */
59539 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59540- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59541+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59542 NULL);
59543 if (cifs_sm_req_cachep == NULL) {
59544 mempool_destroy(cifs_req_poolp);
59545@@ -1204,8 +1204,8 @@ init_cifs(void)
59546 atomic_set(&bufAllocCount, 0);
59547 atomic_set(&smBufAllocCount, 0);
59548 #ifdef CONFIG_CIFS_STATS2
59549- atomic_set(&totBufAllocCount, 0);
59550- atomic_set(&totSmBufAllocCount, 0);
59551+ atomic_set_unchecked(&totBufAllocCount, 0);
59552+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59553 #endif /* CONFIG_CIFS_STATS2 */
59554
59555 atomic_set(&midCount, 0);
59556diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59557index 22b289a..bbbba08 100644
59558--- a/fs/cifs/cifsglob.h
59559+++ b/fs/cifs/cifsglob.h
59560@@ -823,35 +823,35 @@ struct cifs_tcon {
59561 __u16 Flags; /* optional support bits */
59562 enum statusEnum tidStatus;
59563 #ifdef CONFIG_CIFS_STATS
59564- atomic_t num_smbs_sent;
59565+ atomic_unchecked_t num_smbs_sent;
59566 union {
59567 struct {
59568- atomic_t num_writes;
59569- atomic_t num_reads;
59570- atomic_t num_flushes;
59571- atomic_t num_oplock_brks;
59572- atomic_t num_opens;
59573- atomic_t num_closes;
59574- atomic_t num_deletes;
59575- atomic_t num_mkdirs;
59576- atomic_t num_posixopens;
59577- atomic_t num_posixmkdirs;
59578- atomic_t num_rmdirs;
59579- atomic_t num_renames;
59580- atomic_t num_t2renames;
59581- atomic_t num_ffirst;
59582- atomic_t num_fnext;
59583- atomic_t num_fclose;
59584- atomic_t num_hardlinks;
59585- atomic_t num_symlinks;
59586- atomic_t num_locks;
59587- atomic_t num_acl_get;
59588- atomic_t num_acl_set;
59589+ atomic_unchecked_t num_writes;
59590+ atomic_unchecked_t num_reads;
59591+ atomic_unchecked_t num_flushes;
59592+ atomic_unchecked_t num_oplock_brks;
59593+ atomic_unchecked_t num_opens;
59594+ atomic_unchecked_t num_closes;
59595+ atomic_unchecked_t num_deletes;
59596+ atomic_unchecked_t num_mkdirs;
59597+ atomic_unchecked_t num_posixopens;
59598+ atomic_unchecked_t num_posixmkdirs;
59599+ atomic_unchecked_t num_rmdirs;
59600+ atomic_unchecked_t num_renames;
59601+ atomic_unchecked_t num_t2renames;
59602+ atomic_unchecked_t num_ffirst;
59603+ atomic_unchecked_t num_fnext;
59604+ atomic_unchecked_t num_fclose;
59605+ atomic_unchecked_t num_hardlinks;
59606+ atomic_unchecked_t num_symlinks;
59607+ atomic_unchecked_t num_locks;
59608+ atomic_unchecked_t num_acl_get;
59609+ atomic_unchecked_t num_acl_set;
59610 } cifs_stats;
59611 #ifdef CONFIG_CIFS_SMB2
59612 struct {
59613- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59614- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59615+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59616+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59617 } smb2_stats;
59618 #endif /* CONFIG_CIFS_SMB2 */
59619 } stats;
59620@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59621 }
59622
59623 #ifdef CONFIG_CIFS_STATS
59624-#define cifs_stats_inc atomic_inc
59625+#define cifs_stats_inc atomic_inc_unchecked
59626
59627 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59628 unsigned int bytes)
59629@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59630 /* Various Debug counters */
59631 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59632 #ifdef CONFIG_CIFS_STATS2
59633-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59634-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59635+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59636+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59637 #endif
59638 GLOBAL_EXTERN atomic_t smBufAllocCount;
59639 GLOBAL_EXTERN atomic_t midCount;
59640diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59641index 74f1287..7ef0237 100644
59642--- a/fs/cifs/file.c
59643+++ b/fs/cifs/file.c
59644@@ -2060,10 +2060,14 @@ static int cifs_writepages(struct address_space *mapping,
59645 index = mapping->writeback_index; /* Start from prev offset */
59646 end = -1;
59647 } else {
59648- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59649- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59650- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59651+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59652 range_whole = true;
59653+ index = 0;
59654+ end = ULONG_MAX;
59655+ } else {
59656+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59657+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59658+ }
59659 scanned = true;
59660 }
59661 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
59662diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59663index 3379463..3af418a 100644
59664--- a/fs/cifs/misc.c
59665+++ b/fs/cifs/misc.c
59666@@ -170,7 +170,7 @@ cifs_buf_get(void)
59667 memset(ret_buf, 0, buf_size + 3);
59668 atomic_inc(&bufAllocCount);
59669 #ifdef CONFIG_CIFS_STATS2
59670- atomic_inc(&totBufAllocCount);
59671+ atomic_inc_unchecked(&totBufAllocCount);
59672 #endif /* CONFIG_CIFS_STATS2 */
59673 }
59674
59675@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59676 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59677 atomic_inc(&smBufAllocCount);
59678 #ifdef CONFIG_CIFS_STATS2
59679- atomic_inc(&totSmBufAllocCount);
59680+ atomic_inc_unchecked(&totSmBufAllocCount);
59681 #endif /* CONFIG_CIFS_STATS2 */
59682
59683 }
59684diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59685index d297903..1cb7516 100644
59686--- a/fs/cifs/smb1ops.c
59687+++ b/fs/cifs/smb1ops.c
59688@@ -622,27 +622,27 @@ static void
59689 cifs_clear_stats(struct cifs_tcon *tcon)
59690 {
59691 #ifdef CONFIG_CIFS_STATS
59692- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59693- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59694- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59695- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59696- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59697- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59698- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59699- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59700- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59701- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59702- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59703- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59704- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59705- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59706- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59707- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59708- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59709- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59710- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59711- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59712- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59713+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59714+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59715+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59716+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59717+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59718+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59719+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59720+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59721+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59722+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59723+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59724+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59725+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59726+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59727+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59728+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59729+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59730+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59731+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59732+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59733+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59734 #endif
59735 }
59736
59737@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59738 {
59739 #ifdef CONFIG_CIFS_STATS
59740 seq_printf(m, " Oplocks breaks: %d",
59741- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59742+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59743 seq_printf(m, "\nReads: %d Bytes: %llu",
59744- atomic_read(&tcon->stats.cifs_stats.num_reads),
59745+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59746 (long long)(tcon->bytes_read));
59747 seq_printf(m, "\nWrites: %d Bytes: %llu",
59748- atomic_read(&tcon->stats.cifs_stats.num_writes),
59749+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59750 (long long)(tcon->bytes_written));
59751 seq_printf(m, "\nFlushes: %d",
59752- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59753+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59754 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59755- atomic_read(&tcon->stats.cifs_stats.num_locks),
59756- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59757- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59758+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59759+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59760+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59761 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59762- atomic_read(&tcon->stats.cifs_stats.num_opens),
59763- atomic_read(&tcon->stats.cifs_stats.num_closes),
59764- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59765+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59766+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59767+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59768 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59769- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59770- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59771+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59772+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59773 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59774- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59775- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59776+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59777+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59778 seq_printf(m, "\nRenames: %d T2 Renames %d",
59779- atomic_read(&tcon->stats.cifs_stats.num_renames),
59780- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59781+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59782+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59783 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59784- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59785- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59786- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59787+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59788+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59789+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59790 #endif
59791 }
59792
59793diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59794index 96b5d40..e5db0c1 100644
59795--- a/fs/cifs/smb2ops.c
59796+++ b/fs/cifs/smb2ops.c
59797@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59798 #ifdef CONFIG_CIFS_STATS
59799 int i;
59800 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59801- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59802- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59803+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59804+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59805 }
59806 #endif
59807 }
59808@@ -459,65 +459,65 @@ static void
59809 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59810 {
59811 #ifdef CONFIG_CIFS_STATS
59812- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59813- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59814+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59815+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59816 seq_printf(m, "\nNegotiates: %d sent %d failed",
59817- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59818- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59819+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59820+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59821 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59822- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59823- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59824+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59825+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59826 seq_printf(m, "\nLogoffs: %d sent %d failed",
59827- atomic_read(&sent[SMB2_LOGOFF_HE]),
59828- atomic_read(&failed[SMB2_LOGOFF_HE]));
59829+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
59830+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
59831 seq_printf(m, "\nTreeConnects: %d sent %d failed",
59832- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
59833- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
59834+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
59835+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
59836 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
59837- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
59838- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
59839+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
59840+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
59841 seq_printf(m, "\nCreates: %d sent %d failed",
59842- atomic_read(&sent[SMB2_CREATE_HE]),
59843- atomic_read(&failed[SMB2_CREATE_HE]));
59844+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
59845+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
59846 seq_printf(m, "\nCloses: %d sent %d failed",
59847- atomic_read(&sent[SMB2_CLOSE_HE]),
59848- atomic_read(&failed[SMB2_CLOSE_HE]));
59849+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
59850+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
59851 seq_printf(m, "\nFlushes: %d sent %d failed",
59852- atomic_read(&sent[SMB2_FLUSH_HE]),
59853- atomic_read(&failed[SMB2_FLUSH_HE]));
59854+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
59855+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
59856 seq_printf(m, "\nReads: %d sent %d failed",
59857- atomic_read(&sent[SMB2_READ_HE]),
59858- atomic_read(&failed[SMB2_READ_HE]));
59859+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
59860+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
59861 seq_printf(m, "\nWrites: %d sent %d failed",
59862- atomic_read(&sent[SMB2_WRITE_HE]),
59863- atomic_read(&failed[SMB2_WRITE_HE]));
59864+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
59865+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
59866 seq_printf(m, "\nLocks: %d sent %d failed",
59867- atomic_read(&sent[SMB2_LOCK_HE]),
59868- atomic_read(&failed[SMB2_LOCK_HE]));
59869+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
59870+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
59871 seq_printf(m, "\nIOCTLs: %d sent %d failed",
59872- atomic_read(&sent[SMB2_IOCTL_HE]),
59873- atomic_read(&failed[SMB2_IOCTL_HE]));
59874+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
59875+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
59876 seq_printf(m, "\nCancels: %d sent %d failed",
59877- atomic_read(&sent[SMB2_CANCEL_HE]),
59878- atomic_read(&failed[SMB2_CANCEL_HE]));
59879+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
59880+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
59881 seq_printf(m, "\nEchos: %d sent %d failed",
59882- atomic_read(&sent[SMB2_ECHO_HE]),
59883- atomic_read(&failed[SMB2_ECHO_HE]));
59884+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
59885+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
59886 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
59887- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
59888- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
59889+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
59890+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
59891 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
59892- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
59893- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
59894+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
59895+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
59896 seq_printf(m, "\nQueryInfos: %d sent %d failed",
59897- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
59898- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
59899+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
59900+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
59901 seq_printf(m, "\nSetInfos: %d sent %d failed",
59902- atomic_read(&sent[SMB2_SET_INFO_HE]),
59903- atomic_read(&failed[SMB2_SET_INFO_HE]));
59904+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
59905+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
59906 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
59907- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
59908- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
59909+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
59910+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
59911 #endif
59912 }
59913
59914diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
59915index 3417340..b942390 100644
59916--- a/fs/cifs/smb2pdu.c
59917+++ b/fs/cifs/smb2pdu.c
59918@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
59919 default:
59920 cifs_dbg(VFS, "info level %u isn't supported\n",
59921 srch_inf->info_level);
59922- rc = -EINVAL;
59923- goto qdir_exit;
59924+ return -EINVAL;
59925 }
59926
59927 req->FileIndex = cpu_to_le32(index);
59928diff --git a/fs/coda/cache.c b/fs/coda/cache.c
59929index 46ee6f2..89a9e7f 100644
59930--- a/fs/coda/cache.c
59931+++ b/fs/coda/cache.c
59932@@ -24,7 +24,7 @@
59933 #include "coda_linux.h"
59934 #include "coda_cache.h"
59935
59936-static atomic_t permission_epoch = ATOMIC_INIT(0);
59937+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
59938
59939 /* replace or extend an acl cache hit */
59940 void coda_cache_enter(struct inode *inode, int mask)
59941@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
59942 struct coda_inode_info *cii = ITOC(inode);
59943
59944 spin_lock(&cii->c_lock);
59945- cii->c_cached_epoch = atomic_read(&permission_epoch);
59946+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
59947 if (!uid_eq(cii->c_uid, current_fsuid())) {
59948 cii->c_uid = current_fsuid();
59949 cii->c_cached_perm = mask;
59950@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
59951 {
59952 struct coda_inode_info *cii = ITOC(inode);
59953 spin_lock(&cii->c_lock);
59954- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
59955+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
59956 spin_unlock(&cii->c_lock);
59957 }
59958
59959 /* remove all acl caches */
59960 void coda_cache_clear_all(struct super_block *sb)
59961 {
59962- atomic_inc(&permission_epoch);
59963+ atomic_inc_unchecked(&permission_epoch);
59964 }
59965
59966
59967@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
59968 spin_lock(&cii->c_lock);
59969 hit = (mask & cii->c_cached_perm) == mask &&
59970 uid_eq(cii->c_uid, current_fsuid()) &&
59971- cii->c_cached_epoch == atomic_read(&permission_epoch);
59972+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
59973 spin_unlock(&cii->c_lock);
59974
59975 return hit;
59976diff --git a/fs/compat.c b/fs/compat.c
59977index 6fd272d..dd34ba2 100644
59978--- a/fs/compat.c
59979+++ b/fs/compat.c
59980@@ -54,7 +54,7 @@
59981 #include <asm/ioctls.h>
59982 #include "internal.h"
59983
59984-int compat_log = 1;
59985+int compat_log = 0;
59986
59987 int compat_printk(const char *fmt, ...)
59988 {
59989@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
59990
59991 set_fs(KERNEL_DS);
59992 /* The __user pointer cast is valid because of the set_fs() */
59993- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
59994+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
59995 set_fs(oldfs);
59996 /* truncating is ok because it's a user address */
59997 if (!ret)
59998@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
59999 goto out;
60000
60001 ret = -EINVAL;
60002- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60003+ if (nr_segs > UIO_MAXIOV)
60004 goto out;
60005 if (nr_segs > fast_segs) {
60006 ret = -ENOMEM;
60007@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60008 struct compat_readdir_callback {
60009 struct dir_context ctx;
60010 struct compat_old_linux_dirent __user *dirent;
60011+ struct file * file;
60012 int result;
60013 };
60014
60015@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60016 buf->result = -EOVERFLOW;
60017 return -EOVERFLOW;
60018 }
60019+
60020+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60021+ return 0;
60022+
60023 buf->result++;
60024 dirent = buf->dirent;
60025 if (!access_ok(VERIFY_WRITE, dirent,
60026@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60027 if (!f.file)
60028 return -EBADF;
60029
60030+ buf.file = f.file;
60031 error = iterate_dir(f.file, &buf.ctx);
60032 if (buf.result)
60033 error = buf.result;
60034@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60035 struct dir_context ctx;
60036 struct compat_linux_dirent __user *current_dir;
60037 struct compat_linux_dirent __user *previous;
60038+ struct file * file;
60039 int count;
60040 int error;
60041 };
60042@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60043 buf->error = -EOVERFLOW;
60044 return -EOVERFLOW;
60045 }
60046+
60047+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60048+ return 0;
60049+
60050 dirent = buf->previous;
60051 if (dirent) {
60052 if (__put_user(offset, &dirent->d_off))
60053@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60054 if (!f.file)
60055 return -EBADF;
60056
60057+ buf.file = f.file;
60058 error = iterate_dir(f.file, &buf.ctx);
60059 if (error >= 0)
60060 error = buf.error;
60061@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60062 struct dir_context ctx;
60063 struct linux_dirent64 __user *current_dir;
60064 struct linux_dirent64 __user *previous;
60065+ struct file * file;
60066 int count;
60067 int error;
60068 };
60069@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60070 buf->error = -EINVAL; /* only used if we fail.. */
60071 if (reclen > buf->count)
60072 return -EINVAL;
60073+
60074+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60075+ return 0;
60076+
60077 dirent = buf->previous;
60078
60079 if (dirent) {
60080@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60081 if (!f.file)
60082 return -EBADF;
60083
60084+ buf.file = f.file;
60085 error = iterate_dir(f.file, &buf.ctx);
60086 if (error >= 0)
60087 error = buf.error;
60088diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60089index 4d24d17..4f8c09e 100644
60090--- a/fs/compat_binfmt_elf.c
60091+++ b/fs/compat_binfmt_elf.c
60092@@ -30,11 +30,13 @@
60093 #undef elf_phdr
60094 #undef elf_shdr
60095 #undef elf_note
60096+#undef elf_dyn
60097 #undef elf_addr_t
60098 #define elfhdr elf32_hdr
60099 #define elf_phdr elf32_phdr
60100 #define elf_shdr elf32_shdr
60101 #define elf_note elf32_note
60102+#define elf_dyn Elf32_Dyn
60103 #define elf_addr_t Elf32_Addr
60104
60105 /*
60106diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60107index afec645..9c65620 100644
60108--- a/fs/compat_ioctl.c
60109+++ b/fs/compat_ioctl.c
60110@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60111 return -EFAULT;
60112 if (__get_user(udata, &ss32->iomem_base))
60113 return -EFAULT;
60114- ss.iomem_base = compat_ptr(udata);
60115+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60116 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60117 __get_user(ss.port_high, &ss32->port_high))
60118 return -EFAULT;
60119@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60120 for (i = 0; i < nmsgs; i++) {
60121 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60122 return -EFAULT;
60123- if (get_user(datap, &umsgs[i].buf) ||
60124- put_user(compat_ptr(datap), &tmsgs[i].buf))
60125+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60126+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60127 return -EFAULT;
60128 }
60129 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60130@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60131 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60132 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60133 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60134- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60135+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60136 return -EFAULT;
60137
60138 return ioctl_preallocate(file, p);
60139@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60140 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60141 {
60142 unsigned int a, b;
60143- a = *(unsigned int *)p;
60144- b = *(unsigned int *)q;
60145+ a = *(const unsigned int *)p;
60146+ b = *(const unsigned int *)q;
60147 if (a > b)
60148 return 1;
60149 if (a < b)
60150diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60151index c9c298b..544d100 100644
60152--- a/fs/configfs/dir.c
60153+++ b/fs/configfs/dir.c
60154@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60155 }
60156 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60157 struct configfs_dirent *next;
60158- const char *name;
60159+ const unsigned char * name;
60160+ char d_name[sizeof(next->s_dentry->d_iname)];
60161 int len;
60162 struct inode *inode = NULL;
60163
60164@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60165 continue;
60166
60167 name = configfs_get_name(next);
60168- len = strlen(name);
60169+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60170+ len = next->s_dentry->d_name.len;
60171+ memcpy(d_name, name, len);
60172+ name = d_name;
60173+ } else
60174+ len = strlen(name);
60175
60176 /*
60177 * We'll have a dentry and an inode for
60178diff --git a/fs/coredump.c b/fs/coredump.c
60179index b5c86ff..0dac262 100644
60180--- a/fs/coredump.c
60181+++ b/fs/coredump.c
60182@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60183 struct pipe_inode_info *pipe = file->private_data;
60184
60185 pipe_lock(pipe);
60186- pipe->readers++;
60187- pipe->writers--;
60188+ atomic_inc(&pipe->readers);
60189+ atomic_dec(&pipe->writers);
60190 wake_up_interruptible_sync(&pipe->wait);
60191 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60192 pipe_unlock(pipe);
60193@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60194 * We actually want wait_event_freezable() but then we need
60195 * to clear TIF_SIGPENDING and improve dump_interrupted().
60196 */
60197- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60198+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60199
60200 pipe_lock(pipe);
60201- pipe->readers--;
60202- pipe->writers++;
60203+ atomic_dec(&pipe->readers);
60204+ atomic_inc(&pipe->writers);
60205 pipe_unlock(pipe);
60206 }
60207
60208@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60209 struct files_struct *displaced;
60210 bool need_nonrelative = false;
60211 bool core_dumped = false;
60212- static atomic_t core_dump_count = ATOMIC_INIT(0);
60213+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60214+ long signr = siginfo->si_signo;
60215+ int dumpable;
60216 struct coredump_params cprm = {
60217 .siginfo = siginfo,
60218 .regs = signal_pt_regs(),
60219@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60220 .mm_flags = mm->flags,
60221 };
60222
60223- audit_core_dumps(siginfo->si_signo);
60224+ audit_core_dumps(signr);
60225+
60226+ dumpable = __get_dumpable(cprm.mm_flags);
60227+
60228+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60229+ gr_handle_brute_attach(dumpable);
60230
60231 binfmt = mm->binfmt;
60232 if (!binfmt || !binfmt->core_dump)
60233 goto fail;
60234- if (!__get_dumpable(cprm.mm_flags))
60235+ if (!dumpable)
60236 goto fail;
60237
60238 cred = prepare_creds();
60239@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60240 need_nonrelative = true;
60241 }
60242
60243- retval = coredump_wait(siginfo->si_signo, &core_state);
60244+ retval = coredump_wait(signr, &core_state);
60245 if (retval < 0)
60246 goto fail_creds;
60247
60248@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60249 }
60250 cprm.limit = RLIM_INFINITY;
60251
60252- dump_count = atomic_inc_return(&core_dump_count);
60253+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60254 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60255 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60256 task_tgid_vnr(current), current->comm);
60257@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60258 } else {
60259 struct inode *inode;
60260
60261+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60262+
60263 if (cprm.limit < binfmt->min_coredump)
60264 goto fail_unlock;
60265
60266@@ -681,7 +690,7 @@ close_fail:
60267 filp_close(cprm.file, NULL);
60268 fail_dropcount:
60269 if (ispipe)
60270- atomic_dec(&core_dump_count);
60271+ atomic_dec_unchecked(&core_dump_count);
60272 fail_unlock:
60273 kfree(cn.corename);
60274 coredump_finish(mm, core_dumped);
60275@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60276 struct file *file = cprm->file;
60277 loff_t pos = file->f_pos;
60278 ssize_t n;
60279+
60280+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60281 if (cprm->written + nr > cprm->limit)
60282 return 0;
60283 while (nr) {
60284diff --git a/fs/dcache.c b/fs/dcache.c
60285index e368d4f..b40ba59 100644
60286--- a/fs/dcache.c
60287+++ b/fs/dcache.c
60288@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60289 * dentry_iput drops the locks, at which point nobody (except
60290 * transient RCU lookups) can reach this dentry.
60291 */
60292- BUG_ON((int)dentry->d_lockref.count > 0);
60293+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60294 this_cpu_dec(nr_dentry);
60295 if (dentry->d_op && dentry->d_op->d_release)
60296 dentry->d_op->d_release(dentry);
60297@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60298 struct dentry *parent = dentry->d_parent;
60299 if (IS_ROOT(dentry))
60300 return NULL;
60301- if (unlikely((int)dentry->d_lockref.count < 0))
60302+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60303 return NULL;
60304 if (likely(spin_trylock(&parent->d_lock)))
60305 return parent;
60306@@ -638,7 +638,7 @@ repeat:
60307 dentry->d_flags |= DCACHE_REFERENCED;
60308 dentry_lru_add(dentry);
60309
60310- dentry->d_lockref.count--;
60311+ __lockref_dec(&dentry->d_lockref);
60312 spin_unlock(&dentry->d_lock);
60313 return;
60314
60315@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60316 /* This must be called with d_lock held */
60317 static inline void __dget_dlock(struct dentry *dentry)
60318 {
60319- dentry->d_lockref.count++;
60320+ __lockref_inc(&dentry->d_lockref);
60321 }
60322
60323 static inline void __dget(struct dentry *dentry)
60324@@ -694,8 +694,8 @@ repeat:
60325 goto repeat;
60326 }
60327 rcu_read_unlock();
60328- BUG_ON(!ret->d_lockref.count);
60329- ret->d_lockref.count++;
60330+ BUG_ON(!__lockref_read(&ret->d_lockref));
60331+ __lockref_inc(&ret->d_lockref);
60332 spin_unlock(&ret->d_lock);
60333 return ret;
60334 }
60335@@ -773,9 +773,9 @@ restart:
60336 spin_lock(&inode->i_lock);
60337 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60338 spin_lock(&dentry->d_lock);
60339- if (!dentry->d_lockref.count) {
60340+ if (!__lockref_read(&dentry->d_lockref)) {
60341 struct dentry *parent = lock_parent(dentry);
60342- if (likely(!dentry->d_lockref.count)) {
60343+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60344 __dentry_kill(dentry);
60345 dput(parent);
60346 goto restart;
60347@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60348 * We found an inuse dentry which was not removed from
60349 * the LRU because of laziness during lookup. Do not free it.
60350 */
60351- if ((int)dentry->d_lockref.count > 0) {
60352+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60353 spin_unlock(&dentry->d_lock);
60354 if (parent)
60355 spin_unlock(&parent->d_lock);
60356@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60357 dentry = parent;
60358 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60359 parent = lock_parent(dentry);
60360- if (dentry->d_lockref.count != 1) {
60361- dentry->d_lockref.count--;
60362+ if (__lockref_read(&dentry->d_lockref) != 1) {
60363+ __lockref_inc(&dentry->d_lockref);
60364 spin_unlock(&dentry->d_lock);
60365 if (parent)
60366 spin_unlock(&parent->d_lock);
60367@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60368 * counts, just remove them from the LRU. Otherwise give them
60369 * another pass through the LRU.
60370 */
60371- if (dentry->d_lockref.count) {
60372+ if (__lockref_read(&dentry->d_lockref) > 0) {
60373 d_lru_isolate(dentry);
60374 spin_unlock(&dentry->d_lock);
60375 return LRU_REMOVED;
60376@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60377 } else {
60378 if (dentry->d_flags & DCACHE_LRU_LIST)
60379 d_lru_del(dentry);
60380- if (!dentry->d_lockref.count) {
60381+ if (!__lockref_read(&dentry->d_lockref)) {
60382 d_shrink_add(dentry, &data->dispose);
60383 data->found++;
60384 }
60385@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60386 return D_WALK_CONTINUE;
60387
60388 /* root with refcount 1 is fine */
60389- if (dentry == _data && dentry->d_lockref.count == 1)
60390+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60391 return D_WALK_CONTINUE;
60392
60393 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60394@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60395 dentry->d_inode ?
60396 dentry->d_inode->i_ino : 0UL,
60397 dentry,
60398- dentry->d_lockref.count,
60399+ __lockref_read(&dentry->d_lockref),
60400 dentry->d_sb->s_type->name,
60401 dentry->d_sb->s_id);
60402 WARN_ON(1);
60403@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60404 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60405 if (name->len > DNAME_INLINE_LEN-1) {
60406 size_t size = offsetof(struct external_name, name[1]);
60407- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60408+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60409 if (!p) {
60410 kmem_cache_free(dentry_cache, dentry);
60411 return NULL;
60412@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60413 smp_wmb();
60414 dentry->d_name.name = dname;
60415
60416- dentry->d_lockref.count = 1;
60417+ __lockref_set(&dentry->d_lockref, 1);
60418 dentry->d_flags = 0;
60419 spin_lock_init(&dentry->d_lock);
60420 seqcount_init(&dentry->d_seq);
60421@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60422 dentry->d_sb = sb;
60423 dentry->d_op = NULL;
60424 dentry->d_fsdata = NULL;
60425+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60426+ atomic_set(&dentry->chroot_refcnt, 0);
60427+#endif
60428 INIT_HLIST_BL_NODE(&dentry->d_hash);
60429 INIT_LIST_HEAD(&dentry->d_lru);
60430 INIT_LIST_HEAD(&dentry->d_subdirs);
60431@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60432 goto next;
60433 }
60434
60435- dentry->d_lockref.count++;
60436+ __lockref_inc(&dentry->d_lockref);
60437 found = dentry;
60438 spin_unlock(&dentry->d_lock);
60439 break;
60440@@ -2250,7 +2253,7 @@ again:
60441 spin_lock(&dentry->d_lock);
60442 inode = dentry->d_inode;
60443 isdir = S_ISDIR(inode->i_mode);
60444- if (dentry->d_lockref.count == 1) {
60445+ if (__lockref_read(&dentry->d_lockref) == 1) {
60446 if (!spin_trylock(&inode->i_lock)) {
60447 spin_unlock(&dentry->d_lock);
60448 cpu_relax();
60449@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60450
60451 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60452 dentry->d_flags |= DCACHE_GENOCIDE;
60453- dentry->d_lockref.count--;
60454+ __lockref_dec(&dentry->d_lockref);
60455 }
60456 }
60457 return D_WALK_CONTINUE;
60458@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60459 mempages -= reserve;
60460
60461 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60462- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60463+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60464+ SLAB_NO_SANITIZE, NULL);
60465
60466 dcache_init();
60467 inode_init();
60468diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60469index 6f0ce53..92bba36 100644
60470--- a/fs/debugfs/inode.c
60471+++ b/fs/debugfs/inode.c
60472@@ -423,10 +423,20 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60473 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
60474 * returned.
60475 */
60476+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60477+extern int grsec_enable_sysfs_restrict;
60478+#endif
60479+
60480 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60481 {
60482- return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60483- parent, NULL, NULL);
60484+ umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60485+
60486+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60487+ if (grsec_enable_sysfs_restrict)
60488+ mode = S_IFDIR | S_IRWXU;
60489+#endif
60490+
60491+ return __create_file(name, mode, parent, NULL, NULL);
60492 }
60493 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60494
60495diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60496index 1686dc2..9611c50 100644
60497--- a/fs/ecryptfs/inode.c
60498+++ b/fs/ecryptfs/inode.c
60499@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60500 old_fs = get_fs();
60501 set_fs(get_ds());
60502 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60503- (char __user *)lower_buf,
60504+ (char __force_user *)lower_buf,
60505 PATH_MAX);
60506 set_fs(old_fs);
60507 if (rc < 0)
60508diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60509index e4141f2..d8263e8 100644
60510--- a/fs/ecryptfs/miscdev.c
60511+++ b/fs/ecryptfs/miscdev.c
60512@@ -304,7 +304,7 @@ check_list:
60513 goto out_unlock_msg_ctx;
60514 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60515 if (msg_ctx->msg) {
60516- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60517+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60518 goto out_unlock_msg_ctx;
60519 i += packet_length_size;
60520 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
60521diff --git a/fs/exec.c b/fs/exec.c
60522index ad8798e..5f872c9 100644
60523--- a/fs/exec.c
60524+++ b/fs/exec.c
60525@@ -56,8 +56,20 @@
60526 #include <linux/pipe_fs_i.h>
60527 #include <linux/oom.h>
60528 #include <linux/compat.h>
60529+#include <linux/random.h>
60530+#include <linux/seq_file.h>
60531+#include <linux/coredump.h>
60532+#include <linux/mman.h>
60533+
60534+#ifdef CONFIG_PAX_REFCOUNT
60535+#include <linux/kallsyms.h>
60536+#include <linux/kdebug.h>
60537+#endif
60538+
60539+#include <trace/events/fs.h>
60540
60541 #include <asm/uaccess.h>
60542+#include <asm/sections.h>
60543 #include <asm/mmu_context.h>
60544 #include <asm/tlb.h>
60545
60546@@ -66,19 +78,34 @@
60547
60548 #include <trace/events/sched.h>
60549
60550+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60551+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60552+{
60553+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60554+}
60555+#endif
60556+
60557+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60558+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60559+EXPORT_SYMBOL(pax_set_initial_flags_func);
60560+#endif
60561+
60562 int suid_dumpable = 0;
60563
60564 static LIST_HEAD(formats);
60565 static DEFINE_RWLOCK(binfmt_lock);
60566
60567+extern int gr_process_kernel_exec_ban(void);
60568+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60569+
60570 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60571 {
60572 BUG_ON(!fmt);
60573 if (WARN_ON(!fmt->load_binary))
60574 return;
60575 write_lock(&binfmt_lock);
60576- insert ? list_add(&fmt->lh, &formats) :
60577- list_add_tail(&fmt->lh, &formats);
60578+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60579+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60580 write_unlock(&binfmt_lock);
60581 }
60582
60583@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60584 void unregister_binfmt(struct linux_binfmt * fmt)
60585 {
60586 write_lock(&binfmt_lock);
60587- list_del(&fmt->lh);
60588+ pax_list_del((struct list_head *)&fmt->lh);
60589 write_unlock(&binfmt_lock);
60590 }
60591
60592@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60593 int write)
60594 {
60595 struct page *page;
60596- int ret;
60597
60598-#ifdef CONFIG_STACK_GROWSUP
60599- if (write) {
60600- ret = expand_downwards(bprm->vma, pos);
60601- if (ret < 0)
60602- return NULL;
60603- }
60604-#endif
60605- ret = get_user_pages(current, bprm->mm, pos,
60606- 1, write, 1, &page, NULL);
60607- if (ret <= 0)
60608+ if (0 > expand_downwards(bprm->vma, pos))
60609+ return NULL;
60610+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60611 return NULL;
60612
60613 if (write) {
60614@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60615 if (size <= ARG_MAX)
60616 return page;
60617
60618+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60619+ // only allow 512KB for argv+env on suid/sgid binaries
60620+ // to prevent easy ASLR exhaustion
60621+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60622+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60623+ (size > (512 * 1024))) {
60624+ put_page(page);
60625+ return NULL;
60626+ }
60627+#endif
60628+
60629 /*
60630 * Limit to 1/4-th the stack size for the argv+env strings.
60631 * This ensures that:
60632@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60633 vma->vm_end = STACK_TOP_MAX;
60634 vma->vm_start = vma->vm_end - PAGE_SIZE;
60635 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60636+
60637+#ifdef CONFIG_PAX_SEGMEXEC
60638+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60639+#endif
60640+
60641 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60642 INIT_LIST_HEAD(&vma->anon_vma_chain);
60643
60644@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60645 arch_bprm_mm_init(mm, vma);
60646 up_write(&mm->mmap_sem);
60647 bprm->p = vma->vm_end - sizeof(void *);
60648+
60649+#ifdef CONFIG_PAX_RANDUSTACK
60650+ if (randomize_va_space)
60651+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60652+#endif
60653+
60654 return 0;
60655 err:
60656 up_write(&mm->mmap_sem);
60657@@ -396,7 +437,7 @@ struct user_arg_ptr {
60658 } ptr;
60659 };
60660
60661-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60662+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60663 {
60664 const char __user *native;
60665
60666@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60667 compat_uptr_t compat;
60668
60669 if (get_user(compat, argv.ptr.compat + nr))
60670- return ERR_PTR(-EFAULT);
60671+ return (const char __force_user *)ERR_PTR(-EFAULT);
60672
60673 return compat_ptr(compat);
60674 }
60675 #endif
60676
60677 if (get_user(native, argv.ptr.native + nr))
60678- return ERR_PTR(-EFAULT);
60679+ return (const char __force_user *)ERR_PTR(-EFAULT);
60680
60681 return native;
60682 }
60683@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60684 if (!p)
60685 break;
60686
60687- if (IS_ERR(p))
60688+ if (IS_ERR((const char __force_kernel *)p))
60689 return -EFAULT;
60690
60691 if (i >= max)
60692@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60693
60694 ret = -EFAULT;
60695 str = get_user_arg_ptr(argv, argc);
60696- if (IS_ERR(str))
60697+ if (IS_ERR((const char __force_kernel *)str))
60698 goto out;
60699
60700 len = strnlen_user(str, MAX_ARG_STRLEN);
60701@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60702 int r;
60703 mm_segment_t oldfs = get_fs();
60704 struct user_arg_ptr argv = {
60705- .ptr.native = (const char __user *const __user *)__argv,
60706+ .ptr.native = (const char __user * const __force_user *)__argv,
60707 };
60708
60709 set_fs(KERNEL_DS);
60710@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60711 unsigned long new_end = old_end - shift;
60712 struct mmu_gather tlb;
60713
60714- BUG_ON(new_start > new_end);
60715+ if (new_start >= new_end || new_start < mmap_min_addr)
60716+ return -ENOMEM;
60717
60718 /*
60719 * ensure there are no vmas between where we want to go
60720@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60721 if (vma != find_vma(mm, new_start))
60722 return -EFAULT;
60723
60724+#ifdef CONFIG_PAX_SEGMEXEC
60725+ BUG_ON(pax_find_mirror_vma(vma));
60726+#endif
60727+
60728 /*
60729 * cover the whole range: [new_start, old_end)
60730 */
60731@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60732 stack_top = arch_align_stack(stack_top);
60733 stack_top = PAGE_ALIGN(stack_top);
60734
60735- if (unlikely(stack_top < mmap_min_addr) ||
60736- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60737- return -ENOMEM;
60738-
60739 stack_shift = vma->vm_end - stack_top;
60740
60741 bprm->p -= stack_shift;
60742@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60743 bprm->exec -= stack_shift;
60744
60745 down_write(&mm->mmap_sem);
60746+
60747+ /* Move stack pages down in memory. */
60748+ if (stack_shift) {
60749+ ret = shift_arg_pages(vma, stack_shift);
60750+ if (ret)
60751+ goto out_unlock;
60752+ }
60753+
60754 vm_flags = VM_STACK_FLAGS;
60755
60756+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60757+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60758+ vm_flags &= ~VM_EXEC;
60759+
60760+#ifdef CONFIG_PAX_MPROTECT
60761+ if (mm->pax_flags & MF_PAX_MPROTECT)
60762+ vm_flags &= ~VM_MAYEXEC;
60763+#endif
60764+
60765+ }
60766+#endif
60767+
60768 /*
60769 * Adjust stack execute permissions; explicitly enable for
60770 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60771@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60772 goto out_unlock;
60773 BUG_ON(prev != vma);
60774
60775- /* Move stack pages down in memory. */
60776- if (stack_shift) {
60777- ret = shift_arg_pages(vma, stack_shift);
60778- if (ret)
60779- goto out_unlock;
60780- }
60781-
60782 /* mprotect_fixup is overkill to remove the temporary stack flags */
60783 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60784
60785@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60786 #endif
60787 current->mm->start_stack = bprm->p;
60788 ret = expand_stack(vma, stack_base);
60789+
60790+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60791+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60792+ unsigned long size;
60793+ vm_flags_t vm_flags;
60794+
60795+ size = STACK_TOP - vma->vm_end;
60796+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60797+
60798+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60799+
60800+#ifdef CONFIG_X86
60801+ if (!ret) {
60802+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60803+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60804+ }
60805+#endif
60806+
60807+ }
60808+#endif
60809+
60810 if (ret)
60811 ret = -EFAULT;
60812
60813@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60814 if (err)
60815 goto exit;
60816
60817- if (name->name[0] != '\0')
60818+ if (name->name[0] != '\0') {
60819 fsnotify_open(file);
60820+ trace_open_exec(name->name);
60821+ }
60822
60823 out:
60824 return file;
60825@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60826 old_fs = get_fs();
60827 set_fs(get_ds());
60828 /* The cast to a user pointer is valid due to the set_fs() */
60829- result = vfs_read(file, (void __user *)addr, count, &pos);
60830+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
60831 set_fs(old_fs);
60832 return result;
60833 }
60834@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
60835 tsk->mm = mm;
60836 tsk->active_mm = mm;
60837 activate_mm(active_mm, mm);
60838+ populate_stack();
60839 tsk->mm->vmacache_seqnum = 0;
60840 vmacache_flush(tsk);
60841 task_unlock(tsk);
60842@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
60843 }
60844 rcu_read_unlock();
60845
60846- if (p->fs->users > n_fs)
60847+ if (atomic_read(&p->fs->users) > n_fs)
60848 bprm->unsafe |= LSM_UNSAFE_SHARE;
60849 else
60850 p->fs->in_exec = 1;
60851@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
60852 return ret;
60853 }
60854
60855+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60856+static DEFINE_PER_CPU(u64, exec_counter);
60857+static int __init init_exec_counters(void)
60858+{
60859+ unsigned int cpu;
60860+
60861+ for_each_possible_cpu(cpu) {
60862+ per_cpu(exec_counter, cpu) = (u64)cpu;
60863+ }
60864+
60865+ return 0;
60866+}
60867+early_initcall(init_exec_counters);
60868+static inline void increment_exec_counter(void)
60869+{
60870+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
60871+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
60872+}
60873+#else
60874+static inline void increment_exec_counter(void) {}
60875+#endif
60876+
60877+extern void gr_handle_exec_args(struct linux_binprm *bprm,
60878+ struct user_arg_ptr argv);
60879+
60880 /*
60881 * sys_execve() executes a new program.
60882 */
60883@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60884 struct user_arg_ptr envp,
60885 int flags)
60886 {
60887+#ifdef CONFIG_GRKERNSEC
60888+ struct file *old_exec_file;
60889+ struct acl_subject_label *old_acl;
60890+ struct rlimit old_rlim[RLIM_NLIMITS];
60891+#endif
60892 char *pathbuf = NULL;
60893 struct linux_binprm *bprm;
60894 struct file *file;
60895@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
60896 if (IS_ERR(filename))
60897 return PTR_ERR(filename);
60898
60899+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
60900+
60901 /*
60902 * We move the actual failure in case of RLIMIT_NPROC excess from
60903 * set*uid() to execve() because too many poorly written programs
60904@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60905 if (IS_ERR(file))
60906 goto out_unmark;
60907
60908+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
60909+ retval = -EPERM;
60910+ goto out_unmark;
60911+ }
60912+
60913 sched_exec();
60914
60915 bprm->file = file;
60916@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
60917 }
60918 bprm->interp = bprm->filename;
60919
60920+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
60921+ retval = -EACCES;
60922+ goto out_unmark;
60923+ }
60924+
60925 retval = bprm_mm_init(bprm);
60926 if (retval)
60927 goto out_unmark;
60928@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
60929 if (retval < 0)
60930 goto out;
60931
60932+#ifdef CONFIG_GRKERNSEC
60933+ old_acl = current->acl;
60934+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
60935+ old_exec_file = current->exec_file;
60936+ get_file(file);
60937+ current->exec_file = file;
60938+#endif
60939+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60940+ /* limit suid stack to 8MB
60941+ * we saved the old limits above and will restore them if this exec fails
60942+ */
60943+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
60944+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
60945+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
60946+#endif
60947+
60948+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
60949+ retval = -EPERM;
60950+ goto out_fail;
60951+ }
60952+
60953+ if (!gr_tpe_allow(file)) {
60954+ retval = -EACCES;
60955+ goto out_fail;
60956+ }
60957+
60958+ if (gr_check_crash_exec(file)) {
60959+ retval = -EACCES;
60960+ goto out_fail;
60961+ }
60962+
60963+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
60964+ bprm->unsafe);
60965+ if (retval < 0)
60966+ goto out_fail;
60967+
60968 retval = copy_strings_kernel(1, &bprm->filename, bprm);
60969 if (retval < 0)
60970- goto out;
60971+ goto out_fail;
60972
60973 bprm->exec = bprm->p;
60974 retval = copy_strings(bprm->envc, envp, bprm);
60975 if (retval < 0)
60976- goto out;
60977+ goto out_fail;
60978
60979 retval = copy_strings(bprm->argc, argv, bprm);
60980 if (retval < 0)
60981- goto out;
60982+ goto out_fail;
60983+
60984+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
60985+
60986+ gr_handle_exec_args(bprm, argv);
60987
60988 retval = exec_binprm(bprm);
60989 if (retval < 0)
60990- goto out;
60991+ goto out_fail;
60992+#ifdef CONFIG_GRKERNSEC
60993+ if (old_exec_file)
60994+ fput(old_exec_file);
60995+#endif
60996
60997 /* execve succeeded */
60998+
60999+ increment_exec_counter();
61000 current->fs->in_exec = 0;
61001 current->in_execve = 0;
61002 acct_update_integrals(current);
61003@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61004 put_files_struct(displaced);
61005 return retval;
61006
61007+out_fail:
61008+#ifdef CONFIG_GRKERNSEC
61009+ current->acl = old_acl;
61010+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61011+ fput(current->exec_file);
61012+ current->exec_file = old_exec_file;
61013+#endif
61014+
61015 out:
61016 if (bprm->mm) {
61017 acct_arg_size(bprm, 0);
61018@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61019 argv, envp, flags);
61020 }
61021 #endif
61022+
61023+int pax_check_flags(unsigned long *flags)
61024+{
61025+ int retval = 0;
61026+
61027+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61028+ if (*flags & MF_PAX_SEGMEXEC)
61029+ {
61030+ *flags &= ~MF_PAX_SEGMEXEC;
61031+ retval = -EINVAL;
61032+ }
61033+#endif
61034+
61035+ if ((*flags & MF_PAX_PAGEEXEC)
61036+
61037+#ifdef CONFIG_PAX_PAGEEXEC
61038+ && (*flags & MF_PAX_SEGMEXEC)
61039+#endif
61040+
61041+ )
61042+ {
61043+ *flags &= ~MF_PAX_PAGEEXEC;
61044+ retval = -EINVAL;
61045+ }
61046+
61047+ if ((*flags & MF_PAX_MPROTECT)
61048+
61049+#ifdef CONFIG_PAX_MPROTECT
61050+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61051+#endif
61052+
61053+ )
61054+ {
61055+ *flags &= ~MF_PAX_MPROTECT;
61056+ retval = -EINVAL;
61057+ }
61058+
61059+ if ((*flags & MF_PAX_EMUTRAMP)
61060+
61061+#ifdef CONFIG_PAX_EMUTRAMP
61062+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61063+#endif
61064+
61065+ )
61066+ {
61067+ *flags &= ~MF_PAX_EMUTRAMP;
61068+ retval = -EINVAL;
61069+ }
61070+
61071+ return retval;
61072+}
61073+
61074+EXPORT_SYMBOL(pax_check_flags);
61075+
61076+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61077+char *pax_get_path(const struct path *path, char *buf, int buflen)
61078+{
61079+ char *pathname = d_path(path, buf, buflen);
61080+
61081+ if (IS_ERR(pathname))
61082+ goto toolong;
61083+
61084+ pathname = mangle_path(buf, pathname, "\t\n\\");
61085+ if (!pathname)
61086+ goto toolong;
61087+
61088+ *pathname = 0;
61089+ return buf;
61090+
61091+toolong:
61092+ return "<path too long>";
61093+}
61094+EXPORT_SYMBOL(pax_get_path);
61095+
61096+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61097+{
61098+ struct task_struct *tsk = current;
61099+ struct mm_struct *mm = current->mm;
61100+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61101+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61102+ char *path_exec = NULL;
61103+ char *path_fault = NULL;
61104+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61105+ siginfo_t info = { };
61106+
61107+ if (buffer_exec && buffer_fault) {
61108+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61109+
61110+ down_read(&mm->mmap_sem);
61111+ vma = mm->mmap;
61112+ while (vma && (!vma_exec || !vma_fault)) {
61113+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61114+ vma_exec = vma;
61115+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61116+ vma_fault = vma;
61117+ vma = vma->vm_next;
61118+ }
61119+ if (vma_exec)
61120+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61121+ if (vma_fault) {
61122+ start = vma_fault->vm_start;
61123+ end = vma_fault->vm_end;
61124+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61125+ if (vma_fault->vm_file)
61126+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61127+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61128+ path_fault = "<heap>";
61129+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61130+ path_fault = "<stack>";
61131+ else
61132+ path_fault = "<anonymous mapping>";
61133+ }
61134+ up_read(&mm->mmap_sem);
61135+ }
61136+ if (tsk->signal->curr_ip)
61137+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61138+ else
61139+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61140+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61141+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61142+ free_page((unsigned long)buffer_exec);
61143+ free_page((unsigned long)buffer_fault);
61144+ pax_report_insns(regs, pc, sp);
61145+ info.si_signo = SIGKILL;
61146+ info.si_errno = 0;
61147+ info.si_code = SI_KERNEL;
61148+ info.si_pid = 0;
61149+ info.si_uid = 0;
61150+ do_coredump(&info);
61151+}
61152+#endif
61153+
61154+#ifdef CONFIG_PAX_REFCOUNT
61155+void pax_report_refcount_overflow(struct pt_regs *regs)
61156+{
61157+ if (current->signal->curr_ip)
61158+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61159+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61160+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61161+ else
61162+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61163+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61164+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
61165+ preempt_disable();
61166+ show_regs(regs);
61167+ preempt_enable();
61168+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61169+}
61170+#endif
61171+
61172+#ifdef CONFIG_PAX_USERCOPY
61173+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61174+static noinline int check_stack_object(const void *obj, unsigned long len)
61175+{
61176+ const void * const stack = task_stack_page(current);
61177+ const void * const stackend = stack + THREAD_SIZE;
61178+
61179+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61180+ const void *frame = NULL;
61181+ const void *oldframe;
61182+#endif
61183+
61184+ if (obj + len < obj)
61185+ return -1;
61186+
61187+ if (obj + len <= stack || stackend <= obj)
61188+ return 0;
61189+
61190+ if (obj < stack || stackend < obj + len)
61191+ return -1;
61192+
61193+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61194+ oldframe = __builtin_frame_address(1);
61195+ if (oldframe)
61196+ frame = __builtin_frame_address(2);
61197+ /*
61198+ low ----------------------------------------------> high
61199+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61200+ ^----------------^
61201+ allow copies only within here
61202+ */
61203+ while (stack <= frame && frame < stackend) {
61204+ /* if obj + len extends past the last frame, this
61205+ check won't pass and the next frame will be 0,
61206+ causing us to bail out and correctly report
61207+ the copy as invalid
61208+ */
61209+ if (obj + len <= frame)
61210+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61211+ oldframe = frame;
61212+ frame = *(const void * const *)frame;
61213+ }
61214+ return -1;
61215+#else
61216+ return 1;
61217+#endif
61218+}
61219+
61220+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61221+{
61222+ if (current->signal->curr_ip)
61223+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61224+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61225+ else
61226+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61227+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61228+ dump_stack();
61229+ gr_handle_kernel_exploit();
61230+ do_group_exit(SIGKILL);
61231+}
61232+#endif
61233+
61234+#ifdef CONFIG_PAX_USERCOPY
61235+
61236+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61237+{
61238+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61239+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61240+#ifdef CONFIG_MODULES
61241+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61242+#else
61243+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61244+#endif
61245+
61246+#else
61247+ unsigned long textlow = (unsigned long)_stext;
61248+ unsigned long texthigh = (unsigned long)_etext;
61249+
61250+#ifdef CONFIG_X86_64
61251+ /* check against linear mapping as well */
61252+ if (high > (unsigned long)__va(__pa(textlow)) &&
61253+ low < (unsigned long)__va(__pa(texthigh)))
61254+ return true;
61255+#endif
61256+
61257+#endif
61258+
61259+ if (high <= textlow || low >= texthigh)
61260+ return false;
61261+ else
61262+ return true;
61263+}
61264+#endif
61265+
61266+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61267+{
61268+#ifdef CONFIG_PAX_USERCOPY
61269+ const char *type;
61270+#endif
61271+
61272+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61273+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61274+ unsigned long currentsp = (unsigned long)&stackstart;
61275+ if (unlikely((currentsp < stackstart + 512 ||
61276+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61277+ BUG();
61278+#endif
61279+
61280+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61281+ if (const_size)
61282+ return;
61283+#endif
61284+
61285+#ifdef CONFIG_PAX_USERCOPY
61286+ if (!n)
61287+ return;
61288+
61289+ type = check_heap_object(ptr, n);
61290+ if (!type) {
61291+ int ret = check_stack_object(ptr, n);
61292+ if (ret == 1 || ret == 2)
61293+ return;
61294+ if (ret == 0) {
61295+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61296+ type = "<kernel text>";
61297+ else
61298+ return;
61299+ } else
61300+ type = "<process stack>";
61301+ }
61302+
61303+ pax_report_usercopy(ptr, n, to_user, type);
61304+#endif
61305+
61306+}
61307+EXPORT_SYMBOL(__check_object_size);
61308+
61309+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61310+void pax_track_stack(void)
61311+{
61312+ unsigned long sp = (unsigned long)&sp;
61313+ if (sp < current_thread_info()->lowest_stack &&
61314+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61315+ current_thread_info()->lowest_stack = sp;
61316+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61317+ BUG();
61318+}
61319+EXPORT_SYMBOL(pax_track_stack);
61320+#endif
61321+
61322+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61323+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61324+{
61325+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61326+ dump_stack();
61327+ do_group_exit(SIGKILL);
61328+}
61329+EXPORT_SYMBOL(report_size_overflow);
61330+#endif
61331diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61332index 9f9992b..8b59411 100644
61333--- a/fs/ext2/balloc.c
61334+++ b/fs/ext2/balloc.c
61335@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61336
61337 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61338 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61339- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61340+ if (free_blocks < root_blocks + 1 &&
61341 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61342 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61343- !in_group_p (sbi->s_resgid))) {
61344+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61345 return 0;
61346 }
61347 return 1;
61348diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61349index ae55fdd..5e64c27 100644
61350--- a/fs/ext2/super.c
61351+++ b/fs/ext2/super.c
61352@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61353 #ifdef CONFIG_EXT2_FS_XATTR
61354 if (test_opt(sb, XATTR_USER))
61355 seq_puts(seq, ",user_xattr");
61356- if (!test_opt(sb, XATTR_USER) &&
61357- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61358+ if (!test_opt(sb, XATTR_USER))
61359 seq_puts(seq, ",nouser_xattr");
61360- }
61361 #endif
61362
61363 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61364@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61365 if (def_mount_opts & EXT2_DEFM_UID16)
61366 set_opt(sbi->s_mount_opt, NO_UID32);
61367 #ifdef CONFIG_EXT2_FS_XATTR
61368- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61369- set_opt(sbi->s_mount_opt, XATTR_USER);
61370+ /* always enable user xattrs */
61371+ set_opt(sbi->s_mount_opt, XATTR_USER);
61372 #endif
61373 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61374 if (def_mount_opts & EXT2_DEFM_ACL)
61375diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61376index 9142614..97484fa 100644
61377--- a/fs/ext2/xattr.c
61378+++ b/fs/ext2/xattr.c
61379@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61380 struct buffer_head *bh = NULL;
61381 struct ext2_xattr_entry *entry;
61382 char *end;
61383- size_t rest = buffer_size;
61384+ size_t rest = buffer_size, total_size = 0;
61385 int error;
61386
61387 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61388@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61389 buffer += size;
61390 }
61391 rest -= size;
61392+ total_size += size;
61393 }
61394 }
61395- error = buffer_size - rest; /* total size */
61396+ error = total_size;
61397
61398 cleanup:
61399 brelse(bh);
61400diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61401index 158b5d4..2432610 100644
61402--- a/fs/ext3/balloc.c
61403+++ b/fs/ext3/balloc.c
61404@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61405
61406 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61407 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61408- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61409+ if (free_blocks < root_blocks + 1 &&
61410 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61411 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61412- !in_group_p (sbi->s_resgid))) {
61413+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61414 return 0;
61415 }
61416 return 1;
61417diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61418index 9b4e7d7..048d025 100644
61419--- a/fs/ext3/super.c
61420+++ b/fs/ext3/super.c
61421@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61422 #ifdef CONFIG_EXT3_FS_XATTR
61423 if (test_opt(sb, XATTR_USER))
61424 seq_puts(seq, ",user_xattr");
61425- if (!test_opt(sb, XATTR_USER) &&
61426- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61427+ if (!test_opt(sb, XATTR_USER))
61428 seq_puts(seq, ",nouser_xattr");
61429- }
61430 #endif
61431 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61432 if (test_opt(sb, POSIX_ACL))
61433@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61434 if (def_mount_opts & EXT3_DEFM_UID16)
61435 set_opt(sbi->s_mount_opt, NO_UID32);
61436 #ifdef CONFIG_EXT3_FS_XATTR
61437- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61438- set_opt(sbi->s_mount_opt, XATTR_USER);
61439+ /* always enable user xattrs */
61440+ set_opt(sbi->s_mount_opt, XATTR_USER);
61441 #endif
61442 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61443 if (def_mount_opts & EXT3_DEFM_ACL)
61444diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61445index c6874be..f8a6ae8 100644
61446--- a/fs/ext3/xattr.c
61447+++ b/fs/ext3/xattr.c
61448@@ -330,7 +330,7 @@ static int
61449 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61450 char *buffer, size_t buffer_size)
61451 {
61452- size_t rest = buffer_size;
61453+ size_t rest = buffer_size, total_size = 0;
61454
61455 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61456 const struct xattr_handler *handler =
61457@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61458 buffer += size;
61459 }
61460 rest -= size;
61461+ total_size += size;
61462 }
61463 }
61464- return buffer_size - rest;
61465+ return total_size;
61466 }
61467
61468 static int
61469diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61470index 83a6f49..d4e4d03 100644
61471--- a/fs/ext4/balloc.c
61472+++ b/fs/ext4/balloc.c
61473@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61474 /* Hm, nope. Are (enough) root reserved clusters available? */
61475 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61476 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61477- capable(CAP_SYS_RESOURCE) ||
61478- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61479+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61480+ capable_nolog(CAP_SYS_RESOURCE)) {
61481
61482 if (free_clusters >= (nclusters + dirty_clusters +
61483 resv_clusters))
61484diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61485index a75fba6..8235fca 100644
61486--- a/fs/ext4/ext4.h
61487+++ b/fs/ext4/ext4.h
61488@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61489 unsigned long s_mb_last_start;
61490
61491 /* stats for buddy allocator */
61492- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61493- atomic_t s_bal_success; /* we found long enough chunks */
61494- atomic_t s_bal_allocated; /* in blocks */
61495- atomic_t s_bal_ex_scanned; /* total extents scanned */
61496- atomic_t s_bal_goals; /* goal hits */
61497- atomic_t s_bal_breaks; /* too long searches */
61498- atomic_t s_bal_2orders; /* 2^order hits */
61499+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61500+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61501+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61502+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61503+ atomic_unchecked_t s_bal_goals; /* goal hits */
61504+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61505+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61506 spinlock_t s_bal_lock;
61507 unsigned long s_mb_buddies_generated;
61508 unsigned long long s_mb_generation_time;
61509- atomic_t s_mb_lost_chunks;
61510- atomic_t s_mb_preallocated;
61511- atomic_t s_mb_discarded;
61512+ atomic_unchecked_t s_mb_lost_chunks;
61513+ atomic_unchecked_t s_mb_preallocated;
61514+ atomic_unchecked_t s_mb_discarded;
61515 atomic_t s_lock_busy;
61516
61517 /* locality groups */
61518diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61519index 8d1e602..abf497b 100644
61520--- a/fs/ext4/mballoc.c
61521+++ b/fs/ext4/mballoc.c
61522@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61523 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61524
61525 if (EXT4_SB(sb)->s_mb_stats)
61526- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61527+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61528
61529 break;
61530 }
61531@@ -2211,7 +2211,7 @@ repeat:
61532 ac->ac_status = AC_STATUS_CONTINUE;
61533 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61534 cr = 3;
61535- atomic_inc(&sbi->s_mb_lost_chunks);
61536+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61537 goto repeat;
61538 }
61539 }
61540@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61541 if (sbi->s_mb_stats) {
61542 ext4_msg(sb, KERN_INFO,
61543 "mballoc: %u blocks %u reqs (%u success)",
61544- atomic_read(&sbi->s_bal_allocated),
61545- atomic_read(&sbi->s_bal_reqs),
61546- atomic_read(&sbi->s_bal_success));
61547+ atomic_read_unchecked(&sbi->s_bal_allocated),
61548+ atomic_read_unchecked(&sbi->s_bal_reqs),
61549+ atomic_read_unchecked(&sbi->s_bal_success));
61550 ext4_msg(sb, KERN_INFO,
61551 "mballoc: %u extents scanned, %u goal hits, "
61552 "%u 2^N hits, %u breaks, %u lost",
61553- atomic_read(&sbi->s_bal_ex_scanned),
61554- atomic_read(&sbi->s_bal_goals),
61555- atomic_read(&sbi->s_bal_2orders),
61556- atomic_read(&sbi->s_bal_breaks),
61557- atomic_read(&sbi->s_mb_lost_chunks));
61558+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61559+ atomic_read_unchecked(&sbi->s_bal_goals),
61560+ atomic_read_unchecked(&sbi->s_bal_2orders),
61561+ atomic_read_unchecked(&sbi->s_bal_breaks),
61562+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61563 ext4_msg(sb, KERN_INFO,
61564 "mballoc: %lu generated and it took %Lu",
61565 sbi->s_mb_buddies_generated,
61566 sbi->s_mb_generation_time);
61567 ext4_msg(sb, KERN_INFO,
61568 "mballoc: %u preallocated, %u discarded",
61569- atomic_read(&sbi->s_mb_preallocated),
61570- atomic_read(&sbi->s_mb_discarded));
61571+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61572+ atomic_read_unchecked(&sbi->s_mb_discarded));
61573 }
61574
61575 free_percpu(sbi->s_locality_groups);
61576@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61577 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61578
61579 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61580- atomic_inc(&sbi->s_bal_reqs);
61581- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61582+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61583+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61584 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61585- atomic_inc(&sbi->s_bal_success);
61586- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61587+ atomic_inc_unchecked(&sbi->s_bal_success);
61588+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61589 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61590 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61591- atomic_inc(&sbi->s_bal_goals);
61592+ atomic_inc_unchecked(&sbi->s_bal_goals);
61593 if (ac->ac_found > sbi->s_mb_max_to_scan)
61594- atomic_inc(&sbi->s_bal_breaks);
61595+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61596 }
61597
61598 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61599@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61600 trace_ext4_mb_new_inode_pa(ac, pa);
61601
61602 ext4_mb_use_inode_pa(ac, pa);
61603- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61604+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61605
61606 ei = EXT4_I(ac->ac_inode);
61607 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61608@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61609 trace_ext4_mb_new_group_pa(ac, pa);
61610
61611 ext4_mb_use_group_pa(ac, pa);
61612- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61613+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61614
61615 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61616 lg = ac->ac_lg;
61617@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61618 * from the bitmap and continue.
61619 */
61620 }
61621- atomic_add(free, &sbi->s_mb_discarded);
61622+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61623
61624 return err;
61625 }
61626@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61627 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61628 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61629 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61630- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61631+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61632 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61633
61634 return 0;
61635diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61636index 8313ca3..8a37d08 100644
61637--- a/fs/ext4/mmp.c
61638+++ b/fs/ext4/mmp.c
61639@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61640 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61641 const char *function, unsigned int line, const char *msg)
61642 {
61643- __ext4_warning(sb, function, line, msg);
61644+ __ext4_warning(sb, function, line, "%s", msg);
61645 __ext4_warning(sb, function, line,
61646 "MMP failure info: last update time: %llu, last update "
61647 "node: %s, last update device: %s\n",
61648diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
61649index 8a8ec62..1b02de5 100644
61650--- a/fs/ext4/resize.c
61651+++ b/fs/ext4/resize.c
61652@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61653
61654 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
61655 for (count2 = count; count > 0; count -= count2, block += count2) {
61656- ext4_fsblk_t start;
61657+ ext4_fsblk_t start, diff;
61658 struct buffer_head *bh;
61659 ext4_group_t group;
61660 int err;
61661@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61662 start = ext4_group_first_block_no(sb, group);
61663 group -= flex_gd->groups[0].group;
61664
61665- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
61666- if (count2 > count)
61667- count2 = count;
61668-
61669 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
61670 BUG_ON(flex_gd->count > 1);
61671 continue;
61672@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61673 err = ext4_journal_get_write_access(handle, bh);
61674 if (err)
61675 return err;
61676+
61677+ diff = block - start;
61678+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
61679+ if (count2 > count)
61680+ count2 = count;
61681+
61682 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
61683- block - start, count2);
61684- ext4_set_bits(bh->b_data, block - start, count2);
61685+ diff, count2);
61686+ ext4_set_bits(bh->b_data, diff, count2);
61687
61688 err = ext4_handle_dirty_metadata(handle, NULL, bh);
61689 if (unlikely(err))
61690diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61691index fc29b2c..6c8b255 100644
61692--- a/fs/ext4/super.c
61693+++ b/fs/ext4/super.c
61694@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61695 }
61696
61697 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61698-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61699+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61700 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61701
61702 #ifdef CONFIG_QUOTA
61703@@ -2440,7 +2440,7 @@ struct ext4_attr {
61704 int offset;
61705 int deprecated_val;
61706 } u;
61707-};
61708+} __do_const;
61709
61710 static int parse_strtoull(const char *buf,
61711 unsigned long long max, unsigned long long *value)
61712diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61713index 1e09fc7..0400dd4 100644
61714--- a/fs/ext4/xattr.c
61715+++ b/fs/ext4/xattr.c
61716@@ -399,7 +399,7 @@ static int
61717 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61718 char *buffer, size_t buffer_size)
61719 {
61720- size_t rest = buffer_size;
61721+ size_t rest = buffer_size, total_size = 0;
61722
61723 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
61724 const struct xattr_handler *handler =
61725@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61726 buffer += size;
61727 }
61728 rest -= size;
61729+ total_size += size;
61730 }
61731 }
61732- return buffer_size - rest;
61733+ return total_size;
61734 }
61735
61736 static int
61737diff --git a/fs/fcntl.c b/fs/fcntl.c
61738index ee85cd4..9dd0d20 100644
61739--- a/fs/fcntl.c
61740+++ b/fs/fcntl.c
61741@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
61742 int force)
61743 {
61744 security_file_set_fowner(filp);
61745+ if (gr_handle_chroot_fowner(pid, type))
61746+ return;
61747+ if (gr_check_protected_task_fowner(pid, type))
61748+ return;
61749 f_modown(filp, pid, type, force);
61750 }
61751 EXPORT_SYMBOL(__f_setown);
61752diff --git a/fs/fhandle.c b/fs/fhandle.c
61753index 999ff5c..2281df9 100644
61754--- a/fs/fhandle.c
61755+++ b/fs/fhandle.c
61756@@ -8,6 +8,7 @@
61757 #include <linux/fs_struct.h>
61758 #include <linux/fsnotify.h>
61759 #include <linux/personality.h>
61760+#include <linux/grsecurity.h>
61761 #include <asm/uaccess.h>
61762 #include "internal.h"
61763 #include "mount.h"
61764@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
61765 } else
61766 retval = 0;
61767 /* copy the mount id */
61768- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
61769- sizeof(*mnt_id)) ||
61770+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
61771 copy_to_user(ufh, handle,
61772 sizeof(struct file_handle) + handle_bytes))
61773 retval = -EFAULT;
61774@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61775 * the directory. Ideally we would like CAP_DAC_SEARCH.
61776 * But we don't have that
61777 */
61778- if (!capable(CAP_DAC_READ_SEARCH)) {
61779+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
61780 retval = -EPERM;
61781 goto out_err;
61782 }
61783@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61784 goto out_err;
61785 }
61786 /* copy the full handle */
61787- if (copy_from_user(handle, ufh,
61788- sizeof(struct file_handle) +
61789+ *handle = f_handle;
61790+ if (copy_from_user(&handle->f_handle,
61791+ &ufh->f_handle,
61792 f_handle.handle_bytes)) {
61793 retval = -EFAULT;
61794 goto out_handle;
61795diff --git a/fs/file.c b/fs/file.c
61796index ee738ea..f6c1562 100644
61797--- a/fs/file.c
61798+++ b/fs/file.c
61799@@ -16,6 +16,7 @@
61800 #include <linux/slab.h>
61801 #include <linux/vmalloc.h>
61802 #include <linux/file.h>
61803+#include <linux/security.h>
61804 #include <linux/fdtable.h>
61805 #include <linux/bitops.h>
61806 #include <linux/interrupt.h>
61807@@ -139,7 +140,7 @@ out:
61808 * Return <0 error code on error; 1 on successful completion.
61809 * The files->file_lock should be held on entry, and will be held on exit.
61810 */
61811-static int expand_fdtable(struct files_struct *files, int nr)
61812+static int expand_fdtable(struct files_struct *files, unsigned int nr)
61813 __releases(files->file_lock)
61814 __acquires(files->file_lock)
61815 {
61816@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
61817 * expanded and execution may have blocked.
61818 * The files->file_lock should be held on entry, and will be held on exit.
61819 */
61820-static int expand_files(struct files_struct *files, int nr)
61821+static int expand_files(struct files_struct *files, unsigned int nr)
61822 {
61823 struct fdtable *fdt;
61824
61825@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
61826 if (!file)
61827 return __close_fd(files, fd);
61828
61829+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
61830 if (fd >= rlimit(RLIMIT_NOFILE))
61831 return -EBADF;
61832
61833@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
61834 if (unlikely(oldfd == newfd))
61835 return -EINVAL;
61836
61837+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
61838 if (newfd >= rlimit(RLIMIT_NOFILE))
61839 return -EBADF;
61840
61841@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
61842 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
61843 {
61844 int err;
61845+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
61846 if (from >= rlimit(RLIMIT_NOFILE))
61847 return -EINVAL;
61848 err = alloc_fd(from, flags);
61849diff --git a/fs/filesystems.c b/fs/filesystems.c
61850index 5797d45..7d7d79a 100644
61851--- a/fs/filesystems.c
61852+++ b/fs/filesystems.c
61853@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
61854 int len = dot ? dot - name : strlen(name);
61855
61856 fs = __get_fs_type(name, len);
61857+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61858+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
61859+#else
61860 if (!fs && (request_module("fs-%.*s", len, name) == 0))
61861+#endif
61862 fs = __get_fs_type(name, len);
61863
61864 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
61865diff --git a/fs/fs_struct.c b/fs/fs_struct.c
61866index 7dca743..2f2786d 100644
61867--- a/fs/fs_struct.c
61868+++ b/fs/fs_struct.c
61869@@ -4,6 +4,7 @@
61870 #include <linux/path.h>
61871 #include <linux/slab.h>
61872 #include <linux/fs_struct.h>
61873+#include <linux/grsecurity.h>
61874 #include "internal.h"
61875
61876 /*
61877@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
61878 struct path old_root;
61879
61880 path_get(path);
61881+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
61882 spin_lock(&fs->lock);
61883 write_seqcount_begin(&fs->seq);
61884 old_root = fs->root;
61885 fs->root = *path;
61886+ gr_set_chroot_entries(current, path);
61887 write_seqcount_end(&fs->seq);
61888 spin_unlock(&fs->lock);
61889- if (old_root.dentry)
61890+ if (old_root.dentry) {
61891+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
61892 path_put(&old_root);
61893+ }
61894 }
61895
61896 /*
61897@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61898 int hits = 0;
61899 spin_lock(&fs->lock);
61900 write_seqcount_begin(&fs->seq);
61901+ /* this root replacement is only done by pivot_root,
61902+ leave grsec's chroot tagging alone for this task
61903+ so that a pivoted root isn't treated as a chroot
61904+ */
61905 hits += replace_path(&fs->root, old_root, new_root);
61906 hits += replace_path(&fs->pwd, old_root, new_root);
61907 write_seqcount_end(&fs->seq);
61908@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
61909
61910 void free_fs_struct(struct fs_struct *fs)
61911 {
61912+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
61913 path_put(&fs->root);
61914 path_put(&fs->pwd);
61915 kmem_cache_free(fs_cachep, fs);
61916@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
61917 task_lock(tsk);
61918 spin_lock(&fs->lock);
61919 tsk->fs = NULL;
61920- kill = !--fs->users;
61921+ gr_clear_chroot_entries(tsk);
61922+ kill = !atomic_dec_return(&fs->users);
61923 spin_unlock(&fs->lock);
61924 task_unlock(tsk);
61925 if (kill)
61926@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61927 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
61928 /* We don't need to lock fs - think why ;-) */
61929 if (fs) {
61930- fs->users = 1;
61931+ atomic_set(&fs->users, 1);
61932 fs->in_exec = 0;
61933 spin_lock_init(&fs->lock);
61934 seqcount_init(&fs->seq);
61935@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
61936 spin_lock(&old->lock);
61937 fs->root = old->root;
61938 path_get(&fs->root);
61939+ /* instead of calling gr_set_chroot_entries here,
61940+ we call it from every caller of this function
61941+ */
61942 fs->pwd = old->pwd;
61943 path_get(&fs->pwd);
61944 spin_unlock(&old->lock);
61945@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
61946
61947 task_lock(current);
61948 spin_lock(&fs->lock);
61949- kill = !--fs->users;
61950+ kill = !atomic_dec_return(&fs->users);
61951 current->fs = new_fs;
61952+ gr_set_chroot_entries(current, &new_fs->root);
61953 spin_unlock(&fs->lock);
61954 task_unlock(current);
61955
61956@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
61957
61958 int current_umask(void)
61959 {
61960- return current->fs->umask;
61961+ return current->fs->umask | gr_acl_umask();
61962 }
61963 EXPORT_SYMBOL(current_umask);
61964
61965 /* to be mentioned only in INIT_TASK */
61966 struct fs_struct init_fs = {
61967- .users = 1,
61968+ .users = ATOMIC_INIT(1),
61969 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
61970 .seq = SEQCNT_ZERO(init_fs.seq),
61971 .umask = 0022,
61972diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
61973index 89acec7..a575262 100644
61974--- a/fs/fscache/cookie.c
61975+++ b/fs/fscache/cookie.c
61976@@ -19,7 +19,7 @@
61977
61978 struct kmem_cache *fscache_cookie_jar;
61979
61980-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
61981+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
61982
61983 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
61984 static int fscache_alloc_object(struct fscache_cache *cache,
61985@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
61986 parent ? (char *) parent->def->name : "<no-parent>",
61987 def->name, netfs_data, enable);
61988
61989- fscache_stat(&fscache_n_acquires);
61990+ fscache_stat_unchecked(&fscache_n_acquires);
61991
61992 /* if there's no parent cookie, then we don't create one here either */
61993 if (!parent) {
61994- fscache_stat(&fscache_n_acquires_null);
61995+ fscache_stat_unchecked(&fscache_n_acquires_null);
61996 _leave(" [no parent]");
61997 return NULL;
61998 }
61999@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62000 /* allocate and initialise a cookie */
62001 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62002 if (!cookie) {
62003- fscache_stat(&fscache_n_acquires_oom);
62004+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62005 _leave(" [ENOMEM]");
62006 return NULL;
62007 }
62008@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62009
62010 switch (cookie->def->type) {
62011 case FSCACHE_COOKIE_TYPE_INDEX:
62012- fscache_stat(&fscache_n_cookie_index);
62013+ fscache_stat_unchecked(&fscache_n_cookie_index);
62014 break;
62015 case FSCACHE_COOKIE_TYPE_DATAFILE:
62016- fscache_stat(&fscache_n_cookie_data);
62017+ fscache_stat_unchecked(&fscache_n_cookie_data);
62018 break;
62019 default:
62020- fscache_stat(&fscache_n_cookie_special);
62021+ fscache_stat_unchecked(&fscache_n_cookie_special);
62022 break;
62023 }
62024
62025@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62026 } else {
62027 atomic_dec(&parent->n_children);
62028 __fscache_cookie_put(cookie);
62029- fscache_stat(&fscache_n_acquires_nobufs);
62030+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62031 _leave(" = NULL");
62032 return NULL;
62033 }
62034@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62035 }
62036 }
62037
62038- fscache_stat(&fscache_n_acquires_ok);
62039+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62040 _leave(" = %p", cookie);
62041 return cookie;
62042 }
62043@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62044 cache = fscache_select_cache_for_object(cookie->parent);
62045 if (!cache) {
62046 up_read(&fscache_addremove_sem);
62047- fscache_stat(&fscache_n_acquires_no_cache);
62048+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62049 _leave(" = -ENOMEDIUM [no cache]");
62050 return -ENOMEDIUM;
62051 }
62052@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62053 object = cache->ops->alloc_object(cache, cookie);
62054 fscache_stat_d(&fscache_n_cop_alloc_object);
62055 if (IS_ERR(object)) {
62056- fscache_stat(&fscache_n_object_no_alloc);
62057+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62058 ret = PTR_ERR(object);
62059 goto error;
62060 }
62061
62062- fscache_stat(&fscache_n_object_alloc);
62063+ fscache_stat_unchecked(&fscache_n_object_alloc);
62064
62065- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62066+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62067
62068 _debug("ALLOC OBJ%x: %s {%lx}",
62069 object->debug_id, cookie->def->name, object->events);
62070@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62071
62072 _enter("{%s}", cookie->def->name);
62073
62074- fscache_stat(&fscache_n_invalidates);
62075+ fscache_stat_unchecked(&fscache_n_invalidates);
62076
62077 /* Only permit invalidation of data files. Invalidating an index will
62078 * require the caller to release all its attachments to the tree rooted
62079@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62080 {
62081 struct fscache_object *object;
62082
62083- fscache_stat(&fscache_n_updates);
62084+ fscache_stat_unchecked(&fscache_n_updates);
62085
62086 if (!cookie) {
62087- fscache_stat(&fscache_n_updates_null);
62088+ fscache_stat_unchecked(&fscache_n_updates_null);
62089 _leave(" [no cookie]");
62090 return;
62091 }
62092@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62093 */
62094 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62095 {
62096- fscache_stat(&fscache_n_relinquishes);
62097+ fscache_stat_unchecked(&fscache_n_relinquishes);
62098 if (retire)
62099- fscache_stat(&fscache_n_relinquishes_retire);
62100+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62101
62102 if (!cookie) {
62103- fscache_stat(&fscache_n_relinquishes_null);
62104+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62105 _leave(" [no cookie]");
62106 return;
62107 }
62108@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62109 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62110 goto inconsistent;
62111
62112- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62113+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62114
62115 __fscache_use_cookie(cookie);
62116 if (fscache_submit_op(object, op) < 0)
62117diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62118index 7872a62..d91b19f 100644
62119--- a/fs/fscache/internal.h
62120+++ b/fs/fscache/internal.h
62121@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62122 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62123 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62124 struct fscache_operation *,
62125- atomic_t *,
62126- atomic_t *,
62127+ atomic_unchecked_t *,
62128+ atomic_unchecked_t *,
62129 void (*)(struct fscache_operation *));
62130 extern void fscache_invalidate_writes(struct fscache_cookie *);
62131
62132@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62133 * stats.c
62134 */
62135 #ifdef CONFIG_FSCACHE_STATS
62136-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62137-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62138+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62139+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62140
62141-extern atomic_t fscache_n_op_pend;
62142-extern atomic_t fscache_n_op_run;
62143-extern atomic_t fscache_n_op_enqueue;
62144-extern atomic_t fscache_n_op_deferred_release;
62145-extern atomic_t fscache_n_op_release;
62146-extern atomic_t fscache_n_op_gc;
62147-extern atomic_t fscache_n_op_cancelled;
62148-extern atomic_t fscache_n_op_rejected;
62149+extern atomic_unchecked_t fscache_n_op_pend;
62150+extern atomic_unchecked_t fscache_n_op_run;
62151+extern atomic_unchecked_t fscache_n_op_enqueue;
62152+extern atomic_unchecked_t fscache_n_op_deferred_release;
62153+extern atomic_unchecked_t fscache_n_op_release;
62154+extern atomic_unchecked_t fscache_n_op_gc;
62155+extern atomic_unchecked_t fscache_n_op_cancelled;
62156+extern atomic_unchecked_t fscache_n_op_rejected;
62157
62158-extern atomic_t fscache_n_attr_changed;
62159-extern atomic_t fscache_n_attr_changed_ok;
62160-extern atomic_t fscache_n_attr_changed_nobufs;
62161-extern atomic_t fscache_n_attr_changed_nomem;
62162-extern atomic_t fscache_n_attr_changed_calls;
62163+extern atomic_unchecked_t fscache_n_attr_changed;
62164+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62165+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62166+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62167+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62168
62169-extern atomic_t fscache_n_allocs;
62170-extern atomic_t fscache_n_allocs_ok;
62171-extern atomic_t fscache_n_allocs_wait;
62172-extern atomic_t fscache_n_allocs_nobufs;
62173-extern atomic_t fscache_n_allocs_intr;
62174-extern atomic_t fscache_n_allocs_object_dead;
62175-extern atomic_t fscache_n_alloc_ops;
62176-extern atomic_t fscache_n_alloc_op_waits;
62177+extern atomic_unchecked_t fscache_n_allocs;
62178+extern atomic_unchecked_t fscache_n_allocs_ok;
62179+extern atomic_unchecked_t fscache_n_allocs_wait;
62180+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62181+extern atomic_unchecked_t fscache_n_allocs_intr;
62182+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62183+extern atomic_unchecked_t fscache_n_alloc_ops;
62184+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62185
62186-extern atomic_t fscache_n_retrievals;
62187-extern atomic_t fscache_n_retrievals_ok;
62188-extern atomic_t fscache_n_retrievals_wait;
62189-extern atomic_t fscache_n_retrievals_nodata;
62190-extern atomic_t fscache_n_retrievals_nobufs;
62191-extern atomic_t fscache_n_retrievals_intr;
62192-extern atomic_t fscache_n_retrievals_nomem;
62193-extern atomic_t fscache_n_retrievals_object_dead;
62194-extern atomic_t fscache_n_retrieval_ops;
62195-extern atomic_t fscache_n_retrieval_op_waits;
62196+extern atomic_unchecked_t fscache_n_retrievals;
62197+extern atomic_unchecked_t fscache_n_retrievals_ok;
62198+extern atomic_unchecked_t fscache_n_retrievals_wait;
62199+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62200+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62201+extern atomic_unchecked_t fscache_n_retrievals_intr;
62202+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62203+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62204+extern atomic_unchecked_t fscache_n_retrieval_ops;
62205+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62206
62207-extern atomic_t fscache_n_stores;
62208-extern atomic_t fscache_n_stores_ok;
62209-extern atomic_t fscache_n_stores_again;
62210-extern atomic_t fscache_n_stores_nobufs;
62211-extern atomic_t fscache_n_stores_oom;
62212-extern atomic_t fscache_n_store_ops;
62213-extern atomic_t fscache_n_store_calls;
62214-extern atomic_t fscache_n_store_pages;
62215-extern atomic_t fscache_n_store_radix_deletes;
62216-extern atomic_t fscache_n_store_pages_over_limit;
62217+extern atomic_unchecked_t fscache_n_stores;
62218+extern atomic_unchecked_t fscache_n_stores_ok;
62219+extern atomic_unchecked_t fscache_n_stores_again;
62220+extern atomic_unchecked_t fscache_n_stores_nobufs;
62221+extern atomic_unchecked_t fscache_n_stores_oom;
62222+extern atomic_unchecked_t fscache_n_store_ops;
62223+extern atomic_unchecked_t fscache_n_store_calls;
62224+extern atomic_unchecked_t fscache_n_store_pages;
62225+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62226+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62227
62228-extern atomic_t fscache_n_store_vmscan_not_storing;
62229-extern atomic_t fscache_n_store_vmscan_gone;
62230-extern atomic_t fscache_n_store_vmscan_busy;
62231-extern atomic_t fscache_n_store_vmscan_cancelled;
62232-extern atomic_t fscache_n_store_vmscan_wait;
62233+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62234+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62235+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62236+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62237+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62238
62239-extern atomic_t fscache_n_marks;
62240-extern atomic_t fscache_n_uncaches;
62241+extern atomic_unchecked_t fscache_n_marks;
62242+extern atomic_unchecked_t fscache_n_uncaches;
62243
62244-extern atomic_t fscache_n_acquires;
62245-extern atomic_t fscache_n_acquires_null;
62246-extern atomic_t fscache_n_acquires_no_cache;
62247-extern atomic_t fscache_n_acquires_ok;
62248-extern atomic_t fscache_n_acquires_nobufs;
62249-extern atomic_t fscache_n_acquires_oom;
62250+extern atomic_unchecked_t fscache_n_acquires;
62251+extern atomic_unchecked_t fscache_n_acquires_null;
62252+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62253+extern atomic_unchecked_t fscache_n_acquires_ok;
62254+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62255+extern atomic_unchecked_t fscache_n_acquires_oom;
62256
62257-extern atomic_t fscache_n_invalidates;
62258-extern atomic_t fscache_n_invalidates_run;
62259+extern atomic_unchecked_t fscache_n_invalidates;
62260+extern atomic_unchecked_t fscache_n_invalidates_run;
62261
62262-extern atomic_t fscache_n_updates;
62263-extern atomic_t fscache_n_updates_null;
62264-extern atomic_t fscache_n_updates_run;
62265+extern atomic_unchecked_t fscache_n_updates;
62266+extern atomic_unchecked_t fscache_n_updates_null;
62267+extern atomic_unchecked_t fscache_n_updates_run;
62268
62269-extern atomic_t fscache_n_relinquishes;
62270-extern atomic_t fscache_n_relinquishes_null;
62271-extern atomic_t fscache_n_relinquishes_waitcrt;
62272-extern atomic_t fscache_n_relinquishes_retire;
62273+extern atomic_unchecked_t fscache_n_relinquishes;
62274+extern atomic_unchecked_t fscache_n_relinquishes_null;
62275+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62276+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62277
62278-extern atomic_t fscache_n_cookie_index;
62279-extern atomic_t fscache_n_cookie_data;
62280-extern atomic_t fscache_n_cookie_special;
62281+extern atomic_unchecked_t fscache_n_cookie_index;
62282+extern atomic_unchecked_t fscache_n_cookie_data;
62283+extern atomic_unchecked_t fscache_n_cookie_special;
62284
62285-extern atomic_t fscache_n_object_alloc;
62286-extern atomic_t fscache_n_object_no_alloc;
62287-extern atomic_t fscache_n_object_lookups;
62288-extern atomic_t fscache_n_object_lookups_negative;
62289-extern atomic_t fscache_n_object_lookups_positive;
62290-extern atomic_t fscache_n_object_lookups_timed_out;
62291-extern atomic_t fscache_n_object_created;
62292-extern atomic_t fscache_n_object_avail;
62293-extern atomic_t fscache_n_object_dead;
62294+extern atomic_unchecked_t fscache_n_object_alloc;
62295+extern atomic_unchecked_t fscache_n_object_no_alloc;
62296+extern atomic_unchecked_t fscache_n_object_lookups;
62297+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62298+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62299+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62300+extern atomic_unchecked_t fscache_n_object_created;
62301+extern atomic_unchecked_t fscache_n_object_avail;
62302+extern atomic_unchecked_t fscache_n_object_dead;
62303
62304-extern atomic_t fscache_n_checkaux_none;
62305-extern atomic_t fscache_n_checkaux_okay;
62306-extern atomic_t fscache_n_checkaux_update;
62307-extern atomic_t fscache_n_checkaux_obsolete;
62308+extern atomic_unchecked_t fscache_n_checkaux_none;
62309+extern atomic_unchecked_t fscache_n_checkaux_okay;
62310+extern atomic_unchecked_t fscache_n_checkaux_update;
62311+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62312
62313 extern atomic_t fscache_n_cop_alloc_object;
62314 extern atomic_t fscache_n_cop_lookup_object;
62315@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62316 atomic_inc(stat);
62317 }
62318
62319+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62320+{
62321+ atomic_inc_unchecked(stat);
62322+}
62323+
62324 static inline void fscache_stat_d(atomic_t *stat)
62325 {
62326 atomic_dec(stat);
62327@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62328
62329 #define __fscache_stat(stat) (NULL)
62330 #define fscache_stat(stat) do {} while (0)
62331+#define fscache_stat_unchecked(stat) do {} while (0)
62332 #define fscache_stat_d(stat) do {} while (0)
62333 #endif
62334
62335diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62336index da032da..0076ce7 100644
62337--- a/fs/fscache/object.c
62338+++ b/fs/fscache/object.c
62339@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62340 _debug("LOOKUP \"%s\" in \"%s\"",
62341 cookie->def->name, object->cache->tag->name);
62342
62343- fscache_stat(&fscache_n_object_lookups);
62344+ fscache_stat_unchecked(&fscache_n_object_lookups);
62345 fscache_stat(&fscache_n_cop_lookup_object);
62346 ret = object->cache->ops->lookup_object(object);
62347 fscache_stat_d(&fscache_n_cop_lookup_object);
62348@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62349 if (ret == -ETIMEDOUT) {
62350 /* probably stuck behind another object, so move this one to
62351 * the back of the queue */
62352- fscache_stat(&fscache_n_object_lookups_timed_out);
62353+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62354 _leave(" [timeout]");
62355 return NO_TRANSIT;
62356 }
62357@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62358 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62359
62360 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62361- fscache_stat(&fscache_n_object_lookups_negative);
62362+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62363
62364 /* Allow write requests to begin stacking up and read requests to begin
62365 * returning ENODATA.
62366@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62367 /* if we were still looking up, then we must have a positive lookup
62368 * result, in which case there may be data available */
62369 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62370- fscache_stat(&fscache_n_object_lookups_positive);
62371+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62372
62373 /* We do (presumably) have data */
62374 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62375@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62376 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62377 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62378 } else {
62379- fscache_stat(&fscache_n_object_created);
62380+ fscache_stat_unchecked(&fscache_n_object_created);
62381 }
62382
62383 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62384@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62385 fscache_stat_d(&fscache_n_cop_lookup_complete);
62386
62387 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62388- fscache_stat(&fscache_n_object_avail);
62389+ fscache_stat_unchecked(&fscache_n_object_avail);
62390
62391 _leave("");
62392 return transit_to(JUMPSTART_DEPS);
62393@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62394
62395 /* this just shifts the object release to the work processor */
62396 fscache_put_object(object);
62397- fscache_stat(&fscache_n_object_dead);
62398+ fscache_stat_unchecked(&fscache_n_object_dead);
62399
62400 _leave("");
62401 return transit_to(OBJECT_DEAD);
62402@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62403 enum fscache_checkaux result;
62404
62405 if (!object->cookie->def->check_aux) {
62406- fscache_stat(&fscache_n_checkaux_none);
62407+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62408 return FSCACHE_CHECKAUX_OKAY;
62409 }
62410
62411@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62412 switch (result) {
62413 /* entry okay as is */
62414 case FSCACHE_CHECKAUX_OKAY:
62415- fscache_stat(&fscache_n_checkaux_okay);
62416+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62417 break;
62418
62419 /* entry requires update */
62420 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62421- fscache_stat(&fscache_n_checkaux_update);
62422+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62423 break;
62424
62425 /* entry requires deletion */
62426 case FSCACHE_CHECKAUX_OBSOLETE:
62427- fscache_stat(&fscache_n_checkaux_obsolete);
62428+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62429 break;
62430
62431 default:
62432@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62433 {
62434 const struct fscache_state *s;
62435
62436- fscache_stat(&fscache_n_invalidates_run);
62437+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62438 fscache_stat(&fscache_n_cop_invalidate_object);
62439 s = _fscache_invalidate_object(object, event);
62440 fscache_stat_d(&fscache_n_cop_invalidate_object);
62441@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62442 {
62443 _enter("{OBJ%x},%d", object->debug_id, event);
62444
62445- fscache_stat(&fscache_n_updates_run);
62446+ fscache_stat_unchecked(&fscache_n_updates_run);
62447 fscache_stat(&fscache_n_cop_update_object);
62448 object->cache->ops->update_object(object);
62449 fscache_stat_d(&fscache_n_cop_update_object);
62450diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62451index e7b87a0..a85d47a 100644
62452--- a/fs/fscache/operation.c
62453+++ b/fs/fscache/operation.c
62454@@ -17,7 +17,7 @@
62455 #include <linux/slab.h>
62456 #include "internal.h"
62457
62458-atomic_t fscache_op_debug_id;
62459+atomic_unchecked_t fscache_op_debug_id;
62460 EXPORT_SYMBOL(fscache_op_debug_id);
62461
62462 /**
62463@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62464 ASSERTCMP(atomic_read(&op->usage), >, 0);
62465 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62466
62467- fscache_stat(&fscache_n_op_enqueue);
62468+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62469 switch (op->flags & FSCACHE_OP_TYPE) {
62470 case FSCACHE_OP_ASYNC:
62471 _debug("queue async");
62472@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62473 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62474 if (op->processor)
62475 fscache_enqueue_operation(op);
62476- fscache_stat(&fscache_n_op_run);
62477+ fscache_stat_unchecked(&fscache_n_op_run);
62478 }
62479
62480 /*
62481@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62482 if (object->n_in_progress > 0) {
62483 atomic_inc(&op->usage);
62484 list_add_tail(&op->pend_link, &object->pending_ops);
62485- fscache_stat(&fscache_n_op_pend);
62486+ fscache_stat_unchecked(&fscache_n_op_pend);
62487 } else if (!list_empty(&object->pending_ops)) {
62488 atomic_inc(&op->usage);
62489 list_add_tail(&op->pend_link, &object->pending_ops);
62490- fscache_stat(&fscache_n_op_pend);
62491+ fscache_stat_unchecked(&fscache_n_op_pend);
62492 fscache_start_operations(object);
62493 } else {
62494 ASSERTCMP(object->n_in_progress, ==, 0);
62495@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62496 object->n_exclusive++; /* reads and writes must wait */
62497 atomic_inc(&op->usage);
62498 list_add_tail(&op->pend_link, &object->pending_ops);
62499- fscache_stat(&fscache_n_op_pend);
62500+ fscache_stat_unchecked(&fscache_n_op_pend);
62501 ret = 0;
62502 } else {
62503 /* If we're in any other state, there must have been an I/O
62504@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62505 if (object->n_exclusive > 0) {
62506 atomic_inc(&op->usage);
62507 list_add_tail(&op->pend_link, &object->pending_ops);
62508- fscache_stat(&fscache_n_op_pend);
62509+ fscache_stat_unchecked(&fscache_n_op_pend);
62510 } else if (!list_empty(&object->pending_ops)) {
62511 atomic_inc(&op->usage);
62512 list_add_tail(&op->pend_link, &object->pending_ops);
62513- fscache_stat(&fscache_n_op_pend);
62514+ fscache_stat_unchecked(&fscache_n_op_pend);
62515 fscache_start_operations(object);
62516 } else {
62517 ASSERTCMP(object->n_exclusive, ==, 0);
62518@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62519 object->n_ops++;
62520 atomic_inc(&op->usage);
62521 list_add_tail(&op->pend_link, &object->pending_ops);
62522- fscache_stat(&fscache_n_op_pend);
62523+ fscache_stat_unchecked(&fscache_n_op_pend);
62524 ret = 0;
62525 } else if (fscache_object_is_dying(object)) {
62526- fscache_stat(&fscache_n_op_rejected);
62527+ fscache_stat_unchecked(&fscache_n_op_rejected);
62528 op->state = FSCACHE_OP_ST_CANCELLED;
62529 ret = -ENOBUFS;
62530 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62531@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62532 ret = -EBUSY;
62533 if (op->state == FSCACHE_OP_ST_PENDING) {
62534 ASSERT(!list_empty(&op->pend_link));
62535- fscache_stat(&fscache_n_op_cancelled);
62536+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62537 list_del_init(&op->pend_link);
62538 if (do_cancel)
62539 do_cancel(op);
62540@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62541 while (!list_empty(&object->pending_ops)) {
62542 op = list_entry(object->pending_ops.next,
62543 struct fscache_operation, pend_link);
62544- fscache_stat(&fscache_n_op_cancelled);
62545+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62546 list_del_init(&op->pend_link);
62547
62548 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62549@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62550 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62551 op->state = FSCACHE_OP_ST_DEAD;
62552
62553- fscache_stat(&fscache_n_op_release);
62554+ fscache_stat_unchecked(&fscache_n_op_release);
62555
62556 if (op->release) {
62557 op->release(op);
62558@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62559 * lock, and defer it otherwise */
62560 if (!spin_trylock(&object->lock)) {
62561 _debug("defer put");
62562- fscache_stat(&fscache_n_op_deferred_release);
62563+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62564
62565 cache = object->cache;
62566 spin_lock(&cache->op_gc_list_lock);
62567@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62568
62569 _debug("GC DEFERRED REL OBJ%x OP%x",
62570 object->debug_id, op->debug_id);
62571- fscache_stat(&fscache_n_op_gc);
62572+ fscache_stat_unchecked(&fscache_n_op_gc);
62573
62574 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62575 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62576diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62577index de33b3f..8be4d29 100644
62578--- a/fs/fscache/page.c
62579+++ b/fs/fscache/page.c
62580@@ -74,7 +74,7 @@ try_again:
62581 val = radix_tree_lookup(&cookie->stores, page->index);
62582 if (!val) {
62583 rcu_read_unlock();
62584- fscache_stat(&fscache_n_store_vmscan_not_storing);
62585+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62586 __fscache_uncache_page(cookie, page);
62587 return true;
62588 }
62589@@ -104,11 +104,11 @@ try_again:
62590 spin_unlock(&cookie->stores_lock);
62591
62592 if (xpage) {
62593- fscache_stat(&fscache_n_store_vmscan_cancelled);
62594- fscache_stat(&fscache_n_store_radix_deletes);
62595+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62596+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62597 ASSERTCMP(xpage, ==, page);
62598 } else {
62599- fscache_stat(&fscache_n_store_vmscan_gone);
62600+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62601 }
62602
62603 wake_up_bit(&cookie->flags, 0);
62604@@ -123,11 +123,11 @@ page_busy:
62605 * sleeping on memory allocation, so we may need to impose a timeout
62606 * too. */
62607 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62608- fscache_stat(&fscache_n_store_vmscan_busy);
62609+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62610 return false;
62611 }
62612
62613- fscache_stat(&fscache_n_store_vmscan_wait);
62614+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62615 if (!release_page_wait_timeout(cookie, page))
62616 _debug("fscache writeout timeout page: %p{%lx}",
62617 page, page->index);
62618@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62619 FSCACHE_COOKIE_STORING_TAG);
62620 if (!radix_tree_tag_get(&cookie->stores, page->index,
62621 FSCACHE_COOKIE_PENDING_TAG)) {
62622- fscache_stat(&fscache_n_store_radix_deletes);
62623+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62624 xpage = radix_tree_delete(&cookie->stores, page->index);
62625 }
62626 spin_unlock(&cookie->stores_lock);
62627@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62628
62629 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62630
62631- fscache_stat(&fscache_n_attr_changed_calls);
62632+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62633
62634 if (fscache_object_is_active(object)) {
62635 fscache_stat(&fscache_n_cop_attr_changed);
62636@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62637
62638 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62639
62640- fscache_stat(&fscache_n_attr_changed);
62641+ fscache_stat_unchecked(&fscache_n_attr_changed);
62642
62643 op = kzalloc(sizeof(*op), GFP_KERNEL);
62644 if (!op) {
62645- fscache_stat(&fscache_n_attr_changed_nomem);
62646+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62647 _leave(" = -ENOMEM");
62648 return -ENOMEM;
62649 }
62650@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62651 if (fscache_submit_exclusive_op(object, op) < 0)
62652 goto nobufs_dec;
62653 spin_unlock(&cookie->lock);
62654- fscache_stat(&fscache_n_attr_changed_ok);
62655+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62656 fscache_put_operation(op);
62657 _leave(" = 0");
62658 return 0;
62659@@ -242,7 +242,7 @@ nobufs:
62660 kfree(op);
62661 if (wake_cookie)
62662 __fscache_wake_unused_cookie(cookie);
62663- fscache_stat(&fscache_n_attr_changed_nobufs);
62664+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62665 _leave(" = %d", -ENOBUFS);
62666 return -ENOBUFS;
62667 }
62668@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62669 /* allocate a retrieval operation and attempt to submit it */
62670 op = kzalloc(sizeof(*op), GFP_NOIO);
62671 if (!op) {
62672- fscache_stat(&fscache_n_retrievals_nomem);
62673+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62674 return NULL;
62675 }
62676
62677@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62678 return 0;
62679 }
62680
62681- fscache_stat(&fscache_n_retrievals_wait);
62682+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62683
62684 jif = jiffies;
62685 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62686 TASK_INTERRUPTIBLE) != 0) {
62687- fscache_stat(&fscache_n_retrievals_intr);
62688+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62689 _leave(" = -ERESTARTSYS");
62690 return -ERESTARTSYS;
62691 }
62692@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62693 */
62694 int fscache_wait_for_operation_activation(struct fscache_object *object,
62695 struct fscache_operation *op,
62696- atomic_t *stat_op_waits,
62697- atomic_t *stat_object_dead,
62698+ atomic_unchecked_t *stat_op_waits,
62699+ atomic_unchecked_t *stat_object_dead,
62700 void (*do_cancel)(struct fscache_operation *))
62701 {
62702 int ret;
62703@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62704
62705 _debug(">>> WT");
62706 if (stat_op_waits)
62707- fscache_stat(stat_op_waits);
62708+ fscache_stat_unchecked(stat_op_waits);
62709 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62710 TASK_INTERRUPTIBLE) != 0) {
62711 ret = fscache_cancel_op(op, do_cancel);
62712@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62713 check_if_dead:
62714 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62715 if (stat_object_dead)
62716- fscache_stat(stat_object_dead);
62717+ fscache_stat_unchecked(stat_object_dead);
62718 _leave(" = -ENOBUFS [cancelled]");
62719 return -ENOBUFS;
62720 }
62721@@ -381,7 +381,7 @@ check_if_dead:
62722 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
62723 fscache_cancel_op(op, do_cancel);
62724 if (stat_object_dead)
62725- fscache_stat(stat_object_dead);
62726+ fscache_stat_unchecked(stat_object_dead);
62727 return -ENOBUFS;
62728 }
62729 return 0;
62730@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62731
62732 _enter("%p,%p,,,", cookie, page);
62733
62734- fscache_stat(&fscache_n_retrievals);
62735+ fscache_stat_unchecked(&fscache_n_retrievals);
62736
62737 if (hlist_empty(&cookie->backing_objects))
62738 goto nobufs;
62739@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62740 goto nobufs_unlock_dec;
62741 spin_unlock(&cookie->lock);
62742
62743- fscache_stat(&fscache_n_retrieval_ops);
62744+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62745
62746 /* pin the netfs read context in case we need to do the actual netfs
62747 * read because we've encountered a cache read failure */
62748@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62749
62750 error:
62751 if (ret == -ENOMEM)
62752- fscache_stat(&fscache_n_retrievals_nomem);
62753+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62754 else if (ret == -ERESTARTSYS)
62755- fscache_stat(&fscache_n_retrievals_intr);
62756+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62757 else if (ret == -ENODATA)
62758- fscache_stat(&fscache_n_retrievals_nodata);
62759+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62760 else if (ret < 0)
62761- fscache_stat(&fscache_n_retrievals_nobufs);
62762+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62763 else
62764- fscache_stat(&fscache_n_retrievals_ok);
62765+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62766
62767 fscache_put_retrieval(op);
62768 _leave(" = %d", ret);
62769@@ -505,7 +505,7 @@ nobufs_unlock:
62770 __fscache_wake_unused_cookie(cookie);
62771 kfree(op);
62772 nobufs:
62773- fscache_stat(&fscache_n_retrievals_nobufs);
62774+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62775 _leave(" = -ENOBUFS");
62776 return -ENOBUFS;
62777 }
62778@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62779
62780 _enter("%p,,%d,,,", cookie, *nr_pages);
62781
62782- fscache_stat(&fscache_n_retrievals);
62783+ fscache_stat_unchecked(&fscache_n_retrievals);
62784
62785 if (hlist_empty(&cookie->backing_objects))
62786 goto nobufs;
62787@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62788 goto nobufs_unlock_dec;
62789 spin_unlock(&cookie->lock);
62790
62791- fscache_stat(&fscache_n_retrieval_ops);
62792+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62793
62794 /* pin the netfs read context in case we need to do the actual netfs
62795 * read because we've encountered a cache read failure */
62796@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62797
62798 error:
62799 if (ret == -ENOMEM)
62800- fscache_stat(&fscache_n_retrievals_nomem);
62801+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62802 else if (ret == -ERESTARTSYS)
62803- fscache_stat(&fscache_n_retrievals_intr);
62804+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62805 else if (ret == -ENODATA)
62806- fscache_stat(&fscache_n_retrievals_nodata);
62807+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62808 else if (ret < 0)
62809- fscache_stat(&fscache_n_retrievals_nobufs);
62810+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62811 else
62812- fscache_stat(&fscache_n_retrievals_ok);
62813+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62814
62815 fscache_put_retrieval(op);
62816 _leave(" = %d", ret);
62817@@ -636,7 +636,7 @@ nobufs_unlock:
62818 if (wake_cookie)
62819 __fscache_wake_unused_cookie(cookie);
62820 nobufs:
62821- fscache_stat(&fscache_n_retrievals_nobufs);
62822+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62823 _leave(" = -ENOBUFS");
62824 return -ENOBUFS;
62825 }
62826@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62827
62828 _enter("%p,%p,,,", cookie, page);
62829
62830- fscache_stat(&fscache_n_allocs);
62831+ fscache_stat_unchecked(&fscache_n_allocs);
62832
62833 if (hlist_empty(&cookie->backing_objects))
62834 goto nobufs;
62835@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62836 goto nobufs_unlock_dec;
62837 spin_unlock(&cookie->lock);
62838
62839- fscache_stat(&fscache_n_alloc_ops);
62840+ fscache_stat_unchecked(&fscache_n_alloc_ops);
62841
62842 ret = fscache_wait_for_operation_activation(
62843 object, &op->op,
62844@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62845
62846 error:
62847 if (ret == -ERESTARTSYS)
62848- fscache_stat(&fscache_n_allocs_intr);
62849+ fscache_stat_unchecked(&fscache_n_allocs_intr);
62850 else if (ret < 0)
62851- fscache_stat(&fscache_n_allocs_nobufs);
62852+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62853 else
62854- fscache_stat(&fscache_n_allocs_ok);
62855+ fscache_stat_unchecked(&fscache_n_allocs_ok);
62856
62857 fscache_put_retrieval(op);
62858 _leave(" = %d", ret);
62859@@ -730,7 +730,7 @@ nobufs_unlock:
62860 if (wake_cookie)
62861 __fscache_wake_unused_cookie(cookie);
62862 nobufs:
62863- fscache_stat(&fscache_n_allocs_nobufs);
62864+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
62865 _leave(" = -ENOBUFS");
62866 return -ENOBUFS;
62867 }
62868@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62869
62870 spin_lock(&cookie->stores_lock);
62871
62872- fscache_stat(&fscache_n_store_calls);
62873+ fscache_stat_unchecked(&fscache_n_store_calls);
62874
62875 /* find a page to store */
62876 page = NULL;
62877@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62878 page = results[0];
62879 _debug("gang %d [%lx]", n, page->index);
62880 if (page->index > op->store_limit) {
62881- fscache_stat(&fscache_n_store_pages_over_limit);
62882+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
62883 goto superseded;
62884 }
62885
62886@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
62887 spin_unlock(&cookie->stores_lock);
62888 spin_unlock(&object->lock);
62889
62890- fscache_stat(&fscache_n_store_pages);
62891+ fscache_stat_unchecked(&fscache_n_store_pages);
62892 fscache_stat(&fscache_n_cop_write_page);
62893 ret = object->cache->ops->write_page(op, page);
62894 fscache_stat_d(&fscache_n_cop_write_page);
62895@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62896 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62897 ASSERT(PageFsCache(page));
62898
62899- fscache_stat(&fscache_n_stores);
62900+ fscache_stat_unchecked(&fscache_n_stores);
62901
62902 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
62903 _leave(" = -ENOBUFS [invalidating]");
62904@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62905 spin_unlock(&cookie->stores_lock);
62906 spin_unlock(&object->lock);
62907
62908- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
62909+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62910 op->store_limit = object->store_limit;
62911
62912 __fscache_use_cookie(cookie);
62913@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62914
62915 spin_unlock(&cookie->lock);
62916 radix_tree_preload_end();
62917- fscache_stat(&fscache_n_store_ops);
62918- fscache_stat(&fscache_n_stores_ok);
62919+ fscache_stat_unchecked(&fscache_n_store_ops);
62920+ fscache_stat_unchecked(&fscache_n_stores_ok);
62921
62922 /* the work queue now carries its own ref on the object */
62923 fscache_put_operation(&op->op);
62924@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
62925 return 0;
62926
62927 already_queued:
62928- fscache_stat(&fscache_n_stores_again);
62929+ fscache_stat_unchecked(&fscache_n_stores_again);
62930 already_pending:
62931 spin_unlock(&cookie->stores_lock);
62932 spin_unlock(&object->lock);
62933 spin_unlock(&cookie->lock);
62934 radix_tree_preload_end();
62935 kfree(op);
62936- fscache_stat(&fscache_n_stores_ok);
62937+ fscache_stat_unchecked(&fscache_n_stores_ok);
62938 _leave(" = 0");
62939 return 0;
62940
62941@@ -1039,14 +1039,14 @@ nobufs:
62942 kfree(op);
62943 if (wake_cookie)
62944 __fscache_wake_unused_cookie(cookie);
62945- fscache_stat(&fscache_n_stores_nobufs);
62946+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
62947 _leave(" = -ENOBUFS");
62948 return -ENOBUFS;
62949
62950 nomem_free:
62951 kfree(op);
62952 nomem:
62953- fscache_stat(&fscache_n_stores_oom);
62954+ fscache_stat_unchecked(&fscache_n_stores_oom);
62955 _leave(" = -ENOMEM");
62956 return -ENOMEM;
62957 }
62958@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
62959 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62960 ASSERTCMP(page, !=, NULL);
62961
62962- fscache_stat(&fscache_n_uncaches);
62963+ fscache_stat_unchecked(&fscache_n_uncaches);
62964
62965 /* cache withdrawal may beat us to it */
62966 if (!PageFsCache(page))
62967@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
62968 struct fscache_cookie *cookie = op->op.object->cookie;
62969
62970 #ifdef CONFIG_FSCACHE_STATS
62971- atomic_inc(&fscache_n_marks);
62972+ atomic_inc_unchecked(&fscache_n_marks);
62973 #endif
62974
62975 _debug("- mark %p{%lx}", page, page->index);
62976diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
62977index 40d13c7..ddf52b9 100644
62978--- a/fs/fscache/stats.c
62979+++ b/fs/fscache/stats.c
62980@@ -18,99 +18,99 @@
62981 /*
62982 * operation counters
62983 */
62984-atomic_t fscache_n_op_pend;
62985-atomic_t fscache_n_op_run;
62986-atomic_t fscache_n_op_enqueue;
62987-atomic_t fscache_n_op_requeue;
62988-atomic_t fscache_n_op_deferred_release;
62989-atomic_t fscache_n_op_release;
62990-atomic_t fscache_n_op_gc;
62991-atomic_t fscache_n_op_cancelled;
62992-atomic_t fscache_n_op_rejected;
62993+atomic_unchecked_t fscache_n_op_pend;
62994+atomic_unchecked_t fscache_n_op_run;
62995+atomic_unchecked_t fscache_n_op_enqueue;
62996+atomic_unchecked_t fscache_n_op_requeue;
62997+atomic_unchecked_t fscache_n_op_deferred_release;
62998+atomic_unchecked_t fscache_n_op_release;
62999+atomic_unchecked_t fscache_n_op_gc;
63000+atomic_unchecked_t fscache_n_op_cancelled;
63001+atomic_unchecked_t fscache_n_op_rejected;
63002
63003-atomic_t fscache_n_attr_changed;
63004-atomic_t fscache_n_attr_changed_ok;
63005-atomic_t fscache_n_attr_changed_nobufs;
63006-atomic_t fscache_n_attr_changed_nomem;
63007-atomic_t fscache_n_attr_changed_calls;
63008+atomic_unchecked_t fscache_n_attr_changed;
63009+atomic_unchecked_t fscache_n_attr_changed_ok;
63010+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63011+atomic_unchecked_t fscache_n_attr_changed_nomem;
63012+atomic_unchecked_t fscache_n_attr_changed_calls;
63013
63014-atomic_t fscache_n_allocs;
63015-atomic_t fscache_n_allocs_ok;
63016-atomic_t fscache_n_allocs_wait;
63017-atomic_t fscache_n_allocs_nobufs;
63018-atomic_t fscache_n_allocs_intr;
63019-atomic_t fscache_n_allocs_object_dead;
63020-atomic_t fscache_n_alloc_ops;
63021-atomic_t fscache_n_alloc_op_waits;
63022+atomic_unchecked_t fscache_n_allocs;
63023+atomic_unchecked_t fscache_n_allocs_ok;
63024+atomic_unchecked_t fscache_n_allocs_wait;
63025+atomic_unchecked_t fscache_n_allocs_nobufs;
63026+atomic_unchecked_t fscache_n_allocs_intr;
63027+atomic_unchecked_t fscache_n_allocs_object_dead;
63028+atomic_unchecked_t fscache_n_alloc_ops;
63029+atomic_unchecked_t fscache_n_alloc_op_waits;
63030
63031-atomic_t fscache_n_retrievals;
63032-atomic_t fscache_n_retrievals_ok;
63033-atomic_t fscache_n_retrievals_wait;
63034-atomic_t fscache_n_retrievals_nodata;
63035-atomic_t fscache_n_retrievals_nobufs;
63036-atomic_t fscache_n_retrievals_intr;
63037-atomic_t fscache_n_retrievals_nomem;
63038-atomic_t fscache_n_retrievals_object_dead;
63039-atomic_t fscache_n_retrieval_ops;
63040-atomic_t fscache_n_retrieval_op_waits;
63041+atomic_unchecked_t fscache_n_retrievals;
63042+atomic_unchecked_t fscache_n_retrievals_ok;
63043+atomic_unchecked_t fscache_n_retrievals_wait;
63044+atomic_unchecked_t fscache_n_retrievals_nodata;
63045+atomic_unchecked_t fscache_n_retrievals_nobufs;
63046+atomic_unchecked_t fscache_n_retrievals_intr;
63047+atomic_unchecked_t fscache_n_retrievals_nomem;
63048+atomic_unchecked_t fscache_n_retrievals_object_dead;
63049+atomic_unchecked_t fscache_n_retrieval_ops;
63050+atomic_unchecked_t fscache_n_retrieval_op_waits;
63051
63052-atomic_t fscache_n_stores;
63053-atomic_t fscache_n_stores_ok;
63054-atomic_t fscache_n_stores_again;
63055-atomic_t fscache_n_stores_nobufs;
63056-atomic_t fscache_n_stores_oom;
63057-atomic_t fscache_n_store_ops;
63058-atomic_t fscache_n_store_calls;
63059-atomic_t fscache_n_store_pages;
63060-atomic_t fscache_n_store_radix_deletes;
63061-atomic_t fscache_n_store_pages_over_limit;
63062+atomic_unchecked_t fscache_n_stores;
63063+atomic_unchecked_t fscache_n_stores_ok;
63064+atomic_unchecked_t fscache_n_stores_again;
63065+atomic_unchecked_t fscache_n_stores_nobufs;
63066+atomic_unchecked_t fscache_n_stores_oom;
63067+atomic_unchecked_t fscache_n_store_ops;
63068+atomic_unchecked_t fscache_n_store_calls;
63069+atomic_unchecked_t fscache_n_store_pages;
63070+atomic_unchecked_t fscache_n_store_radix_deletes;
63071+atomic_unchecked_t fscache_n_store_pages_over_limit;
63072
63073-atomic_t fscache_n_store_vmscan_not_storing;
63074-atomic_t fscache_n_store_vmscan_gone;
63075-atomic_t fscache_n_store_vmscan_busy;
63076-atomic_t fscache_n_store_vmscan_cancelled;
63077-atomic_t fscache_n_store_vmscan_wait;
63078+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63079+atomic_unchecked_t fscache_n_store_vmscan_gone;
63080+atomic_unchecked_t fscache_n_store_vmscan_busy;
63081+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63082+atomic_unchecked_t fscache_n_store_vmscan_wait;
63083
63084-atomic_t fscache_n_marks;
63085-atomic_t fscache_n_uncaches;
63086+atomic_unchecked_t fscache_n_marks;
63087+atomic_unchecked_t fscache_n_uncaches;
63088
63089-atomic_t fscache_n_acquires;
63090-atomic_t fscache_n_acquires_null;
63091-atomic_t fscache_n_acquires_no_cache;
63092-atomic_t fscache_n_acquires_ok;
63093-atomic_t fscache_n_acquires_nobufs;
63094-atomic_t fscache_n_acquires_oom;
63095+atomic_unchecked_t fscache_n_acquires;
63096+atomic_unchecked_t fscache_n_acquires_null;
63097+atomic_unchecked_t fscache_n_acquires_no_cache;
63098+atomic_unchecked_t fscache_n_acquires_ok;
63099+atomic_unchecked_t fscache_n_acquires_nobufs;
63100+atomic_unchecked_t fscache_n_acquires_oom;
63101
63102-atomic_t fscache_n_invalidates;
63103-atomic_t fscache_n_invalidates_run;
63104+atomic_unchecked_t fscache_n_invalidates;
63105+atomic_unchecked_t fscache_n_invalidates_run;
63106
63107-atomic_t fscache_n_updates;
63108-atomic_t fscache_n_updates_null;
63109-atomic_t fscache_n_updates_run;
63110+atomic_unchecked_t fscache_n_updates;
63111+atomic_unchecked_t fscache_n_updates_null;
63112+atomic_unchecked_t fscache_n_updates_run;
63113
63114-atomic_t fscache_n_relinquishes;
63115-atomic_t fscache_n_relinquishes_null;
63116-atomic_t fscache_n_relinquishes_waitcrt;
63117-atomic_t fscache_n_relinquishes_retire;
63118+atomic_unchecked_t fscache_n_relinquishes;
63119+atomic_unchecked_t fscache_n_relinquishes_null;
63120+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63121+atomic_unchecked_t fscache_n_relinquishes_retire;
63122
63123-atomic_t fscache_n_cookie_index;
63124-atomic_t fscache_n_cookie_data;
63125-atomic_t fscache_n_cookie_special;
63126+atomic_unchecked_t fscache_n_cookie_index;
63127+atomic_unchecked_t fscache_n_cookie_data;
63128+atomic_unchecked_t fscache_n_cookie_special;
63129
63130-atomic_t fscache_n_object_alloc;
63131-atomic_t fscache_n_object_no_alloc;
63132-atomic_t fscache_n_object_lookups;
63133-atomic_t fscache_n_object_lookups_negative;
63134-atomic_t fscache_n_object_lookups_positive;
63135-atomic_t fscache_n_object_lookups_timed_out;
63136-atomic_t fscache_n_object_created;
63137-atomic_t fscache_n_object_avail;
63138-atomic_t fscache_n_object_dead;
63139+atomic_unchecked_t fscache_n_object_alloc;
63140+atomic_unchecked_t fscache_n_object_no_alloc;
63141+atomic_unchecked_t fscache_n_object_lookups;
63142+atomic_unchecked_t fscache_n_object_lookups_negative;
63143+atomic_unchecked_t fscache_n_object_lookups_positive;
63144+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63145+atomic_unchecked_t fscache_n_object_created;
63146+atomic_unchecked_t fscache_n_object_avail;
63147+atomic_unchecked_t fscache_n_object_dead;
63148
63149-atomic_t fscache_n_checkaux_none;
63150-atomic_t fscache_n_checkaux_okay;
63151-atomic_t fscache_n_checkaux_update;
63152-atomic_t fscache_n_checkaux_obsolete;
63153+atomic_unchecked_t fscache_n_checkaux_none;
63154+atomic_unchecked_t fscache_n_checkaux_okay;
63155+atomic_unchecked_t fscache_n_checkaux_update;
63156+atomic_unchecked_t fscache_n_checkaux_obsolete;
63157
63158 atomic_t fscache_n_cop_alloc_object;
63159 atomic_t fscache_n_cop_lookup_object;
63160@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63161 seq_puts(m, "FS-Cache statistics\n");
63162
63163 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63164- atomic_read(&fscache_n_cookie_index),
63165- atomic_read(&fscache_n_cookie_data),
63166- atomic_read(&fscache_n_cookie_special));
63167+ atomic_read_unchecked(&fscache_n_cookie_index),
63168+ atomic_read_unchecked(&fscache_n_cookie_data),
63169+ atomic_read_unchecked(&fscache_n_cookie_special));
63170
63171 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63172- atomic_read(&fscache_n_object_alloc),
63173- atomic_read(&fscache_n_object_no_alloc),
63174- atomic_read(&fscache_n_object_avail),
63175- atomic_read(&fscache_n_object_dead));
63176+ atomic_read_unchecked(&fscache_n_object_alloc),
63177+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63178+ atomic_read_unchecked(&fscache_n_object_avail),
63179+ atomic_read_unchecked(&fscache_n_object_dead));
63180 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63181- atomic_read(&fscache_n_checkaux_none),
63182- atomic_read(&fscache_n_checkaux_okay),
63183- atomic_read(&fscache_n_checkaux_update),
63184- atomic_read(&fscache_n_checkaux_obsolete));
63185+ atomic_read_unchecked(&fscache_n_checkaux_none),
63186+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63187+ atomic_read_unchecked(&fscache_n_checkaux_update),
63188+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63189
63190 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63191- atomic_read(&fscache_n_marks),
63192- atomic_read(&fscache_n_uncaches));
63193+ atomic_read_unchecked(&fscache_n_marks),
63194+ atomic_read_unchecked(&fscache_n_uncaches));
63195
63196 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63197 " oom=%u\n",
63198- atomic_read(&fscache_n_acquires),
63199- atomic_read(&fscache_n_acquires_null),
63200- atomic_read(&fscache_n_acquires_no_cache),
63201- atomic_read(&fscache_n_acquires_ok),
63202- atomic_read(&fscache_n_acquires_nobufs),
63203- atomic_read(&fscache_n_acquires_oom));
63204+ atomic_read_unchecked(&fscache_n_acquires),
63205+ atomic_read_unchecked(&fscache_n_acquires_null),
63206+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63207+ atomic_read_unchecked(&fscache_n_acquires_ok),
63208+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63209+ atomic_read_unchecked(&fscache_n_acquires_oom));
63210
63211 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63212- atomic_read(&fscache_n_object_lookups),
63213- atomic_read(&fscache_n_object_lookups_negative),
63214- atomic_read(&fscache_n_object_lookups_positive),
63215- atomic_read(&fscache_n_object_created),
63216- atomic_read(&fscache_n_object_lookups_timed_out));
63217+ atomic_read_unchecked(&fscache_n_object_lookups),
63218+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63219+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63220+ atomic_read_unchecked(&fscache_n_object_created),
63221+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63222
63223 seq_printf(m, "Invals : n=%u run=%u\n",
63224- atomic_read(&fscache_n_invalidates),
63225- atomic_read(&fscache_n_invalidates_run));
63226+ atomic_read_unchecked(&fscache_n_invalidates),
63227+ atomic_read_unchecked(&fscache_n_invalidates_run));
63228
63229 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63230- atomic_read(&fscache_n_updates),
63231- atomic_read(&fscache_n_updates_null),
63232- atomic_read(&fscache_n_updates_run));
63233+ atomic_read_unchecked(&fscache_n_updates),
63234+ atomic_read_unchecked(&fscache_n_updates_null),
63235+ atomic_read_unchecked(&fscache_n_updates_run));
63236
63237 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63238- atomic_read(&fscache_n_relinquishes),
63239- atomic_read(&fscache_n_relinquishes_null),
63240- atomic_read(&fscache_n_relinquishes_waitcrt),
63241- atomic_read(&fscache_n_relinquishes_retire));
63242+ atomic_read_unchecked(&fscache_n_relinquishes),
63243+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63244+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63245+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63246
63247 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63248- atomic_read(&fscache_n_attr_changed),
63249- atomic_read(&fscache_n_attr_changed_ok),
63250- atomic_read(&fscache_n_attr_changed_nobufs),
63251- atomic_read(&fscache_n_attr_changed_nomem),
63252- atomic_read(&fscache_n_attr_changed_calls));
63253+ atomic_read_unchecked(&fscache_n_attr_changed),
63254+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63255+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63256+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63257+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63258
63259 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63260- atomic_read(&fscache_n_allocs),
63261- atomic_read(&fscache_n_allocs_ok),
63262- atomic_read(&fscache_n_allocs_wait),
63263- atomic_read(&fscache_n_allocs_nobufs),
63264- atomic_read(&fscache_n_allocs_intr));
63265+ atomic_read_unchecked(&fscache_n_allocs),
63266+ atomic_read_unchecked(&fscache_n_allocs_ok),
63267+ atomic_read_unchecked(&fscache_n_allocs_wait),
63268+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63269+ atomic_read_unchecked(&fscache_n_allocs_intr));
63270 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63271- atomic_read(&fscache_n_alloc_ops),
63272- atomic_read(&fscache_n_alloc_op_waits),
63273- atomic_read(&fscache_n_allocs_object_dead));
63274+ atomic_read_unchecked(&fscache_n_alloc_ops),
63275+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63276+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63277
63278 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63279 " int=%u oom=%u\n",
63280- atomic_read(&fscache_n_retrievals),
63281- atomic_read(&fscache_n_retrievals_ok),
63282- atomic_read(&fscache_n_retrievals_wait),
63283- atomic_read(&fscache_n_retrievals_nodata),
63284- atomic_read(&fscache_n_retrievals_nobufs),
63285- atomic_read(&fscache_n_retrievals_intr),
63286- atomic_read(&fscache_n_retrievals_nomem));
63287+ atomic_read_unchecked(&fscache_n_retrievals),
63288+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63289+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63290+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63291+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63292+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63293+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63294 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63295- atomic_read(&fscache_n_retrieval_ops),
63296- atomic_read(&fscache_n_retrieval_op_waits),
63297- atomic_read(&fscache_n_retrievals_object_dead));
63298+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63299+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63300+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63301
63302 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63303- atomic_read(&fscache_n_stores),
63304- atomic_read(&fscache_n_stores_ok),
63305- atomic_read(&fscache_n_stores_again),
63306- atomic_read(&fscache_n_stores_nobufs),
63307- atomic_read(&fscache_n_stores_oom));
63308+ atomic_read_unchecked(&fscache_n_stores),
63309+ atomic_read_unchecked(&fscache_n_stores_ok),
63310+ atomic_read_unchecked(&fscache_n_stores_again),
63311+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63312+ atomic_read_unchecked(&fscache_n_stores_oom));
63313 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63314- atomic_read(&fscache_n_store_ops),
63315- atomic_read(&fscache_n_store_calls),
63316- atomic_read(&fscache_n_store_pages),
63317- atomic_read(&fscache_n_store_radix_deletes),
63318- atomic_read(&fscache_n_store_pages_over_limit));
63319+ atomic_read_unchecked(&fscache_n_store_ops),
63320+ atomic_read_unchecked(&fscache_n_store_calls),
63321+ atomic_read_unchecked(&fscache_n_store_pages),
63322+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63323+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63324
63325 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63326- atomic_read(&fscache_n_store_vmscan_not_storing),
63327- atomic_read(&fscache_n_store_vmscan_gone),
63328- atomic_read(&fscache_n_store_vmscan_busy),
63329- atomic_read(&fscache_n_store_vmscan_cancelled),
63330- atomic_read(&fscache_n_store_vmscan_wait));
63331+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63332+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63333+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63334+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63335+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63336
63337 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63338- atomic_read(&fscache_n_op_pend),
63339- atomic_read(&fscache_n_op_run),
63340- atomic_read(&fscache_n_op_enqueue),
63341- atomic_read(&fscache_n_op_cancelled),
63342- atomic_read(&fscache_n_op_rejected));
63343+ atomic_read_unchecked(&fscache_n_op_pend),
63344+ atomic_read_unchecked(&fscache_n_op_run),
63345+ atomic_read_unchecked(&fscache_n_op_enqueue),
63346+ atomic_read_unchecked(&fscache_n_op_cancelled),
63347+ atomic_read_unchecked(&fscache_n_op_rejected));
63348 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63349- atomic_read(&fscache_n_op_deferred_release),
63350- atomic_read(&fscache_n_op_release),
63351- atomic_read(&fscache_n_op_gc));
63352+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63353+ atomic_read_unchecked(&fscache_n_op_release),
63354+ atomic_read_unchecked(&fscache_n_op_gc));
63355
63356 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63357 atomic_read(&fscache_n_cop_alloc_object),
63358diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63359index 28d0c7a..04816b7 100644
63360--- a/fs/fuse/cuse.c
63361+++ b/fs/fuse/cuse.c
63362@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63363 INIT_LIST_HEAD(&cuse_conntbl[i]);
63364
63365 /* inherit and extend fuse_dev_operations */
63366- cuse_channel_fops = fuse_dev_operations;
63367- cuse_channel_fops.owner = THIS_MODULE;
63368- cuse_channel_fops.open = cuse_channel_open;
63369- cuse_channel_fops.release = cuse_channel_release;
63370+ pax_open_kernel();
63371+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63372+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63373+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63374+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63375+ pax_close_kernel();
63376
63377 cuse_class = class_create(THIS_MODULE, "cuse");
63378 if (IS_ERR(cuse_class))
63379diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63380index 71c4619..6a9f6d4 100644
63381--- a/fs/fuse/dev.c
63382+++ b/fs/fuse/dev.c
63383@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63384 ret = 0;
63385 pipe_lock(pipe);
63386
63387- if (!pipe->readers) {
63388+ if (!atomic_read(&pipe->readers)) {
63389 send_sig(SIGPIPE, current, 0);
63390 if (!ret)
63391 ret = -EPIPE;
63392@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63393 page_nr++;
63394 ret += buf->len;
63395
63396- if (pipe->files)
63397+ if (atomic_read(&pipe->files))
63398 do_wakeup = 1;
63399 }
63400
63401diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63402index 08e7b1a..d91c6ee 100644
63403--- a/fs/fuse/dir.c
63404+++ b/fs/fuse/dir.c
63405@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63406 return link;
63407 }
63408
63409-static void free_link(char *link)
63410+static void free_link(const char *link)
63411 {
63412 if (!IS_ERR(link))
63413 free_page((unsigned long) link);
63414diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63415index fd62cae..3494dfa 100644
63416--- a/fs/hostfs/hostfs_kern.c
63417+++ b/fs/hostfs/hostfs_kern.c
63418@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63419
63420 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63421 {
63422- char *s = nd_get_link(nd);
63423+ const char *s = nd_get_link(nd);
63424 if (!IS_ERR(s))
63425 __putname(s);
63426 }
63427diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63428index 5eba47f..d353c22 100644
63429--- a/fs/hugetlbfs/inode.c
63430+++ b/fs/hugetlbfs/inode.c
63431@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63432 struct mm_struct *mm = current->mm;
63433 struct vm_area_struct *vma;
63434 struct hstate *h = hstate_file(file);
63435+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63436 struct vm_unmapped_area_info info;
63437
63438 if (len & ~huge_page_mask(h))
63439@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63440 return addr;
63441 }
63442
63443+#ifdef CONFIG_PAX_RANDMMAP
63444+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63445+#endif
63446+
63447 if (addr) {
63448 addr = ALIGN(addr, huge_page_size(h));
63449 vma = find_vma(mm, addr);
63450- if (TASK_SIZE - len >= addr &&
63451- (!vma || addr + len <= vma->vm_start))
63452+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63453 return addr;
63454 }
63455
63456 info.flags = 0;
63457 info.length = len;
63458 info.low_limit = TASK_UNMAPPED_BASE;
63459+
63460+#ifdef CONFIG_PAX_RANDMMAP
63461+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63462+ info.low_limit += mm->delta_mmap;
63463+#endif
63464+
63465 info.high_limit = TASK_SIZE;
63466 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63467 info.align_offset = 0;
63468@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63469 };
63470 MODULE_ALIAS_FS("hugetlbfs");
63471
63472-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63473+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63474
63475 static int can_do_hugetlb_shm(void)
63476 {
63477diff --git a/fs/inode.c b/fs/inode.c
63478index aa149e7..46f1f65 100644
63479--- a/fs/inode.c
63480+++ b/fs/inode.c
63481@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63482 unsigned int *p = &get_cpu_var(last_ino);
63483 unsigned int res = *p;
63484
63485+start:
63486+
63487 #ifdef CONFIG_SMP
63488 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63489- static atomic_t shared_last_ino;
63490- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63491+ static atomic_unchecked_t shared_last_ino;
63492+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63493
63494 res = next - LAST_INO_BATCH;
63495 }
63496 #endif
63497
63498- *p = ++res;
63499+ if (unlikely(!++res))
63500+ goto start; /* never zero */
63501+ *p = res;
63502 put_cpu_var(last_ino);
63503 return res;
63504 }
63505diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63506index 4a6cf28..d3a29d3 100644
63507--- a/fs/jffs2/erase.c
63508+++ b/fs/jffs2/erase.c
63509@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63510 struct jffs2_unknown_node marker = {
63511 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63512 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63513- .totlen = cpu_to_je32(c->cleanmarker_size)
63514+ .totlen = cpu_to_je32(c->cleanmarker_size),
63515+ .hdr_crc = cpu_to_je32(0)
63516 };
63517
63518 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63519diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63520index 09ed551..45684f8 100644
63521--- a/fs/jffs2/wbuf.c
63522+++ b/fs/jffs2/wbuf.c
63523@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63524 {
63525 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63526 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63527- .totlen = constant_cpu_to_je32(8)
63528+ .totlen = constant_cpu_to_je32(8),
63529+ .hdr_crc = constant_cpu_to_je32(0)
63530 };
63531
63532 /*
63533diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63534index 16c3a95..e9cb75d 100644
63535--- a/fs/jfs/super.c
63536+++ b/fs/jfs/super.c
63537@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63538
63539 jfs_inode_cachep =
63540 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63541- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63542+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63543 init_once);
63544 if (jfs_inode_cachep == NULL)
63545 return -ENOMEM;
63546diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63547index 2d881b3..fe1ac77 100644
63548--- a/fs/kernfs/dir.c
63549+++ b/fs/kernfs/dir.c
63550@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63551 *
63552 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63553 */
63554-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63555+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63556 {
63557 unsigned long hash = init_name_hash();
63558 unsigned int len = strlen(name);
63559@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63560 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63561
63562 kernfs_put_active(parent);
63563+
63564+ if (!ret) {
63565+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63566+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63567+ }
63568+
63569 return ret;
63570 }
63571
63572diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63573index ddc9f96..4e450ad 100644
63574--- a/fs/kernfs/file.c
63575+++ b/fs/kernfs/file.c
63576@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63577
63578 struct kernfs_open_node {
63579 atomic_t refcnt;
63580- atomic_t event;
63581+ atomic_unchecked_t event;
63582 wait_queue_head_t poll;
63583 struct list_head files; /* goes through kernfs_open_file.list */
63584 };
63585@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63586 {
63587 struct kernfs_open_file *of = sf->private;
63588
63589- of->event = atomic_read(&of->kn->attr.open->event);
63590+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63591
63592 return of->kn->attr.ops->seq_show(sf, v);
63593 }
63594@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
63595 {
63596 struct kernfs_open_file *of = kernfs_of(file);
63597 const struct kernfs_ops *ops;
63598- size_t len;
63599+ ssize_t len;
63600 char *buf;
63601
63602 if (of->atomic_write_len) {
63603@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63604 return ret;
63605 }
63606
63607-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63608- void *buf, int len, int write)
63609+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63610+ void *buf, size_t len, int write)
63611 {
63612 struct file *file = vma->vm_file;
63613 struct kernfs_open_file *of = kernfs_of(file);
63614- int ret;
63615+ ssize_t ret;
63616
63617 if (!of->vm_ops)
63618 return -EINVAL;
63619@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63620 return -ENOMEM;
63621
63622 atomic_set(&new_on->refcnt, 0);
63623- atomic_set(&new_on->event, 1);
63624+ atomic_set_unchecked(&new_on->event, 1);
63625 init_waitqueue_head(&new_on->poll);
63626 INIT_LIST_HEAD(&new_on->files);
63627 goto retry;
63628@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63629
63630 kernfs_put_active(kn);
63631
63632- if (of->event != atomic_read(&on->event))
63633+ if (of->event != atomic_read_unchecked(&on->event))
63634 goto trigger;
63635
63636 return DEFAULT_POLLMASK;
63637@@ -823,7 +823,7 @@ repeat:
63638
63639 on = kn->attr.open;
63640 if (on) {
63641- atomic_inc(&on->event);
63642+ atomic_inc_unchecked(&on->event);
63643 wake_up_interruptible(&on->poll);
63644 }
63645
63646diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63647index 8a19889..4c3069a 100644
63648--- a/fs/kernfs/symlink.c
63649+++ b/fs/kernfs/symlink.c
63650@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63651 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63652 void *cookie)
63653 {
63654- char *page = nd_get_link(nd);
63655+ const char *page = nd_get_link(nd);
63656 if (!IS_ERR(page))
63657 free_page((unsigned long)page);
63658 }
63659diff --git a/fs/libfs.c b/fs/libfs.c
63660index 005843c..06c4191 100644
63661--- a/fs/libfs.c
63662+++ b/fs/libfs.c
63663@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63664
63665 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63666 struct dentry *next = list_entry(p, struct dentry, d_child);
63667+ char d_name[sizeof(next->d_iname)];
63668+ const unsigned char *name;
63669+
63670 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63671 if (!simple_positive(next)) {
63672 spin_unlock(&next->d_lock);
63673@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63674
63675 spin_unlock(&next->d_lock);
63676 spin_unlock(&dentry->d_lock);
63677- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63678+ name = next->d_name.name;
63679+ if (name == next->d_iname) {
63680+ memcpy(d_name, name, next->d_name.len);
63681+ name = d_name;
63682+ }
63683+ if (!dir_emit(ctx, name, next->d_name.len,
63684 next->d_inode->i_ino, dt_type(next->d_inode)))
63685 return 0;
63686 spin_lock(&dentry->d_lock);
63687@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63688 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63689 void *cookie)
63690 {
63691- char *s = nd_get_link(nd);
63692+ const char *s = nd_get_link(nd);
63693 if (!IS_ERR(s))
63694 kfree(s);
63695 }
63696diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63697index acd3947..1f896e2 100644
63698--- a/fs/lockd/clntproc.c
63699+++ b/fs/lockd/clntproc.c
63700@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63701 /*
63702 * Cookie counter for NLM requests
63703 */
63704-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63705+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63706
63707 void nlmclnt_next_cookie(struct nlm_cookie *c)
63708 {
63709- u32 cookie = atomic_inc_return(&nlm_cookie);
63710+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63711
63712 memcpy(c->data, &cookie, 4);
63713 c->len=4;
63714diff --git a/fs/locks.c b/fs/locks.c
63715index 59e2f90..bd69071 100644
63716--- a/fs/locks.c
63717+++ b/fs/locks.c
63718@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63719 locks_remove_posix(filp, filp);
63720
63721 if (filp->f_op->flock) {
63722- struct file_lock fl = {
63723+ struct file_lock flock = {
63724 .fl_owner = filp,
63725 .fl_pid = current->tgid,
63726 .fl_file = filp,
63727@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
63728 .fl_type = F_UNLCK,
63729 .fl_end = OFFSET_MAX,
63730 };
63731- filp->f_op->flock(filp, F_SETLKW, &fl);
63732- if (fl.fl_ops && fl.fl_ops->fl_release_private)
63733- fl.fl_ops->fl_release_private(&fl);
63734+ filp->f_op->flock(filp, F_SETLKW, &flock);
63735+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
63736+ flock.fl_ops->fl_release_private(&flock);
63737 }
63738
63739 spin_lock(&inode->i_lock);
63740diff --git a/fs/mount.h b/fs/mount.h
63741index 0ad6f76..a04c146 100644
63742--- a/fs/mount.h
63743+++ b/fs/mount.h
63744@@ -12,7 +12,7 @@ struct mnt_namespace {
63745 u64 seq; /* Sequence number to prevent loops */
63746 wait_queue_head_t poll;
63747 u64 event;
63748-};
63749+} __randomize_layout;
63750
63751 struct mnt_pcp {
63752 int mnt_count;
63753@@ -63,7 +63,7 @@ struct mount {
63754 int mnt_expiry_mark; /* true if marked for expiry */
63755 struct hlist_head mnt_pins;
63756 struct path mnt_ex_mountpoint;
63757-};
63758+} __randomize_layout;
63759
63760 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
63761
63762diff --git a/fs/namei.c b/fs/namei.c
63763index bc35b02..7ed1f1d 100644
63764--- a/fs/namei.c
63765+++ b/fs/namei.c
63766@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
63767 if (ret != -EACCES)
63768 return ret;
63769
63770+#ifdef CONFIG_GRKERNSEC
63771+ /* we'll block if we have to log due to a denied capability use */
63772+ if (mask & MAY_NOT_BLOCK)
63773+ return -ECHILD;
63774+#endif
63775+
63776 if (S_ISDIR(inode->i_mode)) {
63777 /* DACs are overridable for directories */
63778- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63779- return 0;
63780 if (!(mask & MAY_WRITE))
63781- if (capable_wrt_inode_uidgid(inode,
63782- CAP_DAC_READ_SEARCH))
63783+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63784+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63785 return 0;
63786+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63787+ return 0;
63788 return -EACCES;
63789 }
63790 /*
63791+ * Searching includes executable on directories, else just read.
63792+ */
63793+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63794+ if (mask == MAY_READ)
63795+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63796+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63797+ return 0;
63798+
63799+ /*
63800 * Read/write DACs are always overridable.
63801 * Executable DACs are overridable when there is
63802 * at least one exec bit set.
63803@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
63804 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63805 return 0;
63806
63807- /*
63808- * Searching includes executable on directories, else just read.
63809- */
63810- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63811- if (mask == MAY_READ)
63812- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63813- return 0;
63814-
63815 return -EACCES;
63816 }
63817 EXPORT_SYMBOL(generic_permission);
63818@@ -497,7 +504,7 @@ struct nameidata {
63819 int last_type;
63820 unsigned depth;
63821 struct file *base;
63822- char *saved_names[MAX_NESTED_LINKS + 1];
63823+ const char *saved_names[MAX_NESTED_LINKS + 1];
63824 };
63825
63826 /*
63827@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
63828 nd->flags |= LOOKUP_JUMPED;
63829 }
63830
63831-void nd_set_link(struct nameidata *nd, char *path)
63832+void nd_set_link(struct nameidata *nd, const char *path)
63833 {
63834 nd->saved_names[nd->depth] = path;
63835 }
63836 EXPORT_SYMBOL(nd_set_link);
63837
63838-char *nd_get_link(struct nameidata *nd)
63839+const char *nd_get_link(const struct nameidata *nd)
63840 {
63841 return nd->saved_names[nd->depth];
63842 }
63843@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63844 {
63845 struct dentry *dentry = link->dentry;
63846 int error;
63847- char *s;
63848+ const char *s;
63849
63850 BUG_ON(nd->flags & LOOKUP_RCU);
63851
63852@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
63853 if (error)
63854 goto out_put_nd_path;
63855
63856+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
63857+ dentry->d_inode, dentry, nd->path.mnt)) {
63858+ error = -EACCES;
63859+ goto out_put_nd_path;
63860+ }
63861+
63862 nd->last_type = LAST_BIND;
63863 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
63864 error = PTR_ERR(*p);
63865@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
63866 if (res)
63867 break;
63868 res = walk_component(nd, path, LOOKUP_FOLLOW);
63869+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
63870+ res = -EACCES;
63871 put_link(nd, &link, cookie);
63872 } while (res > 0);
63873
63874@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
63875 static inline u64 hash_name(const char *name)
63876 {
63877 unsigned long a, b, adata, bdata, mask, hash, len;
63878- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63879+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
63880
63881 hash = a = 0;
63882 len = -sizeof(unsigned long);
63883@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
63884 if (err)
63885 break;
63886 err = lookup_last(nd, &path);
63887+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
63888+ err = -EACCES;
63889 put_link(nd, &link, cookie);
63890 }
63891 }
63892@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
63893 if (!err)
63894 err = complete_walk(nd);
63895
63896+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
63897+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
63898+ path_put(&nd->path);
63899+ err = -ENOENT;
63900+ }
63901+ }
63902+
63903 if (!err && nd->flags & LOOKUP_DIRECTORY) {
63904 if (!d_can_lookup(nd->path.dentry)) {
63905 path_put(&nd->path);
63906@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
63907 retval = path_lookupat(dfd, name->name,
63908 flags | LOOKUP_REVAL, nd);
63909
63910- if (likely(!retval))
63911+ if (likely(!retval)) {
63912 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
63913+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
63914+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
63915+ path_put(&nd->path);
63916+ return -ENOENT;
63917+ }
63918+ }
63919+ }
63920 return retval;
63921 }
63922
63923@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
63924 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
63925 return -EPERM;
63926
63927+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
63928+ return -EPERM;
63929+ if (gr_handle_rawio(inode))
63930+ return -EPERM;
63931+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
63932+ return -EACCES;
63933+
63934 return 0;
63935 }
63936
63937@@ -2826,7 +2864,7 @@ looked_up:
63938 * cleared otherwise prior to returning.
63939 */
63940 static int lookup_open(struct nameidata *nd, struct path *path,
63941- struct file *file,
63942+ struct path *link, struct file *file,
63943 const struct open_flags *op,
63944 bool got_write, int *opened)
63945 {
63946@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63947 /* Negative dentry, just create the file */
63948 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
63949 umode_t mode = op->mode;
63950+
63951+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
63952+ error = -EACCES;
63953+ goto out_dput;
63954+ }
63955+
63956+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
63957+ error = -EACCES;
63958+ goto out_dput;
63959+ }
63960+
63961 if (!IS_POSIXACL(dir->d_inode))
63962 mode &= ~current_umask();
63963 /*
63964@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
63965 nd->flags & LOOKUP_EXCL);
63966 if (error)
63967 goto out_dput;
63968+ else
63969+ gr_handle_create(dentry, nd->path.mnt);
63970 }
63971 out_no_open:
63972 path->dentry = dentry;
63973@@ -2896,7 +2947,7 @@ out_dput:
63974 /*
63975 * Handle the last step of open()
63976 */
63977-static int do_last(struct nameidata *nd, struct path *path,
63978+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
63979 struct file *file, const struct open_flags *op,
63980 int *opened, struct filename *name)
63981 {
63982@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
63983 if (error)
63984 return error;
63985
63986+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
63987+ error = -ENOENT;
63988+ goto out;
63989+ }
63990+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
63991+ error = -EACCES;
63992+ goto out;
63993+ }
63994+
63995 audit_inode(name, dir, LOOKUP_PARENT);
63996 error = -EISDIR;
63997 /* trailing slashes? */
63998@@ -2965,7 +3025,7 @@ retry_lookup:
63999 */
64000 }
64001 mutex_lock(&dir->d_inode->i_mutex);
64002- error = lookup_open(nd, path, file, op, got_write, opened);
64003+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64004 mutex_unlock(&dir->d_inode->i_mutex);
64005
64006 if (error <= 0) {
64007@@ -2989,11 +3049,28 @@ retry_lookup:
64008 goto finish_open_created;
64009 }
64010
64011+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64012+ error = -ENOENT;
64013+ goto exit_dput;
64014+ }
64015+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64016+ error = -EACCES;
64017+ goto exit_dput;
64018+ }
64019+
64020 /*
64021 * create/update audit record if it already exists.
64022 */
64023- if (d_is_positive(path->dentry))
64024+ if (d_is_positive(path->dentry)) {
64025+ /* only check if O_CREAT is specified, all other checks need to go
64026+ into may_open */
64027+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64028+ error = -EACCES;
64029+ goto exit_dput;
64030+ }
64031+
64032 audit_inode(name, path->dentry, 0);
64033+ }
64034
64035 /*
64036 * If atomic_open() acquired write access it is dropped now due to
64037@@ -3034,6 +3111,11 @@ finish_lookup:
64038 }
64039 }
64040 BUG_ON(inode != path->dentry->d_inode);
64041+ /* if we're resolving a symlink to another symlink */
64042+ if (link && gr_handle_symlink_owner(link, inode)) {
64043+ error = -EACCES;
64044+ goto out;
64045+ }
64046 return 1;
64047 }
64048
64049@@ -3053,7 +3135,18 @@ finish_open:
64050 path_put(&save_parent);
64051 return error;
64052 }
64053+
64054+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64055+ error = -ENOENT;
64056+ goto out;
64057+ }
64058+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64059+ error = -EACCES;
64060+ goto out;
64061+ }
64062+
64063 audit_inode(name, nd->path.dentry, 0);
64064+
64065 error = -EISDIR;
64066 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64067 goto out;
64068@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64069 if (unlikely(error))
64070 goto out;
64071
64072- error = do_last(nd, &path, file, op, &opened, pathname);
64073+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64074 while (unlikely(error > 0)) { /* trailing symlink */
64075 struct path link = path;
64076 void *cookie;
64077@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64078 error = follow_link(&link, nd, &cookie);
64079 if (unlikely(error))
64080 break;
64081- error = do_last(nd, &path, file, op, &opened, pathname);
64082+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64083 put_link(nd, &link, cookie);
64084 }
64085 out:
64086@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64087 goto unlock;
64088
64089 error = -EEXIST;
64090- if (d_is_positive(dentry))
64091+ if (d_is_positive(dentry)) {
64092+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64093+ error = -ENOENT;
64094 goto fail;
64095-
64096+ }
64097 /*
64098 * Special case - lookup gave negative, but... we had foo/bar/
64099 * From the vfs_mknod() POV we just have a negative dentry -
64100@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64101 }
64102 EXPORT_SYMBOL(user_path_create);
64103
64104+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64105+{
64106+ struct filename *tmp = getname(pathname);
64107+ struct dentry *res;
64108+ if (IS_ERR(tmp))
64109+ return ERR_CAST(tmp);
64110+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64111+ if (IS_ERR(res))
64112+ putname(tmp);
64113+ else
64114+ *to = tmp;
64115+ return res;
64116+}
64117+
64118 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64119 {
64120 int error = may_create(dir, dentry);
64121@@ -3446,6 +3555,17 @@ retry:
64122
64123 if (!IS_POSIXACL(path.dentry->d_inode))
64124 mode &= ~current_umask();
64125+
64126+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64127+ error = -EPERM;
64128+ goto out;
64129+ }
64130+
64131+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64132+ error = -EACCES;
64133+ goto out;
64134+ }
64135+
64136 error = security_path_mknod(&path, dentry, mode, dev);
64137 if (error)
64138 goto out;
64139@@ -3461,6 +3581,8 @@ retry:
64140 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64141 break;
64142 }
64143+ if (!error)
64144+ gr_handle_create(dentry, path.mnt);
64145 out:
64146 done_path_create(&path, dentry);
64147 if (retry_estale(error, lookup_flags)) {
64148@@ -3515,9 +3637,16 @@ retry:
64149
64150 if (!IS_POSIXACL(path.dentry->d_inode))
64151 mode &= ~current_umask();
64152+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64153+ error = -EACCES;
64154+ goto out;
64155+ }
64156 error = security_path_mkdir(&path, dentry, mode);
64157 if (!error)
64158 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64159+ if (!error)
64160+ gr_handle_create(dentry, path.mnt);
64161+out:
64162 done_path_create(&path, dentry);
64163 if (retry_estale(error, lookup_flags)) {
64164 lookup_flags |= LOOKUP_REVAL;
64165@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64166 struct filename *name;
64167 struct dentry *dentry;
64168 struct nameidata nd;
64169+ u64 saved_ino = 0;
64170+ dev_t saved_dev = 0;
64171 unsigned int lookup_flags = 0;
64172 retry:
64173 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64174@@ -3633,10 +3764,21 @@ retry:
64175 error = -ENOENT;
64176 goto exit3;
64177 }
64178+
64179+ saved_ino = gr_get_ino_from_dentry(dentry);
64180+ saved_dev = gr_get_dev_from_dentry(dentry);
64181+
64182+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64183+ error = -EACCES;
64184+ goto exit3;
64185+ }
64186+
64187 error = security_path_rmdir(&nd.path, dentry);
64188 if (error)
64189 goto exit3;
64190 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64191+ if (!error && (saved_dev || saved_ino))
64192+ gr_handle_delete(saved_ino, saved_dev);
64193 exit3:
64194 dput(dentry);
64195 exit2:
64196@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64197 struct nameidata nd;
64198 struct inode *inode = NULL;
64199 struct inode *delegated_inode = NULL;
64200+ u64 saved_ino = 0;
64201+ dev_t saved_dev = 0;
64202 unsigned int lookup_flags = 0;
64203 retry:
64204 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64205@@ -3755,10 +3899,22 @@ retry_deleg:
64206 if (d_is_negative(dentry))
64207 goto slashes;
64208 ihold(inode);
64209+
64210+ if (inode->i_nlink <= 1) {
64211+ saved_ino = gr_get_ino_from_dentry(dentry);
64212+ saved_dev = gr_get_dev_from_dentry(dentry);
64213+ }
64214+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64215+ error = -EACCES;
64216+ goto exit2;
64217+ }
64218+
64219 error = security_path_unlink(&nd.path, dentry);
64220 if (error)
64221 goto exit2;
64222 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64223+ if (!error && (saved_ino || saved_dev))
64224+ gr_handle_delete(saved_ino, saved_dev);
64225 exit2:
64226 dput(dentry);
64227 }
64228@@ -3847,9 +4003,17 @@ retry:
64229 if (IS_ERR(dentry))
64230 goto out_putname;
64231
64232+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64233+ error = -EACCES;
64234+ goto out;
64235+ }
64236+
64237 error = security_path_symlink(&path, dentry, from->name);
64238 if (!error)
64239 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64240+ if (!error)
64241+ gr_handle_create(dentry, path.mnt);
64242+out:
64243 done_path_create(&path, dentry);
64244 if (retry_estale(error, lookup_flags)) {
64245 lookup_flags |= LOOKUP_REVAL;
64246@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64247 struct dentry *new_dentry;
64248 struct path old_path, new_path;
64249 struct inode *delegated_inode = NULL;
64250+ struct filename *to = NULL;
64251 int how = 0;
64252 int error;
64253
64254@@ -3976,7 +4141,7 @@ retry:
64255 if (error)
64256 return error;
64257
64258- new_dentry = user_path_create(newdfd, newname, &new_path,
64259+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64260 (how & LOOKUP_REVAL));
64261 error = PTR_ERR(new_dentry);
64262 if (IS_ERR(new_dentry))
64263@@ -3988,11 +4153,28 @@ retry:
64264 error = may_linkat(&old_path);
64265 if (unlikely(error))
64266 goto out_dput;
64267+
64268+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64269+ old_path.dentry->d_inode,
64270+ old_path.dentry->d_inode->i_mode, to)) {
64271+ error = -EACCES;
64272+ goto out_dput;
64273+ }
64274+
64275+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64276+ old_path.dentry, old_path.mnt, to)) {
64277+ error = -EACCES;
64278+ goto out_dput;
64279+ }
64280+
64281 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64282 if (error)
64283 goto out_dput;
64284 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64285+ if (!error)
64286+ gr_handle_create(new_dentry, new_path.mnt);
64287 out_dput:
64288+ putname(to);
64289 done_path_create(&new_path, new_dentry);
64290 if (delegated_inode) {
64291 error = break_deleg_wait(&delegated_inode);
64292@@ -4308,6 +4490,20 @@ retry_deleg:
64293 if (new_dentry == trap)
64294 goto exit5;
64295
64296+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64297+ /* use EXDEV error to cause 'mv' to switch to an alternative
64298+ * method for usability
64299+ */
64300+ error = -EXDEV;
64301+ goto exit5;
64302+ }
64303+
64304+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64305+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64306+ to, flags);
64307+ if (error)
64308+ goto exit5;
64309+
64310 error = security_path_rename(&oldnd.path, old_dentry,
64311 &newnd.path, new_dentry, flags);
64312 if (error)
64313@@ -4315,6 +4511,9 @@ retry_deleg:
64314 error = vfs_rename(old_dir->d_inode, old_dentry,
64315 new_dir->d_inode, new_dentry,
64316 &delegated_inode, flags);
64317+ if (!error)
64318+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64319+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64320 exit5:
64321 dput(new_dentry);
64322 exit4:
64323@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64324
64325 int readlink_copy(char __user *buffer, int buflen, const char *link)
64326 {
64327+ char tmpbuf[64];
64328+ const char *newlink;
64329 int len = PTR_ERR(link);
64330+
64331 if (IS_ERR(link))
64332 goto out;
64333
64334 len = strlen(link);
64335 if (len > (unsigned) buflen)
64336 len = buflen;
64337- if (copy_to_user(buffer, link, len))
64338+
64339+ if (len < sizeof(tmpbuf)) {
64340+ memcpy(tmpbuf, link, len);
64341+ newlink = tmpbuf;
64342+ } else
64343+ newlink = link;
64344+
64345+ if (copy_to_user(buffer, newlink, len))
64346 len = -EFAULT;
64347 out:
64348 return len;
64349diff --git a/fs/namespace.c b/fs/namespace.c
64350index cd1e968..e64ff16 100644
64351--- a/fs/namespace.c
64352+++ b/fs/namespace.c
64353@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64354 if (!(sb->s_flags & MS_RDONLY))
64355 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64356 up_write(&sb->s_umount);
64357+
64358+ gr_log_remount(mnt->mnt_devname, retval);
64359+
64360 return retval;
64361 }
64362
64363@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64364 }
64365 unlock_mount_hash();
64366 namespace_unlock();
64367+
64368+ gr_log_unmount(mnt->mnt_devname, retval);
64369+
64370 return retval;
64371 }
64372
64373@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64374 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64375 */
64376
64377-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64378+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64379 {
64380 struct path path;
64381 struct mount *mnt;
64382@@ -1565,7 +1571,7 @@ out:
64383 /*
64384 * The 2.0 compatible umount. No flags.
64385 */
64386-SYSCALL_DEFINE1(oldumount, char __user *, name)
64387+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64388 {
64389 return sys_umount(name, 0);
64390 }
64391@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64392 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64393 MS_STRICTATIME);
64394
64395+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64396+ retval = -EPERM;
64397+ goto dput_out;
64398+ }
64399+
64400+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64401+ retval = -EPERM;
64402+ goto dput_out;
64403+ }
64404+
64405 if (flags & MS_REMOUNT)
64406 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64407 data_page);
64408@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64409 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64410 dev_name, data_page);
64411 dput_out:
64412+ gr_log_mount(dev_name, &path, retval);
64413+
64414 path_put(&path);
64415+
64416 return retval;
64417 }
64418
64419@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64420 * number incrementing at 10Ghz will take 12,427 years to wrap which
64421 * is effectively never, so we can ignore the possibility.
64422 */
64423-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64424+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64425
64426 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64427 {
64428@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64429 return ERR_PTR(ret);
64430 }
64431 new_ns->ns.ops = &mntns_operations;
64432- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64433+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64434 atomic_set(&new_ns->count, 1);
64435 new_ns->root = NULL;
64436 INIT_LIST_HEAD(&new_ns->list);
64437@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64438 return new_ns;
64439 }
64440
64441-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64442+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64443 struct user_namespace *user_ns, struct fs_struct *new_fs)
64444 {
64445 struct mnt_namespace *new_ns;
64446@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64447 }
64448 EXPORT_SYMBOL(mount_subtree);
64449
64450-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64451- char __user *, type, unsigned long, flags, void __user *, data)
64452+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64453+ const char __user *, type, unsigned long, flags, void __user *, data)
64454 {
64455 int ret;
64456 char *kernel_type;
64457@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64458 if (error)
64459 goto out2;
64460
64461+ if (gr_handle_chroot_pivot()) {
64462+ error = -EPERM;
64463+ goto out2;
64464+ }
64465+
64466 get_fs_root(current->fs, &root);
64467 old_mp = lock_mount(&old);
64468 error = PTR_ERR(old_mp);
64469@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64470 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64471 return -EPERM;
64472
64473- if (fs->users != 1)
64474+ if (atomic_read(&fs->users) != 1)
64475 return -EINVAL;
64476
64477 get_mnt_ns(mnt_ns);
64478diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64479index 02f8d09..a5c25d1 100644
64480--- a/fs/nfs/callback_xdr.c
64481+++ b/fs/nfs/callback_xdr.c
64482@@ -51,7 +51,7 @@ struct callback_op {
64483 callback_decode_arg_t decode_args;
64484 callback_encode_res_t encode_res;
64485 long res_maxsize;
64486-};
64487+} __do_const;
64488
64489 static struct callback_op callback_ops[];
64490
64491diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64492index 2211f6b..30d0950 100644
64493--- a/fs/nfs/inode.c
64494+++ b/fs/nfs/inode.c
64495@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64496 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64497 }
64498
64499-static atomic_long_t nfs_attr_generation_counter;
64500+static atomic_long_unchecked_t nfs_attr_generation_counter;
64501
64502 static unsigned long nfs_read_attr_generation_counter(void)
64503 {
64504- return atomic_long_read(&nfs_attr_generation_counter);
64505+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64506 }
64507
64508 unsigned long nfs_inc_attr_generation_counter(void)
64509 {
64510- return atomic_long_inc_return(&nfs_attr_generation_counter);
64511+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64512 }
64513
64514 void nfs_fattr_init(struct nfs_fattr *fattr)
64515diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64516index ac71d13..a2e590a 100644
64517--- a/fs/nfsd/nfs4proc.c
64518+++ b/fs/nfsd/nfs4proc.c
64519@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64520 nfsd4op_rsize op_rsize_bop;
64521 stateid_getter op_get_currentstateid;
64522 stateid_setter op_set_currentstateid;
64523-};
64524+} __do_const;
64525
64526 static struct nfsd4_operation nfsd4_ops[];
64527
64528diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64529index 15f7b73..00e230b 100644
64530--- a/fs/nfsd/nfs4xdr.c
64531+++ b/fs/nfsd/nfs4xdr.c
64532@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64533
64534 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64535
64536-static nfsd4_dec nfsd4_dec_ops[] = {
64537+static const nfsd4_dec nfsd4_dec_ops[] = {
64538 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64539 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64540 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64541diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64542index 83a9694..6b7f928 100644
64543--- a/fs/nfsd/nfscache.c
64544+++ b/fs/nfsd/nfscache.c
64545@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64546 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64547 u32 hash;
64548 struct nfsd_drc_bucket *b;
64549- int len;
64550+ long len;
64551 size_t bufsize = 0;
64552
64553 if (!rp)
64554@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64555 hash = nfsd_cache_hash(rp->c_xid);
64556 b = &drc_hashtbl[hash];
64557
64558- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64559- len >>= 2;
64560+ if (statp) {
64561+ len = (char*)statp - (char*)resv->iov_base;
64562+ len = resv->iov_len - len;
64563+ len >>= 2;
64564+ }
64565
64566 /* Don't cache excessive amounts of data and XDR failures */
64567- if (!statp || len > (256 >> 2)) {
64568+ if (!statp || len > (256 >> 2) || len < 0) {
64569 nfsd_reply_cache_free(b, rp);
64570 return;
64571 }
64572@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64573 switch (cachetype) {
64574 case RC_REPLSTAT:
64575 if (len != 1)
64576- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64577+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64578 rp->c_replstat = *statp;
64579 break;
64580 case RC_REPLBUFF:
64581diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64582index 5685c67..73029ef 100644
64583--- a/fs/nfsd/vfs.c
64584+++ b/fs/nfsd/vfs.c
64585@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64586
64587 oldfs = get_fs();
64588 set_fs(KERNEL_DS);
64589- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64590+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64591 set_fs(oldfs);
64592 return nfsd_finish_read(file, count, host_err);
64593 }
64594@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64595
64596 /* Write the data. */
64597 oldfs = get_fs(); set_fs(KERNEL_DS);
64598- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64599+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64600 set_fs(oldfs);
64601 if (host_err < 0)
64602 goto out_nfserr;
64603@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64604 */
64605
64606 oldfs = get_fs(); set_fs(KERNEL_DS);
64607- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64608+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64609 set_fs(oldfs);
64610
64611 if (host_err < 0)
64612diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64613index 52ccd34..7a6b202 100644
64614--- a/fs/nls/nls_base.c
64615+++ b/fs/nls/nls_base.c
64616@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64617
64618 int __register_nls(struct nls_table *nls, struct module *owner)
64619 {
64620- struct nls_table ** tmp = &tables;
64621+ struct nls_table *tmp = tables;
64622
64623 if (nls->next)
64624 return -EBUSY;
64625
64626- nls->owner = owner;
64627+ pax_open_kernel();
64628+ *(void **)&nls->owner = owner;
64629+ pax_close_kernel();
64630 spin_lock(&nls_lock);
64631- while (*tmp) {
64632- if (nls == *tmp) {
64633+ while (tmp) {
64634+ if (nls == tmp) {
64635 spin_unlock(&nls_lock);
64636 return -EBUSY;
64637 }
64638- tmp = &(*tmp)->next;
64639+ tmp = tmp->next;
64640 }
64641- nls->next = tables;
64642+ pax_open_kernel();
64643+ *(struct nls_table **)&nls->next = tables;
64644+ pax_close_kernel();
64645 tables = nls;
64646 spin_unlock(&nls_lock);
64647 return 0;
64648@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64649
64650 int unregister_nls(struct nls_table * nls)
64651 {
64652- struct nls_table ** tmp = &tables;
64653+ struct nls_table * const * tmp = &tables;
64654
64655 spin_lock(&nls_lock);
64656 while (*tmp) {
64657 if (nls == *tmp) {
64658- *tmp = nls->next;
64659+ pax_open_kernel();
64660+ *(struct nls_table **)tmp = nls->next;
64661+ pax_close_kernel();
64662 spin_unlock(&nls_lock);
64663 return 0;
64664 }
64665@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64666 return -EINVAL;
64667 }
64668
64669-static struct nls_table *find_nls(char *charset)
64670+static struct nls_table *find_nls(const char *charset)
64671 {
64672 struct nls_table *nls;
64673 spin_lock(&nls_lock);
64674@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64675 return nls;
64676 }
64677
64678-struct nls_table *load_nls(char *charset)
64679+struct nls_table *load_nls(const char *charset)
64680 {
64681 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64682 }
64683diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64684index 162b3f1..6076a7c 100644
64685--- a/fs/nls/nls_euc-jp.c
64686+++ b/fs/nls/nls_euc-jp.c
64687@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64688 p_nls = load_nls("cp932");
64689
64690 if (p_nls) {
64691- table.charset2upper = p_nls->charset2upper;
64692- table.charset2lower = p_nls->charset2lower;
64693+ pax_open_kernel();
64694+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64695+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64696+ pax_close_kernel();
64697 return register_nls(&table);
64698 }
64699
64700diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64701index a80a741..7b96e1b 100644
64702--- a/fs/nls/nls_koi8-ru.c
64703+++ b/fs/nls/nls_koi8-ru.c
64704@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64705 p_nls = load_nls("koi8-u");
64706
64707 if (p_nls) {
64708- table.charset2upper = p_nls->charset2upper;
64709- table.charset2lower = p_nls->charset2lower;
64710+ pax_open_kernel();
64711+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64712+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64713+ pax_close_kernel();
64714 return register_nls(&table);
64715 }
64716
64717diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64718index bff8567..83281c6 100644
64719--- a/fs/notify/fanotify/fanotify_user.c
64720+++ b/fs/notify/fanotify/fanotify_user.c
64721@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
64722
64723 fd = fanotify_event_metadata.fd;
64724 ret = -EFAULT;
64725- if (copy_to_user(buf, &fanotify_event_metadata,
64726- fanotify_event_metadata.event_len))
64727+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
64728+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
64729 goto out_close_fd;
64730
64731 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
64732diff --git a/fs/notify/notification.c b/fs/notify/notification.c
64733index a95d8e0..a91a5fd 100644
64734--- a/fs/notify/notification.c
64735+++ b/fs/notify/notification.c
64736@@ -48,7 +48,7 @@
64737 #include <linux/fsnotify_backend.h>
64738 #include "fsnotify.h"
64739
64740-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64741+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64742
64743 /**
64744 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
64745@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64746 */
64747 u32 fsnotify_get_cookie(void)
64748 {
64749- return atomic_inc_return(&fsnotify_sync_cookie);
64750+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
64751 }
64752 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
64753
64754diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
64755index 9e38daf..5727cae 100644
64756--- a/fs/ntfs/dir.c
64757+++ b/fs/ntfs/dir.c
64758@@ -1310,7 +1310,7 @@ find_next_index_buffer:
64759 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
64760 ~(s64)(ndir->itype.index.block_size - 1)));
64761 /* Bounds checks. */
64762- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64763+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64764 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
64765 "inode 0x%lx or driver bug.", vdir->i_ino);
64766 goto err_out;
64767diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
64768index 643faa4..ef9027e 100644
64769--- a/fs/ntfs/file.c
64770+++ b/fs/ntfs/file.c
64771@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
64772 char *addr;
64773 size_t total = 0;
64774 unsigned len;
64775- int left;
64776+ unsigned left;
64777
64778 do {
64779 len = PAGE_CACHE_SIZE - ofs;
64780diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
64781index 9e1e112..241a52a 100644
64782--- a/fs/ntfs/super.c
64783+++ b/fs/ntfs/super.c
64784@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64785 if (!silent)
64786 ntfs_error(sb, "Primary boot sector is invalid.");
64787 } else if (!silent)
64788- ntfs_error(sb, read_err_str, "primary");
64789+ ntfs_error(sb, read_err_str, "%s", "primary");
64790 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
64791 if (bh_primary)
64792 brelse(bh_primary);
64793@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64794 goto hotfix_primary_boot_sector;
64795 brelse(bh_backup);
64796 } else if (!silent)
64797- ntfs_error(sb, read_err_str, "backup");
64798+ ntfs_error(sb, read_err_str, "%s", "backup");
64799 /* Try to read NT3.51- backup boot sector. */
64800 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
64801 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
64802@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64803 "sector.");
64804 brelse(bh_backup);
64805 } else if (!silent)
64806- ntfs_error(sb, read_err_str, "backup");
64807+ ntfs_error(sb, read_err_str, "%s", "backup");
64808 /* We failed. Cleanup and return. */
64809 if (bh_primary)
64810 brelse(bh_primary);
64811diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
64812index 0440134..d52c93a 100644
64813--- a/fs/ocfs2/localalloc.c
64814+++ b/fs/ocfs2/localalloc.c
64815@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
64816 goto bail;
64817 }
64818
64819- atomic_inc(&osb->alloc_stats.moves);
64820+ atomic_inc_unchecked(&osb->alloc_stats.moves);
64821
64822 bail:
64823 if (handle)
64824diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
64825index 7d6b7d0..5fb529a 100644
64826--- a/fs/ocfs2/ocfs2.h
64827+++ b/fs/ocfs2/ocfs2.h
64828@@ -242,11 +242,11 @@ enum ocfs2_vol_state
64829
64830 struct ocfs2_alloc_stats
64831 {
64832- atomic_t moves;
64833- atomic_t local_data;
64834- atomic_t bitmap_data;
64835- atomic_t bg_allocs;
64836- atomic_t bg_extends;
64837+ atomic_unchecked_t moves;
64838+ atomic_unchecked_t local_data;
64839+ atomic_unchecked_t bitmap_data;
64840+ atomic_unchecked_t bg_allocs;
64841+ atomic_unchecked_t bg_extends;
64842 };
64843
64844 enum ocfs2_local_alloc_state
64845diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
64846index 0cb889a..6a26b24 100644
64847--- a/fs/ocfs2/suballoc.c
64848+++ b/fs/ocfs2/suballoc.c
64849@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
64850 mlog_errno(status);
64851 goto bail;
64852 }
64853- atomic_inc(&osb->alloc_stats.bg_extends);
64854+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
64855
64856 /* You should never ask for this much metadata */
64857 BUG_ON(bits_wanted >
64858@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
64859 mlog_errno(status);
64860 goto bail;
64861 }
64862- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64863+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64864
64865 *suballoc_loc = res.sr_bg_blkno;
64866 *suballoc_bit_start = res.sr_bit_offset;
64867@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
64868 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
64869 res->sr_bits);
64870
64871- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64872+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64873
64874 BUG_ON(res->sr_bits != 1);
64875
64876@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
64877 mlog_errno(status);
64878 goto bail;
64879 }
64880- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64881+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
64882
64883 BUG_ON(res.sr_bits != 1);
64884
64885@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64886 cluster_start,
64887 num_clusters);
64888 if (!status)
64889- atomic_inc(&osb->alloc_stats.local_data);
64890+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
64891 } else {
64892 if (min_clusters > (osb->bitmap_cpg - 1)) {
64893 /* The only paths asking for contiguousness
64894@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
64895 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
64896 res.sr_bg_blkno,
64897 res.sr_bit_offset);
64898- atomic_inc(&osb->alloc_stats.bitmap_data);
64899+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
64900 *num_clusters = res.sr_bits;
64901 }
64902 }
64903diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
64904index 8372317..ec86e79 100644
64905--- a/fs/ocfs2/super.c
64906+++ b/fs/ocfs2/super.c
64907@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
64908 "%10s => GlobalAllocs: %d LocalAllocs: %d "
64909 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
64910 "Stats",
64911- atomic_read(&osb->alloc_stats.bitmap_data),
64912- atomic_read(&osb->alloc_stats.local_data),
64913- atomic_read(&osb->alloc_stats.bg_allocs),
64914- atomic_read(&osb->alloc_stats.moves),
64915- atomic_read(&osb->alloc_stats.bg_extends));
64916+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
64917+ atomic_read_unchecked(&osb->alloc_stats.local_data),
64918+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
64919+ atomic_read_unchecked(&osb->alloc_stats.moves),
64920+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
64921
64922 out += snprintf(buf + out, len - out,
64923 "%10s => State: %u Descriptor: %llu Size: %u bits "
64924@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
64925
64926 mutex_init(&osb->system_file_mutex);
64927
64928- atomic_set(&osb->alloc_stats.moves, 0);
64929- atomic_set(&osb->alloc_stats.local_data, 0);
64930- atomic_set(&osb->alloc_stats.bitmap_data, 0);
64931- atomic_set(&osb->alloc_stats.bg_allocs, 0);
64932- atomic_set(&osb->alloc_stats.bg_extends, 0);
64933+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
64934+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
64935+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
64936+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
64937+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
64938
64939 /* Copy the blockcheck stats from the superblock probe */
64940 osb->osb_ecc_stats = *stats;
64941diff --git a/fs/open.c b/fs/open.c
64942index 813be03..781941d 100644
64943--- a/fs/open.c
64944+++ b/fs/open.c
64945@@ -32,6 +32,8 @@
64946 #include <linux/dnotify.h>
64947 #include <linux/compat.h>
64948
64949+#define CREATE_TRACE_POINTS
64950+#include <trace/events/fs.h>
64951 #include "internal.h"
64952
64953 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
64954@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
64955 error = locks_verify_truncate(inode, NULL, length);
64956 if (!error)
64957 error = security_path_truncate(path);
64958+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
64959+ error = -EACCES;
64960 if (!error)
64961 error = do_truncate(path->dentry, length, 0, NULL);
64962
64963@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
64964 error = locks_verify_truncate(inode, f.file, length);
64965 if (!error)
64966 error = security_path_truncate(&f.file->f_path);
64967+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
64968+ error = -EACCES;
64969 if (!error)
64970 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
64971 sb_end_write(inode->i_sb);
64972@@ -392,6 +398,9 @@ retry:
64973 if (__mnt_is_readonly(path.mnt))
64974 res = -EROFS;
64975
64976+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
64977+ res = -EACCES;
64978+
64979 out_path_release:
64980 path_put(&path);
64981 if (retry_estale(res, lookup_flags)) {
64982@@ -423,6 +432,8 @@ retry:
64983 if (error)
64984 goto dput_and_out;
64985
64986+ gr_log_chdir(path.dentry, path.mnt);
64987+
64988 set_fs_pwd(current->fs, &path);
64989
64990 dput_and_out:
64991@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
64992 goto out_putf;
64993
64994 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
64995+
64996+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
64997+ error = -EPERM;
64998+
64999+ if (!error)
65000+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65001+
65002 if (!error)
65003 set_fs_pwd(current->fs, &f.file->f_path);
65004 out_putf:
65005@@ -481,7 +499,13 @@ retry:
65006 if (error)
65007 goto dput_and_out;
65008
65009+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65010+ goto dput_and_out;
65011+
65012 set_fs_root(current->fs, &path);
65013+
65014+ gr_handle_chroot_chdir(&path);
65015+
65016 error = 0;
65017 dput_and_out:
65018 path_put(&path);
65019@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65020 return error;
65021 retry_deleg:
65022 mutex_lock(&inode->i_mutex);
65023+
65024+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65025+ error = -EACCES;
65026+ goto out_unlock;
65027+ }
65028+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65029+ error = -EACCES;
65030+ goto out_unlock;
65031+ }
65032+
65033 error = security_path_chmod(path, mode);
65034 if (error)
65035 goto out_unlock;
65036@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65037 uid = make_kuid(current_user_ns(), user);
65038 gid = make_kgid(current_user_ns(), group);
65039
65040+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65041+ return -EACCES;
65042+
65043 newattrs.ia_valid = ATTR_CTIME;
65044 if (user != (uid_t) -1) {
65045 if (!uid_valid(uid))
65046@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65047 } else {
65048 fsnotify_open(f);
65049 fd_install(fd, f);
65050+ trace_do_sys_open(tmp->name, flags, mode);
65051 }
65052 }
65053 putname(tmp);
65054diff --git a/fs/pipe.c b/fs/pipe.c
65055index 21981e5..3d5f55c 100644
65056--- a/fs/pipe.c
65057+++ b/fs/pipe.c
65058@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65059
65060 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65061 {
65062- if (pipe->files)
65063+ if (atomic_read(&pipe->files))
65064 mutex_lock_nested(&pipe->mutex, subclass);
65065 }
65066
65067@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65068
65069 void pipe_unlock(struct pipe_inode_info *pipe)
65070 {
65071- if (pipe->files)
65072+ if (atomic_read(&pipe->files))
65073 mutex_unlock(&pipe->mutex);
65074 }
65075 EXPORT_SYMBOL(pipe_unlock);
65076@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65077 }
65078 if (bufs) /* More to do? */
65079 continue;
65080- if (!pipe->writers)
65081+ if (!atomic_read(&pipe->writers))
65082 break;
65083- if (!pipe->waiting_writers) {
65084+ if (!atomic_read(&pipe->waiting_writers)) {
65085 /* syscall merging: Usually we must not sleep
65086 * if O_NONBLOCK is set, or if we got some data.
65087 * But if a writer sleeps in kernel space, then
65088@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65089
65090 __pipe_lock(pipe);
65091
65092- if (!pipe->readers) {
65093+ if (!atomic_read(&pipe->readers)) {
65094 send_sig(SIGPIPE, current, 0);
65095 ret = -EPIPE;
65096 goto out;
65097@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65098 for (;;) {
65099 int bufs;
65100
65101- if (!pipe->readers) {
65102+ if (!atomic_read(&pipe->readers)) {
65103 send_sig(SIGPIPE, current, 0);
65104 if (!ret)
65105 ret = -EPIPE;
65106@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65107 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65108 do_wakeup = 0;
65109 }
65110- pipe->waiting_writers++;
65111+ atomic_inc(&pipe->waiting_writers);
65112 pipe_wait(pipe);
65113- pipe->waiting_writers--;
65114+ atomic_dec(&pipe->waiting_writers);
65115 }
65116 out:
65117 __pipe_unlock(pipe);
65118@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65119 mask = 0;
65120 if (filp->f_mode & FMODE_READ) {
65121 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65122- if (!pipe->writers && filp->f_version != pipe->w_counter)
65123+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65124 mask |= POLLHUP;
65125 }
65126
65127@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65128 * Most Unices do not set POLLERR for FIFOs but on Linux they
65129 * behave exactly like pipes for poll().
65130 */
65131- if (!pipe->readers)
65132+ if (!atomic_read(&pipe->readers))
65133 mask |= POLLERR;
65134 }
65135
65136@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65137 int kill = 0;
65138
65139 spin_lock(&inode->i_lock);
65140- if (!--pipe->files) {
65141+ if (atomic_dec_and_test(&pipe->files)) {
65142 inode->i_pipe = NULL;
65143 kill = 1;
65144 }
65145@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65146
65147 __pipe_lock(pipe);
65148 if (file->f_mode & FMODE_READ)
65149- pipe->readers--;
65150+ atomic_dec(&pipe->readers);
65151 if (file->f_mode & FMODE_WRITE)
65152- pipe->writers--;
65153+ atomic_dec(&pipe->writers);
65154
65155- if (pipe->readers || pipe->writers) {
65156+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65157 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65158 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65159 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65160@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65161 kfree(pipe);
65162 }
65163
65164-static struct vfsmount *pipe_mnt __read_mostly;
65165+struct vfsmount *pipe_mnt __read_mostly;
65166
65167 /*
65168 * pipefs_dname() is called from d_path().
65169@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65170 goto fail_iput;
65171
65172 inode->i_pipe = pipe;
65173- pipe->files = 2;
65174- pipe->readers = pipe->writers = 1;
65175+ atomic_set(&pipe->files, 2);
65176+ atomic_set(&pipe->readers, 1);
65177+ atomic_set(&pipe->writers, 1);
65178 inode->i_fop = &pipefifo_fops;
65179
65180 /*
65181@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65182 spin_lock(&inode->i_lock);
65183 if (inode->i_pipe) {
65184 pipe = inode->i_pipe;
65185- pipe->files++;
65186+ atomic_inc(&pipe->files);
65187 spin_unlock(&inode->i_lock);
65188 } else {
65189 spin_unlock(&inode->i_lock);
65190 pipe = alloc_pipe_info();
65191 if (!pipe)
65192 return -ENOMEM;
65193- pipe->files = 1;
65194+ atomic_set(&pipe->files, 1);
65195 spin_lock(&inode->i_lock);
65196 if (unlikely(inode->i_pipe)) {
65197- inode->i_pipe->files++;
65198+ atomic_inc(&inode->i_pipe->files);
65199 spin_unlock(&inode->i_lock);
65200 free_pipe_info(pipe);
65201 pipe = inode->i_pipe;
65202@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65203 * opened, even when there is no process writing the FIFO.
65204 */
65205 pipe->r_counter++;
65206- if (pipe->readers++ == 0)
65207+ if (atomic_inc_return(&pipe->readers) == 1)
65208 wake_up_partner(pipe);
65209
65210- if (!is_pipe && !pipe->writers) {
65211+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65212 if ((filp->f_flags & O_NONBLOCK)) {
65213 /* suppress POLLHUP until we have
65214 * seen a writer */
65215@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65216 * errno=ENXIO when there is no process reading the FIFO.
65217 */
65218 ret = -ENXIO;
65219- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65220+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65221 goto err;
65222
65223 pipe->w_counter++;
65224- if (!pipe->writers++)
65225+ if (atomic_inc_return(&pipe->writers) == 1)
65226 wake_up_partner(pipe);
65227
65228- if (!is_pipe && !pipe->readers) {
65229+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65230 if (wait_for_partner(pipe, &pipe->r_counter))
65231 goto err_wr;
65232 }
65233@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65234 * the process can at least talk to itself.
65235 */
65236
65237- pipe->readers++;
65238- pipe->writers++;
65239+ atomic_inc(&pipe->readers);
65240+ atomic_inc(&pipe->writers);
65241 pipe->r_counter++;
65242 pipe->w_counter++;
65243- if (pipe->readers == 1 || pipe->writers == 1)
65244+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65245 wake_up_partner(pipe);
65246 break;
65247
65248@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65249 return 0;
65250
65251 err_rd:
65252- if (!--pipe->readers)
65253+ if (atomic_dec_and_test(&pipe->readers))
65254 wake_up_interruptible(&pipe->wait);
65255 ret = -ERESTARTSYS;
65256 goto err;
65257
65258 err_wr:
65259- if (!--pipe->writers)
65260+ if (atomic_dec_and_test(&pipe->writers))
65261 wake_up_interruptible(&pipe->wait);
65262 ret = -ERESTARTSYS;
65263 goto err;
65264diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65265index 0855f77..6787d50 100644
65266--- a/fs/posix_acl.c
65267+++ b/fs/posix_acl.c
65268@@ -20,6 +20,7 @@
65269 #include <linux/xattr.h>
65270 #include <linux/export.h>
65271 #include <linux/user_namespace.h>
65272+#include <linux/grsecurity.h>
65273
65274 struct posix_acl **acl_by_type(struct inode *inode, int type)
65275 {
65276@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65277 }
65278 }
65279 if (mode_p)
65280- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65281+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65282 return not_equiv;
65283 }
65284 EXPORT_SYMBOL(posix_acl_equiv_mode);
65285@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65286 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65287 }
65288
65289- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65290+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65291 return not_equiv;
65292 }
65293
65294@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65295 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65296 int err = -ENOMEM;
65297 if (clone) {
65298+ *mode_p &= ~gr_acl_umask();
65299+
65300 err = posix_acl_create_masq(clone, mode_p);
65301 if (err < 0) {
65302 posix_acl_release(clone);
65303@@ -659,11 +662,12 @@ struct posix_acl *
65304 posix_acl_from_xattr(struct user_namespace *user_ns,
65305 const void *value, size_t size)
65306 {
65307- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65308- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65309+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65310+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65311 int count;
65312 struct posix_acl *acl;
65313 struct posix_acl_entry *acl_e;
65314+ umode_t umask = gr_acl_umask();
65315
65316 if (!value)
65317 return NULL;
65318@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65319
65320 switch(acl_e->e_tag) {
65321 case ACL_USER_OBJ:
65322+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65323+ break;
65324 case ACL_GROUP_OBJ:
65325 case ACL_MASK:
65326+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65327+ break;
65328 case ACL_OTHER:
65329+ acl_e->e_perm &= ~(umask & S_IRWXO);
65330 break;
65331
65332 case ACL_USER:
65333+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65334 acl_e->e_uid =
65335 make_kuid(user_ns,
65336 le32_to_cpu(entry->e_id));
65337@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65338 goto fail;
65339 break;
65340 case ACL_GROUP:
65341+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65342 acl_e->e_gid =
65343 make_kgid(user_ns,
65344 le32_to_cpu(entry->e_id));
65345diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65346index 2183fcf..3c32a98 100644
65347--- a/fs/proc/Kconfig
65348+++ b/fs/proc/Kconfig
65349@@ -30,7 +30,7 @@ config PROC_FS
65350
65351 config PROC_KCORE
65352 bool "/proc/kcore support" if !ARM
65353- depends on PROC_FS && MMU
65354+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65355 help
65356 Provides a virtual ELF core file of the live kernel. This can
65357 be read with gdb and other ELF tools. No modifications can be
65358@@ -38,8 +38,8 @@ config PROC_KCORE
65359
65360 config PROC_VMCORE
65361 bool "/proc/vmcore support"
65362- depends on PROC_FS && CRASH_DUMP
65363- default y
65364+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65365+ default n
65366 help
65367 Exports the dump image of crashed kernel in ELF format.
65368
65369@@ -63,8 +63,8 @@ config PROC_SYSCTL
65370 limited in memory.
65371
65372 config PROC_PAGE_MONITOR
65373- default y
65374- depends on PROC_FS && MMU
65375+ default n
65376+ depends on PROC_FS && MMU && !GRKERNSEC
65377 bool "Enable /proc page monitoring" if EXPERT
65378 help
65379 Various /proc files exist to monitor process memory utilization:
65380diff --git a/fs/proc/array.c b/fs/proc/array.c
65381index bd117d0..e6872d7 100644
65382--- a/fs/proc/array.c
65383+++ b/fs/proc/array.c
65384@@ -60,6 +60,7 @@
65385 #include <linux/tty.h>
65386 #include <linux/string.h>
65387 #include <linux/mman.h>
65388+#include <linux/grsecurity.h>
65389 #include <linux/proc_fs.h>
65390 #include <linux/ioport.h>
65391 #include <linux/uaccess.h>
65392@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65393 seq_putc(m, '\n');
65394 }
65395
65396+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65397+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65398+{
65399+ if (p->mm)
65400+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65401+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65402+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65403+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65404+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65405+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65406+ else
65407+ seq_printf(m, "PaX:\t-----\n");
65408+}
65409+#endif
65410+
65411 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65412 struct pid *pid, struct task_struct *task)
65413 {
65414@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65415 task_cpus_allowed(m, task);
65416 cpuset_task_status_allowed(m, task);
65417 task_context_switch_counts(m, task);
65418+
65419+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65420+ task_pax(m, task);
65421+#endif
65422+
65423+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65424+ task_grsec_rbac(m, task);
65425+#endif
65426+
65427 return 0;
65428 }
65429
65430+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65431+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65432+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65433+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65434+#endif
65435+
65436 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65437 struct pid *pid, struct task_struct *task, int whole)
65438 {
65439@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65440 char tcomm[sizeof(task->comm)];
65441 unsigned long flags;
65442
65443+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65444+ if (current->exec_id != m->exec_id) {
65445+ gr_log_badprocpid("stat");
65446+ return 0;
65447+ }
65448+#endif
65449+
65450 state = *get_task_state(task);
65451 vsize = eip = esp = 0;
65452 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65453@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65454 gtime = task_gtime(task);
65455 }
65456
65457+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65458+ if (PAX_RAND_FLAGS(mm)) {
65459+ eip = 0;
65460+ esp = 0;
65461+ wchan = 0;
65462+ }
65463+#endif
65464+#ifdef CONFIG_GRKERNSEC_HIDESYM
65465+ wchan = 0;
65466+ eip =0;
65467+ esp =0;
65468+#endif
65469+
65470 /* scale priority and nice values from timeslices to -20..20 */
65471 /* to make it look like a "normal" Unix priority/nice value */
65472 priority = task_prio(task);
65473@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65474 seq_put_decimal_ull(m, ' ', vsize);
65475 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65476 seq_put_decimal_ull(m, ' ', rsslim);
65477+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65478+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65479+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65480+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65481+#else
65482 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65483 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65484 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65485+#endif
65486 seq_put_decimal_ull(m, ' ', esp);
65487 seq_put_decimal_ull(m, ' ', eip);
65488 /* The signal information here is obsolete.
65489@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65490 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65491 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65492
65493- if (mm && permitted) {
65494+ if (mm && permitted
65495+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65496+ && !PAX_RAND_FLAGS(mm)
65497+#endif
65498+ ) {
65499 seq_put_decimal_ull(m, ' ', mm->start_data);
65500 seq_put_decimal_ull(m, ' ', mm->end_data);
65501 seq_put_decimal_ull(m, ' ', mm->start_brk);
65502@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65503 struct pid *pid, struct task_struct *task)
65504 {
65505 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65506- struct mm_struct *mm = get_task_mm(task);
65507+ struct mm_struct *mm;
65508
65509+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65510+ if (current->exec_id != m->exec_id) {
65511+ gr_log_badprocpid("statm");
65512+ return 0;
65513+ }
65514+#endif
65515+ mm = get_task_mm(task);
65516 if (mm) {
65517 size = task_statm(mm, &shared, &text, &data, &resident);
65518 mmput(mm);
65519@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65520 return 0;
65521 }
65522
65523+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65524+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65525+{
65526+ unsigned long flags;
65527+ u32 curr_ip = 0;
65528+
65529+ if (lock_task_sighand(task, &flags)) {
65530+ curr_ip = task->signal->curr_ip;
65531+ unlock_task_sighand(task, &flags);
65532+ }
65533+ return seq_printf(m, "%pI4\n", &curr_ip);
65534+}
65535+#endif
65536+
65537 #ifdef CONFIG_CHECKPOINT_RESTORE
65538 static struct pid *
65539 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
65540diff --git a/fs/proc/base.c b/fs/proc/base.c
65541index 3f3d7ae..68de109 100644
65542--- a/fs/proc/base.c
65543+++ b/fs/proc/base.c
65544@@ -113,6 +113,14 @@ struct pid_entry {
65545 union proc_op op;
65546 };
65547
65548+struct getdents_callback {
65549+ struct linux_dirent __user * current_dir;
65550+ struct linux_dirent __user * previous;
65551+ struct file * file;
65552+ int count;
65553+ int error;
65554+};
65555+
65556 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65557 .name = (NAME), \
65558 .len = sizeof(NAME) - 1, \
65559@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65560 return 0;
65561 }
65562
65563+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65564+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65565+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65566+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65567+#endif
65568+
65569 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65570 struct pid *pid, struct task_struct *task)
65571 {
65572 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65573 if (mm && !IS_ERR(mm)) {
65574 unsigned int nwords = 0;
65575+
65576+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65577+ /* allow if we're currently ptracing this task */
65578+ if (PAX_RAND_FLAGS(mm) &&
65579+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65580+ mmput(mm);
65581+ return 0;
65582+ }
65583+#endif
65584+
65585 do {
65586 nwords += 2;
65587 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65588@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65589 }
65590
65591
65592-#ifdef CONFIG_KALLSYMS
65593+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65594 /*
65595 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65596 * Returns the resolved symbol. If that fails, simply return the address.
65597@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65598 mutex_unlock(&task->signal->cred_guard_mutex);
65599 }
65600
65601-#ifdef CONFIG_STACKTRACE
65602+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65603
65604 #define MAX_STACK_TRACE_DEPTH 64
65605
65606@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65607 return 0;
65608 }
65609
65610-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65611+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65612 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65613 struct pid *pid, struct task_struct *task)
65614 {
65615@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65616 /************************************************************************/
65617
65618 /* permission checks */
65619-static int proc_fd_access_allowed(struct inode *inode)
65620+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65621 {
65622 struct task_struct *task;
65623 int allowed = 0;
65624@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65625 */
65626 task = get_proc_task(inode);
65627 if (task) {
65628- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65629+ if (log)
65630+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65631+ else
65632+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65633 put_task_struct(task);
65634 }
65635 return allowed;
65636@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65637 struct task_struct *task,
65638 int hide_pid_min)
65639 {
65640+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65641+ return false;
65642+
65643+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65644+ rcu_read_lock();
65645+ {
65646+ const struct cred *tmpcred = current_cred();
65647+ const struct cred *cred = __task_cred(task);
65648+
65649+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65650+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65651+ || in_group_p(grsec_proc_gid)
65652+#endif
65653+ ) {
65654+ rcu_read_unlock();
65655+ return true;
65656+ }
65657+ }
65658+ rcu_read_unlock();
65659+
65660+ if (!pid->hide_pid)
65661+ return false;
65662+#endif
65663+
65664 if (pid->hide_pid < hide_pid_min)
65665 return true;
65666 if (in_group_p(pid->pid_gid))
65667 return true;
65668+
65669 return ptrace_may_access(task, PTRACE_MODE_READ);
65670 }
65671
65672@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65673 put_task_struct(task);
65674
65675 if (!has_perms) {
65676+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65677+ {
65678+#else
65679 if (pid->hide_pid == 2) {
65680+#endif
65681 /*
65682 * Let's make getdents(), stat(), and open()
65683 * consistent with each other. If a process
65684@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65685
65686 if (task) {
65687 mm = mm_access(task, mode);
65688+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65689+ mmput(mm);
65690+ mm = ERR_PTR(-EPERM);
65691+ }
65692 put_task_struct(task);
65693
65694 if (!IS_ERR_OR_NULL(mm)) {
65695@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65696 return PTR_ERR(mm);
65697
65698 file->private_data = mm;
65699+
65700+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65701+ file->f_version = current->exec_id;
65702+#endif
65703+
65704 return 0;
65705 }
65706
65707@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65708 ssize_t copied;
65709 char *page;
65710
65711+#ifdef CONFIG_GRKERNSEC
65712+ if (write)
65713+ return -EPERM;
65714+#endif
65715+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65716+ if (file->f_version != current->exec_id) {
65717+ gr_log_badprocpid("mem");
65718+ return 0;
65719+ }
65720+#endif
65721+
65722 if (!mm)
65723 return 0;
65724
65725@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65726 goto free;
65727
65728 while (count > 0) {
65729- int this_len = min_t(int, count, PAGE_SIZE);
65730+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
65731
65732 if (write && copy_from_user(page, buf, this_len)) {
65733 copied = -EFAULT;
65734@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65735 if (!mm)
65736 return 0;
65737
65738+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65739+ if (file->f_version != current->exec_id) {
65740+ gr_log_badprocpid("environ");
65741+ return 0;
65742+ }
65743+#endif
65744+
65745 page = (char *)__get_free_page(GFP_TEMPORARY);
65746 if (!page)
65747 return -ENOMEM;
65748@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65749 goto free;
65750 while (count > 0) {
65751 size_t this_len, max_len;
65752- int retval;
65753+ ssize_t retval;
65754
65755 if (src >= (mm->env_end - mm->env_start))
65756 break;
65757@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
65758 int error = -EACCES;
65759
65760 /* Are we allowed to snoop on the tasks file descriptors? */
65761- if (!proc_fd_access_allowed(inode))
65762+ if (!proc_fd_access_allowed(inode, 0))
65763 goto out;
65764
65765 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65766@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
65767 struct path path;
65768
65769 /* Are we allowed to snoop on the tasks file descriptors? */
65770- if (!proc_fd_access_allowed(inode))
65771- goto out;
65772+ /* logging this is needed for learning on chromium to work properly,
65773+ but we don't want to flood the logs from 'ps' which does a readlink
65774+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
65775+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
65776+ */
65777+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
65778+ if (!proc_fd_access_allowed(inode,0))
65779+ goto out;
65780+ } else {
65781+ if (!proc_fd_access_allowed(inode,1))
65782+ goto out;
65783+ }
65784
65785 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65786 if (error)
65787@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
65788 rcu_read_lock();
65789 cred = __task_cred(task);
65790 inode->i_uid = cred->euid;
65791+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65792+ inode->i_gid = grsec_proc_gid;
65793+#else
65794 inode->i_gid = cred->egid;
65795+#endif
65796 rcu_read_unlock();
65797 }
65798 security_task_to_inode(task, inode);
65799@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
65800 return -ENOENT;
65801 }
65802 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65803+#ifdef CONFIG_GRKERNSEC_PROC_USER
65804+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65805+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65806+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65807+#endif
65808 task_dumpable(task)) {
65809 cred = __task_cred(task);
65810 stat->uid = cred->euid;
65811+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65812+ stat->gid = grsec_proc_gid;
65813+#else
65814 stat->gid = cred->egid;
65815+#endif
65816 }
65817 }
65818 rcu_read_unlock();
65819@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
65820
65821 if (task) {
65822 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65823+#ifdef CONFIG_GRKERNSEC_PROC_USER
65824+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65825+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65826+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65827+#endif
65828 task_dumpable(task)) {
65829 rcu_read_lock();
65830 cred = __task_cred(task);
65831 inode->i_uid = cred->euid;
65832+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65833+ inode->i_gid = grsec_proc_gid;
65834+#else
65835 inode->i_gid = cred->egid;
65836+#endif
65837 rcu_read_unlock();
65838 } else {
65839 inode->i_uid = GLOBAL_ROOT_UID;
65840@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
65841 if (!task)
65842 goto out_no_task;
65843
65844+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65845+ goto out;
65846+
65847 /*
65848 * Yes, it does not scale. And it should not. Don't add
65849 * new entries into /proc/<tgid>/ without very good reasons.
65850@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
65851 if (!task)
65852 return -ENOENT;
65853
65854+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65855+ goto out;
65856+
65857 if (!dir_emit_dots(file, ctx))
65858 goto out;
65859
65860@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
65861 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
65862 #endif
65863 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65864-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65865+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65866 ONE("syscall", S_IRUSR, proc_pid_syscall),
65867 #endif
65868 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65869@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
65870 #ifdef CONFIG_SECURITY
65871 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65872 #endif
65873-#ifdef CONFIG_KALLSYMS
65874+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65875 ONE("wchan", S_IRUGO, proc_pid_wchan),
65876 #endif
65877-#ifdef CONFIG_STACKTRACE
65878+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65879 ONE("stack", S_IRUSR, proc_pid_stack),
65880 #endif
65881 #ifdef CONFIG_SCHEDSTATS
65882@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
65883 #ifdef CONFIG_HARDWALL
65884 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
65885 #endif
65886+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65887+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
65888+#endif
65889 #ifdef CONFIG_USER_NS
65890 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
65891 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
65892@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
65893 if (!inode)
65894 goto out;
65895
65896+#ifdef CONFIG_GRKERNSEC_PROC_USER
65897+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
65898+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65899+ inode->i_gid = grsec_proc_gid;
65900+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
65901+#else
65902 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
65903+#endif
65904 inode->i_op = &proc_tgid_base_inode_operations;
65905 inode->i_fop = &proc_tgid_base_operations;
65906 inode->i_flags|=S_IMMUTABLE;
65907@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
65908 if (!task)
65909 goto out;
65910
65911+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65912+ goto out_put_task;
65913+
65914 result = proc_pid_instantiate(dir, dentry, task, NULL);
65915+out_put_task:
65916 put_task_struct(task);
65917 out:
65918 return ERR_PTR(result);
65919@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
65920 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
65921 #endif
65922 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
65923-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65924+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65925 ONE("syscall", S_IRUSR, proc_pid_syscall),
65926 #endif
65927 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
65928@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
65929 #ifdef CONFIG_SECURITY
65930 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
65931 #endif
65932-#ifdef CONFIG_KALLSYMS
65933+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65934 ONE("wchan", S_IRUGO, proc_pid_wchan),
65935 #endif
65936-#ifdef CONFIG_STACKTRACE
65937+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65938 ONE("stack", S_IRUSR, proc_pid_stack),
65939 #endif
65940 #ifdef CONFIG_SCHEDSTATS
65941diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
65942index cbd82df..c0407d2 100644
65943--- a/fs/proc/cmdline.c
65944+++ b/fs/proc/cmdline.c
65945@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
65946
65947 static int __init proc_cmdline_init(void)
65948 {
65949+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65950+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
65951+#else
65952 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
65953+#endif
65954 return 0;
65955 }
65956 fs_initcall(proc_cmdline_init);
65957diff --git a/fs/proc/devices.c b/fs/proc/devices.c
65958index 50493ed..248166b 100644
65959--- a/fs/proc/devices.c
65960+++ b/fs/proc/devices.c
65961@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
65962
65963 static int __init proc_devices_init(void)
65964 {
65965+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65966+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
65967+#else
65968 proc_create("devices", 0, NULL, &proc_devinfo_operations);
65969+#endif
65970 return 0;
65971 }
65972 fs_initcall(proc_devices_init);
65973diff --git a/fs/proc/fd.c b/fs/proc/fd.c
65974index 8e5ad83..1f07a8c 100644
65975--- a/fs/proc/fd.c
65976+++ b/fs/proc/fd.c
65977@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
65978 if (!task)
65979 return -ENOENT;
65980
65981- files = get_files_struct(task);
65982+ if (!gr_acl_handle_procpidmem(task))
65983+ files = get_files_struct(task);
65984 put_task_struct(task);
65985
65986 if (files) {
65987@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
65988 */
65989 int proc_fd_permission(struct inode *inode, int mask)
65990 {
65991+ struct task_struct *task;
65992 int rv = generic_permission(inode, mask);
65993- if (rv == 0)
65994- return 0;
65995+
65996 if (task_tgid(current) == proc_pid(inode))
65997 rv = 0;
65998+
65999+ task = get_proc_task(inode);
66000+ if (task == NULL)
66001+ return rv;
66002+
66003+ if (gr_acl_handle_procpidmem(task))
66004+ rv = -EACCES;
66005+
66006+ put_task_struct(task);
66007+
66008 return rv;
66009 }
66010
66011diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66012index b502bba..849e216 100644
66013--- a/fs/proc/generic.c
66014+++ b/fs/proc/generic.c
66015@@ -22,6 +22,7 @@
66016 #include <linux/bitops.h>
66017 #include <linux/spinlock.h>
66018 #include <linux/completion.h>
66019+#include <linux/grsecurity.h>
66020 #include <asm/uaccess.h>
66021
66022 #include "internal.h"
66023@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66024 return proc_lookup_de(PDE(dir), dir, dentry);
66025 }
66026
66027+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66028+ unsigned int flags)
66029+{
66030+ if (gr_proc_is_restricted())
66031+ return ERR_PTR(-EACCES);
66032+
66033+ return proc_lookup_de(PDE(dir), dir, dentry);
66034+}
66035+
66036 /*
66037 * This returns non-zero if at EOF, so that the /proc
66038 * root directory can use this and check if it should
66039@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66040 return proc_readdir_de(PDE(inode), file, ctx);
66041 }
66042
66043+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66044+{
66045+ struct inode *inode = file_inode(file);
66046+
66047+ if (gr_proc_is_restricted())
66048+ return -EACCES;
66049+
66050+ return proc_readdir_de(PDE(inode), file, ctx);
66051+}
66052+
66053 /*
66054 * These are the generic /proc directory operations. They
66055 * use the in-memory "struct proc_dir_entry" tree to parse
66056@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66057 .iterate = proc_readdir,
66058 };
66059
66060+static const struct file_operations proc_dir_restricted_operations = {
66061+ .llseek = generic_file_llseek,
66062+ .read = generic_read_dir,
66063+ .iterate = proc_readdir_restrict,
66064+};
66065+
66066 /*
66067 * proc directories can do almost nothing..
66068 */
66069@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66070 .setattr = proc_notify_change,
66071 };
66072
66073+static const struct inode_operations proc_dir_restricted_inode_operations = {
66074+ .lookup = proc_lookup_restrict,
66075+ .getattr = proc_getattr,
66076+ .setattr = proc_notify_change,
66077+};
66078+
66079 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66080 {
66081 int ret;
66082@@ -339,8 +371,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66083 return ret;
66084
66085 if (S_ISDIR(dp->mode)) {
66086- dp->proc_fops = &proc_dir_operations;
66087- dp->proc_iops = &proc_dir_inode_operations;
66088+ if (dp->restricted) {
66089+ dp->proc_fops = &proc_dir_restricted_operations;
66090+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66091+ } else {
66092+ dp->proc_fops = &proc_dir_operations;
66093+ dp->proc_iops = &proc_dir_inode_operations;
66094+ }
66095 dir->nlink++;
66096 } else if (S_ISLNK(dp->mode)) {
66097 dp->proc_iops = &proc_link_inode_operations;
66098@@ -453,6 +490,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66099 }
66100 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66101
66102+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66103+ struct proc_dir_entry *parent, void *data)
66104+{
66105+ struct proc_dir_entry *ent;
66106+
66107+ if (mode == 0)
66108+ mode = S_IRUGO | S_IXUGO;
66109+
66110+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66111+ if (ent) {
66112+ ent->data = data;
66113+ ent->restricted = 1;
66114+ if (proc_register(parent, ent) < 0) {
66115+ kfree(ent);
66116+ ent = NULL;
66117+ }
66118+ }
66119+ return ent;
66120+}
66121+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66122+
66123 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66124 struct proc_dir_entry *parent)
66125 {
66126@@ -467,6 +525,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66127 }
66128 EXPORT_SYMBOL(proc_mkdir);
66129
66130+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66131+ struct proc_dir_entry *parent)
66132+{
66133+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66134+}
66135+EXPORT_SYMBOL(proc_mkdir_restrict);
66136+
66137 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66138 struct proc_dir_entry *parent,
66139 const struct file_operations *proc_fops,
66140diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66141index 3b0f838..a0e0f63e 100644
66142--- a/fs/proc/inode.c
66143+++ b/fs/proc/inode.c
66144@@ -24,11 +24,17 @@
66145 #include <linux/mount.h>
66146 #include <linux/magic.h>
66147 #include <linux/namei.h>
66148+#include <linux/grsecurity.h>
66149
66150 #include <asm/uaccess.h>
66151
66152 #include "internal.h"
66153
66154+#ifdef CONFIG_PROC_SYSCTL
66155+extern const struct inode_operations proc_sys_inode_operations;
66156+extern const struct inode_operations proc_sys_dir_operations;
66157+#endif
66158+
66159 static void proc_evict_inode(struct inode *inode)
66160 {
66161 struct proc_dir_entry *de;
66162@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66163 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66164 sysctl_head_put(head);
66165 }
66166+
66167+#ifdef CONFIG_PROC_SYSCTL
66168+ if (inode->i_op == &proc_sys_inode_operations ||
66169+ inode->i_op == &proc_sys_dir_operations)
66170+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66171+#endif
66172+
66173 }
66174
66175 static struct kmem_cache * proc_inode_cachep;
66176@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66177 if (de->mode) {
66178 inode->i_mode = de->mode;
66179 inode->i_uid = de->uid;
66180+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66181+ inode->i_gid = grsec_proc_gid;
66182+#else
66183 inode->i_gid = de->gid;
66184+#endif
66185 }
66186 if (de->size)
66187 inode->i_size = de->size;
66188diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66189index c835b94..c9e01a3 100644
66190--- a/fs/proc/internal.h
66191+++ b/fs/proc/internal.h
66192@@ -47,9 +47,10 @@ struct proc_dir_entry {
66193 struct completion *pde_unload_completion;
66194 struct list_head pde_openers; /* who did ->open, but not ->release */
66195 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66196+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66197 u8 namelen;
66198 char name[];
66199-};
66200+} __randomize_layout;
66201
66202 union proc_op {
66203 int (*proc_get_link)(struct dentry *, struct path *);
66204@@ -67,7 +68,7 @@ struct proc_inode {
66205 struct ctl_table *sysctl_entry;
66206 const struct proc_ns_operations *ns_ops;
66207 struct inode vfs_inode;
66208-};
66209+} __randomize_layout;
66210
66211 /*
66212 * General functions
66213@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66214 struct pid *, struct task_struct *);
66215 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66216 struct pid *, struct task_struct *);
66217+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66218+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66219+ struct pid *, struct task_struct *);
66220+#endif
66221
66222 /*
66223 * base.c
66224@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66225 * generic.c
66226 */
66227 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66228+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66229 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66230 struct dentry *);
66231 extern int proc_readdir(struct file *, struct dir_context *);
66232+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66233 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66234
66235 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66236diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66237index a352d57..cb94a5c 100644
66238--- a/fs/proc/interrupts.c
66239+++ b/fs/proc/interrupts.c
66240@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66241
66242 static int __init proc_interrupts_init(void)
66243 {
66244+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66245+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66246+#else
66247 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66248+#endif
66249 return 0;
66250 }
66251 fs_initcall(proc_interrupts_init);
66252diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66253index 91a4e64..cb007c0 100644
66254--- a/fs/proc/kcore.c
66255+++ b/fs/proc/kcore.c
66256@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66257 * the addresses in the elf_phdr on our list.
66258 */
66259 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66260- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66261+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66262+ if (tsz > buflen)
66263 tsz = buflen;
66264-
66265+
66266 while (buflen) {
66267 struct kcore_list *m;
66268
66269@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66270 kfree(elf_buf);
66271 } else {
66272 if (kern_addr_valid(start)) {
66273- unsigned long n;
66274+ char *elf_buf;
66275+ mm_segment_t oldfs;
66276
66277- n = copy_to_user(buffer, (char *)start, tsz);
66278- /*
66279- * We cannot distinguish between fault on source
66280- * and fault on destination. When this happens
66281- * we clear too and hope it will trigger the
66282- * EFAULT again.
66283- */
66284- if (n) {
66285- if (clear_user(buffer + tsz - n,
66286- n))
66287+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66288+ if (!elf_buf)
66289+ return -ENOMEM;
66290+ oldfs = get_fs();
66291+ set_fs(KERNEL_DS);
66292+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66293+ set_fs(oldfs);
66294+ if (copy_to_user(buffer, elf_buf, tsz)) {
66295+ kfree(elf_buf);
66296 return -EFAULT;
66297+ }
66298 }
66299+ set_fs(oldfs);
66300+ kfree(elf_buf);
66301 } else {
66302 if (clear_user(buffer, tsz))
66303 return -EFAULT;
66304@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66305
66306 static int open_kcore(struct inode *inode, struct file *filp)
66307 {
66308+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66309+ return -EPERM;
66310+#endif
66311 if (!capable(CAP_SYS_RAWIO))
66312 return -EPERM;
66313 if (kcore_need_update)
66314diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66315index d3ebf2e..6ad42d1 100644
66316--- a/fs/proc/meminfo.c
66317+++ b/fs/proc/meminfo.c
66318@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66319 vmi.used >> 10,
66320 vmi.largest_chunk >> 10
66321 #ifdef CONFIG_MEMORY_FAILURE
66322- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66323+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66324 #endif
66325 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66326 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66327diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66328index d4a3574..b421ce9 100644
66329--- a/fs/proc/nommu.c
66330+++ b/fs/proc/nommu.c
66331@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66332
66333 if (file) {
66334 seq_pad(m, ' ');
66335- seq_path(m, &file->f_path, "");
66336+ seq_path(m, &file->f_path, "\n\\");
66337 }
66338
66339 seq_putc(m, '\n');
66340diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66341index 1bde894..22ac7eb 100644
66342--- a/fs/proc/proc_net.c
66343+++ b/fs/proc/proc_net.c
66344@@ -23,9 +23,27 @@
66345 #include <linux/nsproxy.h>
66346 #include <net/net_namespace.h>
66347 #include <linux/seq_file.h>
66348+#include <linux/grsecurity.h>
66349
66350 #include "internal.h"
66351
66352+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66353+static struct seq_operations *ipv6_seq_ops_addr;
66354+
66355+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66356+{
66357+ ipv6_seq_ops_addr = addr;
66358+}
66359+
66360+void unregister_ipv6_seq_ops_addr(void)
66361+{
66362+ ipv6_seq_ops_addr = NULL;
66363+}
66364+
66365+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66366+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66367+#endif
66368+
66369 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66370 {
66371 return pde->parent->data;
66372@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66373 return maybe_get_net(PDE_NET(PDE(inode)));
66374 }
66375
66376+extern const struct seq_operations dev_seq_ops;
66377+
66378 int seq_open_net(struct inode *ino, struct file *f,
66379 const struct seq_operations *ops, int size)
66380 {
66381@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66382
66383 BUG_ON(size < sizeof(*p));
66384
66385+ /* only permit access to /proc/net/dev */
66386+ if (
66387+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66388+ ops != ipv6_seq_ops_addr &&
66389+#endif
66390+ ops != &dev_seq_ops && gr_proc_is_restricted())
66391+ return -EACCES;
66392+
66393 net = get_proc_net(ino);
66394 if (net == NULL)
66395 return -ENXIO;
66396@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66397 int err;
66398 struct net *net;
66399
66400+ if (gr_proc_is_restricted())
66401+ return -EACCES;
66402+
66403 err = -ENXIO;
66404 net = get_proc_net(inode);
66405 if (net == NULL)
66406diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66407index f92d5dd..26398ac 100644
66408--- a/fs/proc/proc_sysctl.c
66409+++ b/fs/proc/proc_sysctl.c
66410@@ -11,13 +11,21 @@
66411 #include <linux/namei.h>
66412 #include <linux/mm.h>
66413 #include <linux/module.h>
66414+#include <linux/nsproxy.h>
66415+#ifdef CONFIG_GRKERNSEC
66416+#include <net/net_namespace.h>
66417+#endif
66418 #include "internal.h"
66419
66420+extern int gr_handle_chroot_sysctl(const int op);
66421+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66422+ const int op);
66423+
66424 static const struct dentry_operations proc_sys_dentry_operations;
66425 static const struct file_operations proc_sys_file_operations;
66426-static const struct inode_operations proc_sys_inode_operations;
66427+const struct inode_operations proc_sys_inode_operations;
66428 static const struct file_operations proc_sys_dir_file_operations;
66429-static const struct inode_operations proc_sys_dir_operations;
66430+const struct inode_operations proc_sys_dir_operations;
66431
66432 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66433 {
66434@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66435
66436 err = NULL;
66437 d_set_d_op(dentry, &proc_sys_dentry_operations);
66438+
66439+ gr_handle_proc_create(dentry, inode);
66440+
66441 d_add(dentry, inode);
66442
66443 out:
66444@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66445 struct inode *inode = file_inode(filp);
66446 struct ctl_table_header *head = grab_header(inode);
66447 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66448+ int op = write ? MAY_WRITE : MAY_READ;
66449 ssize_t error;
66450 size_t res;
66451
66452@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66453 * and won't be until we finish.
66454 */
66455 error = -EPERM;
66456- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66457+ if (sysctl_perm(head, table, op))
66458 goto out;
66459
66460 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66461@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66462 if (!table->proc_handler)
66463 goto out;
66464
66465+#ifdef CONFIG_GRKERNSEC
66466+ error = -EPERM;
66467+ if (gr_handle_chroot_sysctl(op))
66468+ goto out;
66469+ dget(filp->f_path.dentry);
66470+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66471+ dput(filp->f_path.dentry);
66472+ goto out;
66473+ }
66474+ dput(filp->f_path.dentry);
66475+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66476+ goto out;
66477+ if (write) {
66478+ if (current->nsproxy->net_ns != table->extra2) {
66479+ if (!capable(CAP_SYS_ADMIN))
66480+ goto out;
66481+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66482+ goto out;
66483+ }
66484+#endif
66485+
66486 /* careful: calling conventions are nasty here */
66487 res = count;
66488 error = table->proc_handler(table, write, buf, &res, ppos);
66489@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66490 return false;
66491 } else {
66492 d_set_d_op(child, &proc_sys_dentry_operations);
66493+
66494+ gr_handle_proc_create(child, inode);
66495+
66496 d_add(child, inode);
66497 }
66498 } else {
66499@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66500 if ((*pos)++ < ctx->pos)
66501 return true;
66502
66503+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66504+ return 0;
66505+
66506 if (unlikely(S_ISLNK(table->mode)))
66507 res = proc_sys_link_fill_cache(file, ctx, head, table);
66508 else
66509@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66510 if (IS_ERR(head))
66511 return PTR_ERR(head);
66512
66513+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66514+ return -ENOENT;
66515+
66516 generic_fillattr(inode, stat);
66517 if (table)
66518 stat->mode = (stat->mode & S_IFMT) | table->mode;
66519@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66520 .llseek = generic_file_llseek,
66521 };
66522
66523-static const struct inode_operations proc_sys_inode_operations = {
66524+const struct inode_operations proc_sys_inode_operations = {
66525 .permission = proc_sys_permission,
66526 .setattr = proc_sys_setattr,
66527 .getattr = proc_sys_getattr,
66528 };
66529
66530-static const struct inode_operations proc_sys_dir_operations = {
66531+const struct inode_operations proc_sys_dir_operations = {
66532 .lookup = proc_sys_lookup,
66533 .permission = proc_sys_permission,
66534 .setattr = proc_sys_setattr,
66535@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66536 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66537 const char *name, int namelen)
66538 {
66539- struct ctl_table *table;
66540+ ctl_table_no_const *table;
66541 struct ctl_dir *new;
66542 struct ctl_node *node;
66543 char *new_name;
66544@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66545 return NULL;
66546
66547 node = (struct ctl_node *)(new + 1);
66548- table = (struct ctl_table *)(node + 1);
66549+ table = (ctl_table_no_const *)(node + 1);
66550 new_name = (char *)(table + 2);
66551 memcpy(new_name, name, namelen);
66552 new_name[namelen] = '\0';
66553@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66554 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66555 struct ctl_table_root *link_root)
66556 {
66557- struct ctl_table *link_table, *entry, *link;
66558+ ctl_table_no_const *link_table, *link;
66559+ struct ctl_table *entry;
66560 struct ctl_table_header *links;
66561 struct ctl_node *node;
66562 char *link_name;
66563@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66564 return NULL;
66565
66566 node = (struct ctl_node *)(links + 1);
66567- link_table = (struct ctl_table *)(node + nr_entries);
66568+ link_table = (ctl_table_no_const *)(node + nr_entries);
66569 link_name = (char *)&link_table[nr_entries + 1];
66570
66571 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66572@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66573 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66574 struct ctl_table *table)
66575 {
66576- struct ctl_table *ctl_table_arg = NULL;
66577- struct ctl_table *entry, *files;
66578+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66579+ struct ctl_table *entry;
66580 int nr_files = 0;
66581 int nr_dirs = 0;
66582 int err = -ENOMEM;
66583@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66584 nr_files++;
66585 }
66586
66587- files = table;
66588 /* If there are mixed files and directories we need a new table */
66589 if (nr_dirs && nr_files) {
66590- struct ctl_table *new;
66591+ ctl_table_no_const *new;
66592 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66593 GFP_KERNEL);
66594 if (!files)
66595@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66596 /* Register everything except a directory full of subdirectories */
66597 if (nr_files || !nr_dirs) {
66598 struct ctl_table_header *header;
66599- header = __register_sysctl_table(set, path, files);
66600+ header = __register_sysctl_table(set, path, files ? files : table);
66601 if (!header) {
66602 kfree(ctl_table_arg);
66603 goto out;
66604diff --git a/fs/proc/root.c b/fs/proc/root.c
66605index e74ac9f..35e89f4 100644
66606--- a/fs/proc/root.c
66607+++ b/fs/proc/root.c
66608@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66609 proc_mkdir("openprom", NULL);
66610 #endif
66611 proc_tty_init();
66612+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66613+#ifdef CONFIG_GRKERNSEC_PROC_USER
66614+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66615+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66616+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66617+#endif
66618+#else
66619 proc_mkdir("bus", NULL);
66620+#endif
66621 proc_sys_init();
66622 }
66623
66624diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66625index 510413eb..34d9a8c 100644
66626--- a/fs/proc/stat.c
66627+++ b/fs/proc/stat.c
66628@@ -11,6 +11,7 @@
66629 #include <linux/irqnr.h>
66630 #include <linux/cputime.h>
66631 #include <linux/tick.h>
66632+#include <linux/grsecurity.h>
66633
66634 #ifndef arch_irq_stat_cpu
66635 #define arch_irq_stat_cpu(cpu) 0
66636@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66637 u64 sum_softirq = 0;
66638 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66639 struct timespec boottime;
66640+ int unrestricted = 1;
66641+
66642+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66643+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66644+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66645+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66646+ && !in_group_p(grsec_proc_gid)
66647+#endif
66648+ )
66649+ unrestricted = 0;
66650+#endif
66651+#endif
66652
66653 user = nice = system = idle = iowait =
66654 irq = softirq = steal = 0;
66655@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66656 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66657 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66658 idle += get_idle_time(i);
66659- iowait += get_iowait_time(i);
66660- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66661- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66662- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66663- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66664- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66665- sum += kstat_cpu_irqs_sum(i);
66666- sum += arch_irq_stat_cpu(i);
66667+ if (unrestricted) {
66668+ iowait += get_iowait_time(i);
66669+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66670+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66671+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66672+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66673+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66674+ sum += kstat_cpu_irqs_sum(i);
66675+ sum += arch_irq_stat_cpu(i);
66676+ for (j = 0; j < NR_SOFTIRQS; j++) {
66677+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66678
66679- for (j = 0; j < NR_SOFTIRQS; j++) {
66680- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66681-
66682- per_softirq_sums[j] += softirq_stat;
66683- sum_softirq += softirq_stat;
66684+ per_softirq_sums[j] += softirq_stat;
66685+ sum_softirq += softirq_stat;
66686+ }
66687 }
66688 }
66689- sum += arch_irq_stat();
66690+ if (unrestricted)
66691+ sum += arch_irq_stat();
66692
66693 seq_puts(p, "cpu ");
66694 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66695@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66696 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66697 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66698 idle = get_idle_time(i);
66699- iowait = get_iowait_time(i);
66700- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66701- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66702- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66703- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66704- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66705+ if (unrestricted) {
66706+ iowait = get_iowait_time(i);
66707+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66708+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66709+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66710+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66711+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66712+ }
66713 seq_printf(p, "cpu%d", i);
66714 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66715 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
66716@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
66717
66718 /* sum again ? it could be updated? */
66719 for_each_irq_nr(j)
66720- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
66721+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
66722
66723 seq_printf(p,
66724 "\nctxt %llu\n"
66725@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
66726 "processes %lu\n"
66727 "procs_running %lu\n"
66728 "procs_blocked %lu\n",
66729- nr_context_switches(),
66730+ unrestricted ? nr_context_switches() : 0ULL,
66731 (unsigned long)jif,
66732- total_forks,
66733- nr_running(),
66734- nr_iowait());
66735+ unrestricted ? total_forks : 0UL,
66736+ unrestricted ? nr_running() : 0UL,
66737+ unrestricted ? nr_iowait() : 0UL);
66738
66739 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
66740
66741diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
66742index f86e549..3a88fcd 100644
66743--- a/fs/proc/task_mmu.c
66744+++ b/fs/proc/task_mmu.c
66745@@ -13,12 +13,19 @@
66746 #include <linux/swap.h>
66747 #include <linux/swapops.h>
66748 #include <linux/mmu_notifier.h>
66749+#include <linux/grsecurity.h>
66750
66751 #include <asm/elf.h>
66752 #include <asm/uaccess.h>
66753 #include <asm/tlbflush.h>
66754 #include "internal.h"
66755
66756+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66757+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66758+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66759+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66760+#endif
66761+
66762 void task_mem(struct seq_file *m, struct mm_struct *mm)
66763 {
66764 unsigned long data, text, lib, swap;
66765@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66766 "VmExe:\t%8lu kB\n"
66767 "VmLib:\t%8lu kB\n"
66768 "VmPTE:\t%8lu kB\n"
66769- "VmSwap:\t%8lu kB\n",
66770- hiwater_vm << (PAGE_SHIFT-10),
66771+ "VmSwap:\t%8lu kB\n"
66772+
66773+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66774+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
66775+#endif
66776+
66777+ ,hiwater_vm << (PAGE_SHIFT-10),
66778 total_vm << (PAGE_SHIFT-10),
66779 mm->locked_vm << (PAGE_SHIFT-10),
66780 mm->pinned_vm << (PAGE_SHIFT-10),
66781@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66782 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66783 (PTRS_PER_PTE * sizeof(pte_t) *
66784 atomic_long_read(&mm->nr_ptes)) >> 10,
66785- swap << (PAGE_SHIFT-10));
66786+ swap << (PAGE_SHIFT-10)
66787+
66788+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66789+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66790+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
66791+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
66792+#else
66793+ , mm->context.user_cs_base
66794+ , mm->context.user_cs_limit
66795+#endif
66796+#endif
66797+
66798+ );
66799 }
66800
66801 unsigned long task_vsize(struct mm_struct *mm)
66802@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66803 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
66804 }
66805
66806- /* We don't show the stack guard page in /proc/maps */
66807+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66808+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
66809+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
66810+#else
66811 start = vma->vm_start;
66812- if (stack_guard_page_start(vma, start))
66813- start += PAGE_SIZE;
66814 end = vma->vm_end;
66815- if (stack_guard_page_end(vma, end))
66816- end -= PAGE_SIZE;
66817+#endif
66818
66819 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
66820 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
66821@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66822 flags & VM_WRITE ? 'w' : '-',
66823 flags & VM_EXEC ? 'x' : '-',
66824 flags & VM_MAYSHARE ? 's' : 'p',
66825+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66826+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
66827+#else
66828 pgoff,
66829+#endif
66830 MAJOR(dev), MINOR(dev), ino);
66831
66832 /*
66833@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66834 */
66835 if (file) {
66836 seq_pad(m, ' ');
66837- seq_path(m, &file->f_path, "\n");
66838+ seq_path(m, &file->f_path, "\n\\");
66839 goto done;
66840 }
66841
66842@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66843 * Thread stack in /proc/PID/task/TID/maps or
66844 * the main process stack.
66845 */
66846- if (!is_pid || (vma->vm_start <= mm->start_stack &&
66847- vma->vm_end >= mm->start_stack)) {
66848+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
66849+ (vma->vm_start <= mm->start_stack &&
66850+ vma->vm_end >= mm->start_stack)) {
66851 name = "[stack]";
66852 } else {
66853 /* Thread stack in /proc/PID/maps */
66854@@ -359,6 +388,12 @@ done:
66855
66856 static int show_map(struct seq_file *m, void *v, int is_pid)
66857 {
66858+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66859+ if (current->exec_id != m->exec_id) {
66860+ gr_log_badprocpid("maps");
66861+ return 0;
66862+ }
66863+#endif
66864 show_map_vma(m, v, is_pid);
66865 m_cache_vma(m, v);
66866 return 0;
66867@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66868 .private = &mss,
66869 };
66870
66871+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66872+ if (current->exec_id != m->exec_id) {
66873+ gr_log_badprocpid("smaps");
66874+ return 0;
66875+ }
66876+#endif
66877 memset(&mss, 0, sizeof mss);
66878- mss.vma = vma;
66879- /* mmap_sem is held in m_start */
66880- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66881- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66882-
66883+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66884+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
66885+#endif
66886+ mss.vma = vma;
66887+ /* mmap_sem is held in m_start */
66888+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
66889+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
66890+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66891+ }
66892+#endif
66893 show_map_vma(m, vma, is_pid);
66894
66895 seq_printf(m,
66896@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
66897 "KernelPageSize: %8lu kB\n"
66898 "MMUPageSize: %8lu kB\n"
66899 "Locked: %8lu kB\n",
66900+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66901+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
66902+#else
66903 (vma->vm_end - vma->vm_start) >> 10,
66904+#endif
66905 mss.resident >> 10,
66906 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
66907 mss.shared_clean >> 10,
66908@@ -1489,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66909 char buffer[64];
66910 int nid;
66911
66912+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66913+ if (current->exec_id != m->exec_id) {
66914+ gr_log_badprocpid("numa_maps");
66915+ return 0;
66916+ }
66917+#endif
66918+
66919 if (!mm)
66920 return 0;
66921
66922@@ -1510,11 +1567,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
66923 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
66924 }
66925
66926+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66927+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
66928+#else
66929 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
66930+#endif
66931
66932 if (file) {
66933 seq_puts(m, " file=");
66934- seq_path(m, &file->f_path, "\n\t= ");
66935+ seq_path(m, &file->f_path, "\n\t\\= ");
66936 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
66937 seq_puts(m, " heap");
66938 } else {
66939diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
66940index 599ec2e..f1413ae 100644
66941--- a/fs/proc/task_nommu.c
66942+++ b/fs/proc/task_nommu.c
66943@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66944 else
66945 bytes += kobjsize(mm);
66946
66947- if (current->fs && current->fs->users > 1)
66948+ if (current->fs && atomic_read(&current->fs->users) > 1)
66949 sbytes += kobjsize(current->fs);
66950 else
66951 bytes += kobjsize(current->fs);
66952@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
66953
66954 if (file) {
66955 seq_pad(m, ' ');
66956- seq_path(m, &file->f_path, "");
66957+ seq_path(m, &file->f_path, "\n\\");
66958 } else if (mm) {
66959 pid_t tid = pid_of_stack(priv, vma, is_pid);
66960
66961diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
66962index a90d6d35..d08047c 100644
66963--- a/fs/proc/vmcore.c
66964+++ b/fs/proc/vmcore.c
66965@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
66966 nr_bytes = count;
66967
66968 /* If pfn is not ram, return zeros for sparse dump files */
66969- if (pfn_is_ram(pfn) == 0)
66970- memset(buf, 0, nr_bytes);
66971- else {
66972+ if (pfn_is_ram(pfn) == 0) {
66973+ if (userbuf) {
66974+ if (clear_user((char __force_user *)buf, nr_bytes))
66975+ return -EFAULT;
66976+ } else
66977+ memset(buf, 0, nr_bytes);
66978+ } else {
66979 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
66980 offset, userbuf);
66981 if (tmp < 0)
66982@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
66983 static int copy_to(void *target, void *src, size_t size, int userbuf)
66984 {
66985 if (userbuf) {
66986- if (copy_to_user((char __user *) target, src, size))
66987+ if (copy_to_user((char __force_user *) target, src, size))
66988 return -EFAULT;
66989 } else {
66990 memcpy(target, src, size);
66991@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
66992 if (*fpos < m->offset + m->size) {
66993 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
66994 start = m->paddr + *fpos - m->offset;
66995- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
66996+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
66997 if (tmp < 0)
66998 return tmp;
66999 buflen -= tsz;
67000@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67001 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67002 size_t buflen, loff_t *fpos)
67003 {
67004- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67005+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67006 }
67007
67008 /*
67009diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67010index d3fb2b6..43a8140 100644
67011--- a/fs/qnx6/qnx6.h
67012+++ b/fs/qnx6/qnx6.h
67013@@ -74,7 +74,7 @@ enum {
67014 BYTESEX_BE,
67015 };
67016
67017-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67018+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67019 {
67020 if (sbi->s_bytesex == BYTESEX_LE)
67021 return le64_to_cpu((__force __le64)n);
67022@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67023 return (__force __fs64)cpu_to_be64(n);
67024 }
67025
67026-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67027+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67028 {
67029 if (sbi->s_bytesex == BYTESEX_LE)
67030 return le32_to_cpu((__force __le32)n);
67031diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67032index bb2869f..d34ada8 100644
67033--- a/fs/quota/netlink.c
67034+++ b/fs/quota/netlink.c
67035@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67036 void quota_send_warning(struct kqid qid, dev_t dev,
67037 const char warntype)
67038 {
67039- static atomic_t seq;
67040+ static atomic_unchecked_t seq;
67041 struct sk_buff *skb;
67042 void *msg_head;
67043 int ret;
67044@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67045 "VFS: Not enough memory to send quota warning.\n");
67046 return;
67047 }
67048- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67049+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67050 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67051 if (!msg_head) {
67052 printk(KERN_ERR
67053diff --git a/fs/read_write.c b/fs/read_write.c
67054index c0805c93..d39f2eb 100644
67055--- a/fs/read_write.c
67056+++ b/fs/read_write.c
67057@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67058
67059 old_fs = get_fs();
67060 set_fs(get_ds());
67061- p = (__force const char __user *)buf;
67062+ p = (const char __force_user *)buf;
67063 if (count > MAX_RW_COUNT)
67064 count = MAX_RW_COUNT;
67065 if (file->f_op->write)
67066diff --git a/fs/readdir.c b/fs/readdir.c
67067index ced6791..936687b 100644
67068--- a/fs/readdir.c
67069+++ b/fs/readdir.c
67070@@ -18,6 +18,7 @@
67071 #include <linux/security.h>
67072 #include <linux/syscalls.h>
67073 #include <linux/unistd.h>
67074+#include <linux/namei.h>
67075
67076 #include <asm/uaccess.h>
67077
67078@@ -71,6 +72,7 @@ struct old_linux_dirent {
67079 struct readdir_callback {
67080 struct dir_context ctx;
67081 struct old_linux_dirent __user * dirent;
67082+ struct file * file;
67083 int result;
67084 };
67085
67086@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67087 buf->result = -EOVERFLOW;
67088 return -EOVERFLOW;
67089 }
67090+
67091+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67092+ return 0;
67093+
67094 buf->result++;
67095 dirent = buf->dirent;
67096 if (!access_ok(VERIFY_WRITE, dirent,
67097@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67098 if (!f.file)
67099 return -EBADF;
67100
67101+ buf.file = f.file;
67102 error = iterate_dir(f.file, &buf.ctx);
67103 if (buf.result)
67104 error = buf.result;
67105@@ -145,6 +152,7 @@ struct getdents_callback {
67106 struct dir_context ctx;
67107 struct linux_dirent __user * current_dir;
67108 struct linux_dirent __user * previous;
67109+ struct file * file;
67110 int count;
67111 int error;
67112 };
67113@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67114 buf->error = -EOVERFLOW;
67115 return -EOVERFLOW;
67116 }
67117+
67118+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67119+ return 0;
67120+
67121 dirent = buf->previous;
67122 if (dirent) {
67123 if (__put_user(offset, &dirent->d_off))
67124@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67125 if (!f.file)
67126 return -EBADF;
67127
67128+ buf.file = f.file;
67129 error = iterate_dir(f.file, &buf.ctx);
67130 if (error >= 0)
67131 error = buf.error;
67132@@ -230,6 +243,7 @@ struct getdents_callback64 {
67133 struct dir_context ctx;
67134 struct linux_dirent64 __user * current_dir;
67135 struct linux_dirent64 __user * previous;
67136+ struct file *file;
67137 int count;
67138 int error;
67139 };
67140@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67141 buf->error = -EINVAL; /* only used if we fail.. */
67142 if (reclen > buf->count)
67143 return -EINVAL;
67144+
67145+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67146+ return 0;
67147+
67148 dirent = buf->previous;
67149 if (dirent) {
67150 if (__put_user(offset, &dirent->d_off))
67151@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67152 if (!f.file)
67153 return -EBADF;
67154
67155+ buf.file = f.file;
67156 error = iterate_dir(f.file, &buf.ctx);
67157 if (error >= 0)
67158 error = buf.error;
67159diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67160index 9c02d96..6562c10 100644
67161--- a/fs/reiserfs/do_balan.c
67162+++ b/fs/reiserfs/do_balan.c
67163@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67164 return;
67165 }
67166
67167- atomic_inc(&fs_generation(tb->tb_sb));
67168+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67169 do_balance_starts(tb);
67170
67171 /*
67172diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67173index aca73dd..e3c558d 100644
67174--- a/fs/reiserfs/item_ops.c
67175+++ b/fs/reiserfs/item_ops.c
67176@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67177 }
67178
67179 static struct item_operations errcatch_ops = {
67180- errcatch_bytes_number,
67181- errcatch_decrement_key,
67182- errcatch_is_left_mergeable,
67183- errcatch_print_item,
67184- errcatch_check_item,
67185+ .bytes_number = errcatch_bytes_number,
67186+ .decrement_key = errcatch_decrement_key,
67187+ .is_left_mergeable = errcatch_is_left_mergeable,
67188+ .print_item = errcatch_print_item,
67189+ .check_item = errcatch_check_item,
67190
67191- errcatch_create_vi,
67192- errcatch_check_left,
67193- errcatch_check_right,
67194- errcatch_part_size,
67195- errcatch_unit_num,
67196- errcatch_print_vi
67197+ .create_vi = errcatch_create_vi,
67198+ .check_left = errcatch_check_left,
67199+ .check_right = errcatch_check_right,
67200+ .part_size = errcatch_part_size,
67201+ .unit_num = errcatch_unit_num,
67202+ .print_vi = errcatch_print_vi
67203 };
67204
67205 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
67206diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67207index 621b9f3..af527fd 100644
67208--- a/fs/reiserfs/procfs.c
67209+++ b/fs/reiserfs/procfs.c
67210@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67211 "SMALL_TAILS " : "NO_TAILS ",
67212 replay_only(sb) ? "REPLAY_ONLY " : "",
67213 convert_reiserfs(sb) ? "CONV " : "",
67214- atomic_read(&r->s_generation_counter),
67215+ atomic_read_unchecked(&r->s_generation_counter),
67216 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67217 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67218 SF(s_good_search_by_key_reada), SF(s_bmaps),
67219diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67220index bb79cdd..fcf49ef 100644
67221--- a/fs/reiserfs/reiserfs.h
67222+++ b/fs/reiserfs/reiserfs.h
67223@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67224 /* Comment? -Hans */
67225 wait_queue_head_t s_wait;
67226 /* increased by one every time the tree gets re-balanced */
67227- atomic_t s_generation_counter;
67228+ atomic_unchecked_t s_generation_counter;
67229
67230 /* File system properties. Currently holds on-disk FS format */
67231 unsigned long s_properties;
67232@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67233 #define REISERFS_USER_MEM 1 /* user memory mode */
67234
67235 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67236-#define get_generation(s) atomic_read (&fs_generation(s))
67237+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67238 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67239 #define __fs_changed(gen,s) (gen != get_generation (s))
67240 #define fs_changed(gen,s) \
67241diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67242index 71fbbe3..eff29ba 100644
67243--- a/fs/reiserfs/super.c
67244+++ b/fs/reiserfs/super.c
67245@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67246 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67247 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67248 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67249+#ifdef CONFIG_REISERFS_FS_XATTR
67250+ /* turn on user xattrs by default */
67251+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67252+#endif
67253 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67254 sbi->s_alloc_options.preallocmin = 0;
67255 /* Preallocate by 16 blocks (17-1) at once */
67256diff --git a/fs/select.c b/fs/select.c
67257index 467bb1c..cf9d65a 100644
67258--- a/fs/select.c
67259+++ b/fs/select.c
67260@@ -20,6 +20,7 @@
67261 #include <linux/export.h>
67262 #include <linux/slab.h>
67263 #include <linux/poll.h>
67264+#include <linux/security.h>
67265 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67266 #include <linux/file.h>
67267 #include <linux/fdtable.h>
67268@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67269 struct poll_list *walk = head;
67270 unsigned long todo = nfds;
67271
67272+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67273 if (nfds > rlimit(RLIMIT_NOFILE))
67274 return -EINVAL;
67275
67276diff --git a/fs/seq_file.c b/fs/seq_file.c
67277index dbf3a59..daf023f 100644
67278--- a/fs/seq_file.c
67279+++ b/fs/seq_file.c
67280@@ -12,6 +12,8 @@
67281 #include <linux/slab.h>
67282 #include <linux/cred.h>
67283 #include <linux/mm.h>
67284+#include <linux/sched.h>
67285+#include <linux/grsecurity.h>
67286
67287 #include <asm/uaccess.h>
67288 #include <asm/page.h>
67289@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67290
67291 static void *seq_buf_alloc(unsigned long size)
67292 {
67293- void *buf;
67294-
67295- /*
67296- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67297- * it's better to fall back to vmalloc() than to kill things.
67298- */
67299- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67300- if (!buf && size > PAGE_SIZE)
67301- buf = vmalloc(size);
67302- return buf;
67303+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67304 }
67305
67306 /**
67307@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67308 #ifdef CONFIG_USER_NS
67309 p->user_ns = file->f_cred->user_ns;
67310 #endif
67311+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67312+ p->exec_id = current->exec_id;
67313+#endif
67314
67315 /*
67316 * Wrappers around seq_open(e.g. swaps_open) need to be
67317@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67318 }
67319 EXPORT_SYMBOL(seq_open);
67320
67321+
67322+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67323+{
67324+ if (gr_proc_is_restricted())
67325+ return -EACCES;
67326+
67327+ return seq_open(file, op);
67328+}
67329+EXPORT_SYMBOL(seq_open_restrict);
67330+
67331 static int traverse(struct seq_file *m, loff_t offset)
67332 {
67333 loff_t pos = 0, index;
67334@@ -158,7 +164,7 @@ Eoverflow:
67335 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67336 {
67337 struct seq_file *m = file->private_data;
67338- size_t copied = 0;
67339+ ssize_t copied = 0;
67340 loff_t pos;
67341 size_t n;
67342 void *p;
67343@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67344 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67345 void *data)
67346 {
67347- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67348+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67349 int res = -ENOMEM;
67350
67351 if (op) {
67352@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67353 }
67354 EXPORT_SYMBOL(single_open_size);
67355
67356+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67357+ void *data)
67358+{
67359+ if (gr_proc_is_restricted())
67360+ return -EACCES;
67361+
67362+ return single_open(file, show, data);
67363+}
67364+EXPORT_SYMBOL(single_open_restrict);
67365+
67366+
67367 int single_release(struct inode *inode, struct file *file)
67368 {
67369 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
67370diff --git a/fs/splice.c b/fs/splice.c
67371index 75c6058..770d40c 100644
67372--- a/fs/splice.c
67373+++ b/fs/splice.c
67374@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67375 pipe_lock(pipe);
67376
67377 for (;;) {
67378- if (!pipe->readers) {
67379+ if (!atomic_read(&pipe->readers)) {
67380 send_sig(SIGPIPE, current, 0);
67381 if (!ret)
67382 ret = -EPIPE;
67383@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67384 page_nr++;
67385 ret += buf->len;
67386
67387- if (pipe->files)
67388+ if (atomic_read(&pipe->files))
67389 do_wakeup = 1;
67390
67391 if (!--spd->nr_pages)
67392@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67393 do_wakeup = 0;
67394 }
67395
67396- pipe->waiting_writers++;
67397+ atomic_inc(&pipe->waiting_writers);
67398 pipe_wait(pipe);
67399- pipe->waiting_writers--;
67400+ atomic_dec(&pipe->waiting_writers);
67401 }
67402
67403 pipe_unlock(pipe);
67404@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67405 old_fs = get_fs();
67406 set_fs(get_ds());
67407 /* The cast to a user pointer is valid due to the set_fs() */
67408- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67409+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67410 set_fs(old_fs);
67411
67412 return res;
67413@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67414 old_fs = get_fs();
67415 set_fs(get_ds());
67416 /* The cast to a user pointer is valid due to the set_fs() */
67417- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67418+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67419 set_fs(old_fs);
67420
67421 return res;
67422@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67423 goto err;
67424
67425 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67426- vec[i].iov_base = (void __user *) page_address(page);
67427+ vec[i].iov_base = (void __force_user *) page_address(page);
67428 vec[i].iov_len = this_len;
67429 spd.pages[i] = page;
67430 spd.nr_pages++;
67431@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67432 ops->release(pipe, buf);
67433 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67434 pipe->nrbufs--;
67435- if (pipe->files)
67436+ if (atomic_read(&pipe->files))
67437 sd->need_wakeup = true;
67438 }
67439
67440@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67441 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67442 {
67443 while (!pipe->nrbufs) {
67444- if (!pipe->writers)
67445+ if (!atomic_read(&pipe->writers))
67446 return 0;
67447
67448- if (!pipe->waiting_writers && sd->num_spliced)
67449+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67450 return 0;
67451
67452 if (sd->flags & SPLICE_F_NONBLOCK)
67453@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67454 ops->release(pipe, buf);
67455 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67456 pipe->nrbufs--;
67457- if (pipe->files)
67458+ if (atomic_read(&pipe->files))
67459 sd.need_wakeup = true;
67460 } else {
67461 buf->offset += ret;
67462@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67463 * out of the pipe right after the splice_to_pipe(). So set
67464 * PIPE_READERS appropriately.
67465 */
67466- pipe->readers = 1;
67467+ atomic_set(&pipe->readers, 1);
67468
67469 current->splice_pipe = pipe;
67470 }
67471@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67472
67473 partial[buffers].offset = off;
67474 partial[buffers].len = plen;
67475+ partial[buffers].private = 0;
67476
67477 off = 0;
67478 len -= plen;
67479@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67480 ret = -ERESTARTSYS;
67481 break;
67482 }
67483- if (!pipe->writers)
67484+ if (!atomic_read(&pipe->writers))
67485 break;
67486- if (!pipe->waiting_writers) {
67487+ if (!atomic_read(&pipe->waiting_writers)) {
67488 if (flags & SPLICE_F_NONBLOCK) {
67489 ret = -EAGAIN;
67490 break;
67491@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67492 pipe_lock(pipe);
67493
67494 while (pipe->nrbufs >= pipe->buffers) {
67495- if (!pipe->readers) {
67496+ if (!atomic_read(&pipe->readers)) {
67497 send_sig(SIGPIPE, current, 0);
67498 ret = -EPIPE;
67499 break;
67500@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67501 ret = -ERESTARTSYS;
67502 break;
67503 }
67504- pipe->waiting_writers++;
67505+ atomic_inc(&pipe->waiting_writers);
67506 pipe_wait(pipe);
67507- pipe->waiting_writers--;
67508+ atomic_dec(&pipe->waiting_writers);
67509 }
67510
67511 pipe_unlock(pipe);
67512@@ -1818,14 +1819,14 @@ retry:
67513 pipe_double_lock(ipipe, opipe);
67514
67515 do {
67516- if (!opipe->readers) {
67517+ if (!atomic_read(&opipe->readers)) {
67518 send_sig(SIGPIPE, current, 0);
67519 if (!ret)
67520 ret = -EPIPE;
67521 break;
67522 }
67523
67524- if (!ipipe->nrbufs && !ipipe->writers)
67525+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67526 break;
67527
67528 /*
67529@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67530 pipe_double_lock(ipipe, opipe);
67531
67532 do {
67533- if (!opipe->readers) {
67534+ if (!atomic_read(&opipe->readers)) {
67535 send_sig(SIGPIPE, current, 0);
67536 if (!ret)
67537 ret = -EPIPE;
67538@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67539 * return EAGAIN if we have the potential of some data in the
67540 * future, otherwise just return 0
67541 */
67542- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67543+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67544 ret = -EAGAIN;
67545
67546 pipe_unlock(ipipe);
67547diff --git a/fs/stat.c b/fs/stat.c
67548index ae0c3ce..9ee641c 100644
67549--- a/fs/stat.c
67550+++ b/fs/stat.c
67551@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67552 stat->gid = inode->i_gid;
67553 stat->rdev = inode->i_rdev;
67554 stat->size = i_size_read(inode);
67555- stat->atime = inode->i_atime;
67556- stat->mtime = inode->i_mtime;
67557+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67558+ stat->atime = inode->i_ctime;
67559+ stat->mtime = inode->i_ctime;
67560+ } else {
67561+ stat->atime = inode->i_atime;
67562+ stat->mtime = inode->i_mtime;
67563+ }
67564 stat->ctime = inode->i_ctime;
67565 stat->blksize = (1 << inode->i_blkbits);
67566 stat->blocks = inode->i_blocks;
67567@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67568 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67569 {
67570 struct inode *inode = path->dentry->d_inode;
67571+ int retval;
67572
67573- if (inode->i_op->getattr)
67574- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67575+ if (inode->i_op->getattr) {
67576+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67577+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67578+ stat->atime = stat->ctime;
67579+ stat->mtime = stat->ctime;
67580+ }
67581+ return retval;
67582+ }
67583
67584 generic_fillattr(inode, stat);
67585 return 0;
67586diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67587index 0b45ff4..edf9d3a 100644
67588--- a/fs/sysfs/dir.c
67589+++ b/fs/sysfs/dir.c
67590@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67591 kfree(buf);
67592 }
67593
67594+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67595+extern int grsec_enable_sysfs_restrict;
67596+#endif
67597+
67598 /**
67599 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
67600 * @kobj: object we're creating directory for
67601@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67602 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67603 {
67604 struct kernfs_node *parent, *kn;
67605+ const char *name;
67606+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67607+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67608+ const char *parent_name;
67609+#endif
67610
67611 BUG_ON(!kobj);
67612
67613+ name = kobject_name(kobj);
67614+
67615 if (kobj->parent)
67616 parent = kobj->parent->sd;
67617 else
67618@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67619 if (!parent)
67620 return -ENOENT;
67621
67622- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67623- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67624+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67625+ parent_name = parent->name;
67626+ mode = S_IRWXU;
67627+
67628+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67629+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67630+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67631+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67632+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67633+ if (!grsec_enable_sysfs_restrict)
67634+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67635+#endif
67636+
67637+ kn = kernfs_create_dir_ns(parent, name,
67638+ mode, kobj, ns);
67639 if (IS_ERR(kn)) {
67640 if (PTR_ERR(kn) == -EEXIST)
67641- sysfs_warn_dup(parent, kobject_name(kobj));
67642+ sysfs_warn_dup(parent, name);
67643 return PTR_ERR(kn);
67644 }
67645
67646diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67647index 69d4889..a810bd4 100644
67648--- a/fs/sysv/sysv.h
67649+++ b/fs/sysv/sysv.h
67650@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67651 #endif
67652 }
67653
67654-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67655+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67656 {
67657 if (sbi->s_bytesex == BYTESEX_PDP)
67658 return PDP_swab((__force __u32)n);
67659diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67660index fb08b0c..65fcc7e 100644
67661--- a/fs/ubifs/io.c
67662+++ b/fs/ubifs/io.c
67663@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67664 return err;
67665 }
67666
67667-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67668+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67669 {
67670 int err;
67671
67672diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67673index c175b4d..8f36a16 100644
67674--- a/fs/udf/misc.c
67675+++ b/fs/udf/misc.c
67676@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67677
67678 u8 udf_tag_checksum(const struct tag *t)
67679 {
67680- u8 *data = (u8 *)t;
67681+ const u8 *data = (const u8 *)t;
67682 u8 checksum = 0;
67683 int i;
67684 for (i = 0; i < sizeof(struct tag); ++i)
67685diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67686index 8d974c4..b82f6ec 100644
67687--- a/fs/ufs/swab.h
67688+++ b/fs/ufs/swab.h
67689@@ -22,7 +22,7 @@ enum {
67690 BYTESEX_BE
67691 };
67692
67693-static inline u64
67694+static inline u64 __intentional_overflow(-1)
67695 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67696 {
67697 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67698@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67699 return (__force __fs64)cpu_to_be64(n);
67700 }
67701
67702-static inline u32
67703+static inline u32 __intentional_overflow(-1)
67704 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67705 {
67706 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67707diff --git a/fs/utimes.c b/fs/utimes.c
67708index aa138d6..5f3a811 100644
67709--- a/fs/utimes.c
67710+++ b/fs/utimes.c
67711@@ -1,6 +1,7 @@
67712 #include <linux/compiler.h>
67713 #include <linux/file.h>
67714 #include <linux/fs.h>
67715+#include <linux/security.h>
67716 #include <linux/linkage.h>
67717 #include <linux/mount.h>
67718 #include <linux/namei.h>
67719@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
67720 }
67721 }
67722 retry_deleg:
67723+
67724+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
67725+ error = -EACCES;
67726+ goto mnt_drop_write_and_out;
67727+ }
67728+
67729 mutex_lock(&inode->i_mutex);
67730 error = notify_change(path->dentry, &newattrs, &delegated_inode);
67731 mutex_unlock(&inode->i_mutex);
67732diff --git a/fs/xattr.c b/fs/xattr.c
67733index 4ef6985..a6cd6567 100644
67734--- a/fs/xattr.c
67735+++ b/fs/xattr.c
67736@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
67737 return rc;
67738 }
67739
67740+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
67741+ssize_t
67742+pax_getxattr(struct dentry *dentry, void *value, size_t size)
67743+{
67744+ struct inode *inode = dentry->d_inode;
67745+ ssize_t error;
67746+
67747+ error = inode_permission(inode, MAY_EXEC);
67748+ if (error)
67749+ return error;
67750+
67751+ if (inode->i_op->getxattr)
67752+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
67753+ else
67754+ error = -EOPNOTSUPP;
67755+
67756+ return error;
67757+}
67758+EXPORT_SYMBOL(pax_getxattr);
67759+#endif
67760+
67761 ssize_t
67762 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
67763 {
67764@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
67765 * Extended attribute SET operations
67766 */
67767 static long
67768-setxattr(struct dentry *d, const char __user *name, const void __user *value,
67769+setxattr(struct path *path, const char __user *name, const void __user *value,
67770 size_t size, int flags)
67771 {
67772 int error;
67773@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
67774 posix_acl_fix_xattr_from_user(kvalue, size);
67775 }
67776
67777- error = vfs_setxattr(d, kname, kvalue, size, flags);
67778+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
67779+ error = -EACCES;
67780+ goto out;
67781+ }
67782+
67783+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
67784 out:
67785 if (vvalue)
67786 vfree(vvalue);
67787@@ -376,7 +402,7 @@ retry:
67788 return error;
67789 error = mnt_want_write(path.mnt);
67790 if (!error) {
67791- error = setxattr(path.dentry, name, value, size, flags);
67792+ error = setxattr(&path, name, value, size, flags);
67793 mnt_drop_write(path.mnt);
67794 }
67795 path_put(&path);
67796@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
67797 audit_file(f.file);
67798 error = mnt_want_write_file(f.file);
67799 if (!error) {
67800- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
67801+ error = setxattr(&f.file->f_path, name, value, size, flags);
67802 mnt_drop_write_file(f.file);
67803 }
67804 fdput(f);
67805@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
67806 * Extended attribute REMOVE operations
67807 */
67808 static long
67809-removexattr(struct dentry *d, const char __user *name)
67810+removexattr(struct path *path, const char __user *name)
67811 {
67812 int error;
67813 char kname[XATTR_NAME_MAX + 1];
67814@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
67815 if (error < 0)
67816 return error;
67817
67818- return vfs_removexattr(d, kname);
67819+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
67820+ return -EACCES;
67821+
67822+ return vfs_removexattr(path->dentry, kname);
67823 }
67824
67825 static int path_removexattr(const char __user *pathname,
67826@@ -623,7 +652,7 @@ retry:
67827 return error;
67828 error = mnt_want_write(path.mnt);
67829 if (!error) {
67830- error = removexattr(path.dentry, name);
67831+ error = removexattr(&path, name);
67832 mnt_drop_write(path.mnt);
67833 }
67834 path_put(&path);
67835@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
67836 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
67837 {
67838 struct fd f = fdget(fd);
67839+ struct path *path;
67840 int error = -EBADF;
67841
67842 if (!f.file)
67843 return error;
67844+ path = &f.file->f_path;
67845 audit_file(f.file);
67846 error = mnt_want_write_file(f.file);
67847 if (!error) {
67848- error = removexattr(f.file->f_path.dentry, name);
67849+ error = removexattr(path, name);
67850 mnt_drop_write_file(f.file);
67851 }
67852 fdput(f);
67853diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
67854index 4e20fe7..6d1a55a 100644
67855--- a/fs/xfs/libxfs/xfs_bmap.c
67856+++ b/fs/xfs/libxfs/xfs_bmap.c
67857@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
67858
67859 #else
67860 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
67861-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
67862+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
67863 #endif /* DEBUG */
67864
67865 /*
67866diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
67867index 098cd78..724d3f8 100644
67868--- a/fs/xfs/xfs_dir2_readdir.c
67869+++ b/fs/xfs/xfs_dir2_readdir.c
67870@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
67871 ino = dp->d_ops->sf_get_ino(sfp, sfep);
67872 filetype = dp->d_ops->sf_get_ftype(sfep);
67873 ctx->pos = off & 0x7fffffff;
67874- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67875+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
67876+ char name[sfep->namelen];
67877+ memcpy(name, sfep->name, sfep->namelen);
67878+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
67879+ return 0;
67880+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
67881 xfs_dir3_get_dtype(dp->i_mount, filetype)))
67882 return 0;
67883 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
67884diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
67885index a183198..6b52f52 100644
67886--- a/fs/xfs/xfs_ioctl.c
67887+++ b/fs/xfs/xfs_ioctl.c
67888@@ -119,7 +119,7 @@ xfs_find_handle(
67889 }
67890
67891 error = -EFAULT;
67892- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
67893+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
67894 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
67895 goto out_put;
67896
67897diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
67898index c31d2c2..6ec8f62 100644
67899--- a/fs/xfs/xfs_linux.h
67900+++ b/fs/xfs/xfs_linux.h
67901@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
67902 * of the compiler which do not like us using do_div in the middle
67903 * of large functions.
67904 */
67905-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67906+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67907 {
67908 __u32 mod;
67909
67910@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
67911 return 0;
67912 }
67913 #else
67914-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
67915+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
67916 {
67917 __u32 mod;
67918
67919diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
67920new file mode 100644
67921index 0000000..31f8fe4
67922--- /dev/null
67923+++ b/grsecurity/Kconfig
67924@@ -0,0 +1,1182 @@
67925+#
67926+# grecurity configuration
67927+#
67928+menu "Memory Protections"
67929+depends on GRKERNSEC
67930+
67931+config GRKERNSEC_KMEM
67932+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
67933+ default y if GRKERNSEC_CONFIG_AUTO
67934+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
67935+ help
67936+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
67937+ be written to or read from to modify or leak the contents of the running
67938+ kernel. /dev/port will also not be allowed to be opened, writing to
67939+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
67940+ If you have module support disabled, enabling this will close up several
67941+ ways that are currently used to insert malicious code into the running
67942+ kernel.
67943+
67944+ Even with this feature enabled, we still highly recommend that
67945+ you use the RBAC system, as it is still possible for an attacker to
67946+ modify the running kernel through other more obscure methods.
67947+
67948+ It is highly recommended that you say Y here if you meet all the
67949+ conditions above.
67950+
67951+config GRKERNSEC_VM86
67952+ bool "Restrict VM86 mode"
67953+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67954+ depends on X86_32
67955+
67956+ help
67957+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
67958+ make use of a special execution mode on 32bit x86 processors called
67959+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
67960+ video cards and will still work with this option enabled. The purpose
67961+ of the option is to prevent exploitation of emulation errors in
67962+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
67963+ Nearly all users should be able to enable this option.
67964+
67965+config GRKERNSEC_IO
67966+ bool "Disable privileged I/O"
67967+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
67968+ depends on X86
67969+ select RTC_CLASS
67970+ select RTC_INTF_DEV
67971+ select RTC_DRV_CMOS
67972+
67973+ help
67974+ If you say Y here, all ioperm and iopl calls will return an error.
67975+ Ioperm and iopl can be used to modify the running kernel.
67976+ Unfortunately, some programs need this access to operate properly,
67977+ the most notable of which are XFree86 and hwclock. hwclock can be
67978+ remedied by having RTC support in the kernel, so real-time
67979+ clock support is enabled if this option is enabled, to ensure
67980+ that hwclock operates correctly. If hwclock still does not work,
67981+ either update udev or symlink /dev/rtc to /dev/rtc0.
67982+
67983+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
67984+ you may not be able to boot into a graphical environment with this
67985+ option enabled. In this case, you should use the RBAC system instead.
67986+
67987+config GRKERNSEC_BPF_HARDEN
67988+ bool "Harden BPF interpreter"
67989+ default y if GRKERNSEC_CONFIG_AUTO
67990+ help
67991+ Unlike previous versions of grsecurity that hardened both the BPF
67992+ interpreted code against corruption at rest as well as the JIT code
67993+ against JIT-spray attacks and attacker-controlled immediate values
67994+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
67995+ and will ensure the interpreted code is read-only at rest. This feature
67996+ may be removed at a later time when eBPF stabilizes to entirely revert
67997+ back to the more secure pre-3.16 BPF interpreter/JIT.
67998+
67999+ If you're using KERNEXEC, it's recommended that you enable this option
68000+ to supplement the hardening of the kernel.
68001+
68002+config GRKERNSEC_PERF_HARDEN
68003+ bool "Disable unprivileged PERF_EVENTS usage by default"
68004+ default y if GRKERNSEC_CONFIG_AUTO
68005+ depends on PERF_EVENTS
68006+ help
68007+ If you say Y here, the range of acceptable values for the
68008+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68009+ default to a new value: 3. When the sysctl is set to this value, no
68010+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68011+
68012+ Though PERF_EVENTS can be used legitimately for performance monitoring
68013+ and low-level application profiling, it is forced on regardless of
68014+ configuration, has been at fault for several vulnerabilities, and
68015+ creates new opportunities for side channels and other information leaks.
68016+
68017+ This feature puts PERF_EVENTS into a secure default state and permits
68018+ the administrator to change out of it temporarily if unprivileged
68019+ application profiling is needed.
68020+
68021+config GRKERNSEC_RAND_THREADSTACK
68022+ bool "Insert random gaps between thread stacks"
68023+ default y if GRKERNSEC_CONFIG_AUTO
68024+ depends on PAX_RANDMMAP && !PPC
68025+ help
68026+ If you say Y here, a random-sized gap will be enforced between allocated
68027+ thread stacks. Glibc's NPTL and other threading libraries that
68028+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68029+ The implementation currently provides 8 bits of entropy for the gap.
68030+
68031+ Many distributions do not compile threaded remote services with the
68032+ -fstack-check argument to GCC, causing the variable-sized stack-based
68033+ allocator, alloca(), to not probe the stack on allocation. This
68034+ permits an unbounded alloca() to skip over any guard page and potentially
68035+ modify another thread's stack reliably. An enforced random gap
68036+ reduces the reliability of such an attack and increases the chance
68037+ that such a read/write to another thread's stack instead lands in
68038+ an unmapped area, causing a crash and triggering grsecurity's
68039+ anti-bruteforcing logic.
68040+
68041+config GRKERNSEC_PROC_MEMMAP
68042+ bool "Harden ASLR against information leaks and entropy reduction"
68043+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68044+ depends on PAX_NOEXEC || PAX_ASLR
68045+ help
68046+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68047+ give no information about the addresses of its mappings if
68048+ PaX features that rely on random addresses are enabled on the task.
68049+ In addition to sanitizing this information and disabling other
68050+ dangerous sources of information, this option causes reads of sensitive
68051+ /proc/<pid> entries where the file descriptor was opened in a different
68052+ task than the one performing the read to be denied. Such attempts are logged.
68053+ This option also limits argv/env strings for suid/sgid binaries
68054+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68055+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68056+ binaries to prevent alternative mmap layouts from being abused.
68057+
68058+ If you use PaX it is essential that you say Y here as it closes up
68059+ several holes that make full ASLR useless locally.
68060+
68061+
68062+config GRKERNSEC_KSTACKOVERFLOW
68063+ bool "Prevent kernel stack overflows"
68064+ default y if GRKERNSEC_CONFIG_AUTO
68065+ depends on !IA64 && 64BIT
68066+ help
68067+ If you say Y here, the kernel's process stacks will be allocated
68068+ with vmalloc instead of the kernel's default allocator. This
68069+ introduces guard pages that in combination with the alloca checking
68070+ of the STACKLEAK feature prevents all forms of kernel process stack
68071+ overflow abuse. Note that this is different from kernel stack
68072+ buffer overflows.
68073+
68074+config GRKERNSEC_BRUTE
68075+ bool "Deter exploit bruteforcing"
68076+ default y if GRKERNSEC_CONFIG_AUTO
68077+ help
68078+ If you say Y here, attempts to bruteforce exploits against forking
68079+ daemons such as apache or sshd, as well as against suid/sgid binaries
68080+ will be deterred. When a child of a forking daemon is killed by PaX
68081+ or crashes due to an illegal instruction or other suspicious signal,
68082+ the parent process will be delayed 30 seconds upon every subsequent
68083+ fork until the administrator is able to assess the situation and
68084+ restart the daemon.
68085+ In the suid/sgid case, the attempt is logged, the user has all their
68086+ existing instances of the suid/sgid binary terminated and will
68087+ be unable to execute any suid/sgid binaries for 15 minutes.
68088+
68089+ It is recommended that you also enable signal logging in the auditing
68090+ section so that logs are generated when a process triggers a suspicious
68091+ signal.
68092+ If the sysctl option is enabled, a sysctl option with name
68093+ "deter_bruteforce" is created.
68094+
68095+config GRKERNSEC_MODHARDEN
68096+ bool "Harden module auto-loading"
68097+ default y if GRKERNSEC_CONFIG_AUTO
68098+ depends on MODULES
68099+ help
68100+ If you say Y here, module auto-loading in response to use of some
68101+ feature implemented by an unloaded module will be restricted to
68102+ root users. Enabling this option helps defend against attacks
68103+ by unprivileged users who abuse the auto-loading behavior to
68104+ cause a vulnerable module to load that is then exploited.
68105+
68106+ If this option prevents a legitimate use of auto-loading for a
68107+ non-root user, the administrator can execute modprobe manually
68108+ with the exact name of the module mentioned in the alert log.
68109+ Alternatively, the administrator can add the module to the list
68110+ of modules loaded at boot by modifying init scripts.
68111+
68112+ Modification of init scripts will most likely be needed on
68113+ Ubuntu servers with encrypted home directory support enabled,
68114+ as the first non-root user logging in will cause the ecb(aes),
68115+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68116+
68117+config GRKERNSEC_HIDESYM
68118+ bool "Hide kernel symbols"
68119+ default y if GRKERNSEC_CONFIG_AUTO
68120+ select PAX_USERCOPY_SLABS
68121+ help
68122+ If you say Y here, getting information on loaded modules, and
68123+ displaying all kernel symbols through a syscall will be restricted
68124+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68125+ /proc/kallsyms will be restricted to the root user. The RBAC
68126+ system can hide that entry even from root.
68127+
68128+ This option also prevents leaking of kernel addresses through
68129+ several /proc entries.
68130+
68131+ Note that this option is only effective provided the following
68132+ conditions are met:
68133+ 1) The kernel using grsecurity is not precompiled by some distribution
68134+ 2) You have also enabled GRKERNSEC_DMESG
68135+ 3) You are using the RBAC system and hiding other files such as your
68136+ kernel image and System.map. Alternatively, enabling this option
68137+ causes the permissions on /boot, /lib/modules, and the kernel
68138+ source directory to change at compile time to prevent
68139+ reading by non-root users.
68140+ If the above conditions are met, this option will aid in providing a
68141+ useful protection against local kernel exploitation of overflows
68142+ and arbitrary read/write vulnerabilities.
68143+
68144+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68145+ in addition to this feature.
68146+
68147+config GRKERNSEC_RANDSTRUCT
68148+ bool "Randomize layout of sensitive kernel structures"
68149+ default y if GRKERNSEC_CONFIG_AUTO
68150+ select GRKERNSEC_HIDESYM
68151+ select MODVERSIONS if MODULES
68152+ help
68153+ If you say Y here, the layouts of a number of sensitive kernel
68154+ structures (task, fs, cred, etc) and all structures composed entirely
68155+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68156+ This can introduce the requirement of an additional infoleak
68157+ vulnerability for exploits targeting these structure types.
68158+
68159+ Enabling this feature will introduce some performance impact, slightly
68160+ increase memory usage, and prevent the use of forensic tools like
68161+ Volatility against the system (unless the kernel source tree isn't
68162+ cleaned after kernel installation).
68163+
68164+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68165+ It remains after a make clean to allow for external modules to be compiled
68166+ with the existing seed and will be removed by a make mrproper or
68167+ make distclean.
68168+
68169+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68170+ to install the supporting headers explicitly in addition to the normal
68171+ gcc package.
68172+
68173+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68174+ bool "Use cacheline-aware structure randomization"
68175+ depends on GRKERNSEC_RANDSTRUCT
68176+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68177+ help
68178+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68179+ at restricting randomization to cacheline-sized groups of elements. It
68180+ will further not randomize bitfields in structures. This reduces the
68181+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68182+
68183+config GRKERNSEC_KERN_LOCKOUT
68184+ bool "Active kernel exploit response"
68185+ default y if GRKERNSEC_CONFIG_AUTO
68186+ depends on X86 || ARM || PPC || SPARC
68187+ help
68188+ If you say Y here, when a PaX alert is triggered due to suspicious
68189+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68190+ or an OOPS occurs due to bad memory accesses, instead of just
68191+ terminating the offending process (and potentially allowing
68192+ a subsequent exploit from the same user), we will take one of two
68193+ actions:
68194+ If the user was root, we will panic the system
68195+ If the user was non-root, we will log the attempt, terminate
68196+ all processes owned by the user, then prevent them from creating
68197+ any new processes until the system is restarted
68198+ This deters repeated kernel exploitation/bruteforcing attempts
68199+ and is useful for later forensics.
68200+
68201+config GRKERNSEC_OLD_ARM_USERLAND
68202+ bool "Old ARM userland compatibility"
68203+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68204+ help
68205+ If you say Y here, stubs of executable code to perform such operations
68206+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68207+ table. This is unfortunately needed for old ARM userland meant to run
68208+ across a wide range of processors. Without this option enabled,
68209+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68210+ which is enough for Linaro userlands or other userlands designed for v6
68211+ and newer ARM CPUs. It's recommended that you try without this option enabled
68212+ first, and only enable it if your userland does not boot (it will likely fail
68213+ at init time).
68214+
68215+endmenu
68216+menu "Role Based Access Control Options"
68217+depends on GRKERNSEC
68218+
68219+config GRKERNSEC_RBAC_DEBUG
68220+ bool
68221+
68222+config GRKERNSEC_NO_RBAC
68223+ bool "Disable RBAC system"
68224+ help
68225+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68226+ preventing the RBAC system from being enabled. You should only say Y
68227+ here if you have no intention of using the RBAC system, so as to prevent
68228+ an attacker with root access from misusing the RBAC system to hide files
68229+ and processes when loadable module support and /dev/[k]mem have been
68230+ locked down.
68231+
68232+config GRKERNSEC_ACL_HIDEKERN
68233+ bool "Hide kernel processes"
68234+ help
68235+ If you say Y here, all kernel threads will be hidden to all
68236+ processes but those whose subject has the "view hidden processes"
68237+ flag.
68238+
68239+config GRKERNSEC_ACL_MAXTRIES
68240+ int "Maximum tries before password lockout"
68241+ default 3
68242+ help
68243+ This option enforces the maximum number of times a user can attempt
68244+ to authorize themselves with the grsecurity RBAC system before being
68245+ denied the ability to attempt authorization again for a specified time.
68246+ The lower the number, the harder it will be to brute-force a password.
68247+
68248+config GRKERNSEC_ACL_TIMEOUT
68249+ int "Time to wait after max password tries, in seconds"
68250+ default 30
68251+ help
68252+ This option specifies the time the user must wait after attempting to
68253+ authorize to the RBAC system with the maximum number of invalid
68254+ passwords. The higher the number, the harder it will be to brute-force
68255+ a password.
68256+
68257+endmenu
68258+menu "Filesystem Protections"
68259+depends on GRKERNSEC
68260+
68261+config GRKERNSEC_PROC
68262+ bool "Proc restrictions"
68263+ default y if GRKERNSEC_CONFIG_AUTO
68264+ help
68265+ If you say Y here, the permissions of the /proc filesystem
68266+ will be altered to enhance system security and privacy. You MUST
68267+ choose either a user only restriction or a user and group restriction.
68268+ Depending upon the option you choose, you can either restrict users to
68269+ see only the processes they themselves run, or choose a group that can
68270+ view all processes and files normally restricted to root if you choose
68271+ the "restrict to user only" option. NOTE: If you're running identd or
68272+ ntpd as a non-root user, you will have to run it as the group you
68273+ specify here.
68274+
68275+config GRKERNSEC_PROC_USER
68276+ bool "Restrict /proc to user only"
68277+ depends on GRKERNSEC_PROC
68278+ help
68279+ If you say Y here, non-root users will only be able to view their own
68280+ processes, and restricts them from viewing network-related information,
68281+ and viewing kernel symbol and module information.
68282+
68283+config GRKERNSEC_PROC_USERGROUP
68284+ bool "Allow special group"
68285+ default y if GRKERNSEC_CONFIG_AUTO
68286+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68287+ help
68288+ If you say Y here, you will be able to select a group that will be
68289+ able to view all processes and network-related information. If you've
68290+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68291+ remain hidden. This option is useful if you want to run identd as
68292+ a non-root user. The group you select may also be chosen at boot time
68293+ via "grsec_proc_gid=" on the kernel commandline.
68294+
68295+config GRKERNSEC_PROC_GID
68296+ int "GID for special group"
68297+ depends on GRKERNSEC_PROC_USERGROUP
68298+ default 1001
68299+
68300+config GRKERNSEC_PROC_ADD
68301+ bool "Additional restrictions"
68302+ default y if GRKERNSEC_CONFIG_AUTO
68303+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68304+ help
68305+ If you say Y here, additional restrictions will be placed on
68306+ /proc that keep normal users from viewing device information and
68307+ slabinfo information that could be useful for exploits.
68308+
68309+config GRKERNSEC_LINK
68310+ bool "Linking restrictions"
68311+ default y if GRKERNSEC_CONFIG_AUTO
68312+ help
68313+ If you say Y here, /tmp race exploits will be prevented, since users
68314+ will no longer be able to follow symlinks owned by other users in
68315+ world-writable +t directories (e.g. /tmp), unless the owner of the
68316+ symlink is the owner of the directory. Users will also not be
68317+ able to hardlink to files they do not own. If the sysctl option is
68318+ enabled, a sysctl option with name "linking_restrictions" is created.
68319+
68320+config GRKERNSEC_SYMLINKOWN
68321+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68322+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68323+ help
68324+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68325+ that prevents it from being used as a security feature. As Apache
68326+ verifies the symlink by performing a stat() against the target of
68327+ the symlink before it is followed, an attacker can setup a symlink
68328+ to point to a same-owned file, then replace the symlink with one
68329+ that targets another user's file just after Apache "validates" the
68330+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68331+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68332+ will be in place for the group you specify. If the sysctl option
68333+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68334+ created.
68335+
68336+config GRKERNSEC_SYMLINKOWN_GID
68337+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68338+ depends on GRKERNSEC_SYMLINKOWN
68339+ default 1006
68340+ help
68341+ Setting this GID determines what group kernel-enforced
68342+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68343+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68344+
68345+config GRKERNSEC_FIFO
68346+ bool "FIFO restrictions"
68347+ default y if GRKERNSEC_CONFIG_AUTO
68348+ help
68349+ If you say Y here, users will not be able to write to FIFOs they don't
68350+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68351+ the FIFO is the same owner of the directory it's held in. If the sysctl
68352+ option is enabled, a sysctl option with name "fifo_restrictions" is
68353+ created.
68354+
68355+config GRKERNSEC_SYSFS_RESTRICT
68356+ bool "Sysfs/debugfs restriction"
68357+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68358+ depends on SYSFS
68359+ help
68360+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68361+ any filesystem normally mounted under it (e.g. debugfs) will be
68362+ mostly accessible only by root. These filesystems generally provide access
68363+ to hardware and debug information that isn't appropriate for unprivileged
68364+ users of the system. Sysfs and debugfs have also become a large source
68365+ of new vulnerabilities, ranging from infoleaks to local compromise.
68366+ There has been very little oversight with an eye toward security involved
68367+ in adding new exporters of information to these filesystems, so their
68368+ use is discouraged.
68369+ For reasons of compatibility, a few directories have been whitelisted
68370+ for access by non-root users:
68371+ /sys/fs/selinux
68372+ /sys/fs/fuse
68373+ /sys/devices/system/cpu
68374+
68375+config GRKERNSEC_ROFS
68376+ bool "Runtime read-only mount protection"
68377+ depends on SYSCTL
68378+ help
68379+ If you say Y here, a sysctl option with name "romount_protect" will
68380+ be created. By setting this option to 1 at runtime, filesystems
68381+ will be protected in the following ways:
68382+ * No new writable mounts will be allowed
68383+ * Existing read-only mounts won't be able to be remounted read/write
68384+ * Write operations will be denied on all block devices
68385+ This option acts independently of grsec_lock: once it is set to 1,
68386+ it cannot be turned off. Therefore, please be mindful of the resulting
68387+ behavior if this option is enabled in an init script on a read-only
68388+ filesystem.
68389+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68390+ and GRKERNSEC_IO should be enabled and module loading disabled via
68391+ config or at runtime.
68392+ This feature is mainly intended for secure embedded systems.
68393+
68394+
68395+config GRKERNSEC_DEVICE_SIDECHANNEL
68396+ bool "Eliminate stat/notify-based device sidechannels"
68397+ default y if GRKERNSEC_CONFIG_AUTO
68398+ help
68399+ If you say Y here, timing analyses on block or character
68400+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68401+ will be thwarted for unprivileged users. If a process without
68402+ CAP_MKNOD stats such a device, the last access and last modify times
68403+ will match the device's create time. No access or modify events
68404+ will be triggered through inotify/dnotify/fanotify for such devices.
68405+ This feature will prevent attacks that may at a minimum
68406+ allow an attacker to determine the administrator's password length.
68407+
68408+config GRKERNSEC_CHROOT
68409+ bool "Chroot jail restrictions"
68410+ default y if GRKERNSEC_CONFIG_AUTO
68411+ help
68412+ If you say Y here, you will be able to choose several options that will
68413+ make breaking out of a chrooted jail much more difficult. If you
68414+ encounter no software incompatibilities with the following options, it
68415+ is recommended that you enable each one.
68416+
68417+ Note that the chroot restrictions are not intended to apply to "chroots"
68418+ to directories that are simple bind mounts of the global root filesystem.
68419+ For several other reasons, a user shouldn't expect any significant
68420+ security by performing such a chroot.
68421+
68422+config GRKERNSEC_CHROOT_MOUNT
68423+ bool "Deny mounts"
68424+ default y if GRKERNSEC_CONFIG_AUTO
68425+ depends on GRKERNSEC_CHROOT
68426+ help
68427+ If you say Y here, processes inside a chroot will not be able to
68428+ mount or remount filesystems. If the sysctl option is enabled, a
68429+ sysctl option with name "chroot_deny_mount" is created.
68430+
68431+config GRKERNSEC_CHROOT_DOUBLE
68432+ bool "Deny double-chroots"
68433+ default y if GRKERNSEC_CONFIG_AUTO
68434+ depends on GRKERNSEC_CHROOT
68435+ help
68436+ If you say Y here, processes inside a chroot will not be able to chroot
68437+ again outside the chroot. This is a widely used method of breaking
68438+ out of a chroot jail and should not be allowed. If the sysctl
68439+ option is enabled, a sysctl option with name
68440+ "chroot_deny_chroot" is created.
68441+
68442+config GRKERNSEC_CHROOT_PIVOT
68443+ bool "Deny pivot_root in chroot"
68444+ default y if GRKERNSEC_CONFIG_AUTO
68445+ depends on GRKERNSEC_CHROOT
68446+ help
68447+ If you say Y here, processes inside a chroot will not be able to use
68448+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68449+ works similar to chroot in that it changes the root filesystem. This
68450+ function could be misused in a chrooted process to attempt to break out
68451+ of the chroot, and therefore should not be allowed. If the sysctl
68452+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68453+ created.
68454+
68455+config GRKERNSEC_CHROOT_CHDIR
68456+ bool "Enforce chdir(\"/\") on all chroots"
68457+ default y if GRKERNSEC_CONFIG_AUTO
68458+ depends on GRKERNSEC_CHROOT
68459+ help
68460+ If you say Y here, the current working directory of all newly-chrooted
68461+ applications will be set to the root directory of the chroot.
68462+ The man page on chroot(2) states:
68463+ Note that this call does not change the current working
68464+ directory, so that `.' can be outside the tree rooted at
68465+ `/'. In particular, the super-user can escape from a
68466+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68467+
68468+ It is recommended that you say Y here, since it's not known to break
68469+ any software. If the sysctl option is enabled, a sysctl option with
68470+ name "chroot_enforce_chdir" is created.
68471+
68472+config GRKERNSEC_CHROOT_CHMOD
68473+ bool "Deny (f)chmod +s"
68474+ default y if GRKERNSEC_CONFIG_AUTO
68475+ depends on GRKERNSEC_CHROOT
68476+ help
68477+ If you say Y here, processes inside a chroot will not be able to chmod
68478+ or fchmod files to make them have suid or sgid bits. This protects
68479+ against another published method of breaking a chroot. If the sysctl
68480+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68481+ created.
68482+
68483+config GRKERNSEC_CHROOT_FCHDIR
68484+ bool "Deny fchdir and fhandle out of chroot"
68485+ default y if GRKERNSEC_CONFIG_AUTO
68486+ depends on GRKERNSEC_CHROOT
68487+ help
68488+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68489+ to a file descriptor of the chrooting process that points to a directory
68490+ outside the filesystem will be stopped. Additionally, this option prevents
68491+ use of the recently-created syscall for opening files by a guessable "file
68492+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68493+ with name "chroot_deny_fchdir" is created.
68494+
68495+config GRKERNSEC_CHROOT_MKNOD
68496+ bool "Deny mknod"
68497+ default y if GRKERNSEC_CONFIG_AUTO
68498+ depends on GRKERNSEC_CHROOT
68499+ help
68500+ If you say Y here, processes inside a chroot will not be allowed to
68501+ mknod. The problem with using mknod inside a chroot is that it
68502+ would allow an attacker to create a device entry that is the same
68503+ as one on the physical root of your system, which could range from
68504+ anything from the console device to a device for your harddrive (which
68505+ they could then use to wipe the drive or steal data). It is recommended
68506+ that you say Y here, unless you run into software incompatibilities.
68507+ If the sysctl option is enabled, a sysctl option with name
68508+ "chroot_deny_mknod" is created.
68509+
68510+config GRKERNSEC_CHROOT_SHMAT
68511+ bool "Deny shmat() out of chroot"
68512+ default y if GRKERNSEC_CONFIG_AUTO
68513+ depends on GRKERNSEC_CHROOT
68514+ help
68515+ If you say Y here, processes inside a chroot will not be able to attach
68516+ to shared memory segments that were created outside of the chroot jail.
68517+ It is recommended that you say Y here. If the sysctl option is enabled,
68518+ a sysctl option with name "chroot_deny_shmat" is created.
68519+
68520+config GRKERNSEC_CHROOT_UNIX
68521+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68522+ default y if GRKERNSEC_CONFIG_AUTO
68523+ depends on GRKERNSEC_CHROOT
68524+ help
68525+ If you say Y here, processes inside a chroot will not be able to
68526+ connect to abstract (meaning not belonging to a filesystem) Unix
68527+ domain sockets that were bound outside of a chroot. It is recommended
68528+ that you say Y here. If the sysctl option is enabled, a sysctl option
68529+ with name "chroot_deny_unix" is created.
68530+
68531+config GRKERNSEC_CHROOT_FINDTASK
68532+ bool "Protect outside processes"
68533+ default y if GRKERNSEC_CONFIG_AUTO
68534+ depends on GRKERNSEC_CHROOT
68535+ help
68536+ If you say Y here, processes inside a chroot will not be able to
68537+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68538+ getsid, or view any process outside of the chroot. If the sysctl
68539+ option is enabled, a sysctl option with name "chroot_findtask" is
68540+ created.
68541+
68542+config GRKERNSEC_CHROOT_NICE
68543+ bool "Restrict priority changes"
68544+ default y if GRKERNSEC_CONFIG_AUTO
68545+ depends on GRKERNSEC_CHROOT
68546+ help
68547+ If you say Y here, processes inside a chroot will not be able to raise
68548+ the priority of processes in the chroot, or alter the priority of
68549+ processes outside the chroot. This provides more security than simply
68550+ removing CAP_SYS_NICE from the process' capability set. If the
68551+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68552+ is created.
68553+
68554+config GRKERNSEC_CHROOT_SYSCTL
68555+ bool "Deny sysctl writes"
68556+ default y if GRKERNSEC_CONFIG_AUTO
68557+ depends on GRKERNSEC_CHROOT
68558+ help
68559+ If you say Y here, an attacker in a chroot will not be able to
68560+ write to sysctl entries, either by sysctl(2) or through a /proc
68561+ interface. It is strongly recommended that you say Y here. If the
68562+ sysctl option is enabled, a sysctl option with name
68563+ "chroot_deny_sysctl" is created.
68564+
68565+config GRKERNSEC_CHROOT_RENAME
68566+ bool "Deny bad renames"
68567+ default y if GRKERNSEC_CONFIG_AUTO
68568+ depends on GRKERNSEC_CHROOT
68569+ help
68570+ If you say Y here, an attacker in a chroot will not be able to
68571+ abuse the ability to create double chroots to break out of the
68572+ chroot by exploiting a race condition between a rename of a directory
68573+ within a chroot against an open of a symlink with relative path
68574+ components. This feature will likewise prevent an accomplice outside
68575+ a chroot from enabling a user inside the chroot to break out and make
68576+ use of their credentials on the global filesystem. Enabling this
68577+ feature is essential to prevent root users from breaking out of a
68578+ chroot. If the sysctl option is enabled, a sysctl option with name
68579+ "chroot_deny_bad_rename" is created.
68580+
68581+config GRKERNSEC_CHROOT_CAPS
68582+ bool "Capability restrictions"
68583+ default y if GRKERNSEC_CONFIG_AUTO
68584+ depends on GRKERNSEC_CHROOT
68585+ help
68586+ If you say Y here, the capabilities on all processes within a
68587+ chroot jail will be lowered to stop module insertion, raw i/o,
68588+ system and net admin tasks, rebooting the system, modifying immutable
68589+ files, modifying IPC owned by another, and changing the system time.
68590+ This is left an option because it can break some apps. Disable this
68591+ if your chrooted apps are having problems performing those kinds of
68592+ tasks. If the sysctl option is enabled, a sysctl option with
68593+ name "chroot_caps" is created.
68594+
68595+config GRKERNSEC_CHROOT_INITRD
68596+ bool "Exempt initrd tasks from restrictions"
68597+ default y if GRKERNSEC_CONFIG_AUTO
68598+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68599+ help
68600+ If you say Y here, tasks started prior to init will be exempted from
68601+ grsecurity's chroot restrictions. This option is mainly meant to
68602+ resolve Plymouth's performing privileged operations unnecessarily
68603+ in a chroot.
68604+
68605+endmenu
68606+menu "Kernel Auditing"
68607+depends on GRKERNSEC
68608+
68609+config GRKERNSEC_AUDIT_GROUP
68610+ bool "Single group for auditing"
68611+ help
68612+ If you say Y here, the exec and chdir logging features will only operate
68613+ on a group you specify. This option is recommended if you only want to
68614+ watch certain users instead of having a large amount of logs from the
68615+ entire system. If the sysctl option is enabled, a sysctl option with
68616+ name "audit_group" is created.
68617+
68618+config GRKERNSEC_AUDIT_GID
68619+ int "GID for auditing"
68620+ depends on GRKERNSEC_AUDIT_GROUP
68621+ default 1007
68622+
68623+config GRKERNSEC_EXECLOG
68624+ bool "Exec logging"
68625+ help
68626+ If you say Y here, all execve() calls will be logged (since the
68627+ other exec*() calls are frontends to execve(), all execution
68628+ will be logged). Useful for shell-servers that like to keep track
68629+ of their users. If the sysctl option is enabled, a sysctl option with
68630+ name "exec_logging" is created.
68631+ WARNING: This option when enabled will produce a LOT of logs, especially
68632+ on an active system.
68633+
68634+config GRKERNSEC_RESLOG
68635+ bool "Resource logging"
68636+ default y if GRKERNSEC_CONFIG_AUTO
68637+ help
68638+ If you say Y here, all attempts to overstep resource limits will
68639+ be logged with the resource name, the requested size, and the current
68640+ limit. It is highly recommended that you say Y here. If the sysctl
68641+ option is enabled, a sysctl option with name "resource_logging" is
68642+ created. If the RBAC system is enabled, the sysctl value is ignored.
68643+
68644+config GRKERNSEC_CHROOT_EXECLOG
68645+ bool "Log execs within chroot"
68646+ help
68647+ If you say Y here, all executions inside a chroot jail will be logged
68648+ to syslog. This can cause a large amount of logs if certain
68649+ applications (eg. djb's daemontools) are installed on the system, and
68650+ is therefore left as an option. If the sysctl option is enabled, a
68651+ sysctl option with name "chroot_execlog" is created.
68652+
68653+config GRKERNSEC_AUDIT_PTRACE
68654+ bool "Ptrace logging"
68655+ help
68656+ If you say Y here, all attempts to attach to a process via ptrace
68657+ will be logged. If the sysctl option is enabled, a sysctl option
68658+ with name "audit_ptrace" is created.
68659+
68660+config GRKERNSEC_AUDIT_CHDIR
68661+ bool "Chdir logging"
68662+ help
68663+ If you say Y here, all chdir() calls will be logged. If the sysctl
68664+ option is enabled, a sysctl option with name "audit_chdir" is created.
68665+
68666+config GRKERNSEC_AUDIT_MOUNT
68667+ bool "(Un)Mount logging"
68668+ help
68669+ If you say Y here, all mounts and unmounts will be logged. If the
68670+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68671+ created.
68672+
68673+config GRKERNSEC_SIGNAL
68674+ bool "Signal logging"
68675+ default y if GRKERNSEC_CONFIG_AUTO
68676+ help
68677+ If you say Y here, certain important signals will be logged, such as
68678+ SIGSEGV, which will as a result inform you of when a error in a program
68679+ occurred, which in some cases could mean a possible exploit attempt.
68680+ If the sysctl option is enabled, a sysctl option with name
68681+ "signal_logging" is created.
68682+
68683+config GRKERNSEC_FORKFAIL
68684+ bool "Fork failure logging"
68685+ help
68686+ If you say Y here, all failed fork() attempts will be logged.
68687+ This could suggest a fork bomb, or someone attempting to overstep
68688+ their process limit. If the sysctl option is enabled, a sysctl option
68689+ with name "forkfail_logging" is created.
68690+
68691+config GRKERNSEC_TIME
68692+ bool "Time change logging"
68693+ default y if GRKERNSEC_CONFIG_AUTO
68694+ help
68695+ If you say Y here, any changes of the system clock will be logged.
68696+ If the sysctl option is enabled, a sysctl option with name
68697+ "timechange_logging" is created.
68698+
68699+config GRKERNSEC_PROC_IPADDR
68700+ bool "/proc/<pid>/ipaddr support"
68701+ default y if GRKERNSEC_CONFIG_AUTO
68702+ help
68703+ If you say Y here, a new entry will be added to each /proc/<pid>
68704+ directory that contains the IP address of the person using the task.
68705+ The IP is carried across local TCP and AF_UNIX stream sockets.
68706+ This information can be useful for IDS/IPSes to perform remote response
68707+ to a local attack. The entry is readable by only the owner of the
68708+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
68709+ the RBAC system), and thus does not create privacy concerns.
68710+
68711+config GRKERNSEC_RWXMAP_LOG
68712+ bool 'Denied RWX mmap/mprotect logging'
68713+ default y if GRKERNSEC_CONFIG_AUTO
68714+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
68715+ help
68716+ If you say Y here, calls to mmap() and mprotect() with explicit
68717+ usage of PROT_WRITE and PROT_EXEC together will be logged when
68718+ denied by the PAX_MPROTECT feature. This feature will also
68719+ log other problematic scenarios that can occur when PAX_MPROTECT
68720+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
68721+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
68722+ is created.
68723+
68724+endmenu
68725+
68726+menu "Executable Protections"
68727+depends on GRKERNSEC
68728+
68729+config GRKERNSEC_DMESG
68730+ bool "Dmesg(8) restriction"
68731+ default y if GRKERNSEC_CONFIG_AUTO
68732+ help
68733+ If you say Y here, non-root users will not be able to use dmesg(8)
68734+ to view the contents of the kernel's circular log buffer.
68735+ The kernel's log buffer often contains kernel addresses and other
68736+ identifying information useful to an attacker in fingerprinting a
68737+ system for a targeted exploit.
68738+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
68739+ created.
68740+
68741+config GRKERNSEC_HARDEN_PTRACE
68742+ bool "Deter ptrace-based process snooping"
68743+ default y if GRKERNSEC_CONFIG_AUTO
68744+ help
68745+ If you say Y here, TTY sniffers and other malicious monitoring
68746+ programs implemented through ptrace will be defeated. If you
68747+ have been using the RBAC system, this option has already been
68748+ enabled for several years for all users, with the ability to make
68749+ fine-grained exceptions.
68750+
68751+ This option only affects the ability of non-root users to ptrace
68752+ processes that are not a descendent of the ptracing process.
68753+ This means that strace ./binary and gdb ./binary will still work,
68754+ but attaching to arbitrary processes will not. If the sysctl
68755+ option is enabled, a sysctl option with name "harden_ptrace" is
68756+ created.
68757+
68758+config GRKERNSEC_PTRACE_READEXEC
68759+ bool "Require read access to ptrace sensitive binaries"
68760+ default y if GRKERNSEC_CONFIG_AUTO
68761+ help
68762+ If you say Y here, unprivileged users will not be able to ptrace unreadable
68763+ binaries. This option is useful in environments that
68764+ remove the read bits (e.g. file mode 4711) from suid binaries to
68765+ prevent infoleaking of their contents. This option adds
68766+ consistency to the use of that file mode, as the binary could normally
68767+ be read out when run without privileges while ptracing.
68768+
68769+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
68770+ is created.
68771+
68772+config GRKERNSEC_SETXID
68773+ bool "Enforce consistent multithreaded privileges"
68774+ default y if GRKERNSEC_CONFIG_AUTO
68775+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
68776+ help
68777+ If you say Y here, a change from a root uid to a non-root uid
68778+ in a multithreaded application will cause the resulting uids,
68779+ gids, supplementary groups, and capabilities in that thread
68780+ to be propagated to the other threads of the process. In most
68781+ cases this is unnecessary, as glibc will emulate this behavior
68782+ on behalf of the application. Other libcs do not act in the
68783+ same way, allowing the other threads of the process to continue
68784+ running with root privileges. If the sysctl option is enabled,
68785+ a sysctl option with name "consistent_setxid" is created.
68786+
68787+config GRKERNSEC_HARDEN_IPC
68788+ bool "Disallow access to overly-permissive IPC objects"
68789+ default y if GRKERNSEC_CONFIG_AUTO
68790+ depends on SYSVIPC
68791+ help
68792+ If you say Y here, access to overly-permissive IPC objects (shared
68793+ memory, message queues, and semaphores) will be denied for processes
68794+ given the following criteria beyond normal permission checks:
68795+ 1) If the IPC object is world-accessible and the euid doesn't match
68796+ that of the creator or current uid for the IPC object
68797+ 2) If the IPC object is group-accessible and the egid doesn't
68798+ match that of the creator or current gid for the IPC object
68799+ It's a common error to grant too much permission to these objects,
68800+ with impact ranging from denial of service and information leaking to
68801+ privilege escalation. This feature was developed in response to
68802+ research by Tim Brown:
68803+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
68804+ who found hundreds of such insecure usages. Processes with
68805+ CAP_IPC_OWNER are still permitted to access these IPC objects.
68806+ If the sysctl option is enabled, a sysctl option with name
68807+ "harden_ipc" is created.
68808+
68809+config GRKERNSEC_TPE
68810+ bool "Trusted Path Execution (TPE)"
68811+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68812+ help
68813+ If you say Y here, you will be able to choose a gid to add to the
68814+ supplementary groups of users you want to mark as "untrusted."
68815+ These users will not be able to execute any files that are not in
68816+ root-owned directories writable only by root. If the sysctl option
68817+ is enabled, a sysctl option with name "tpe" is created.
68818+
68819+config GRKERNSEC_TPE_ALL
68820+ bool "Partially restrict all non-root users"
68821+ depends on GRKERNSEC_TPE
68822+ help
68823+ If you say Y here, all non-root users will be covered under
68824+ a weaker TPE restriction. This is separate from, and in addition to,
68825+ the main TPE options that you have selected elsewhere. Thus, if a
68826+ "trusted" GID is chosen, this restriction applies to even that GID.
68827+ Under this restriction, all non-root users will only be allowed to
68828+ execute files in directories they own that are not group or
68829+ world-writable, or in directories owned by root and writable only by
68830+ root. If the sysctl option is enabled, a sysctl option with name
68831+ "tpe_restrict_all" is created.
68832+
68833+config GRKERNSEC_TPE_INVERT
68834+ bool "Invert GID option"
68835+ depends on GRKERNSEC_TPE
68836+ help
68837+ If you say Y here, the group you specify in the TPE configuration will
68838+ decide what group TPE restrictions will be *disabled* for. This
68839+ option is useful if you want TPE restrictions to be applied to most
68840+ users on the system. If the sysctl option is enabled, a sysctl option
68841+ with name "tpe_invert" is created. Unlike other sysctl options, this
68842+ entry will default to on for backward-compatibility.
68843+
68844+config GRKERNSEC_TPE_GID
68845+ int
68846+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
68847+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
68848+
68849+config GRKERNSEC_TPE_UNTRUSTED_GID
68850+ int "GID for TPE-untrusted users"
68851+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
68852+ default 1005
68853+ help
68854+ Setting this GID determines what group TPE restrictions will be
68855+ *enabled* for. If the sysctl option is enabled, a sysctl option
68856+ with name "tpe_gid" is created.
68857+
68858+config GRKERNSEC_TPE_TRUSTED_GID
68859+ int "GID for TPE-trusted users"
68860+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
68861+ default 1005
68862+ help
68863+ Setting this GID determines what group TPE restrictions will be
68864+ *disabled* for. If the sysctl option is enabled, a sysctl option
68865+ with name "tpe_gid" is created.
68866+
68867+endmenu
68868+menu "Network Protections"
68869+depends on GRKERNSEC
68870+
68871+config GRKERNSEC_BLACKHOLE
68872+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
68873+ default y if GRKERNSEC_CONFIG_AUTO
68874+ depends on NET
68875+ help
68876+ If you say Y here, neither TCP resets nor ICMP
68877+ destination-unreachable packets will be sent in response to packets
68878+ sent to ports for which no associated listening process exists.
68879+ It will also prevent the sending of ICMP protocol unreachable packets
68880+ in response to packets with unknown protocols.
68881+ This feature supports both IPV4 and IPV6 and exempts the
68882+ loopback interface from blackholing. Enabling this feature
68883+ makes a host more resilient to DoS attacks and reduces network
68884+ visibility against scanners.
68885+
68886+ The blackhole feature as-implemented is equivalent to the FreeBSD
68887+ blackhole feature, as it prevents RST responses to all packets, not
68888+ just SYNs. Under most application behavior this causes no
68889+ problems, but applications (like haproxy) may not close certain
68890+ connections in a way that cleanly terminates them on the remote
68891+ end, leaving the remote host in LAST_ACK state. Because of this
68892+ side-effect and to prevent intentional LAST_ACK DoSes, this
68893+ feature also adds automatic mitigation against such attacks.
68894+ The mitigation drastically reduces the amount of time a socket
68895+ can spend in LAST_ACK state. If you're using haproxy and not
68896+ all servers it connects to have this option enabled, consider
68897+ disabling this feature on the haproxy host.
68898+
68899+ If the sysctl option is enabled, two sysctl options with names
68900+ "ip_blackhole" and "lastack_retries" will be created.
68901+ While "ip_blackhole" takes the standard zero/non-zero on/off
68902+ toggle, "lastack_retries" uses the same kinds of values as
68903+ "tcp_retries1" and "tcp_retries2". The default value of 4
68904+ prevents a socket from lasting more than 45 seconds in LAST_ACK
68905+ state.
68906+
68907+config GRKERNSEC_NO_SIMULT_CONNECT
68908+ bool "Disable TCP Simultaneous Connect"
68909+ default y if GRKERNSEC_CONFIG_AUTO
68910+ depends on NET
68911+ help
68912+ If you say Y here, a feature by Willy Tarreau will be enabled that
68913+ removes a weakness in Linux's strict implementation of TCP that
68914+ allows two clients to connect to each other without either entering
68915+ a listening state. The weakness allows an attacker to easily prevent
68916+ a client from connecting to a known server provided the source port
68917+ for the connection is guessed correctly.
68918+
68919+ As the weakness could be used to prevent an antivirus or IPS from
68920+ fetching updates, or prevent an SSL gateway from fetching a CRL,
68921+ it should be eliminated by enabling this option. Though Linux is
68922+ one of few operating systems supporting simultaneous connect, it
68923+ has no legitimate use in practice and is rarely supported by firewalls.
68924+
68925+config GRKERNSEC_SOCKET
68926+ bool "Socket restrictions"
68927+ depends on NET
68928+ help
68929+ If you say Y here, you will be able to choose from several options.
68930+ If you assign a GID on your system and add it to the supplementary
68931+ groups of users you want to restrict socket access to, this patch
68932+ will perform up to three things, based on the option(s) you choose.
68933+
68934+config GRKERNSEC_SOCKET_ALL
68935+ bool "Deny any sockets to group"
68936+ depends on GRKERNSEC_SOCKET
68937+ help
68938+ If you say Y here, you will be able to choose a GID of whose users will
68939+ be unable to connect to other hosts from your machine or run server
68940+ applications from your machine. If the sysctl option is enabled, a
68941+ sysctl option with name "socket_all" is created.
68942+
68943+config GRKERNSEC_SOCKET_ALL_GID
68944+ int "GID to deny all sockets for"
68945+ depends on GRKERNSEC_SOCKET_ALL
68946+ default 1004
68947+ help
68948+ Here you can choose the GID to disable socket access for. Remember to
68949+ add the users you want socket access disabled for to the GID
68950+ specified here. If the sysctl option is enabled, a sysctl option
68951+ with name "socket_all_gid" is created.
68952+
68953+config GRKERNSEC_SOCKET_CLIENT
68954+ bool "Deny client sockets to group"
68955+ depends on GRKERNSEC_SOCKET
68956+ help
68957+ If you say Y here, you will be able to choose a GID of whose users will
68958+ be unable to connect to other hosts from your machine, but will be
68959+ able to run servers. If this option is enabled, all users in the group
68960+ you specify will have to use passive mode when initiating ftp transfers
68961+ from the shell on your machine. If the sysctl option is enabled, a
68962+ sysctl option with name "socket_client" is created.
68963+
68964+config GRKERNSEC_SOCKET_CLIENT_GID
68965+ int "GID to deny client sockets for"
68966+ depends on GRKERNSEC_SOCKET_CLIENT
68967+ default 1003
68968+ help
68969+ Here you can choose the GID to disable client socket access for.
68970+ Remember to add the users you want client socket access disabled for to
68971+ the GID specified here. If the sysctl option is enabled, a sysctl
68972+ option with name "socket_client_gid" is created.
68973+
68974+config GRKERNSEC_SOCKET_SERVER
68975+ bool "Deny server sockets to group"
68976+ depends on GRKERNSEC_SOCKET
68977+ help
68978+ If you say Y here, you will be able to choose a GID of whose users will
68979+ be unable to run server applications from your machine. If the sysctl
68980+ option is enabled, a sysctl option with name "socket_server" is created.
68981+
68982+config GRKERNSEC_SOCKET_SERVER_GID
68983+ int "GID to deny server sockets for"
68984+ depends on GRKERNSEC_SOCKET_SERVER
68985+ default 1002
68986+ help
68987+ Here you can choose the GID to disable server socket access for.
68988+ Remember to add the users you want server socket access disabled for to
68989+ the GID specified here. If the sysctl option is enabled, a sysctl
68990+ option with name "socket_server_gid" is created.
68991+
68992+endmenu
68993+
68994+menu "Physical Protections"
68995+depends on GRKERNSEC
68996+
68997+config GRKERNSEC_DENYUSB
68998+ bool "Deny new USB connections after toggle"
68999+ default y if GRKERNSEC_CONFIG_AUTO
69000+ depends on SYSCTL && USB_SUPPORT
69001+ help
69002+ If you say Y here, a new sysctl option with name "deny_new_usb"
69003+ will be created. Setting its value to 1 will prevent any new
69004+ USB devices from being recognized by the OS. Any attempted USB
69005+ device insertion will be logged. This option is intended to be
69006+ used against custom USB devices designed to exploit vulnerabilities
69007+ in various USB device drivers.
69008+
69009+ For greatest effectiveness, this sysctl should be set after any
69010+ relevant init scripts. This option is safe to enable in distros
69011+ as each user can choose whether or not to toggle the sysctl.
69012+
69013+config GRKERNSEC_DENYUSB_FORCE
69014+ bool "Reject all USB devices not connected at boot"
69015+ select USB
69016+ depends on GRKERNSEC_DENYUSB
69017+ help
69018+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69019+ that doesn't involve a sysctl entry. This option should only be
69020+ enabled if you're sure you want to deny all new USB connections
69021+ at runtime and don't want to modify init scripts. This should not
69022+ be enabled by distros. It forces the core USB code to be built
69023+ into the kernel image so that all devices connected at boot time
69024+ can be recognized and new USB device connections can be prevented
69025+ prior to init running.
69026+
69027+endmenu
69028+
69029+menu "Sysctl Support"
69030+depends on GRKERNSEC && SYSCTL
69031+
69032+config GRKERNSEC_SYSCTL
69033+ bool "Sysctl support"
69034+ default y if GRKERNSEC_CONFIG_AUTO
69035+ help
69036+ If you say Y here, you will be able to change the options that
69037+ grsecurity runs with at bootup, without having to recompile your
69038+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69039+ to enable (1) or disable (0) various features. All the sysctl entries
69040+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69041+ All features enabled in the kernel configuration are disabled at boot
69042+ if you do not say Y to the "Turn on features by default" option.
69043+ All options should be set at startup, and the grsec_lock entry should
69044+ be set to a non-zero value after all the options are set.
69045+ *THIS IS EXTREMELY IMPORTANT*
69046+
69047+config GRKERNSEC_SYSCTL_DISTRO
69048+ bool "Extra sysctl support for distro makers (READ HELP)"
69049+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69050+ help
69051+ If you say Y here, additional sysctl options will be created
69052+ for features that affect processes running as root. Therefore,
69053+ it is critical when using this option that the grsec_lock entry be
69054+ enabled after boot. Only distros with prebuilt kernel packages
69055+ with this option enabled that can ensure grsec_lock is enabled
69056+ after boot should use this option.
69057+ *Failure to set grsec_lock after boot makes all grsec features
69058+ this option covers useless*
69059+
69060+ Currently this option creates the following sysctl entries:
69061+ "Disable Privileged I/O": "disable_priv_io"
69062+
69063+config GRKERNSEC_SYSCTL_ON
69064+ bool "Turn on features by default"
69065+ default y if GRKERNSEC_CONFIG_AUTO
69066+ depends on GRKERNSEC_SYSCTL
69067+ help
69068+ If you say Y here, instead of having all features enabled in the
69069+ kernel configuration disabled at boot time, the features will be
69070+ enabled at boot time. It is recommended you say Y here unless
69071+ there is some reason you would want all sysctl-tunable features to
69072+ be disabled by default. As mentioned elsewhere, it is important
69073+ to enable the grsec_lock entry once you have finished modifying
69074+ the sysctl entries.
69075+
69076+endmenu
69077+menu "Logging Options"
69078+depends on GRKERNSEC
69079+
69080+config GRKERNSEC_FLOODTIME
69081+ int "Seconds in between log messages (minimum)"
69082+ default 10
69083+ help
69084+ This option allows you to enforce the number of seconds between
69085+ grsecurity log messages. The default should be suitable for most
69086+ people, however, if you choose to change it, choose a value small enough
69087+ to allow informative logs to be produced, but large enough to
69088+ prevent flooding.
69089+
69090+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69091+ any rate limiting on grsecurity log messages.
69092+
69093+config GRKERNSEC_FLOODBURST
69094+ int "Number of messages in a burst (maximum)"
69095+ default 6
69096+ help
69097+ This option allows you to choose the maximum number of messages allowed
69098+ within the flood time interval you chose in a separate option. The
69099+ default should be suitable for most people, however if you find that
69100+ many of your logs are being interpreted as flooding, you may want to
69101+ raise this value.
69102+
69103+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69104+ any rate limiting on grsecurity log messages.
69105+
69106+endmenu
69107diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69108new file mode 100644
69109index 0000000..30ababb
69110--- /dev/null
69111+++ b/grsecurity/Makefile
69112@@ -0,0 +1,54 @@
69113+# grsecurity – access control and security hardening for Linux
69114+# All code in this directory and various hooks located throughout the Linux kernel are
69115+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69116+# http://www.grsecurity.net spender@grsecurity.net
69117+#
69118+# This program is free software; you can redistribute it and/or
69119+# modify it under the terms of the GNU General Public License version 2
69120+# as published by the Free Software Foundation.
69121+#
69122+# This program is distributed in the hope that it will be useful,
69123+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69124+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69125+# GNU General Public License for more details.
69126+#
69127+# You should have received a copy of the GNU General Public License
69128+# along with this program; if not, write to the Free Software
69129+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69130+
69131+KBUILD_CFLAGS += -Werror
69132+
69133+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69134+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69135+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69136+ grsec_usb.o grsec_ipc.o grsec_proc.o
69137+
69138+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69139+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69140+ gracl_learn.o grsec_log.o gracl_policy.o
69141+ifdef CONFIG_COMPAT
69142+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69143+endif
69144+
69145+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69146+
69147+ifdef CONFIG_NET
69148+obj-y += grsec_sock.o
69149+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69150+endif
69151+
69152+ifndef CONFIG_GRKERNSEC
69153+obj-y += grsec_disabled.o
69154+endif
69155+
69156+ifdef CONFIG_GRKERNSEC_HIDESYM
69157+extra-y := grsec_hidesym.o
69158+$(obj)/grsec_hidesym.o:
69159+ @-chmod -f 500 /boot
69160+ @-chmod -f 500 /lib/modules
69161+ @-chmod -f 500 /lib64/modules
69162+ @-chmod -f 500 /lib32/modules
69163+ @-chmod -f 700 .
69164+ @-chmod -f 700 $(objtree)
69165+ @echo ' grsec: protected kernel image paths'
69166+endif
69167diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69168new file mode 100644
69169index 0000000..6c1e154
69170--- /dev/null
69171+++ b/grsecurity/gracl.c
69172@@ -0,0 +1,2749 @@
69173+#include <linux/kernel.h>
69174+#include <linux/module.h>
69175+#include <linux/sched.h>
69176+#include <linux/mm.h>
69177+#include <linux/file.h>
69178+#include <linux/fs.h>
69179+#include <linux/namei.h>
69180+#include <linux/mount.h>
69181+#include <linux/tty.h>
69182+#include <linux/proc_fs.h>
69183+#include <linux/lglock.h>
69184+#include <linux/slab.h>
69185+#include <linux/vmalloc.h>
69186+#include <linux/types.h>
69187+#include <linux/sysctl.h>
69188+#include <linux/netdevice.h>
69189+#include <linux/ptrace.h>
69190+#include <linux/gracl.h>
69191+#include <linux/gralloc.h>
69192+#include <linux/security.h>
69193+#include <linux/grinternal.h>
69194+#include <linux/pid_namespace.h>
69195+#include <linux/stop_machine.h>
69196+#include <linux/fdtable.h>
69197+#include <linux/percpu.h>
69198+#include <linux/lglock.h>
69199+#include <linux/hugetlb.h>
69200+#include <linux/posix-timers.h>
69201+#include <linux/prefetch.h>
69202+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69203+#include <linux/magic.h>
69204+#include <linux/pagemap.h>
69205+#include "../fs/btrfs/async-thread.h"
69206+#include "../fs/btrfs/ctree.h"
69207+#include "../fs/btrfs/btrfs_inode.h"
69208+#endif
69209+#include "../fs/mount.h"
69210+
69211+#include <asm/uaccess.h>
69212+#include <asm/errno.h>
69213+#include <asm/mman.h>
69214+
69215+#define FOR_EACH_ROLE_START(role) \
69216+ role = running_polstate.role_list; \
69217+ while (role) {
69218+
69219+#define FOR_EACH_ROLE_END(role) \
69220+ role = role->prev; \
69221+ }
69222+
69223+extern struct path gr_real_root;
69224+
69225+static struct gr_policy_state running_polstate;
69226+struct gr_policy_state *polstate = &running_polstate;
69227+extern struct gr_alloc_state *current_alloc_state;
69228+
69229+extern char *gr_shared_page[4];
69230+DEFINE_RWLOCK(gr_inode_lock);
69231+
69232+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69233+
69234+#ifdef CONFIG_NET
69235+extern struct vfsmount *sock_mnt;
69236+#endif
69237+
69238+extern struct vfsmount *pipe_mnt;
69239+extern struct vfsmount *shm_mnt;
69240+
69241+#ifdef CONFIG_HUGETLBFS
69242+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69243+#endif
69244+
69245+extern u16 acl_sp_role_value;
69246+extern struct acl_object_label *fakefs_obj_rw;
69247+extern struct acl_object_label *fakefs_obj_rwx;
69248+
69249+int gr_acl_is_enabled(void)
69250+{
69251+ return (gr_status & GR_READY);
69252+}
69253+
69254+void gr_enable_rbac_system(void)
69255+{
69256+ pax_open_kernel();
69257+ gr_status |= GR_READY;
69258+ pax_close_kernel();
69259+}
69260+
69261+int gr_rbac_disable(void *unused)
69262+{
69263+ pax_open_kernel();
69264+ gr_status &= ~GR_READY;
69265+ pax_close_kernel();
69266+
69267+ return 0;
69268+}
69269+
69270+static inline dev_t __get_dev(const struct dentry *dentry)
69271+{
69272+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69273+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69274+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69275+ else
69276+#endif
69277+ return dentry->d_sb->s_dev;
69278+}
69279+
69280+static inline u64 __get_ino(const struct dentry *dentry)
69281+{
69282+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69283+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69284+ return btrfs_ino(dentry->d_inode);
69285+ else
69286+#endif
69287+ return dentry->d_inode->i_ino;
69288+}
69289+
69290+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69291+{
69292+ return __get_dev(dentry);
69293+}
69294+
69295+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69296+{
69297+ return __get_ino(dentry);
69298+}
69299+
69300+static char gr_task_roletype_to_char(struct task_struct *task)
69301+{
69302+ switch (task->role->roletype &
69303+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69304+ GR_ROLE_SPECIAL)) {
69305+ case GR_ROLE_DEFAULT:
69306+ return 'D';
69307+ case GR_ROLE_USER:
69308+ return 'U';
69309+ case GR_ROLE_GROUP:
69310+ return 'G';
69311+ case GR_ROLE_SPECIAL:
69312+ return 'S';
69313+ }
69314+
69315+ return 'X';
69316+}
69317+
69318+char gr_roletype_to_char(void)
69319+{
69320+ return gr_task_roletype_to_char(current);
69321+}
69322+
69323+__inline__ int
69324+gr_acl_tpe_check(void)
69325+{
69326+ if (unlikely(!(gr_status & GR_READY)))
69327+ return 0;
69328+ if (current->role->roletype & GR_ROLE_TPE)
69329+ return 1;
69330+ else
69331+ return 0;
69332+}
69333+
69334+int
69335+gr_handle_rawio(const struct inode *inode)
69336+{
69337+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69338+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69339+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69340+ !capable(CAP_SYS_RAWIO))
69341+ return 1;
69342+#endif
69343+ return 0;
69344+}
69345+
69346+int
69347+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69348+{
69349+ if (likely(lena != lenb))
69350+ return 0;
69351+
69352+ return !memcmp(a, b, lena);
69353+}
69354+
69355+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69356+{
69357+ *buflen -= namelen;
69358+ if (*buflen < 0)
69359+ return -ENAMETOOLONG;
69360+ *buffer -= namelen;
69361+ memcpy(*buffer, str, namelen);
69362+ return 0;
69363+}
69364+
69365+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69366+{
69367+ return prepend(buffer, buflen, name->name, name->len);
69368+}
69369+
69370+static int prepend_path(const struct path *path, struct path *root,
69371+ char **buffer, int *buflen)
69372+{
69373+ struct dentry *dentry = path->dentry;
69374+ struct vfsmount *vfsmnt = path->mnt;
69375+ struct mount *mnt = real_mount(vfsmnt);
69376+ bool slash = false;
69377+ int error = 0;
69378+
69379+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69380+ struct dentry * parent;
69381+
69382+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69383+ /* Global root? */
69384+ if (!mnt_has_parent(mnt)) {
69385+ goto out;
69386+ }
69387+ dentry = mnt->mnt_mountpoint;
69388+ mnt = mnt->mnt_parent;
69389+ vfsmnt = &mnt->mnt;
69390+ continue;
69391+ }
69392+ parent = dentry->d_parent;
69393+ prefetch(parent);
69394+ spin_lock(&dentry->d_lock);
69395+ error = prepend_name(buffer, buflen, &dentry->d_name);
69396+ spin_unlock(&dentry->d_lock);
69397+ if (!error)
69398+ error = prepend(buffer, buflen, "/", 1);
69399+ if (error)
69400+ break;
69401+
69402+ slash = true;
69403+ dentry = parent;
69404+ }
69405+
69406+out:
69407+ if (!error && !slash)
69408+ error = prepend(buffer, buflen, "/", 1);
69409+
69410+ return error;
69411+}
69412+
69413+/* this must be called with mount_lock and rename_lock held */
69414+
69415+static char *__our_d_path(const struct path *path, struct path *root,
69416+ char *buf, int buflen)
69417+{
69418+ char *res = buf + buflen;
69419+ int error;
69420+
69421+ prepend(&res, &buflen, "\0", 1);
69422+ error = prepend_path(path, root, &res, &buflen);
69423+ if (error)
69424+ return ERR_PTR(error);
69425+
69426+ return res;
69427+}
69428+
69429+static char *
69430+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69431+{
69432+ char *retval;
69433+
69434+ retval = __our_d_path(path, root, buf, buflen);
69435+ if (unlikely(IS_ERR(retval)))
69436+ retval = strcpy(buf, "<path too long>");
69437+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69438+ retval[1] = '\0';
69439+
69440+ return retval;
69441+}
69442+
69443+static char *
69444+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69445+ char *buf, int buflen)
69446+{
69447+ struct path path;
69448+ char *res;
69449+
69450+ path.dentry = (struct dentry *)dentry;
69451+ path.mnt = (struct vfsmount *)vfsmnt;
69452+
69453+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69454+ by the RBAC system */
69455+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69456+
69457+ return res;
69458+}
69459+
69460+static char *
69461+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69462+ char *buf, int buflen)
69463+{
69464+ char *res;
69465+ struct path path;
69466+ struct path root;
69467+ struct task_struct *reaper = init_pid_ns.child_reaper;
69468+
69469+ path.dentry = (struct dentry *)dentry;
69470+ path.mnt = (struct vfsmount *)vfsmnt;
69471+
69472+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69473+ get_fs_root(reaper->fs, &root);
69474+
69475+ read_seqlock_excl(&mount_lock);
69476+ write_seqlock(&rename_lock);
69477+ res = gen_full_path(&path, &root, buf, buflen);
69478+ write_sequnlock(&rename_lock);
69479+ read_sequnlock_excl(&mount_lock);
69480+
69481+ path_put(&root);
69482+ return res;
69483+}
69484+
69485+char *
69486+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69487+{
69488+ char *ret;
69489+ read_seqlock_excl(&mount_lock);
69490+ write_seqlock(&rename_lock);
69491+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69492+ PAGE_SIZE);
69493+ write_sequnlock(&rename_lock);
69494+ read_sequnlock_excl(&mount_lock);
69495+ return ret;
69496+}
69497+
69498+static char *
69499+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69500+{
69501+ char *ret;
69502+ char *buf;
69503+ int buflen;
69504+
69505+ read_seqlock_excl(&mount_lock);
69506+ write_seqlock(&rename_lock);
69507+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69508+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69509+ buflen = (int)(ret - buf);
69510+ if (buflen >= 5)
69511+ prepend(&ret, &buflen, "/proc", 5);
69512+ else
69513+ ret = strcpy(buf, "<path too long>");
69514+ write_sequnlock(&rename_lock);
69515+ read_sequnlock_excl(&mount_lock);
69516+ return ret;
69517+}
69518+
69519+char *
69520+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69521+{
69522+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69523+ PAGE_SIZE);
69524+}
69525+
69526+char *
69527+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69528+{
69529+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69530+ PAGE_SIZE);
69531+}
69532+
69533+char *
69534+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69535+{
69536+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69537+ PAGE_SIZE);
69538+}
69539+
69540+char *
69541+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69542+{
69543+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69544+ PAGE_SIZE);
69545+}
69546+
69547+char *
69548+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69549+{
69550+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69551+ PAGE_SIZE);
69552+}
69553+
69554+__inline__ __u32
69555+to_gr_audit(const __u32 reqmode)
69556+{
69557+ /* masks off auditable permission flags, then shifts them to create
69558+ auditing flags, and adds the special case of append auditing if
69559+ we're requesting write */
69560+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69561+}
69562+
69563+struct acl_role_label *
69564+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69565+ const gid_t gid)
69566+{
69567+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69568+ struct acl_role_label *match;
69569+ struct role_allowed_ip *ipp;
69570+ unsigned int x;
69571+ u32 curr_ip = task->signal->saved_ip;
69572+
69573+ match = state->acl_role_set.r_hash[index];
69574+
69575+ while (match) {
69576+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69577+ for (x = 0; x < match->domain_child_num; x++) {
69578+ if (match->domain_children[x] == uid)
69579+ goto found;
69580+ }
69581+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69582+ break;
69583+ match = match->next;
69584+ }
69585+found:
69586+ if (match == NULL) {
69587+ try_group:
69588+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69589+ match = state->acl_role_set.r_hash[index];
69590+
69591+ while (match) {
69592+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69593+ for (x = 0; x < match->domain_child_num; x++) {
69594+ if (match->domain_children[x] == gid)
69595+ goto found2;
69596+ }
69597+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69598+ break;
69599+ match = match->next;
69600+ }
69601+found2:
69602+ if (match == NULL)
69603+ match = state->default_role;
69604+ if (match->allowed_ips == NULL)
69605+ return match;
69606+ else {
69607+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69608+ if (likely
69609+ ((ntohl(curr_ip) & ipp->netmask) ==
69610+ (ntohl(ipp->addr) & ipp->netmask)))
69611+ return match;
69612+ }
69613+ match = state->default_role;
69614+ }
69615+ } else if (match->allowed_ips == NULL) {
69616+ return match;
69617+ } else {
69618+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69619+ if (likely
69620+ ((ntohl(curr_ip) & ipp->netmask) ==
69621+ (ntohl(ipp->addr) & ipp->netmask)))
69622+ return match;
69623+ }
69624+ goto try_group;
69625+ }
69626+
69627+ return match;
69628+}
69629+
69630+static struct acl_role_label *
69631+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69632+ const gid_t gid)
69633+{
69634+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69635+}
69636+
69637+struct acl_subject_label *
69638+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69639+ const struct acl_role_label *role)
69640+{
69641+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69642+ struct acl_subject_label *match;
69643+
69644+ match = role->subj_hash[index];
69645+
69646+ while (match && (match->inode != ino || match->device != dev ||
69647+ (match->mode & GR_DELETED))) {
69648+ match = match->next;
69649+ }
69650+
69651+ if (match && !(match->mode & GR_DELETED))
69652+ return match;
69653+ else
69654+ return NULL;
69655+}
69656+
69657+struct acl_subject_label *
69658+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69659+ const struct acl_role_label *role)
69660+{
69661+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69662+ struct acl_subject_label *match;
69663+
69664+ match = role->subj_hash[index];
69665+
69666+ while (match && (match->inode != ino || match->device != dev ||
69667+ !(match->mode & GR_DELETED))) {
69668+ match = match->next;
69669+ }
69670+
69671+ if (match && (match->mode & GR_DELETED))
69672+ return match;
69673+ else
69674+ return NULL;
69675+}
69676+
69677+static struct acl_object_label *
69678+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69679+ const struct acl_subject_label *subj)
69680+{
69681+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69682+ struct acl_object_label *match;
69683+
69684+ match = subj->obj_hash[index];
69685+
69686+ while (match && (match->inode != ino || match->device != dev ||
69687+ (match->mode & GR_DELETED))) {
69688+ match = match->next;
69689+ }
69690+
69691+ if (match && !(match->mode & GR_DELETED))
69692+ return match;
69693+ else
69694+ return NULL;
69695+}
69696+
69697+static struct acl_object_label *
69698+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69699+ const struct acl_subject_label *subj)
69700+{
69701+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69702+ struct acl_object_label *match;
69703+
69704+ match = subj->obj_hash[index];
69705+
69706+ while (match && (match->inode != ino || match->device != dev ||
69707+ !(match->mode & GR_DELETED))) {
69708+ match = match->next;
69709+ }
69710+
69711+ if (match && (match->mode & GR_DELETED))
69712+ return match;
69713+
69714+ match = subj->obj_hash[index];
69715+
69716+ while (match && (match->inode != ino || match->device != dev ||
69717+ (match->mode & GR_DELETED))) {
69718+ match = match->next;
69719+ }
69720+
69721+ if (match && !(match->mode & GR_DELETED))
69722+ return match;
69723+ else
69724+ return NULL;
69725+}
69726+
69727+struct name_entry *
69728+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
69729+{
69730+ unsigned int len = strlen(name);
69731+ unsigned int key = full_name_hash(name, len);
69732+ unsigned int index = key % state->name_set.n_size;
69733+ struct name_entry *match;
69734+
69735+ match = state->name_set.n_hash[index];
69736+
69737+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
69738+ match = match->next;
69739+
69740+ return match;
69741+}
69742+
69743+static struct name_entry *
69744+lookup_name_entry(const char *name)
69745+{
69746+ return __lookup_name_entry(&running_polstate, name);
69747+}
69748+
69749+static struct name_entry *
69750+lookup_name_entry_create(const char *name)
69751+{
69752+ unsigned int len = strlen(name);
69753+ unsigned int key = full_name_hash(name, len);
69754+ unsigned int index = key % running_polstate.name_set.n_size;
69755+ struct name_entry *match;
69756+
69757+ match = running_polstate.name_set.n_hash[index];
69758+
69759+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69760+ !match->deleted))
69761+ match = match->next;
69762+
69763+ if (match && match->deleted)
69764+ return match;
69765+
69766+ match = running_polstate.name_set.n_hash[index];
69767+
69768+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69769+ match->deleted))
69770+ match = match->next;
69771+
69772+ if (match && !match->deleted)
69773+ return match;
69774+ else
69775+ return NULL;
69776+}
69777+
69778+static struct inodev_entry *
69779+lookup_inodev_entry(const u64 ino, const dev_t dev)
69780+{
69781+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
69782+ struct inodev_entry *match;
69783+
69784+ match = running_polstate.inodev_set.i_hash[index];
69785+
69786+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
69787+ match = match->next;
69788+
69789+ return match;
69790+}
69791+
69792+void
69793+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
69794+{
69795+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
69796+ state->inodev_set.i_size);
69797+ struct inodev_entry **curr;
69798+
69799+ entry->prev = NULL;
69800+
69801+ curr = &state->inodev_set.i_hash[index];
69802+ if (*curr != NULL)
69803+ (*curr)->prev = entry;
69804+
69805+ entry->next = *curr;
69806+ *curr = entry;
69807+
69808+ return;
69809+}
69810+
69811+static void
69812+insert_inodev_entry(struct inodev_entry *entry)
69813+{
69814+ __insert_inodev_entry(&running_polstate, entry);
69815+}
69816+
69817+void
69818+insert_acl_obj_label(struct acl_object_label *obj,
69819+ struct acl_subject_label *subj)
69820+{
69821+ unsigned int index =
69822+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
69823+ struct acl_object_label **curr;
69824+
69825+ obj->prev = NULL;
69826+
69827+ curr = &subj->obj_hash[index];
69828+ if (*curr != NULL)
69829+ (*curr)->prev = obj;
69830+
69831+ obj->next = *curr;
69832+ *curr = obj;
69833+
69834+ return;
69835+}
69836+
69837+void
69838+insert_acl_subj_label(struct acl_subject_label *obj,
69839+ struct acl_role_label *role)
69840+{
69841+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
69842+ struct acl_subject_label **curr;
69843+
69844+ obj->prev = NULL;
69845+
69846+ curr = &role->subj_hash[index];
69847+ if (*curr != NULL)
69848+ (*curr)->prev = obj;
69849+
69850+ obj->next = *curr;
69851+ *curr = obj;
69852+
69853+ return;
69854+}
69855+
69856+/* derived from glibc fnmatch() 0: match, 1: no match*/
69857+
69858+static int
69859+glob_match(const char *p, const char *n)
69860+{
69861+ char c;
69862+
69863+ while ((c = *p++) != '\0') {
69864+ switch (c) {
69865+ case '?':
69866+ if (*n == '\0')
69867+ return 1;
69868+ else if (*n == '/')
69869+ return 1;
69870+ break;
69871+ case '\\':
69872+ if (*n != c)
69873+ return 1;
69874+ break;
69875+ case '*':
69876+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
69877+ if (*n == '/')
69878+ return 1;
69879+ else if (c == '?') {
69880+ if (*n == '\0')
69881+ return 1;
69882+ else
69883+ ++n;
69884+ }
69885+ }
69886+ if (c == '\0') {
69887+ return 0;
69888+ } else {
69889+ const char *endp;
69890+
69891+ if ((endp = strchr(n, '/')) == NULL)
69892+ endp = n + strlen(n);
69893+
69894+ if (c == '[') {
69895+ for (--p; n < endp; ++n)
69896+ if (!glob_match(p, n))
69897+ return 0;
69898+ } else if (c == '/') {
69899+ while (*n != '\0' && *n != '/')
69900+ ++n;
69901+ if (*n == '/' && !glob_match(p, n + 1))
69902+ return 0;
69903+ } else {
69904+ for (--p; n < endp; ++n)
69905+ if (*n == c && !glob_match(p, n))
69906+ return 0;
69907+ }
69908+
69909+ return 1;
69910+ }
69911+ case '[':
69912+ {
69913+ int not;
69914+ char cold;
69915+
69916+ if (*n == '\0' || *n == '/')
69917+ return 1;
69918+
69919+ not = (*p == '!' || *p == '^');
69920+ if (not)
69921+ ++p;
69922+
69923+ c = *p++;
69924+ for (;;) {
69925+ unsigned char fn = (unsigned char)*n;
69926+
69927+ if (c == '\0')
69928+ return 1;
69929+ else {
69930+ if (c == fn)
69931+ goto matched;
69932+ cold = c;
69933+ c = *p++;
69934+
69935+ if (c == '-' && *p != ']') {
69936+ unsigned char cend = *p++;
69937+
69938+ if (cend == '\0')
69939+ return 1;
69940+
69941+ if (cold <= fn && fn <= cend)
69942+ goto matched;
69943+
69944+ c = *p++;
69945+ }
69946+ }
69947+
69948+ if (c == ']')
69949+ break;
69950+ }
69951+ if (!not)
69952+ return 1;
69953+ break;
69954+ matched:
69955+ while (c != ']') {
69956+ if (c == '\0')
69957+ return 1;
69958+
69959+ c = *p++;
69960+ }
69961+ if (not)
69962+ return 1;
69963+ }
69964+ break;
69965+ default:
69966+ if (c != *n)
69967+ return 1;
69968+ }
69969+
69970+ ++n;
69971+ }
69972+
69973+ if (*n == '\0')
69974+ return 0;
69975+
69976+ if (*n == '/')
69977+ return 0;
69978+
69979+ return 1;
69980+}
69981+
69982+static struct acl_object_label *
69983+chk_glob_label(struct acl_object_label *globbed,
69984+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
69985+{
69986+ struct acl_object_label *tmp;
69987+
69988+ if (*path == NULL)
69989+ *path = gr_to_filename_nolock(dentry, mnt);
69990+
69991+ tmp = globbed;
69992+
69993+ while (tmp) {
69994+ if (!glob_match(tmp->filename, *path))
69995+ return tmp;
69996+ tmp = tmp->next;
69997+ }
69998+
69999+ return NULL;
70000+}
70001+
70002+static struct acl_object_label *
70003+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70004+ const u64 curr_ino, const dev_t curr_dev,
70005+ const struct acl_subject_label *subj, char **path, const int checkglob)
70006+{
70007+ struct acl_subject_label *tmpsubj;
70008+ struct acl_object_label *retval;
70009+ struct acl_object_label *retval2;
70010+
70011+ tmpsubj = (struct acl_subject_label *) subj;
70012+ read_lock(&gr_inode_lock);
70013+ do {
70014+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70015+ if (retval) {
70016+ if (checkglob && retval->globbed) {
70017+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70018+ if (retval2)
70019+ retval = retval2;
70020+ }
70021+ break;
70022+ }
70023+ } while ((tmpsubj = tmpsubj->parent_subject));
70024+ read_unlock(&gr_inode_lock);
70025+
70026+ return retval;
70027+}
70028+
70029+static __inline__ struct acl_object_label *
70030+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70031+ struct dentry *curr_dentry,
70032+ const struct acl_subject_label *subj, char **path, const int checkglob)
70033+{
70034+ int newglob = checkglob;
70035+ u64 inode;
70036+ dev_t device;
70037+
70038+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70039+ as we don't want a / * rule to match instead of the / object
70040+ don't do this for create lookups that call this function though, since they're looking up
70041+ on the parent and thus need globbing checks on all paths
70042+ */
70043+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70044+ newglob = GR_NO_GLOB;
70045+
70046+ spin_lock(&curr_dentry->d_lock);
70047+ inode = __get_ino(curr_dentry);
70048+ device = __get_dev(curr_dentry);
70049+ spin_unlock(&curr_dentry->d_lock);
70050+
70051+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70052+}
70053+
70054+#ifdef CONFIG_HUGETLBFS
70055+static inline bool
70056+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70057+{
70058+ int i;
70059+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70060+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70061+ return true;
70062+ }
70063+
70064+ return false;
70065+}
70066+#endif
70067+
70068+static struct acl_object_label *
70069+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70070+ const struct acl_subject_label *subj, char *path, const int checkglob)
70071+{
70072+ struct dentry *dentry = (struct dentry *) l_dentry;
70073+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70074+ struct mount *real_mnt = real_mount(mnt);
70075+ struct acl_object_label *retval;
70076+ struct dentry *parent;
70077+
70078+ read_seqlock_excl(&mount_lock);
70079+ write_seqlock(&rename_lock);
70080+
70081+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70082+#ifdef CONFIG_NET
70083+ mnt == sock_mnt ||
70084+#endif
70085+#ifdef CONFIG_HUGETLBFS
70086+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70087+#endif
70088+ /* ignore Eric Biederman */
70089+ IS_PRIVATE(l_dentry->d_inode))) {
70090+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70091+ goto out;
70092+ }
70093+
70094+ for (;;) {
70095+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70096+ break;
70097+
70098+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70099+ if (!mnt_has_parent(real_mnt))
70100+ break;
70101+
70102+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70103+ if (retval != NULL)
70104+ goto out;
70105+
70106+ dentry = real_mnt->mnt_mountpoint;
70107+ real_mnt = real_mnt->mnt_parent;
70108+ mnt = &real_mnt->mnt;
70109+ continue;
70110+ }
70111+
70112+ parent = dentry->d_parent;
70113+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70114+ if (retval != NULL)
70115+ goto out;
70116+
70117+ dentry = parent;
70118+ }
70119+
70120+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70121+
70122+ /* gr_real_root is pinned so we don't have to hold a reference */
70123+ if (retval == NULL)
70124+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70125+out:
70126+ write_sequnlock(&rename_lock);
70127+ read_sequnlock_excl(&mount_lock);
70128+
70129+ BUG_ON(retval == NULL);
70130+
70131+ return retval;
70132+}
70133+
70134+static __inline__ struct acl_object_label *
70135+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70136+ const struct acl_subject_label *subj)
70137+{
70138+ char *path = NULL;
70139+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70140+}
70141+
70142+static __inline__ struct acl_object_label *
70143+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70144+ const struct acl_subject_label *subj)
70145+{
70146+ char *path = NULL;
70147+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70148+}
70149+
70150+static __inline__ struct acl_object_label *
70151+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70152+ const struct acl_subject_label *subj, char *path)
70153+{
70154+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70155+}
70156+
70157+struct acl_subject_label *
70158+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70159+ const struct acl_role_label *role)
70160+{
70161+ struct dentry *dentry = (struct dentry *) l_dentry;
70162+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70163+ struct mount *real_mnt = real_mount(mnt);
70164+ struct acl_subject_label *retval;
70165+ struct dentry *parent;
70166+
70167+ read_seqlock_excl(&mount_lock);
70168+ write_seqlock(&rename_lock);
70169+
70170+ for (;;) {
70171+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70172+ break;
70173+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70174+ if (!mnt_has_parent(real_mnt))
70175+ break;
70176+
70177+ spin_lock(&dentry->d_lock);
70178+ read_lock(&gr_inode_lock);
70179+ retval =
70180+ lookup_acl_subj_label(__get_ino(dentry),
70181+ __get_dev(dentry), role);
70182+ read_unlock(&gr_inode_lock);
70183+ spin_unlock(&dentry->d_lock);
70184+ if (retval != NULL)
70185+ goto out;
70186+
70187+ dentry = real_mnt->mnt_mountpoint;
70188+ real_mnt = real_mnt->mnt_parent;
70189+ mnt = &real_mnt->mnt;
70190+ continue;
70191+ }
70192+
70193+ spin_lock(&dentry->d_lock);
70194+ read_lock(&gr_inode_lock);
70195+ retval = lookup_acl_subj_label(__get_ino(dentry),
70196+ __get_dev(dentry), role);
70197+ read_unlock(&gr_inode_lock);
70198+ parent = dentry->d_parent;
70199+ spin_unlock(&dentry->d_lock);
70200+
70201+ if (retval != NULL)
70202+ goto out;
70203+
70204+ dentry = parent;
70205+ }
70206+
70207+ spin_lock(&dentry->d_lock);
70208+ read_lock(&gr_inode_lock);
70209+ retval = lookup_acl_subj_label(__get_ino(dentry),
70210+ __get_dev(dentry), role);
70211+ read_unlock(&gr_inode_lock);
70212+ spin_unlock(&dentry->d_lock);
70213+
70214+ if (unlikely(retval == NULL)) {
70215+ /* gr_real_root is pinned, we don't need to hold a reference */
70216+ read_lock(&gr_inode_lock);
70217+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70218+ __get_dev(gr_real_root.dentry), role);
70219+ read_unlock(&gr_inode_lock);
70220+ }
70221+out:
70222+ write_sequnlock(&rename_lock);
70223+ read_sequnlock_excl(&mount_lock);
70224+
70225+ BUG_ON(retval == NULL);
70226+
70227+ return retval;
70228+}
70229+
70230+void
70231+assign_special_role(const char *rolename)
70232+{
70233+ struct acl_object_label *obj;
70234+ struct acl_role_label *r;
70235+ struct acl_role_label *assigned = NULL;
70236+ struct task_struct *tsk;
70237+ struct file *filp;
70238+
70239+ FOR_EACH_ROLE_START(r)
70240+ if (!strcmp(rolename, r->rolename) &&
70241+ (r->roletype & GR_ROLE_SPECIAL)) {
70242+ assigned = r;
70243+ break;
70244+ }
70245+ FOR_EACH_ROLE_END(r)
70246+
70247+ if (!assigned)
70248+ return;
70249+
70250+ read_lock(&tasklist_lock);
70251+ read_lock(&grsec_exec_file_lock);
70252+
70253+ tsk = current->real_parent;
70254+ if (tsk == NULL)
70255+ goto out_unlock;
70256+
70257+ filp = tsk->exec_file;
70258+ if (filp == NULL)
70259+ goto out_unlock;
70260+
70261+ tsk->is_writable = 0;
70262+ tsk->inherited = 0;
70263+
70264+ tsk->acl_sp_role = 1;
70265+ tsk->acl_role_id = ++acl_sp_role_value;
70266+ tsk->role = assigned;
70267+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70268+
70269+ /* ignore additional mmap checks for processes that are writable
70270+ by the default ACL */
70271+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70272+ if (unlikely(obj->mode & GR_WRITE))
70273+ tsk->is_writable = 1;
70274+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70275+ if (unlikely(obj->mode & GR_WRITE))
70276+ tsk->is_writable = 1;
70277+
70278+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70279+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70280+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70281+#endif
70282+
70283+out_unlock:
70284+ read_unlock(&grsec_exec_file_lock);
70285+ read_unlock(&tasklist_lock);
70286+ return;
70287+}
70288+
70289+
70290+static void
70291+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70292+{
70293+ struct task_struct *task = current;
70294+ const struct cred *cred = current_cred();
70295+
70296+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70297+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70298+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70299+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70300+
70301+ return;
70302+}
70303+
70304+static void
70305+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70306+{
70307+ struct task_struct *task = current;
70308+ const struct cred *cred = current_cred();
70309+
70310+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70311+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70312+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70313+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70314+
70315+ return;
70316+}
70317+
70318+static void
70319+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70320+{
70321+ struct task_struct *task = current;
70322+ const struct cred *cred = current_cred();
70323+
70324+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70325+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70326+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70327+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70328+
70329+ return;
70330+}
70331+
70332+static void
70333+gr_set_proc_res(struct task_struct *task)
70334+{
70335+ struct acl_subject_label *proc;
70336+ unsigned short i;
70337+
70338+ proc = task->acl;
70339+
70340+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70341+ return;
70342+
70343+ for (i = 0; i < RLIM_NLIMITS; i++) {
70344+ unsigned long rlim_cur, rlim_max;
70345+
70346+ if (!(proc->resmask & (1U << i)))
70347+ continue;
70348+
70349+ rlim_cur = proc->res[i].rlim_cur;
70350+ rlim_max = proc->res[i].rlim_max;
70351+
70352+ if (i == RLIMIT_NOFILE) {
70353+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70354+ if (rlim_cur > saved_sysctl_nr_open)
70355+ rlim_cur = saved_sysctl_nr_open;
70356+ if (rlim_max > saved_sysctl_nr_open)
70357+ rlim_max = saved_sysctl_nr_open;
70358+ }
70359+
70360+ task->signal->rlim[i].rlim_cur = rlim_cur;
70361+ task->signal->rlim[i].rlim_max = rlim_max;
70362+
70363+ if (i == RLIMIT_CPU)
70364+ update_rlimit_cpu(task, rlim_cur);
70365+ }
70366+
70367+ return;
70368+}
70369+
70370+/* both of the below must be called with
70371+ rcu_read_lock();
70372+ read_lock(&tasklist_lock);
70373+ read_lock(&grsec_exec_file_lock);
70374+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70375+*/
70376+
70377+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70378+{
70379+ char *tmpname;
70380+ struct acl_subject_label *tmpsubj;
70381+ struct file *filp;
70382+ struct name_entry *nmatch;
70383+
70384+ filp = task->exec_file;
70385+ if (filp == NULL)
70386+ return NULL;
70387+
70388+ /* the following is to apply the correct subject
70389+ on binaries running when the RBAC system
70390+ is enabled, when the binaries have been
70391+ replaced or deleted since their execution
70392+ -----
70393+ when the RBAC system starts, the inode/dev
70394+ from exec_file will be one the RBAC system
70395+ is unaware of. It only knows the inode/dev
70396+ of the present file on disk, or the absence
70397+ of it.
70398+ */
70399+
70400+ if (filename)
70401+ nmatch = __lookup_name_entry(state, filename);
70402+ else {
70403+ preempt_disable();
70404+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70405+
70406+ nmatch = __lookup_name_entry(state, tmpname);
70407+ preempt_enable();
70408+ }
70409+ tmpsubj = NULL;
70410+ if (nmatch) {
70411+ if (nmatch->deleted)
70412+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70413+ else
70414+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70415+ }
70416+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70417+ then we fall back to a normal lookup based on the binary's ino/dev
70418+ */
70419+ if (tmpsubj == NULL && fallback)
70420+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70421+
70422+ return tmpsubj;
70423+}
70424+
70425+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70426+{
70427+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70428+}
70429+
70430+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70431+{
70432+ struct acl_object_label *obj;
70433+ struct file *filp;
70434+
70435+ filp = task->exec_file;
70436+
70437+ task->acl = subj;
70438+ task->is_writable = 0;
70439+ /* ignore additional mmap checks for processes that are writable
70440+ by the default ACL */
70441+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70442+ if (unlikely(obj->mode & GR_WRITE))
70443+ task->is_writable = 1;
70444+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70445+ if (unlikely(obj->mode & GR_WRITE))
70446+ task->is_writable = 1;
70447+
70448+ gr_set_proc_res(task);
70449+
70450+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70451+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70452+#endif
70453+}
70454+
70455+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70456+{
70457+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70458+}
70459+
70460+__u32
70461+gr_search_file(const struct dentry * dentry, const __u32 mode,
70462+ const struct vfsmount * mnt)
70463+{
70464+ __u32 retval = mode;
70465+ struct acl_subject_label *curracl;
70466+ struct acl_object_label *currobj;
70467+
70468+ if (unlikely(!(gr_status & GR_READY)))
70469+ return (mode & ~GR_AUDITS);
70470+
70471+ curracl = current->acl;
70472+
70473+ currobj = chk_obj_label(dentry, mnt, curracl);
70474+ retval = currobj->mode & mode;
70475+
70476+ /* if we're opening a specified transfer file for writing
70477+ (e.g. /dev/initctl), then transfer our role to init
70478+ */
70479+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70480+ current->role->roletype & GR_ROLE_PERSIST)) {
70481+ struct task_struct *task = init_pid_ns.child_reaper;
70482+
70483+ if (task->role != current->role) {
70484+ struct acl_subject_label *subj;
70485+
70486+ task->acl_sp_role = 0;
70487+ task->acl_role_id = current->acl_role_id;
70488+ task->role = current->role;
70489+ rcu_read_lock();
70490+ read_lock(&grsec_exec_file_lock);
70491+ subj = gr_get_subject_for_task(task, NULL, 1);
70492+ gr_apply_subject_to_task(task, subj);
70493+ read_unlock(&grsec_exec_file_lock);
70494+ rcu_read_unlock();
70495+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70496+ }
70497+ }
70498+
70499+ if (unlikely
70500+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70501+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70502+ __u32 new_mode = mode;
70503+
70504+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70505+
70506+ retval = new_mode;
70507+
70508+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70509+ new_mode |= GR_INHERIT;
70510+
70511+ if (!(mode & GR_NOLEARN))
70512+ gr_log_learn(dentry, mnt, new_mode);
70513+ }
70514+
70515+ return retval;
70516+}
70517+
70518+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70519+ const struct dentry *parent,
70520+ const struct vfsmount *mnt)
70521+{
70522+ struct name_entry *match;
70523+ struct acl_object_label *matchpo;
70524+ struct acl_subject_label *curracl;
70525+ char *path;
70526+
70527+ if (unlikely(!(gr_status & GR_READY)))
70528+ return NULL;
70529+
70530+ preempt_disable();
70531+ path = gr_to_filename_rbac(new_dentry, mnt);
70532+ match = lookup_name_entry_create(path);
70533+
70534+ curracl = current->acl;
70535+
70536+ if (match) {
70537+ read_lock(&gr_inode_lock);
70538+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70539+ read_unlock(&gr_inode_lock);
70540+
70541+ if (matchpo) {
70542+ preempt_enable();
70543+ return matchpo;
70544+ }
70545+ }
70546+
70547+ // lookup parent
70548+
70549+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70550+
70551+ preempt_enable();
70552+ return matchpo;
70553+}
70554+
70555+__u32
70556+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70557+ const struct vfsmount * mnt, const __u32 mode)
70558+{
70559+ struct acl_object_label *matchpo;
70560+ __u32 retval;
70561+
70562+ if (unlikely(!(gr_status & GR_READY)))
70563+ return (mode & ~GR_AUDITS);
70564+
70565+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70566+
70567+ retval = matchpo->mode & mode;
70568+
70569+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70570+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70571+ __u32 new_mode = mode;
70572+
70573+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70574+
70575+ gr_log_learn(new_dentry, mnt, new_mode);
70576+ return new_mode;
70577+ }
70578+
70579+ return retval;
70580+}
70581+
70582+__u32
70583+gr_check_link(const struct dentry * new_dentry,
70584+ const struct dentry * parent_dentry,
70585+ const struct vfsmount * parent_mnt,
70586+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70587+{
70588+ struct acl_object_label *obj;
70589+ __u32 oldmode, newmode;
70590+ __u32 needmode;
70591+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70592+ GR_DELETE | GR_INHERIT;
70593+
70594+ if (unlikely(!(gr_status & GR_READY)))
70595+ return (GR_CREATE | GR_LINK);
70596+
70597+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70598+ oldmode = obj->mode;
70599+
70600+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70601+ newmode = obj->mode;
70602+
70603+ needmode = newmode & checkmodes;
70604+
70605+ // old name for hardlink must have at least the permissions of the new name
70606+ if ((oldmode & needmode) != needmode)
70607+ goto bad;
70608+
70609+ // if old name had restrictions/auditing, make sure the new name does as well
70610+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70611+
70612+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70613+ if (is_privileged_binary(old_dentry))
70614+ needmode |= GR_SETID;
70615+
70616+ if ((newmode & needmode) != needmode)
70617+ goto bad;
70618+
70619+ // enforce minimum permissions
70620+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70621+ return newmode;
70622+bad:
70623+ needmode = oldmode;
70624+ if (is_privileged_binary(old_dentry))
70625+ needmode |= GR_SETID;
70626+
70627+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70628+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70629+ return (GR_CREATE | GR_LINK);
70630+ } else if (newmode & GR_SUPPRESS)
70631+ return GR_SUPPRESS;
70632+ else
70633+ return 0;
70634+}
70635+
70636+int
70637+gr_check_hidden_task(const struct task_struct *task)
70638+{
70639+ if (unlikely(!(gr_status & GR_READY)))
70640+ return 0;
70641+
70642+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70643+ return 1;
70644+
70645+ return 0;
70646+}
70647+
70648+int
70649+gr_check_protected_task(const struct task_struct *task)
70650+{
70651+ if (unlikely(!(gr_status & GR_READY) || !task))
70652+ return 0;
70653+
70654+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70655+ task->acl != current->acl)
70656+ return 1;
70657+
70658+ return 0;
70659+}
70660+
70661+int
70662+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70663+{
70664+ struct task_struct *p;
70665+ int ret = 0;
70666+
70667+ if (unlikely(!(gr_status & GR_READY) || !pid))
70668+ return ret;
70669+
70670+ read_lock(&tasklist_lock);
70671+ do_each_pid_task(pid, type, p) {
70672+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70673+ p->acl != current->acl) {
70674+ ret = 1;
70675+ goto out;
70676+ }
70677+ } while_each_pid_task(pid, type, p);
70678+out:
70679+ read_unlock(&tasklist_lock);
70680+
70681+ return ret;
70682+}
70683+
70684+void
70685+gr_copy_label(struct task_struct *tsk)
70686+{
70687+ struct task_struct *p = current;
70688+
70689+ tsk->inherited = p->inherited;
70690+ tsk->acl_sp_role = 0;
70691+ tsk->acl_role_id = p->acl_role_id;
70692+ tsk->acl = p->acl;
70693+ tsk->role = p->role;
70694+ tsk->signal->used_accept = 0;
70695+ tsk->signal->curr_ip = p->signal->curr_ip;
70696+ tsk->signal->saved_ip = p->signal->saved_ip;
70697+ if (p->exec_file)
70698+ get_file(p->exec_file);
70699+ tsk->exec_file = p->exec_file;
70700+ tsk->is_writable = p->is_writable;
70701+ if (unlikely(p->signal->used_accept)) {
70702+ p->signal->curr_ip = 0;
70703+ p->signal->saved_ip = 0;
70704+ }
70705+
70706+ return;
70707+}
70708+
70709+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
70710+
70711+int
70712+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70713+{
70714+ unsigned int i;
70715+ __u16 num;
70716+ uid_t *uidlist;
70717+ uid_t curuid;
70718+ int realok = 0;
70719+ int effectiveok = 0;
70720+ int fsok = 0;
70721+ uid_t globalreal, globaleffective, globalfs;
70722+
70723+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
70724+ struct user_struct *user;
70725+
70726+ if (!uid_valid(real))
70727+ goto skipit;
70728+
70729+ /* find user based on global namespace */
70730+
70731+ globalreal = GR_GLOBAL_UID(real);
70732+
70733+ user = find_user(make_kuid(&init_user_ns, globalreal));
70734+ if (user == NULL)
70735+ goto skipit;
70736+
70737+ if (gr_process_kernel_setuid_ban(user)) {
70738+ /* for find_user */
70739+ free_uid(user);
70740+ return 1;
70741+ }
70742+
70743+ /* for find_user */
70744+ free_uid(user);
70745+
70746+skipit:
70747+#endif
70748+
70749+ if (unlikely(!(gr_status & GR_READY)))
70750+ return 0;
70751+
70752+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70753+ gr_log_learn_uid_change(real, effective, fs);
70754+
70755+ num = current->acl->user_trans_num;
70756+ uidlist = current->acl->user_transitions;
70757+
70758+ if (uidlist == NULL)
70759+ return 0;
70760+
70761+ if (!uid_valid(real)) {
70762+ realok = 1;
70763+ globalreal = (uid_t)-1;
70764+ } else {
70765+ globalreal = GR_GLOBAL_UID(real);
70766+ }
70767+ if (!uid_valid(effective)) {
70768+ effectiveok = 1;
70769+ globaleffective = (uid_t)-1;
70770+ } else {
70771+ globaleffective = GR_GLOBAL_UID(effective);
70772+ }
70773+ if (!uid_valid(fs)) {
70774+ fsok = 1;
70775+ globalfs = (uid_t)-1;
70776+ } else {
70777+ globalfs = GR_GLOBAL_UID(fs);
70778+ }
70779+
70780+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
70781+ for (i = 0; i < num; i++) {
70782+ curuid = uidlist[i];
70783+ if (globalreal == curuid)
70784+ realok = 1;
70785+ if (globaleffective == curuid)
70786+ effectiveok = 1;
70787+ if (globalfs == curuid)
70788+ fsok = 1;
70789+ }
70790+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
70791+ for (i = 0; i < num; i++) {
70792+ curuid = uidlist[i];
70793+ if (globalreal == curuid)
70794+ break;
70795+ if (globaleffective == curuid)
70796+ break;
70797+ if (globalfs == curuid)
70798+ break;
70799+ }
70800+ /* not in deny list */
70801+ if (i == num) {
70802+ realok = 1;
70803+ effectiveok = 1;
70804+ fsok = 1;
70805+ }
70806+ }
70807+
70808+ if (realok && effectiveok && fsok)
70809+ return 0;
70810+ else {
70811+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70812+ return 1;
70813+ }
70814+}
70815+
70816+int
70817+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70818+{
70819+ unsigned int i;
70820+ __u16 num;
70821+ gid_t *gidlist;
70822+ gid_t curgid;
70823+ int realok = 0;
70824+ int effectiveok = 0;
70825+ int fsok = 0;
70826+ gid_t globalreal, globaleffective, globalfs;
70827+
70828+ if (unlikely(!(gr_status & GR_READY)))
70829+ return 0;
70830+
70831+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70832+ gr_log_learn_gid_change(real, effective, fs);
70833+
70834+ num = current->acl->group_trans_num;
70835+ gidlist = current->acl->group_transitions;
70836+
70837+ if (gidlist == NULL)
70838+ return 0;
70839+
70840+ if (!gid_valid(real)) {
70841+ realok = 1;
70842+ globalreal = (gid_t)-1;
70843+ } else {
70844+ globalreal = GR_GLOBAL_GID(real);
70845+ }
70846+ if (!gid_valid(effective)) {
70847+ effectiveok = 1;
70848+ globaleffective = (gid_t)-1;
70849+ } else {
70850+ globaleffective = GR_GLOBAL_GID(effective);
70851+ }
70852+ if (!gid_valid(fs)) {
70853+ fsok = 1;
70854+ globalfs = (gid_t)-1;
70855+ } else {
70856+ globalfs = GR_GLOBAL_GID(fs);
70857+ }
70858+
70859+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
70860+ for (i = 0; i < num; i++) {
70861+ curgid = gidlist[i];
70862+ if (globalreal == curgid)
70863+ realok = 1;
70864+ if (globaleffective == curgid)
70865+ effectiveok = 1;
70866+ if (globalfs == curgid)
70867+ fsok = 1;
70868+ }
70869+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
70870+ for (i = 0; i < num; i++) {
70871+ curgid = gidlist[i];
70872+ if (globalreal == curgid)
70873+ break;
70874+ if (globaleffective == curgid)
70875+ break;
70876+ if (globalfs == curgid)
70877+ break;
70878+ }
70879+ /* not in deny list */
70880+ if (i == num) {
70881+ realok = 1;
70882+ effectiveok = 1;
70883+ fsok = 1;
70884+ }
70885+ }
70886+
70887+ if (realok && effectiveok && fsok)
70888+ return 0;
70889+ else {
70890+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70891+ return 1;
70892+ }
70893+}
70894+
70895+extern int gr_acl_is_capable(const int cap);
70896+
70897+void
70898+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
70899+{
70900+ struct acl_role_label *role = task->role;
70901+ struct acl_role_label *origrole = role;
70902+ struct acl_subject_label *subj = NULL;
70903+ struct acl_object_label *obj;
70904+ struct file *filp;
70905+ uid_t uid;
70906+ gid_t gid;
70907+
70908+ if (unlikely(!(gr_status & GR_READY)))
70909+ return;
70910+
70911+ uid = GR_GLOBAL_UID(kuid);
70912+ gid = GR_GLOBAL_GID(kgid);
70913+
70914+ filp = task->exec_file;
70915+
70916+ /* kernel process, we'll give them the kernel role */
70917+ if (unlikely(!filp)) {
70918+ task->role = running_polstate.kernel_role;
70919+ task->acl = running_polstate.kernel_role->root_label;
70920+ return;
70921+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
70922+ /* save the current ip at time of role lookup so that the proper
70923+ IP will be learned for role_allowed_ip */
70924+ task->signal->saved_ip = task->signal->curr_ip;
70925+ role = lookup_acl_role_label(task, uid, gid);
70926+ }
70927+
70928+ /* don't change the role if we're not a privileged process */
70929+ if (role && task->role != role &&
70930+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
70931+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
70932+ return;
70933+
70934+ task->role = role;
70935+
70936+ if (task->inherited) {
70937+ /* if we reached our subject through inheritance, then first see
70938+ if there's a subject of the same name in the new role that has
70939+ an object that would result in the same inherited subject
70940+ */
70941+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
70942+ if (subj) {
70943+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
70944+ if (!(obj->mode & GR_INHERIT))
70945+ subj = NULL;
70946+ }
70947+
70948+ }
70949+ if (subj == NULL) {
70950+ /* otherwise:
70951+ perform subject lookup in possibly new role
70952+ we can use this result below in the case where role == task->role
70953+ */
70954+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
70955+ }
70956+
70957+ /* if we changed uid/gid, but result in the same role
70958+ and are using inheritance, don't lose the inherited subject
70959+ if current subject is other than what normal lookup
70960+ would result in, we arrived via inheritance, don't
70961+ lose subject
70962+ */
70963+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
70964+ (subj == task->acl)))
70965+ task->acl = subj;
70966+
70967+ /* leave task->inherited unaffected */
70968+
70969+ task->is_writable = 0;
70970+
70971+ /* ignore additional mmap checks for processes that are writable
70972+ by the default ACL */
70973+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70974+ if (unlikely(obj->mode & GR_WRITE))
70975+ task->is_writable = 1;
70976+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70977+ if (unlikely(obj->mode & GR_WRITE))
70978+ task->is_writable = 1;
70979+
70980+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70981+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70982+#endif
70983+
70984+ gr_set_proc_res(task);
70985+
70986+ return;
70987+}
70988+
70989+int
70990+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
70991+ const int unsafe_flags)
70992+{
70993+ struct task_struct *task = current;
70994+ struct acl_subject_label *newacl;
70995+ struct acl_object_label *obj;
70996+ __u32 retmode;
70997+
70998+ if (unlikely(!(gr_status & GR_READY)))
70999+ return 0;
71000+
71001+ newacl = chk_subj_label(dentry, mnt, task->role);
71002+
71003+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
71004+ did an exec
71005+ */
71006+ rcu_read_lock();
71007+ read_lock(&tasklist_lock);
71008+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71009+ (task->parent->acl->mode & GR_POVERRIDE))) {
71010+ read_unlock(&tasklist_lock);
71011+ rcu_read_unlock();
71012+ goto skip_check;
71013+ }
71014+ read_unlock(&tasklist_lock);
71015+ rcu_read_unlock();
71016+
71017+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71018+ !(task->role->roletype & GR_ROLE_GOD) &&
71019+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71020+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71021+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71022+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71023+ else
71024+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71025+ return -EACCES;
71026+ }
71027+
71028+skip_check:
71029+
71030+ obj = chk_obj_label(dentry, mnt, task->acl);
71031+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71032+
71033+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71034+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71035+ if (obj->nested)
71036+ task->acl = obj->nested;
71037+ else
71038+ task->acl = newacl;
71039+ task->inherited = 0;
71040+ } else {
71041+ task->inherited = 1;
71042+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71043+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71044+ }
71045+
71046+ task->is_writable = 0;
71047+
71048+ /* ignore additional mmap checks for processes that are writable
71049+ by the default ACL */
71050+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71051+ if (unlikely(obj->mode & GR_WRITE))
71052+ task->is_writable = 1;
71053+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71054+ if (unlikely(obj->mode & GR_WRITE))
71055+ task->is_writable = 1;
71056+
71057+ gr_set_proc_res(task);
71058+
71059+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71060+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71061+#endif
71062+ return 0;
71063+}
71064+
71065+/* always called with valid inodev ptr */
71066+static void
71067+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71068+{
71069+ struct acl_object_label *matchpo;
71070+ struct acl_subject_label *matchps;
71071+ struct acl_subject_label *subj;
71072+ struct acl_role_label *role;
71073+ unsigned int x;
71074+
71075+ FOR_EACH_ROLE_START(role)
71076+ FOR_EACH_SUBJECT_START(role, subj, x)
71077+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71078+ matchpo->mode |= GR_DELETED;
71079+ FOR_EACH_SUBJECT_END(subj,x)
71080+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71081+ /* nested subjects aren't in the role's subj_hash table */
71082+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71083+ matchpo->mode |= GR_DELETED;
71084+ FOR_EACH_NESTED_SUBJECT_END(subj)
71085+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71086+ matchps->mode |= GR_DELETED;
71087+ FOR_EACH_ROLE_END(role)
71088+
71089+ inodev->nentry->deleted = 1;
71090+
71091+ return;
71092+}
71093+
71094+void
71095+gr_handle_delete(const u64 ino, const dev_t dev)
71096+{
71097+ struct inodev_entry *inodev;
71098+
71099+ if (unlikely(!(gr_status & GR_READY)))
71100+ return;
71101+
71102+ write_lock(&gr_inode_lock);
71103+ inodev = lookup_inodev_entry(ino, dev);
71104+ if (inodev != NULL)
71105+ do_handle_delete(inodev, ino, dev);
71106+ write_unlock(&gr_inode_lock);
71107+
71108+ return;
71109+}
71110+
71111+static void
71112+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71113+ const u64 newinode, const dev_t newdevice,
71114+ struct acl_subject_label *subj)
71115+{
71116+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71117+ struct acl_object_label *match;
71118+
71119+ match = subj->obj_hash[index];
71120+
71121+ while (match && (match->inode != oldinode ||
71122+ match->device != olddevice ||
71123+ !(match->mode & GR_DELETED)))
71124+ match = match->next;
71125+
71126+ if (match && (match->inode == oldinode)
71127+ && (match->device == olddevice)
71128+ && (match->mode & GR_DELETED)) {
71129+ if (match->prev == NULL) {
71130+ subj->obj_hash[index] = match->next;
71131+ if (match->next != NULL)
71132+ match->next->prev = NULL;
71133+ } else {
71134+ match->prev->next = match->next;
71135+ if (match->next != NULL)
71136+ match->next->prev = match->prev;
71137+ }
71138+ match->prev = NULL;
71139+ match->next = NULL;
71140+ match->inode = newinode;
71141+ match->device = newdevice;
71142+ match->mode &= ~GR_DELETED;
71143+
71144+ insert_acl_obj_label(match, subj);
71145+ }
71146+
71147+ return;
71148+}
71149+
71150+static void
71151+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71152+ const u64 newinode, const dev_t newdevice,
71153+ struct acl_role_label *role)
71154+{
71155+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71156+ struct acl_subject_label *match;
71157+
71158+ match = role->subj_hash[index];
71159+
71160+ while (match && (match->inode != oldinode ||
71161+ match->device != olddevice ||
71162+ !(match->mode & GR_DELETED)))
71163+ match = match->next;
71164+
71165+ if (match && (match->inode == oldinode)
71166+ && (match->device == olddevice)
71167+ && (match->mode & GR_DELETED)) {
71168+ if (match->prev == NULL) {
71169+ role->subj_hash[index] = match->next;
71170+ if (match->next != NULL)
71171+ match->next->prev = NULL;
71172+ } else {
71173+ match->prev->next = match->next;
71174+ if (match->next != NULL)
71175+ match->next->prev = match->prev;
71176+ }
71177+ match->prev = NULL;
71178+ match->next = NULL;
71179+ match->inode = newinode;
71180+ match->device = newdevice;
71181+ match->mode &= ~GR_DELETED;
71182+
71183+ insert_acl_subj_label(match, role);
71184+ }
71185+
71186+ return;
71187+}
71188+
71189+static void
71190+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71191+ const u64 newinode, const dev_t newdevice)
71192+{
71193+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71194+ struct inodev_entry *match;
71195+
71196+ match = running_polstate.inodev_set.i_hash[index];
71197+
71198+ while (match && (match->nentry->inode != oldinode ||
71199+ match->nentry->device != olddevice || !match->nentry->deleted))
71200+ match = match->next;
71201+
71202+ if (match && (match->nentry->inode == oldinode)
71203+ && (match->nentry->device == olddevice) &&
71204+ match->nentry->deleted) {
71205+ if (match->prev == NULL) {
71206+ running_polstate.inodev_set.i_hash[index] = match->next;
71207+ if (match->next != NULL)
71208+ match->next->prev = NULL;
71209+ } else {
71210+ match->prev->next = match->next;
71211+ if (match->next != NULL)
71212+ match->next->prev = match->prev;
71213+ }
71214+ match->prev = NULL;
71215+ match->next = NULL;
71216+ match->nentry->inode = newinode;
71217+ match->nentry->device = newdevice;
71218+ match->nentry->deleted = 0;
71219+
71220+ insert_inodev_entry(match);
71221+ }
71222+
71223+ return;
71224+}
71225+
71226+static void
71227+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71228+{
71229+ struct acl_subject_label *subj;
71230+ struct acl_role_label *role;
71231+ unsigned int x;
71232+
71233+ FOR_EACH_ROLE_START(role)
71234+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71235+
71236+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71237+ if ((subj->inode == ino) && (subj->device == dev)) {
71238+ subj->inode = ino;
71239+ subj->device = dev;
71240+ }
71241+ /* nested subjects aren't in the role's subj_hash table */
71242+ update_acl_obj_label(matchn->inode, matchn->device,
71243+ ino, dev, subj);
71244+ FOR_EACH_NESTED_SUBJECT_END(subj)
71245+ FOR_EACH_SUBJECT_START(role, subj, x)
71246+ update_acl_obj_label(matchn->inode, matchn->device,
71247+ ino, dev, subj);
71248+ FOR_EACH_SUBJECT_END(subj,x)
71249+ FOR_EACH_ROLE_END(role)
71250+
71251+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71252+
71253+ return;
71254+}
71255+
71256+static void
71257+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71258+ const struct vfsmount *mnt)
71259+{
71260+ u64 ino = __get_ino(dentry);
71261+ dev_t dev = __get_dev(dentry);
71262+
71263+ __do_handle_create(matchn, ino, dev);
71264+
71265+ return;
71266+}
71267+
71268+void
71269+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71270+{
71271+ struct name_entry *matchn;
71272+
71273+ if (unlikely(!(gr_status & GR_READY)))
71274+ return;
71275+
71276+ preempt_disable();
71277+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71278+
71279+ if (unlikely((unsigned long)matchn)) {
71280+ write_lock(&gr_inode_lock);
71281+ do_handle_create(matchn, dentry, mnt);
71282+ write_unlock(&gr_inode_lock);
71283+ }
71284+ preempt_enable();
71285+
71286+ return;
71287+}
71288+
71289+void
71290+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71291+{
71292+ struct name_entry *matchn;
71293+
71294+ if (unlikely(!(gr_status & GR_READY)))
71295+ return;
71296+
71297+ preempt_disable();
71298+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71299+
71300+ if (unlikely((unsigned long)matchn)) {
71301+ write_lock(&gr_inode_lock);
71302+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71303+ write_unlock(&gr_inode_lock);
71304+ }
71305+ preempt_enable();
71306+
71307+ return;
71308+}
71309+
71310+void
71311+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71312+ struct dentry *old_dentry,
71313+ struct dentry *new_dentry,
71314+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71315+{
71316+ struct name_entry *matchn;
71317+ struct name_entry *matchn2 = NULL;
71318+ struct inodev_entry *inodev;
71319+ struct inode *inode = new_dentry->d_inode;
71320+ u64 old_ino = __get_ino(old_dentry);
71321+ dev_t old_dev = __get_dev(old_dentry);
71322+ unsigned int exchange = flags & RENAME_EXCHANGE;
71323+
71324+ /* vfs_rename swaps the name and parent link for old_dentry and
71325+ new_dentry
71326+ at this point, old_dentry has the new name, parent link, and inode
71327+ for the renamed file
71328+ if a file is being replaced by a rename, new_dentry has the inode
71329+ and name for the replaced file
71330+ */
71331+
71332+ if (unlikely(!(gr_status & GR_READY)))
71333+ return;
71334+
71335+ preempt_disable();
71336+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71337+
71338+ /* exchange cases:
71339+ a filename exists for the source, but not dest
71340+ do a recreate on source
71341+ a filename exists for the dest, but not source
71342+ do a recreate on dest
71343+ a filename exists for both source and dest
71344+ delete source and dest, then create source and dest
71345+ a filename exists for neither source nor dest
71346+ no updates needed
71347+
71348+ the name entry lookups get us the old inode/dev associated with
71349+ each name, so do the deletes first (if possible) so that when
71350+ we do the create, we pick up on the right entries
71351+ */
71352+
71353+ if (exchange)
71354+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71355+
71356+ /* we wouldn't have to check d_inode if it weren't for
71357+ NFS silly-renaming
71358+ */
71359+
71360+ write_lock(&gr_inode_lock);
71361+ if (unlikely((replace || exchange) && inode)) {
71362+ u64 new_ino = __get_ino(new_dentry);
71363+ dev_t new_dev = __get_dev(new_dentry);
71364+
71365+ inodev = lookup_inodev_entry(new_ino, new_dev);
71366+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71367+ do_handle_delete(inodev, new_ino, new_dev);
71368+ }
71369+
71370+ inodev = lookup_inodev_entry(old_ino, old_dev);
71371+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71372+ do_handle_delete(inodev, old_ino, old_dev);
71373+
71374+ if (unlikely(matchn != NULL))
71375+ do_handle_create(matchn, old_dentry, mnt);
71376+
71377+ if (unlikely(matchn2 != NULL))
71378+ do_handle_create(matchn2, new_dentry, mnt);
71379+
71380+ write_unlock(&gr_inode_lock);
71381+ preempt_enable();
71382+
71383+ return;
71384+}
71385+
71386+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71387+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71388+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71389+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71390+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71391+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71392+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71393+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71394+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71395+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71396+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71397+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71398+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71399+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71400+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71401+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71402+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71403+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71404+};
71405+
71406+void
71407+gr_learn_resource(const struct task_struct *task,
71408+ const int res, const unsigned long wanted, const int gt)
71409+{
71410+ struct acl_subject_label *acl;
71411+ const struct cred *cred;
71412+
71413+ if (unlikely((gr_status & GR_READY) &&
71414+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71415+ goto skip_reslog;
71416+
71417+ gr_log_resource(task, res, wanted, gt);
71418+skip_reslog:
71419+
71420+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71421+ return;
71422+
71423+ acl = task->acl;
71424+
71425+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71426+ !(acl->resmask & (1U << (unsigned short) res))))
71427+ return;
71428+
71429+ if (wanted >= acl->res[res].rlim_cur) {
71430+ unsigned long res_add;
71431+
71432+ res_add = wanted + res_learn_bumps[res];
71433+
71434+ acl->res[res].rlim_cur = res_add;
71435+
71436+ if (wanted > acl->res[res].rlim_max)
71437+ acl->res[res].rlim_max = res_add;
71438+
71439+ /* only log the subject filename, since resource logging is supported for
71440+ single-subject learning only */
71441+ rcu_read_lock();
71442+ cred = __task_cred(task);
71443+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71444+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71445+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71446+ "", (unsigned long) res, &task->signal->saved_ip);
71447+ rcu_read_unlock();
71448+ }
71449+
71450+ return;
71451+}
71452+EXPORT_SYMBOL_GPL(gr_learn_resource);
71453+#endif
71454+
71455+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71456+void
71457+pax_set_initial_flags(struct linux_binprm *bprm)
71458+{
71459+ struct task_struct *task = current;
71460+ struct acl_subject_label *proc;
71461+ unsigned long flags;
71462+
71463+ if (unlikely(!(gr_status & GR_READY)))
71464+ return;
71465+
71466+ flags = pax_get_flags(task);
71467+
71468+ proc = task->acl;
71469+
71470+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71471+ flags &= ~MF_PAX_PAGEEXEC;
71472+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71473+ flags &= ~MF_PAX_SEGMEXEC;
71474+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71475+ flags &= ~MF_PAX_RANDMMAP;
71476+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71477+ flags &= ~MF_PAX_EMUTRAMP;
71478+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71479+ flags &= ~MF_PAX_MPROTECT;
71480+
71481+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71482+ flags |= MF_PAX_PAGEEXEC;
71483+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71484+ flags |= MF_PAX_SEGMEXEC;
71485+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71486+ flags |= MF_PAX_RANDMMAP;
71487+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71488+ flags |= MF_PAX_EMUTRAMP;
71489+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71490+ flags |= MF_PAX_MPROTECT;
71491+
71492+ pax_set_flags(task, flags);
71493+
71494+ return;
71495+}
71496+#endif
71497+
71498+int
71499+gr_handle_proc_ptrace(struct task_struct *task)
71500+{
71501+ struct file *filp;
71502+ struct task_struct *tmp = task;
71503+ struct task_struct *curtemp = current;
71504+ __u32 retmode;
71505+
71506+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71507+ if (unlikely(!(gr_status & GR_READY)))
71508+ return 0;
71509+#endif
71510+
71511+ read_lock(&tasklist_lock);
71512+ read_lock(&grsec_exec_file_lock);
71513+ filp = task->exec_file;
71514+
71515+ while (task_pid_nr(tmp) > 0) {
71516+ if (tmp == curtemp)
71517+ break;
71518+ tmp = tmp->real_parent;
71519+ }
71520+
71521+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71522+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71523+ read_unlock(&grsec_exec_file_lock);
71524+ read_unlock(&tasklist_lock);
71525+ return 1;
71526+ }
71527+
71528+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71529+ if (!(gr_status & GR_READY)) {
71530+ read_unlock(&grsec_exec_file_lock);
71531+ read_unlock(&tasklist_lock);
71532+ return 0;
71533+ }
71534+#endif
71535+
71536+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71537+ read_unlock(&grsec_exec_file_lock);
71538+ read_unlock(&tasklist_lock);
71539+
71540+ if (retmode & GR_NOPTRACE)
71541+ return 1;
71542+
71543+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71544+ && (current->acl != task->acl || (current->acl != current->role->root_label
71545+ && task_pid_nr(current) != task_pid_nr(task))))
71546+ return 1;
71547+
71548+ return 0;
71549+}
71550+
71551+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71552+{
71553+ if (unlikely(!(gr_status & GR_READY)))
71554+ return;
71555+
71556+ if (!(current->role->roletype & GR_ROLE_GOD))
71557+ return;
71558+
71559+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71560+ p->role->rolename, gr_task_roletype_to_char(p),
71561+ p->acl->filename);
71562+}
71563+
71564+int
71565+gr_handle_ptrace(struct task_struct *task, const long request)
71566+{
71567+ struct task_struct *tmp = task;
71568+ struct task_struct *curtemp = current;
71569+ __u32 retmode;
71570+
71571+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71572+ if (unlikely(!(gr_status & GR_READY)))
71573+ return 0;
71574+#endif
71575+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71576+ read_lock(&tasklist_lock);
71577+ while (task_pid_nr(tmp) > 0) {
71578+ if (tmp == curtemp)
71579+ break;
71580+ tmp = tmp->real_parent;
71581+ }
71582+
71583+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71584+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71585+ read_unlock(&tasklist_lock);
71586+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71587+ return 1;
71588+ }
71589+ read_unlock(&tasklist_lock);
71590+ }
71591+
71592+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71593+ if (!(gr_status & GR_READY))
71594+ return 0;
71595+#endif
71596+
71597+ read_lock(&grsec_exec_file_lock);
71598+ if (unlikely(!task->exec_file)) {
71599+ read_unlock(&grsec_exec_file_lock);
71600+ return 0;
71601+ }
71602+
71603+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71604+ read_unlock(&grsec_exec_file_lock);
71605+
71606+ if (retmode & GR_NOPTRACE) {
71607+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71608+ return 1;
71609+ }
71610+
71611+ if (retmode & GR_PTRACERD) {
71612+ switch (request) {
71613+ case PTRACE_SEIZE:
71614+ case PTRACE_POKETEXT:
71615+ case PTRACE_POKEDATA:
71616+ case PTRACE_POKEUSR:
71617+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71618+ case PTRACE_SETREGS:
71619+ case PTRACE_SETFPREGS:
71620+#endif
71621+#ifdef CONFIG_X86
71622+ case PTRACE_SETFPXREGS:
71623+#endif
71624+#ifdef CONFIG_ALTIVEC
71625+ case PTRACE_SETVRREGS:
71626+#endif
71627+ return 1;
71628+ default:
71629+ return 0;
71630+ }
71631+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71632+ !(current->role->roletype & GR_ROLE_GOD) &&
71633+ (current->acl != task->acl)) {
71634+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71635+ return 1;
71636+ }
71637+
71638+ return 0;
71639+}
71640+
71641+static int is_writable_mmap(const struct file *filp)
71642+{
71643+ struct task_struct *task = current;
71644+ struct acl_object_label *obj, *obj2;
71645+
71646+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71647+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71648+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71649+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71650+ task->role->root_label);
71651+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71652+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71653+ return 1;
71654+ }
71655+ }
71656+ return 0;
71657+}
71658+
71659+int
71660+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71661+{
71662+ __u32 mode;
71663+
71664+ if (unlikely(!file || !(prot & PROT_EXEC)))
71665+ return 1;
71666+
71667+ if (is_writable_mmap(file))
71668+ return 0;
71669+
71670+ mode =
71671+ gr_search_file(file->f_path.dentry,
71672+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71673+ file->f_path.mnt);
71674+
71675+ if (!gr_tpe_allow(file))
71676+ return 0;
71677+
71678+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71679+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71680+ return 0;
71681+ } else if (unlikely(!(mode & GR_EXEC))) {
71682+ return 0;
71683+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71684+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71685+ return 1;
71686+ }
71687+
71688+ return 1;
71689+}
71690+
71691+int
71692+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71693+{
71694+ __u32 mode;
71695+
71696+ if (unlikely(!file || !(prot & PROT_EXEC)))
71697+ return 1;
71698+
71699+ if (is_writable_mmap(file))
71700+ return 0;
71701+
71702+ mode =
71703+ gr_search_file(file->f_path.dentry,
71704+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71705+ file->f_path.mnt);
71706+
71707+ if (!gr_tpe_allow(file))
71708+ return 0;
71709+
71710+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71711+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71712+ return 0;
71713+ } else if (unlikely(!(mode & GR_EXEC))) {
71714+ return 0;
71715+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71716+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71717+ return 1;
71718+ }
71719+
71720+ return 1;
71721+}
71722+
71723+void
71724+gr_acl_handle_psacct(struct task_struct *task, const long code)
71725+{
71726+ unsigned long runtime, cputime;
71727+ cputime_t utime, stime;
71728+ unsigned int wday, cday;
71729+ __u8 whr, chr;
71730+ __u8 wmin, cmin;
71731+ __u8 wsec, csec;
71732+ struct timespec curtime, starttime;
71733+
71734+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
71735+ !(task->acl->mode & GR_PROCACCT)))
71736+ return;
71737+
71738+ curtime = ns_to_timespec(ktime_get_ns());
71739+ starttime = ns_to_timespec(task->start_time);
71740+ runtime = curtime.tv_sec - starttime.tv_sec;
71741+ wday = runtime / (60 * 60 * 24);
71742+ runtime -= wday * (60 * 60 * 24);
71743+ whr = runtime / (60 * 60);
71744+ runtime -= whr * (60 * 60);
71745+ wmin = runtime / 60;
71746+ runtime -= wmin * 60;
71747+ wsec = runtime;
71748+
71749+ task_cputime(task, &utime, &stime);
71750+ cputime = cputime_to_secs(utime + stime);
71751+ cday = cputime / (60 * 60 * 24);
71752+ cputime -= cday * (60 * 60 * 24);
71753+ chr = cputime / (60 * 60);
71754+ cputime -= chr * (60 * 60);
71755+ cmin = cputime / 60;
71756+ cputime -= cmin * 60;
71757+ csec = cputime;
71758+
71759+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
71760+
71761+ return;
71762+}
71763+
71764+#ifdef CONFIG_TASKSTATS
71765+int gr_is_taskstats_denied(int pid)
71766+{
71767+ struct task_struct *task;
71768+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71769+ const struct cred *cred;
71770+#endif
71771+ int ret = 0;
71772+
71773+ /* restrict taskstats viewing to un-chrooted root users
71774+ who have the 'view' subject flag if the RBAC system is enabled
71775+ */
71776+
71777+ rcu_read_lock();
71778+ read_lock(&tasklist_lock);
71779+ task = find_task_by_vpid(pid);
71780+ if (task) {
71781+#ifdef CONFIG_GRKERNSEC_CHROOT
71782+ if (proc_is_chrooted(task))
71783+ ret = -EACCES;
71784+#endif
71785+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71786+ cred = __task_cred(task);
71787+#ifdef CONFIG_GRKERNSEC_PROC_USER
71788+ if (gr_is_global_nonroot(cred->uid))
71789+ ret = -EACCES;
71790+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71791+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
71792+ ret = -EACCES;
71793+#endif
71794+#endif
71795+ if (gr_status & GR_READY) {
71796+ if (!(task->acl->mode & GR_VIEW))
71797+ ret = -EACCES;
71798+ }
71799+ } else
71800+ ret = -ENOENT;
71801+
71802+ read_unlock(&tasklist_lock);
71803+ rcu_read_unlock();
71804+
71805+ return ret;
71806+}
71807+#endif
71808+
71809+/* AUXV entries are filled via a descendant of search_binary_handler
71810+ after we've already applied the subject for the target
71811+*/
71812+int gr_acl_enable_at_secure(void)
71813+{
71814+ if (unlikely(!(gr_status & GR_READY)))
71815+ return 0;
71816+
71817+ if (current->acl->mode & GR_ATSECURE)
71818+ return 1;
71819+
71820+ return 0;
71821+}
71822+
71823+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
71824+{
71825+ struct task_struct *task = current;
71826+ struct dentry *dentry = file->f_path.dentry;
71827+ struct vfsmount *mnt = file->f_path.mnt;
71828+ struct acl_object_label *obj, *tmp;
71829+ struct acl_subject_label *subj;
71830+ unsigned int bufsize;
71831+ int is_not_root;
71832+ char *path;
71833+ dev_t dev = __get_dev(dentry);
71834+
71835+ if (unlikely(!(gr_status & GR_READY)))
71836+ return 1;
71837+
71838+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71839+ return 1;
71840+
71841+ /* ignore Eric Biederman */
71842+ if (IS_PRIVATE(dentry->d_inode))
71843+ return 1;
71844+
71845+ subj = task->acl;
71846+ read_lock(&gr_inode_lock);
71847+ do {
71848+ obj = lookup_acl_obj_label(ino, dev, subj);
71849+ if (obj != NULL) {
71850+ read_unlock(&gr_inode_lock);
71851+ return (obj->mode & GR_FIND) ? 1 : 0;
71852+ }
71853+ } while ((subj = subj->parent_subject));
71854+ read_unlock(&gr_inode_lock);
71855+
71856+ /* this is purely an optimization since we're looking for an object
71857+ for the directory we're doing a readdir on
71858+ if it's possible for any globbed object to match the entry we're
71859+ filling into the directory, then the object we find here will be
71860+ an anchor point with attached globbed objects
71861+ */
71862+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
71863+ if (obj->globbed == NULL)
71864+ return (obj->mode & GR_FIND) ? 1 : 0;
71865+
71866+ is_not_root = ((obj->filename[0] == '/') &&
71867+ (obj->filename[1] == '\0')) ? 0 : 1;
71868+ bufsize = PAGE_SIZE - namelen - is_not_root;
71869+
71870+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
71871+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
71872+ return 1;
71873+
71874+ preempt_disable();
71875+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71876+ bufsize);
71877+
71878+ bufsize = strlen(path);
71879+
71880+ /* if base is "/", don't append an additional slash */
71881+ if (is_not_root)
71882+ *(path + bufsize) = '/';
71883+ memcpy(path + bufsize + is_not_root, name, namelen);
71884+ *(path + bufsize + namelen + is_not_root) = '\0';
71885+
71886+ tmp = obj->globbed;
71887+ while (tmp) {
71888+ if (!glob_match(tmp->filename, path)) {
71889+ preempt_enable();
71890+ return (tmp->mode & GR_FIND) ? 1 : 0;
71891+ }
71892+ tmp = tmp->next;
71893+ }
71894+ preempt_enable();
71895+ return (obj->mode & GR_FIND) ? 1 : 0;
71896+}
71897+
71898+void gr_put_exec_file(struct task_struct *task)
71899+{
71900+ struct file *filp;
71901+
71902+ write_lock(&grsec_exec_file_lock);
71903+ filp = task->exec_file;
71904+ task->exec_file = NULL;
71905+ write_unlock(&grsec_exec_file_lock);
71906+
71907+ if (filp)
71908+ fput(filp);
71909+
71910+ return;
71911+}
71912+
71913+
71914+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
71915+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
71916+#endif
71917+#ifdef CONFIG_SECURITY
71918+EXPORT_SYMBOL_GPL(gr_check_user_change);
71919+EXPORT_SYMBOL_GPL(gr_check_group_change);
71920+#endif
71921+
71922diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
71923new file mode 100644
71924index 0000000..18ffbbd
71925--- /dev/null
71926+++ b/grsecurity/gracl_alloc.c
71927@@ -0,0 +1,105 @@
71928+#include <linux/kernel.h>
71929+#include <linux/mm.h>
71930+#include <linux/slab.h>
71931+#include <linux/vmalloc.h>
71932+#include <linux/gracl.h>
71933+#include <linux/grsecurity.h>
71934+
71935+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
71936+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
71937+
71938+static __inline__ int
71939+alloc_pop(void)
71940+{
71941+ if (current_alloc_state->alloc_stack_next == 1)
71942+ return 0;
71943+
71944+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
71945+
71946+ current_alloc_state->alloc_stack_next--;
71947+
71948+ return 1;
71949+}
71950+
71951+static __inline__ int
71952+alloc_push(void *buf)
71953+{
71954+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
71955+ return 1;
71956+
71957+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
71958+
71959+ current_alloc_state->alloc_stack_next++;
71960+
71961+ return 0;
71962+}
71963+
71964+void *
71965+acl_alloc(unsigned long len)
71966+{
71967+ void *ret = NULL;
71968+
71969+ if (!len || len > PAGE_SIZE)
71970+ goto out;
71971+
71972+ ret = kmalloc(len, GFP_KERNEL);
71973+
71974+ if (ret) {
71975+ if (alloc_push(ret)) {
71976+ kfree(ret);
71977+ ret = NULL;
71978+ }
71979+ }
71980+
71981+out:
71982+ return ret;
71983+}
71984+
71985+void *
71986+acl_alloc_num(unsigned long num, unsigned long len)
71987+{
71988+ if (!len || (num > (PAGE_SIZE / len)))
71989+ return NULL;
71990+
71991+ return acl_alloc(num * len);
71992+}
71993+
71994+void
71995+acl_free_all(void)
71996+{
71997+ if (!current_alloc_state->alloc_stack)
71998+ return;
71999+
72000+ while (alloc_pop()) ;
72001+
72002+ if (current_alloc_state->alloc_stack) {
72003+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72004+ kfree(current_alloc_state->alloc_stack);
72005+ else
72006+ vfree(current_alloc_state->alloc_stack);
72007+ }
72008+
72009+ current_alloc_state->alloc_stack = NULL;
72010+ current_alloc_state->alloc_stack_size = 1;
72011+ current_alloc_state->alloc_stack_next = 1;
72012+
72013+ return;
72014+}
72015+
72016+int
72017+acl_alloc_stack_init(unsigned long size)
72018+{
72019+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72020+ current_alloc_state->alloc_stack =
72021+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72022+ else
72023+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72024+
72025+ current_alloc_state->alloc_stack_size = size;
72026+ current_alloc_state->alloc_stack_next = 1;
72027+
72028+ if (!current_alloc_state->alloc_stack)
72029+ return 0;
72030+ else
72031+ return 1;
72032+}
72033diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72034new file mode 100644
72035index 0000000..1a94c11
72036--- /dev/null
72037+++ b/grsecurity/gracl_cap.c
72038@@ -0,0 +1,127 @@
72039+#include <linux/kernel.h>
72040+#include <linux/module.h>
72041+#include <linux/sched.h>
72042+#include <linux/gracl.h>
72043+#include <linux/grsecurity.h>
72044+#include <linux/grinternal.h>
72045+
72046+extern const char *captab_log[];
72047+extern int captab_log_entries;
72048+
72049+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72050+{
72051+ struct acl_subject_label *curracl;
72052+
72053+ if (!gr_acl_is_enabled())
72054+ return 1;
72055+
72056+ curracl = task->acl;
72057+
72058+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72059+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72060+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72061+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72062+ gr_to_filename(task->exec_file->f_path.dentry,
72063+ task->exec_file->f_path.mnt) : curracl->filename,
72064+ curracl->filename, 0UL,
72065+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72066+ return 1;
72067+ }
72068+
72069+ return 0;
72070+}
72071+
72072+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72073+{
72074+ struct acl_subject_label *curracl;
72075+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72076+ kernel_cap_t cap_audit = __cap_empty_set;
72077+
72078+ if (!gr_acl_is_enabled())
72079+ return 1;
72080+
72081+ curracl = task->acl;
72082+
72083+ cap_drop = curracl->cap_lower;
72084+ cap_mask = curracl->cap_mask;
72085+ cap_audit = curracl->cap_invert_audit;
72086+
72087+ while ((curracl = curracl->parent_subject)) {
72088+ /* if the cap isn't specified in the current computed mask but is specified in the
72089+ current level subject, and is lowered in the current level subject, then add
72090+ it to the set of dropped capabilities
72091+ otherwise, add the current level subject's mask to the current computed mask
72092+ */
72093+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72094+ cap_raise(cap_mask, cap);
72095+ if (cap_raised(curracl->cap_lower, cap))
72096+ cap_raise(cap_drop, cap);
72097+ if (cap_raised(curracl->cap_invert_audit, cap))
72098+ cap_raise(cap_audit, cap);
72099+ }
72100+ }
72101+
72102+ if (!cap_raised(cap_drop, cap)) {
72103+ if (cap_raised(cap_audit, cap))
72104+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72105+ return 1;
72106+ }
72107+
72108+ /* only learn the capability use if the process has the capability in the
72109+ general case, the two uses in sys.c of gr_learn_cap are an exception
72110+ to this rule to ensure any role transition involves what the full-learned
72111+ policy believes in a privileged process
72112+ */
72113+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72114+ return 1;
72115+
72116+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72117+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72118+
72119+ return 0;
72120+}
72121+
72122+int
72123+gr_acl_is_capable(const int cap)
72124+{
72125+ return gr_task_acl_is_capable(current, current_cred(), cap);
72126+}
72127+
72128+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72129+{
72130+ struct acl_subject_label *curracl;
72131+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72132+
72133+ if (!gr_acl_is_enabled())
72134+ return 1;
72135+
72136+ curracl = task->acl;
72137+
72138+ cap_drop = curracl->cap_lower;
72139+ cap_mask = curracl->cap_mask;
72140+
72141+ while ((curracl = curracl->parent_subject)) {
72142+ /* if the cap isn't specified in the current computed mask but is specified in the
72143+ current level subject, and is lowered in the current level subject, then add
72144+ it to the set of dropped capabilities
72145+ otherwise, add the current level subject's mask to the current computed mask
72146+ */
72147+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72148+ cap_raise(cap_mask, cap);
72149+ if (cap_raised(curracl->cap_lower, cap))
72150+ cap_raise(cap_drop, cap);
72151+ }
72152+ }
72153+
72154+ if (!cap_raised(cap_drop, cap))
72155+ return 1;
72156+
72157+ return 0;
72158+}
72159+
72160+int
72161+gr_acl_is_capable_nolog(const int cap)
72162+{
72163+ return gr_task_acl_is_capable_nolog(current, cap);
72164+}
72165+
72166diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72167new file mode 100644
72168index 0000000..a43dd06
72169--- /dev/null
72170+++ b/grsecurity/gracl_compat.c
72171@@ -0,0 +1,269 @@
72172+#include <linux/kernel.h>
72173+#include <linux/gracl.h>
72174+#include <linux/compat.h>
72175+#include <linux/gracl_compat.h>
72176+
72177+#include <asm/uaccess.h>
72178+
72179+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72180+{
72181+ struct gr_arg_wrapper_compat uwrapcompat;
72182+
72183+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72184+ return -EFAULT;
72185+
72186+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72187+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72188+ return -EINVAL;
72189+
72190+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72191+ uwrap->version = uwrapcompat.version;
72192+ uwrap->size = sizeof(struct gr_arg);
72193+
72194+ return 0;
72195+}
72196+
72197+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72198+{
72199+ struct gr_arg_compat argcompat;
72200+
72201+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72202+ return -EFAULT;
72203+
72204+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72205+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72206+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72207+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72208+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72209+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72210+
72211+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72212+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72213+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72214+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72215+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72216+ arg->segv_device = argcompat.segv_device;
72217+ arg->segv_inode = argcompat.segv_inode;
72218+ arg->segv_uid = argcompat.segv_uid;
72219+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72220+ arg->mode = argcompat.mode;
72221+
72222+ return 0;
72223+}
72224+
72225+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72226+{
72227+ struct acl_object_label_compat objcompat;
72228+
72229+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72230+ return -EFAULT;
72231+
72232+ obj->filename = compat_ptr(objcompat.filename);
72233+ obj->inode = objcompat.inode;
72234+ obj->device = objcompat.device;
72235+ obj->mode = objcompat.mode;
72236+
72237+ obj->nested = compat_ptr(objcompat.nested);
72238+ obj->globbed = compat_ptr(objcompat.globbed);
72239+
72240+ obj->prev = compat_ptr(objcompat.prev);
72241+ obj->next = compat_ptr(objcompat.next);
72242+
72243+ return 0;
72244+}
72245+
72246+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72247+{
72248+ unsigned int i;
72249+ struct acl_subject_label_compat subjcompat;
72250+
72251+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72252+ return -EFAULT;
72253+
72254+ subj->filename = compat_ptr(subjcompat.filename);
72255+ subj->inode = subjcompat.inode;
72256+ subj->device = subjcompat.device;
72257+ subj->mode = subjcompat.mode;
72258+ subj->cap_mask = subjcompat.cap_mask;
72259+ subj->cap_lower = subjcompat.cap_lower;
72260+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72261+
72262+ for (i = 0; i < GR_NLIMITS; i++) {
72263+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72264+ subj->res[i].rlim_cur = RLIM_INFINITY;
72265+ else
72266+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72267+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72268+ subj->res[i].rlim_max = RLIM_INFINITY;
72269+ else
72270+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72271+ }
72272+ subj->resmask = subjcompat.resmask;
72273+
72274+ subj->user_trans_type = subjcompat.user_trans_type;
72275+ subj->group_trans_type = subjcompat.group_trans_type;
72276+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72277+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72278+ subj->user_trans_num = subjcompat.user_trans_num;
72279+ subj->group_trans_num = subjcompat.group_trans_num;
72280+
72281+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72282+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72283+ subj->ip_type = subjcompat.ip_type;
72284+ subj->ips = compat_ptr(subjcompat.ips);
72285+ subj->ip_num = subjcompat.ip_num;
72286+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72287+
72288+ subj->crashes = subjcompat.crashes;
72289+ subj->expires = subjcompat.expires;
72290+
72291+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72292+ subj->hash = compat_ptr(subjcompat.hash);
72293+ subj->prev = compat_ptr(subjcompat.prev);
72294+ subj->next = compat_ptr(subjcompat.next);
72295+
72296+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72297+ subj->obj_hash_size = subjcompat.obj_hash_size;
72298+ subj->pax_flags = subjcompat.pax_flags;
72299+
72300+ return 0;
72301+}
72302+
72303+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72304+{
72305+ struct acl_role_label_compat rolecompat;
72306+
72307+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72308+ return -EFAULT;
72309+
72310+ role->rolename = compat_ptr(rolecompat.rolename);
72311+ role->uidgid = rolecompat.uidgid;
72312+ role->roletype = rolecompat.roletype;
72313+
72314+ role->auth_attempts = rolecompat.auth_attempts;
72315+ role->expires = rolecompat.expires;
72316+
72317+ role->root_label = compat_ptr(rolecompat.root_label);
72318+ role->hash = compat_ptr(rolecompat.hash);
72319+
72320+ role->prev = compat_ptr(rolecompat.prev);
72321+ role->next = compat_ptr(rolecompat.next);
72322+
72323+ role->transitions = compat_ptr(rolecompat.transitions);
72324+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72325+ role->domain_children = compat_ptr(rolecompat.domain_children);
72326+ role->domain_child_num = rolecompat.domain_child_num;
72327+
72328+ role->umask = rolecompat.umask;
72329+
72330+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72331+ role->subj_hash_size = rolecompat.subj_hash_size;
72332+
72333+ return 0;
72334+}
72335+
72336+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72337+{
72338+ struct role_allowed_ip_compat roleip_compat;
72339+
72340+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72341+ return -EFAULT;
72342+
72343+ roleip->addr = roleip_compat.addr;
72344+ roleip->netmask = roleip_compat.netmask;
72345+
72346+ roleip->prev = compat_ptr(roleip_compat.prev);
72347+ roleip->next = compat_ptr(roleip_compat.next);
72348+
72349+ return 0;
72350+}
72351+
72352+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72353+{
72354+ struct role_transition_compat trans_compat;
72355+
72356+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72357+ return -EFAULT;
72358+
72359+ trans->rolename = compat_ptr(trans_compat.rolename);
72360+
72361+ trans->prev = compat_ptr(trans_compat.prev);
72362+ trans->next = compat_ptr(trans_compat.next);
72363+
72364+ return 0;
72365+
72366+}
72367+
72368+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72369+{
72370+ struct gr_hash_struct_compat hash_compat;
72371+
72372+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72373+ return -EFAULT;
72374+
72375+ hash->table = compat_ptr(hash_compat.table);
72376+ hash->nametable = compat_ptr(hash_compat.nametable);
72377+ hash->first = compat_ptr(hash_compat.first);
72378+
72379+ hash->table_size = hash_compat.table_size;
72380+ hash->used_size = hash_compat.used_size;
72381+
72382+ hash->type = hash_compat.type;
72383+
72384+ return 0;
72385+}
72386+
72387+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72388+{
72389+ compat_uptr_t ptrcompat;
72390+
72391+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72392+ return -EFAULT;
72393+
72394+ *(void **)ptr = compat_ptr(ptrcompat);
72395+
72396+ return 0;
72397+}
72398+
72399+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72400+{
72401+ struct acl_ip_label_compat ip_compat;
72402+
72403+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72404+ return -EFAULT;
72405+
72406+ ip->iface = compat_ptr(ip_compat.iface);
72407+ ip->addr = ip_compat.addr;
72408+ ip->netmask = ip_compat.netmask;
72409+ ip->low = ip_compat.low;
72410+ ip->high = ip_compat.high;
72411+ ip->mode = ip_compat.mode;
72412+ ip->type = ip_compat.type;
72413+
72414+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72415+
72416+ ip->prev = compat_ptr(ip_compat.prev);
72417+ ip->next = compat_ptr(ip_compat.next);
72418+
72419+ return 0;
72420+}
72421+
72422+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72423+{
72424+ struct sprole_pw_compat pw_compat;
72425+
72426+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72427+ return -EFAULT;
72428+
72429+ pw->rolename = compat_ptr(pw_compat.rolename);
72430+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72431+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72432+
72433+ return 0;
72434+}
72435+
72436+size_t get_gr_arg_wrapper_size_compat(void)
72437+{
72438+ return sizeof(struct gr_arg_wrapper_compat);
72439+}
72440+
72441diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72442new file mode 100644
72443index 0000000..8ee8e4f
72444--- /dev/null
72445+++ b/grsecurity/gracl_fs.c
72446@@ -0,0 +1,447 @@
72447+#include <linux/kernel.h>
72448+#include <linux/sched.h>
72449+#include <linux/types.h>
72450+#include <linux/fs.h>
72451+#include <linux/file.h>
72452+#include <linux/stat.h>
72453+#include <linux/grsecurity.h>
72454+#include <linux/grinternal.h>
72455+#include <linux/gracl.h>
72456+
72457+umode_t
72458+gr_acl_umask(void)
72459+{
72460+ if (unlikely(!gr_acl_is_enabled()))
72461+ return 0;
72462+
72463+ return current->role->umask;
72464+}
72465+
72466+__u32
72467+gr_acl_handle_hidden_file(const struct dentry * dentry,
72468+ const struct vfsmount * mnt)
72469+{
72470+ __u32 mode;
72471+
72472+ if (unlikely(d_is_negative(dentry)))
72473+ return GR_FIND;
72474+
72475+ mode =
72476+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72477+
72478+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72479+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72480+ return mode;
72481+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72482+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72483+ return 0;
72484+ } else if (unlikely(!(mode & GR_FIND)))
72485+ return 0;
72486+
72487+ return GR_FIND;
72488+}
72489+
72490+__u32
72491+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72492+ int acc_mode)
72493+{
72494+ __u32 reqmode = GR_FIND;
72495+ __u32 mode;
72496+
72497+ if (unlikely(d_is_negative(dentry)))
72498+ return reqmode;
72499+
72500+ if (acc_mode & MAY_APPEND)
72501+ reqmode |= GR_APPEND;
72502+ else if (acc_mode & MAY_WRITE)
72503+ reqmode |= GR_WRITE;
72504+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72505+ reqmode |= GR_READ;
72506+
72507+ mode =
72508+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72509+ mnt);
72510+
72511+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72512+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72513+ reqmode & GR_READ ? " reading" : "",
72514+ reqmode & GR_WRITE ? " writing" : reqmode &
72515+ GR_APPEND ? " appending" : "");
72516+ return reqmode;
72517+ } else
72518+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72519+ {
72520+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72521+ reqmode & GR_READ ? " reading" : "",
72522+ reqmode & GR_WRITE ? " writing" : reqmode &
72523+ GR_APPEND ? " appending" : "");
72524+ return 0;
72525+ } else if (unlikely((mode & reqmode) != reqmode))
72526+ return 0;
72527+
72528+ return reqmode;
72529+}
72530+
72531+__u32
72532+gr_acl_handle_creat(const struct dentry * dentry,
72533+ const struct dentry * p_dentry,
72534+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72535+ const int imode)
72536+{
72537+ __u32 reqmode = GR_WRITE | GR_CREATE;
72538+ __u32 mode;
72539+
72540+ if (acc_mode & MAY_APPEND)
72541+ reqmode |= GR_APPEND;
72542+ // if a directory was required or the directory already exists, then
72543+ // don't count this open as a read
72544+ if ((acc_mode & MAY_READ) &&
72545+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72546+ reqmode |= GR_READ;
72547+ if ((open_flags & O_CREAT) &&
72548+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72549+ reqmode |= GR_SETID;
72550+
72551+ mode =
72552+ gr_check_create(dentry, p_dentry, p_mnt,
72553+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72554+
72555+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72556+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72557+ reqmode & GR_READ ? " reading" : "",
72558+ reqmode & GR_WRITE ? " writing" : reqmode &
72559+ GR_APPEND ? " appending" : "");
72560+ return reqmode;
72561+ } else
72562+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72563+ {
72564+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72565+ reqmode & GR_READ ? " reading" : "",
72566+ reqmode & GR_WRITE ? " writing" : reqmode &
72567+ GR_APPEND ? " appending" : "");
72568+ return 0;
72569+ } else if (unlikely((mode & reqmode) != reqmode))
72570+ return 0;
72571+
72572+ return reqmode;
72573+}
72574+
72575+__u32
72576+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72577+ const int fmode)
72578+{
72579+ __u32 mode, reqmode = GR_FIND;
72580+
72581+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72582+ reqmode |= GR_EXEC;
72583+ if (fmode & S_IWOTH)
72584+ reqmode |= GR_WRITE;
72585+ if (fmode & S_IROTH)
72586+ reqmode |= GR_READ;
72587+
72588+ mode =
72589+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72590+ mnt);
72591+
72592+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72593+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72594+ reqmode & GR_READ ? " reading" : "",
72595+ reqmode & GR_WRITE ? " writing" : "",
72596+ reqmode & GR_EXEC ? " executing" : "");
72597+ return reqmode;
72598+ } else
72599+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72600+ {
72601+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72602+ reqmode & GR_READ ? " reading" : "",
72603+ reqmode & GR_WRITE ? " writing" : "",
72604+ reqmode & GR_EXEC ? " executing" : "");
72605+ return 0;
72606+ } else if (unlikely((mode & reqmode) != reqmode))
72607+ return 0;
72608+
72609+ return reqmode;
72610+}
72611+
72612+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72613+{
72614+ __u32 mode;
72615+
72616+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72617+
72618+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72619+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72620+ return mode;
72621+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72622+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72623+ return 0;
72624+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72625+ return 0;
72626+
72627+ return (reqmode);
72628+}
72629+
72630+__u32
72631+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72632+{
72633+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72634+}
72635+
72636+__u32
72637+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72638+{
72639+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72640+}
72641+
72642+__u32
72643+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72644+{
72645+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72646+}
72647+
72648+__u32
72649+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72650+{
72651+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72652+}
72653+
72654+__u32
72655+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72656+ umode_t *modeptr)
72657+{
72658+ umode_t mode;
72659+
72660+ *modeptr &= ~gr_acl_umask();
72661+ mode = *modeptr;
72662+
72663+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72664+ return 1;
72665+
72666+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72667+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72668+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72669+ GR_CHMOD_ACL_MSG);
72670+ } else {
72671+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72672+ }
72673+}
72674+
72675+__u32
72676+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72677+{
72678+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72679+}
72680+
72681+__u32
72682+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72683+{
72684+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72685+}
72686+
72687+__u32
72688+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72689+{
72690+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72691+}
72692+
72693+__u32
72694+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72695+{
72696+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72697+}
72698+
72699+__u32
72700+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72701+{
72702+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72703+ GR_UNIXCONNECT_ACL_MSG);
72704+}
72705+
72706+/* hardlinks require at minimum create and link permission,
72707+ any additional privilege required is based on the
72708+ privilege of the file being linked to
72709+*/
72710+__u32
72711+gr_acl_handle_link(const struct dentry * new_dentry,
72712+ const struct dentry * parent_dentry,
72713+ const struct vfsmount * parent_mnt,
72714+ const struct dentry * old_dentry,
72715+ const struct vfsmount * old_mnt, const struct filename *to)
72716+{
72717+ __u32 mode;
72718+ __u32 needmode = GR_CREATE | GR_LINK;
72719+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
72720+
72721+ mode =
72722+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
72723+ old_mnt);
72724+
72725+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
72726+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72727+ return mode;
72728+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72729+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72730+ return 0;
72731+ } else if (unlikely((mode & needmode) != needmode))
72732+ return 0;
72733+
72734+ return 1;
72735+}
72736+
72737+__u32
72738+gr_acl_handle_symlink(const struct dentry * new_dentry,
72739+ const struct dentry * parent_dentry,
72740+ const struct vfsmount * parent_mnt, const struct filename *from)
72741+{
72742+ __u32 needmode = GR_WRITE | GR_CREATE;
72743+ __u32 mode;
72744+
72745+ mode =
72746+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
72747+ GR_CREATE | GR_AUDIT_CREATE |
72748+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
72749+
72750+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
72751+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72752+ return mode;
72753+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72754+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72755+ return 0;
72756+ } else if (unlikely((mode & needmode) != needmode))
72757+ return 0;
72758+
72759+ return (GR_WRITE | GR_CREATE);
72760+}
72761+
72762+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
72763+{
72764+ __u32 mode;
72765+
72766+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72767+
72768+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72769+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
72770+ return mode;
72771+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72772+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
72773+ return 0;
72774+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72775+ return 0;
72776+
72777+ return (reqmode);
72778+}
72779+
72780+__u32
72781+gr_acl_handle_mknod(const struct dentry * new_dentry,
72782+ const struct dentry * parent_dentry,
72783+ const struct vfsmount * parent_mnt,
72784+ const int mode)
72785+{
72786+ __u32 reqmode = GR_WRITE | GR_CREATE;
72787+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72788+ reqmode |= GR_SETID;
72789+
72790+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72791+ reqmode, GR_MKNOD_ACL_MSG);
72792+}
72793+
72794+__u32
72795+gr_acl_handle_mkdir(const struct dentry *new_dentry,
72796+ const struct dentry *parent_dentry,
72797+ const struct vfsmount *parent_mnt)
72798+{
72799+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72800+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
72801+}
72802+
72803+#define RENAME_CHECK_SUCCESS(old, new) \
72804+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
72805+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
72806+
72807+int
72808+gr_acl_handle_rename(struct dentry *new_dentry,
72809+ struct dentry *parent_dentry,
72810+ const struct vfsmount *parent_mnt,
72811+ struct dentry *old_dentry,
72812+ struct inode *old_parent_inode,
72813+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
72814+{
72815+ __u32 comp1, comp2;
72816+ int error = 0;
72817+
72818+ if (unlikely(!gr_acl_is_enabled()))
72819+ return 0;
72820+
72821+ if (flags & RENAME_EXCHANGE) {
72822+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72823+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72824+ GR_SUPPRESS, parent_mnt);
72825+ comp2 =
72826+ gr_search_file(old_dentry,
72827+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72828+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72829+ } else if (d_is_negative(new_dentry)) {
72830+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
72831+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
72832+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
72833+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
72834+ GR_DELETE | GR_AUDIT_DELETE |
72835+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72836+ GR_SUPPRESS, old_mnt);
72837+ } else {
72838+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72839+ GR_CREATE | GR_DELETE |
72840+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
72841+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72842+ GR_SUPPRESS, parent_mnt);
72843+ comp2 =
72844+ gr_search_file(old_dentry,
72845+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72846+ GR_DELETE | GR_AUDIT_DELETE |
72847+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72848+ }
72849+
72850+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
72851+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
72852+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72853+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
72854+ && !(comp2 & GR_SUPPRESS)) {
72855+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
72856+ error = -EACCES;
72857+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
72858+ error = -EACCES;
72859+
72860+ return error;
72861+}
72862+
72863+void
72864+gr_acl_handle_exit(void)
72865+{
72866+ u16 id;
72867+ char *rolename;
72868+
72869+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
72870+ !(current->role->roletype & GR_ROLE_PERSIST))) {
72871+ id = current->acl_role_id;
72872+ rolename = current->role->rolename;
72873+ gr_set_acls(1);
72874+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
72875+ }
72876+
72877+ gr_put_exec_file(current);
72878+ return;
72879+}
72880+
72881+int
72882+gr_acl_handle_procpidmem(const struct task_struct *task)
72883+{
72884+ if (unlikely(!gr_acl_is_enabled()))
72885+ return 0;
72886+
72887+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
72888+ !(current->acl->mode & GR_POVERRIDE) &&
72889+ !(current->role->roletype & GR_ROLE_GOD))
72890+ return -EACCES;
72891+
72892+ return 0;
72893+}
72894diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
72895new file mode 100644
72896index 0000000..f056b81
72897--- /dev/null
72898+++ b/grsecurity/gracl_ip.c
72899@@ -0,0 +1,386 @@
72900+#include <linux/kernel.h>
72901+#include <asm/uaccess.h>
72902+#include <asm/errno.h>
72903+#include <net/sock.h>
72904+#include <linux/file.h>
72905+#include <linux/fs.h>
72906+#include <linux/net.h>
72907+#include <linux/in.h>
72908+#include <linux/skbuff.h>
72909+#include <linux/ip.h>
72910+#include <linux/udp.h>
72911+#include <linux/types.h>
72912+#include <linux/sched.h>
72913+#include <linux/netdevice.h>
72914+#include <linux/inetdevice.h>
72915+#include <linux/gracl.h>
72916+#include <linux/grsecurity.h>
72917+#include <linux/grinternal.h>
72918+
72919+#define GR_BIND 0x01
72920+#define GR_CONNECT 0x02
72921+#define GR_INVERT 0x04
72922+#define GR_BINDOVERRIDE 0x08
72923+#define GR_CONNECTOVERRIDE 0x10
72924+#define GR_SOCK_FAMILY 0x20
72925+
72926+static const char * gr_protocols[IPPROTO_MAX] = {
72927+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
72928+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
72929+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
72930+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
72931+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
72932+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
72933+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
72934+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
72935+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
72936+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
72937+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
72938+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
72939+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
72940+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
72941+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
72942+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
72943+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
72944+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
72945+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
72946+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
72947+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
72948+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
72949+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
72950+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
72951+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
72952+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
72953+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
72954+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
72955+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
72956+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
72957+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
72958+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
72959+ };
72960+
72961+static const char * gr_socktypes[SOCK_MAX] = {
72962+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
72963+ "unknown:7", "unknown:8", "unknown:9", "packet"
72964+ };
72965+
72966+static const char * gr_sockfamilies[AF_MAX+1] = {
72967+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
72968+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
72969+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
72970+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
72971+ };
72972+
72973+const char *
72974+gr_proto_to_name(unsigned char proto)
72975+{
72976+ return gr_protocols[proto];
72977+}
72978+
72979+const char *
72980+gr_socktype_to_name(unsigned char type)
72981+{
72982+ return gr_socktypes[type];
72983+}
72984+
72985+const char *
72986+gr_sockfamily_to_name(unsigned char family)
72987+{
72988+ return gr_sockfamilies[family];
72989+}
72990+
72991+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
72992+
72993+int
72994+gr_search_socket(const int domain, const int type, const int protocol)
72995+{
72996+ struct acl_subject_label *curr;
72997+ const struct cred *cred = current_cred();
72998+
72999+ if (unlikely(!gr_acl_is_enabled()))
73000+ goto exit;
73001+
73002+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73003+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73004+ goto exit; // let the kernel handle it
73005+
73006+ curr = current->acl;
73007+
73008+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73009+ /* the family is allowed, if this is PF_INET allow it only if
73010+ the extra sock type/protocol checks pass */
73011+ if (domain == PF_INET)
73012+ goto inet_check;
73013+ goto exit;
73014+ } else {
73015+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73016+ __u32 fakeip = 0;
73017+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73018+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73019+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73020+ gr_to_filename(current->exec_file->f_path.dentry,
73021+ current->exec_file->f_path.mnt) :
73022+ curr->filename, curr->filename,
73023+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73024+ &current->signal->saved_ip);
73025+ goto exit;
73026+ }
73027+ goto exit_fail;
73028+ }
73029+
73030+inet_check:
73031+ /* the rest of this checking is for IPv4 only */
73032+ if (!curr->ips)
73033+ goto exit;
73034+
73035+ if ((curr->ip_type & (1U << type)) &&
73036+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73037+ goto exit;
73038+
73039+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73040+ /* we don't place acls on raw sockets , and sometimes
73041+ dgram/ip sockets are opened for ioctl and not
73042+ bind/connect, so we'll fake a bind learn log */
73043+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73044+ __u32 fakeip = 0;
73045+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73046+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73047+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73048+ gr_to_filename(current->exec_file->f_path.dentry,
73049+ current->exec_file->f_path.mnt) :
73050+ curr->filename, curr->filename,
73051+ &fakeip, 0, type,
73052+ protocol, GR_CONNECT, &current->signal->saved_ip);
73053+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73054+ __u32 fakeip = 0;
73055+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73056+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73057+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73058+ gr_to_filename(current->exec_file->f_path.dentry,
73059+ current->exec_file->f_path.mnt) :
73060+ curr->filename, curr->filename,
73061+ &fakeip, 0, type,
73062+ protocol, GR_BIND, &current->signal->saved_ip);
73063+ }
73064+ /* we'll log when they use connect or bind */
73065+ goto exit;
73066+ }
73067+
73068+exit_fail:
73069+ if (domain == PF_INET)
73070+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73071+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73072+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73073+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73074+ gr_socktype_to_name(type), protocol);
73075+
73076+ return 0;
73077+exit:
73078+ return 1;
73079+}
73080+
73081+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73082+{
73083+ if ((ip->mode & mode) &&
73084+ (ip_port >= ip->low) &&
73085+ (ip_port <= ip->high) &&
73086+ ((ntohl(ip_addr) & our_netmask) ==
73087+ (ntohl(our_addr) & our_netmask))
73088+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73089+ && (ip->type & (1U << type))) {
73090+ if (ip->mode & GR_INVERT)
73091+ return 2; // specifically denied
73092+ else
73093+ return 1; // allowed
73094+ }
73095+
73096+ return 0; // not specifically allowed, may continue parsing
73097+}
73098+
73099+static int
73100+gr_search_connectbind(const int full_mode, struct sock *sk,
73101+ struct sockaddr_in *addr, const int type)
73102+{
73103+ char iface[IFNAMSIZ] = {0};
73104+ struct acl_subject_label *curr;
73105+ struct acl_ip_label *ip;
73106+ struct inet_sock *isk;
73107+ struct net_device *dev;
73108+ struct in_device *idev;
73109+ unsigned long i;
73110+ int ret;
73111+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73112+ __u32 ip_addr = 0;
73113+ __u32 our_addr;
73114+ __u32 our_netmask;
73115+ char *p;
73116+ __u16 ip_port = 0;
73117+ const struct cred *cred = current_cred();
73118+
73119+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73120+ return 0;
73121+
73122+ curr = current->acl;
73123+ isk = inet_sk(sk);
73124+
73125+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
73126+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73127+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73128+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73129+ struct sockaddr_in saddr;
73130+ int err;
73131+
73132+ saddr.sin_family = AF_INET;
73133+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73134+ saddr.sin_port = isk->inet_sport;
73135+
73136+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73137+ if (err)
73138+ return err;
73139+
73140+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73141+ if (err)
73142+ return err;
73143+ }
73144+
73145+ if (!curr->ips)
73146+ return 0;
73147+
73148+ ip_addr = addr->sin_addr.s_addr;
73149+ ip_port = ntohs(addr->sin_port);
73150+
73151+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73152+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73153+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73154+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73155+ gr_to_filename(current->exec_file->f_path.dentry,
73156+ current->exec_file->f_path.mnt) :
73157+ curr->filename, curr->filename,
73158+ &ip_addr, ip_port, type,
73159+ sk->sk_protocol, mode, &current->signal->saved_ip);
73160+ return 0;
73161+ }
73162+
73163+ for (i = 0; i < curr->ip_num; i++) {
73164+ ip = *(curr->ips + i);
73165+ if (ip->iface != NULL) {
73166+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73167+ p = strchr(iface, ':');
73168+ if (p != NULL)
73169+ *p = '\0';
73170+ dev = dev_get_by_name(sock_net(sk), iface);
73171+ if (dev == NULL)
73172+ continue;
73173+ idev = in_dev_get(dev);
73174+ if (idev == NULL) {
73175+ dev_put(dev);
73176+ continue;
73177+ }
73178+ rcu_read_lock();
73179+ for_ifa(idev) {
73180+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73181+ our_addr = ifa->ifa_address;
73182+ our_netmask = 0xffffffff;
73183+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73184+ if (ret == 1) {
73185+ rcu_read_unlock();
73186+ in_dev_put(idev);
73187+ dev_put(dev);
73188+ return 0;
73189+ } else if (ret == 2) {
73190+ rcu_read_unlock();
73191+ in_dev_put(idev);
73192+ dev_put(dev);
73193+ goto denied;
73194+ }
73195+ }
73196+ } endfor_ifa(idev);
73197+ rcu_read_unlock();
73198+ in_dev_put(idev);
73199+ dev_put(dev);
73200+ } else {
73201+ our_addr = ip->addr;
73202+ our_netmask = ip->netmask;
73203+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73204+ if (ret == 1)
73205+ return 0;
73206+ else if (ret == 2)
73207+ goto denied;
73208+ }
73209+ }
73210+
73211+denied:
73212+ if (mode == GR_BIND)
73213+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73214+ else if (mode == GR_CONNECT)
73215+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73216+
73217+ return -EACCES;
73218+}
73219+
73220+int
73221+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73222+{
73223+ /* always allow disconnection of dgram sockets with connect */
73224+ if (addr->sin_family == AF_UNSPEC)
73225+ return 0;
73226+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73227+}
73228+
73229+int
73230+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73231+{
73232+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73233+}
73234+
73235+int gr_search_listen(struct socket *sock)
73236+{
73237+ struct sock *sk = sock->sk;
73238+ struct sockaddr_in addr;
73239+
73240+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73241+ addr.sin_port = inet_sk(sk)->inet_sport;
73242+
73243+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73244+}
73245+
73246+int gr_search_accept(struct socket *sock)
73247+{
73248+ struct sock *sk = sock->sk;
73249+ struct sockaddr_in addr;
73250+
73251+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73252+ addr.sin_port = inet_sk(sk)->inet_sport;
73253+
73254+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73255+}
73256+
73257+int
73258+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73259+{
73260+ if (addr)
73261+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73262+ else {
73263+ struct sockaddr_in sin;
73264+ const struct inet_sock *inet = inet_sk(sk);
73265+
73266+ sin.sin_addr.s_addr = inet->inet_daddr;
73267+ sin.sin_port = inet->inet_dport;
73268+
73269+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73270+ }
73271+}
73272+
73273+int
73274+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73275+{
73276+ struct sockaddr_in sin;
73277+
73278+ if (unlikely(skb->len < sizeof (struct udphdr)))
73279+ return 0; // skip this packet
73280+
73281+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73282+ sin.sin_port = udp_hdr(skb)->source;
73283+
73284+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73285+}
73286diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73287new file mode 100644
73288index 0000000..25f54ef
73289--- /dev/null
73290+++ b/grsecurity/gracl_learn.c
73291@@ -0,0 +1,207 @@
73292+#include <linux/kernel.h>
73293+#include <linux/mm.h>
73294+#include <linux/sched.h>
73295+#include <linux/poll.h>
73296+#include <linux/string.h>
73297+#include <linux/file.h>
73298+#include <linux/types.h>
73299+#include <linux/vmalloc.h>
73300+#include <linux/grinternal.h>
73301+
73302+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73303+ size_t count, loff_t *ppos);
73304+extern int gr_acl_is_enabled(void);
73305+
73306+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73307+static int gr_learn_attached;
73308+
73309+/* use a 512k buffer */
73310+#define LEARN_BUFFER_SIZE (512 * 1024)
73311+
73312+static DEFINE_SPINLOCK(gr_learn_lock);
73313+static DEFINE_MUTEX(gr_learn_user_mutex);
73314+
73315+/* we need to maintain two buffers, so that the kernel context of grlearn
73316+ uses a semaphore around the userspace copying, and the other kernel contexts
73317+ use a spinlock when copying into the buffer, since they cannot sleep
73318+*/
73319+static char *learn_buffer;
73320+static char *learn_buffer_user;
73321+static int learn_buffer_len;
73322+static int learn_buffer_user_len;
73323+
73324+static ssize_t
73325+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73326+{
73327+ DECLARE_WAITQUEUE(wait, current);
73328+ ssize_t retval = 0;
73329+
73330+ add_wait_queue(&learn_wait, &wait);
73331+ set_current_state(TASK_INTERRUPTIBLE);
73332+ do {
73333+ mutex_lock(&gr_learn_user_mutex);
73334+ spin_lock(&gr_learn_lock);
73335+ if (learn_buffer_len)
73336+ break;
73337+ spin_unlock(&gr_learn_lock);
73338+ mutex_unlock(&gr_learn_user_mutex);
73339+ if (file->f_flags & O_NONBLOCK) {
73340+ retval = -EAGAIN;
73341+ goto out;
73342+ }
73343+ if (signal_pending(current)) {
73344+ retval = -ERESTARTSYS;
73345+ goto out;
73346+ }
73347+
73348+ schedule();
73349+ } while (1);
73350+
73351+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73352+ learn_buffer_user_len = learn_buffer_len;
73353+ retval = learn_buffer_len;
73354+ learn_buffer_len = 0;
73355+
73356+ spin_unlock(&gr_learn_lock);
73357+
73358+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73359+ retval = -EFAULT;
73360+
73361+ mutex_unlock(&gr_learn_user_mutex);
73362+out:
73363+ set_current_state(TASK_RUNNING);
73364+ remove_wait_queue(&learn_wait, &wait);
73365+ return retval;
73366+}
73367+
73368+static unsigned int
73369+poll_learn(struct file * file, poll_table * wait)
73370+{
73371+ poll_wait(file, &learn_wait, wait);
73372+
73373+ if (learn_buffer_len)
73374+ return (POLLIN | POLLRDNORM);
73375+
73376+ return 0;
73377+}
73378+
73379+void
73380+gr_clear_learn_entries(void)
73381+{
73382+ char *tmp;
73383+
73384+ mutex_lock(&gr_learn_user_mutex);
73385+ spin_lock(&gr_learn_lock);
73386+ tmp = learn_buffer;
73387+ learn_buffer = NULL;
73388+ spin_unlock(&gr_learn_lock);
73389+ if (tmp)
73390+ vfree(tmp);
73391+ if (learn_buffer_user != NULL) {
73392+ vfree(learn_buffer_user);
73393+ learn_buffer_user = NULL;
73394+ }
73395+ learn_buffer_len = 0;
73396+ mutex_unlock(&gr_learn_user_mutex);
73397+
73398+ return;
73399+}
73400+
73401+void
73402+gr_add_learn_entry(const char *fmt, ...)
73403+{
73404+ va_list args;
73405+ unsigned int len;
73406+
73407+ if (!gr_learn_attached)
73408+ return;
73409+
73410+ spin_lock(&gr_learn_lock);
73411+
73412+ /* leave a gap at the end so we know when it's "full" but don't have to
73413+ compute the exact length of the string we're trying to append
73414+ */
73415+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73416+ spin_unlock(&gr_learn_lock);
73417+ wake_up_interruptible(&learn_wait);
73418+ return;
73419+ }
73420+ if (learn_buffer == NULL) {
73421+ spin_unlock(&gr_learn_lock);
73422+ return;
73423+ }
73424+
73425+ va_start(args, fmt);
73426+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73427+ va_end(args);
73428+
73429+ learn_buffer_len += len + 1;
73430+
73431+ spin_unlock(&gr_learn_lock);
73432+ wake_up_interruptible(&learn_wait);
73433+
73434+ return;
73435+}
73436+
73437+static int
73438+open_learn(struct inode *inode, struct file *file)
73439+{
73440+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73441+ return -EBUSY;
73442+ if (file->f_mode & FMODE_READ) {
73443+ int retval = 0;
73444+ mutex_lock(&gr_learn_user_mutex);
73445+ if (learn_buffer == NULL)
73446+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73447+ if (learn_buffer_user == NULL)
73448+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73449+ if (learn_buffer == NULL) {
73450+ retval = -ENOMEM;
73451+ goto out_error;
73452+ }
73453+ if (learn_buffer_user == NULL) {
73454+ retval = -ENOMEM;
73455+ goto out_error;
73456+ }
73457+ learn_buffer_len = 0;
73458+ learn_buffer_user_len = 0;
73459+ gr_learn_attached = 1;
73460+out_error:
73461+ mutex_unlock(&gr_learn_user_mutex);
73462+ return retval;
73463+ }
73464+ return 0;
73465+}
73466+
73467+static int
73468+close_learn(struct inode *inode, struct file *file)
73469+{
73470+ if (file->f_mode & FMODE_READ) {
73471+ char *tmp = NULL;
73472+ mutex_lock(&gr_learn_user_mutex);
73473+ spin_lock(&gr_learn_lock);
73474+ tmp = learn_buffer;
73475+ learn_buffer = NULL;
73476+ spin_unlock(&gr_learn_lock);
73477+ if (tmp)
73478+ vfree(tmp);
73479+ if (learn_buffer_user != NULL) {
73480+ vfree(learn_buffer_user);
73481+ learn_buffer_user = NULL;
73482+ }
73483+ learn_buffer_len = 0;
73484+ learn_buffer_user_len = 0;
73485+ gr_learn_attached = 0;
73486+ mutex_unlock(&gr_learn_user_mutex);
73487+ }
73488+
73489+ return 0;
73490+}
73491+
73492+const struct file_operations grsec_fops = {
73493+ .read = read_learn,
73494+ .write = write_grsec_handler,
73495+ .open = open_learn,
73496+ .release = close_learn,
73497+ .poll = poll_learn,
73498+};
73499diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73500new file mode 100644
73501index 0000000..fd26052
73502--- /dev/null
73503+++ b/grsecurity/gracl_policy.c
73504@@ -0,0 +1,1781 @@
73505+#include <linux/kernel.h>
73506+#include <linux/module.h>
73507+#include <linux/sched.h>
73508+#include <linux/mm.h>
73509+#include <linux/file.h>
73510+#include <linux/fs.h>
73511+#include <linux/namei.h>
73512+#include <linux/mount.h>
73513+#include <linux/tty.h>
73514+#include <linux/proc_fs.h>
73515+#include <linux/lglock.h>
73516+#include <linux/slab.h>
73517+#include <linux/vmalloc.h>
73518+#include <linux/types.h>
73519+#include <linux/sysctl.h>
73520+#include <linux/netdevice.h>
73521+#include <linux/ptrace.h>
73522+#include <linux/gracl.h>
73523+#include <linux/gralloc.h>
73524+#include <linux/security.h>
73525+#include <linux/grinternal.h>
73526+#include <linux/pid_namespace.h>
73527+#include <linux/stop_machine.h>
73528+#include <linux/fdtable.h>
73529+#include <linux/percpu.h>
73530+#include <linux/lglock.h>
73531+#include <linux/hugetlb.h>
73532+#include <linux/posix-timers.h>
73533+#include "../fs/mount.h"
73534+
73535+#include <asm/uaccess.h>
73536+#include <asm/errno.h>
73537+#include <asm/mman.h>
73538+
73539+extern struct gr_policy_state *polstate;
73540+
73541+#define FOR_EACH_ROLE_START(role) \
73542+ role = polstate->role_list; \
73543+ while (role) {
73544+
73545+#define FOR_EACH_ROLE_END(role) \
73546+ role = role->prev; \
73547+ }
73548+
73549+struct path gr_real_root;
73550+
73551+extern struct gr_alloc_state *current_alloc_state;
73552+
73553+u16 acl_sp_role_value;
73554+
73555+static DEFINE_MUTEX(gr_dev_mutex);
73556+
73557+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73558+extern void gr_clear_learn_entries(void);
73559+
73560+struct gr_arg *gr_usermode __read_only;
73561+unsigned char *gr_system_salt __read_only;
73562+unsigned char *gr_system_sum __read_only;
73563+
73564+static unsigned int gr_auth_attempts = 0;
73565+static unsigned long gr_auth_expires = 0UL;
73566+
73567+struct acl_object_label *fakefs_obj_rw;
73568+struct acl_object_label *fakefs_obj_rwx;
73569+
73570+extern int gr_init_uidset(void);
73571+extern void gr_free_uidset(void);
73572+extern void gr_remove_uid(uid_t uid);
73573+extern int gr_find_uid(uid_t uid);
73574+
73575+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73576+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73577+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73578+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73579+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73580+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73581+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73582+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73583+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73584+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73585+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73586+extern void assign_special_role(const char *rolename);
73587+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73588+extern int gr_rbac_disable(void *unused);
73589+extern void gr_enable_rbac_system(void);
73590+
73591+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73592+{
73593+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73594+ return -EFAULT;
73595+
73596+ return 0;
73597+}
73598+
73599+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73600+{
73601+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73602+ return -EFAULT;
73603+
73604+ return 0;
73605+}
73606+
73607+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73608+{
73609+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73610+ return -EFAULT;
73611+
73612+ return 0;
73613+}
73614+
73615+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73616+{
73617+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73618+ return -EFAULT;
73619+
73620+ return 0;
73621+}
73622+
73623+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73624+{
73625+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73626+ return -EFAULT;
73627+
73628+ return 0;
73629+}
73630+
73631+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73632+{
73633+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73634+ return -EFAULT;
73635+
73636+ return 0;
73637+}
73638+
73639+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73640+{
73641+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73642+ return -EFAULT;
73643+
73644+ return 0;
73645+}
73646+
73647+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73648+{
73649+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73650+ return -EFAULT;
73651+
73652+ return 0;
73653+}
73654+
73655+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73656+{
73657+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73658+ return -EFAULT;
73659+
73660+ return 0;
73661+}
73662+
73663+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73664+{
73665+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73666+ return -EFAULT;
73667+
73668+ if ((uwrap->version != GRSECURITY_VERSION) ||
73669+ (uwrap->size != sizeof(struct gr_arg)))
73670+ return -EINVAL;
73671+
73672+ return 0;
73673+}
73674+
73675+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73676+{
73677+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73678+ return -EFAULT;
73679+
73680+ return 0;
73681+}
73682+
73683+static size_t get_gr_arg_wrapper_size_normal(void)
73684+{
73685+ return sizeof(struct gr_arg_wrapper);
73686+}
73687+
73688+#ifdef CONFIG_COMPAT
73689+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73690+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73691+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73692+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73693+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73694+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73695+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73696+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73697+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73698+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73699+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73700+extern size_t get_gr_arg_wrapper_size_compat(void);
73701+
73702+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73703+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73704+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73705+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73706+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
73707+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
73708+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
73709+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
73710+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
73711+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
73712+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
73713+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
73714+
73715+#else
73716+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
73717+#define copy_gr_arg copy_gr_arg_normal
73718+#define copy_gr_hash_struct copy_gr_hash_struct_normal
73719+#define copy_acl_object_label copy_acl_object_label_normal
73720+#define copy_acl_subject_label copy_acl_subject_label_normal
73721+#define copy_acl_role_label copy_acl_role_label_normal
73722+#define copy_acl_ip_label copy_acl_ip_label_normal
73723+#define copy_pointer_from_array copy_pointer_from_array_normal
73724+#define copy_sprole_pw copy_sprole_pw_normal
73725+#define copy_role_transition copy_role_transition_normal
73726+#define copy_role_allowed_ip copy_role_allowed_ip_normal
73727+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
73728+#endif
73729+
73730+static struct acl_subject_label *
73731+lookup_subject_map(const struct acl_subject_label *userp)
73732+{
73733+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
73734+ struct subject_map *match;
73735+
73736+ match = polstate->subj_map_set.s_hash[index];
73737+
73738+ while (match && match->user != userp)
73739+ match = match->next;
73740+
73741+ if (match != NULL)
73742+ return match->kernel;
73743+ else
73744+ return NULL;
73745+}
73746+
73747+static void
73748+insert_subj_map_entry(struct subject_map *subjmap)
73749+{
73750+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
73751+ struct subject_map **curr;
73752+
73753+ subjmap->prev = NULL;
73754+
73755+ curr = &polstate->subj_map_set.s_hash[index];
73756+ if (*curr != NULL)
73757+ (*curr)->prev = subjmap;
73758+
73759+ subjmap->next = *curr;
73760+ *curr = subjmap;
73761+
73762+ return;
73763+}
73764+
73765+static void
73766+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
73767+{
73768+ unsigned int index =
73769+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
73770+ struct acl_role_label **curr;
73771+ struct acl_role_label *tmp, *tmp2;
73772+
73773+ curr = &polstate->acl_role_set.r_hash[index];
73774+
73775+ /* simple case, slot is empty, just set it to our role */
73776+ if (*curr == NULL) {
73777+ *curr = role;
73778+ } else {
73779+ /* example:
73780+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
73781+ 2 -> 3
73782+ */
73783+ /* first check to see if we can already be reached via this slot */
73784+ tmp = *curr;
73785+ while (tmp && tmp != role)
73786+ tmp = tmp->next;
73787+ if (tmp == role) {
73788+ /* we don't need to add ourselves to this slot's chain */
73789+ return;
73790+ }
73791+ /* we need to add ourselves to this chain, two cases */
73792+ if (role->next == NULL) {
73793+ /* simple case, append the current chain to our role */
73794+ role->next = *curr;
73795+ *curr = role;
73796+ } else {
73797+ /* 1 -> 2 -> 3 -> 4
73798+ 2 -> 3 -> 4
73799+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
73800+ */
73801+ /* trickier case: walk our role's chain until we find
73802+ the role for the start of the current slot's chain */
73803+ tmp = role;
73804+ tmp2 = *curr;
73805+ while (tmp->next && tmp->next != tmp2)
73806+ tmp = tmp->next;
73807+ if (tmp->next == tmp2) {
73808+ /* from example above, we found 3, so just
73809+ replace this slot's chain with ours */
73810+ *curr = role;
73811+ } else {
73812+ /* we didn't find a subset of our role's chain
73813+ in the current slot's chain, so append their
73814+ chain to ours, and set us as the first role in
73815+ the slot's chain
73816+
73817+ we could fold this case with the case above,
73818+ but making it explicit for clarity
73819+ */
73820+ tmp->next = tmp2;
73821+ *curr = role;
73822+ }
73823+ }
73824+ }
73825+
73826+ return;
73827+}
73828+
73829+static void
73830+insert_acl_role_label(struct acl_role_label *role)
73831+{
73832+ int i;
73833+
73834+ if (polstate->role_list == NULL) {
73835+ polstate->role_list = role;
73836+ role->prev = NULL;
73837+ } else {
73838+ role->prev = polstate->role_list;
73839+ polstate->role_list = role;
73840+ }
73841+
73842+ /* used for hash chains */
73843+ role->next = NULL;
73844+
73845+ if (role->roletype & GR_ROLE_DOMAIN) {
73846+ for (i = 0; i < role->domain_child_num; i++)
73847+ __insert_acl_role_label(role, role->domain_children[i]);
73848+ } else
73849+ __insert_acl_role_label(role, role->uidgid);
73850+}
73851+
73852+static int
73853+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
73854+{
73855+ struct name_entry **curr, *nentry;
73856+ struct inodev_entry *ientry;
73857+ unsigned int len = strlen(name);
73858+ unsigned int key = full_name_hash(name, len);
73859+ unsigned int index = key % polstate->name_set.n_size;
73860+
73861+ curr = &polstate->name_set.n_hash[index];
73862+
73863+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
73864+ curr = &((*curr)->next);
73865+
73866+ if (*curr != NULL)
73867+ return 1;
73868+
73869+ nentry = acl_alloc(sizeof (struct name_entry));
73870+ if (nentry == NULL)
73871+ return 0;
73872+ ientry = acl_alloc(sizeof (struct inodev_entry));
73873+ if (ientry == NULL)
73874+ return 0;
73875+ ientry->nentry = nentry;
73876+
73877+ nentry->key = key;
73878+ nentry->name = name;
73879+ nentry->inode = inode;
73880+ nentry->device = device;
73881+ nentry->len = len;
73882+ nentry->deleted = deleted;
73883+
73884+ nentry->prev = NULL;
73885+ curr = &polstate->name_set.n_hash[index];
73886+ if (*curr != NULL)
73887+ (*curr)->prev = nentry;
73888+ nentry->next = *curr;
73889+ *curr = nentry;
73890+
73891+ /* insert us into the table searchable by inode/dev */
73892+ __insert_inodev_entry(polstate, ientry);
73893+
73894+ return 1;
73895+}
73896+
73897+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
73898+
73899+static void *
73900+create_table(__u32 * len, int elementsize)
73901+{
73902+ unsigned int table_sizes[] = {
73903+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
73904+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
73905+ 4194301, 8388593, 16777213, 33554393, 67108859
73906+ };
73907+ void *newtable = NULL;
73908+ unsigned int pwr = 0;
73909+
73910+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
73911+ table_sizes[pwr] <= *len)
73912+ pwr++;
73913+
73914+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
73915+ return newtable;
73916+
73917+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
73918+ newtable =
73919+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
73920+ else
73921+ newtable = vmalloc(table_sizes[pwr] * elementsize);
73922+
73923+ *len = table_sizes[pwr];
73924+
73925+ return newtable;
73926+}
73927+
73928+static int
73929+init_variables(const struct gr_arg *arg, bool reload)
73930+{
73931+ struct task_struct *reaper = init_pid_ns.child_reaper;
73932+ unsigned int stacksize;
73933+
73934+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
73935+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
73936+ polstate->name_set.n_size = arg->role_db.num_objects;
73937+ polstate->inodev_set.i_size = arg->role_db.num_objects;
73938+
73939+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
73940+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
73941+ return 1;
73942+
73943+ if (!reload) {
73944+ if (!gr_init_uidset())
73945+ return 1;
73946+ }
73947+
73948+ /* set up the stack that holds allocation info */
73949+
73950+ stacksize = arg->role_db.num_pointers + 5;
73951+
73952+ if (!acl_alloc_stack_init(stacksize))
73953+ return 1;
73954+
73955+ if (!reload) {
73956+ /* grab reference for the real root dentry and vfsmount */
73957+ get_fs_root(reaper->fs, &gr_real_root);
73958+
73959+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
73960+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
73961+#endif
73962+
73963+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73964+ if (fakefs_obj_rw == NULL)
73965+ return 1;
73966+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
73967+
73968+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
73969+ if (fakefs_obj_rwx == NULL)
73970+ return 1;
73971+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
73972+ }
73973+
73974+ polstate->subj_map_set.s_hash =
73975+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
73976+ polstate->acl_role_set.r_hash =
73977+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
73978+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
73979+ polstate->inodev_set.i_hash =
73980+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
73981+
73982+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
73983+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
73984+ return 1;
73985+
73986+ memset(polstate->subj_map_set.s_hash, 0,
73987+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
73988+ memset(polstate->acl_role_set.r_hash, 0,
73989+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
73990+ memset(polstate->name_set.n_hash, 0,
73991+ sizeof (struct name_entry *) * polstate->name_set.n_size);
73992+ memset(polstate->inodev_set.i_hash, 0,
73993+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
73994+
73995+ return 0;
73996+}
73997+
73998+/* free information not needed after startup
73999+ currently contains user->kernel pointer mappings for subjects
74000+*/
74001+
74002+static void
74003+free_init_variables(void)
74004+{
74005+ __u32 i;
74006+
74007+ if (polstate->subj_map_set.s_hash) {
74008+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74009+ if (polstate->subj_map_set.s_hash[i]) {
74010+ kfree(polstate->subj_map_set.s_hash[i]);
74011+ polstate->subj_map_set.s_hash[i] = NULL;
74012+ }
74013+ }
74014+
74015+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74016+ PAGE_SIZE)
74017+ kfree(polstate->subj_map_set.s_hash);
74018+ else
74019+ vfree(polstate->subj_map_set.s_hash);
74020+ }
74021+
74022+ return;
74023+}
74024+
74025+static void
74026+free_variables(bool reload)
74027+{
74028+ struct acl_subject_label *s;
74029+ struct acl_role_label *r;
74030+ struct task_struct *task, *task2;
74031+ unsigned int x;
74032+
74033+ if (!reload) {
74034+ gr_clear_learn_entries();
74035+
74036+ read_lock(&tasklist_lock);
74037+ do_each_thread(task2, task) {
74038+ task->acl_sp_role = 0;
74039+ task->acl_role_id = 0;
74040+ task->inherited = 0;
74041+ task->acl = NULL;
74042+ task->role = NULL;
74043+ } while_each_thread(task2, task);
74044+ read_unlock(&tasklist_lock);
74045+
74046+ kfree(fakefs_obj_rw);
74047+ fakefs_obj_rw = NULL;
74048+ kfree(fakefs_obj_rwx);
74049+ fakefs_obj_rwx = NULL;
74050+
74051+ /* release the reference to the real root dentry and vfsmount */
74052+ path_put(&gr_real_root);
74053+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74054+ }
74055+
74056+ /* free all object hash tables */
74057+
74058+ FOR_EACH_ROLE_START(r)
74059+ if (r->subj_hash == NULL)
74060+ goto next_role;
74061+ FOR_EACH_SUBJECT_START(r, s, x)
74062+ if (s->obj_hash == NULL)
74063+ break;
74064+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74065+ kfree(s->obj_hash);
74066+ else
74067+ vfree(s->obj_hash);
74068+ FOR_EACH_SUBJECT_END(s, x)
74069+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74070+ if (s->obj_hash == NULL)
74071+ break;
74072+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74073+ kfree(s->obj_hash);
74074+ else
74075+ vfree(s->obj_hash);
74076+ FOR_EACH_NESTED_SUBJECT_END(s)
74077+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74078+ kfree(r->subj_hash);
74079+ else
74080+ vfree(r->subj_hash);
74081+ r->subj_hash = NULL;
74082+next_role:
74083+ FOR_EACH_ROLE_END(r)
74084+
74085+ acl_free_all();
74086+
74087+ if (polstate->acl_role_set.r_hash) {
74088+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74089+ PAGE_SIZE)
74090+ kfree(polstate->acl_role_set.r_hash);
74091+ else
74092+ vfree(polstate->acl_role_set.r_hash);
74093+ }
74094+ if (polstate->name_set.n_hash) {
74095+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74096+ PAGE_SIZE)
74097+ kfree(polstate->name_set.n_hash);
74098+ else
74099+ vfree(polstate->name_set.n_hash);
74100+ }
74101+
74102+ if (polstate->inodev_set.i_hash) {
74103+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74104+ PAGE_SIZE)
74105+ kfree(polstate->inodev_set.i_hash);
74106+ else
74107+ vfree(polstate->inodev_set.i_hash);
74108+ }
74109+
74110+ if (!reload)
74111+ gr_free_uidset();
74112+
74113+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74114+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74115+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74116+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74117+
74118+ polstate->default_role = NULL;
74119+ polstate->kernel_role = NULL;
74120+ polstate->role_list = NULL;
74121+
74122+ return;
74123+}
74124+
74125+static struct acl_subject_label *
74126+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74127+
74128+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74129+{
74130+ unsigned int len = strnlen_user(*name, maxlen);
74131+ char *tmp;
74132+
74133+ if (!len || len >= maxlen)
74134+ return -EINVAL;
74135+
74136+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74137+ return -ENOMEM;
74138+
74139+ if (copy_from_user(tmp, *name, len))
74140+ return -EFAULT;
74141+
74142+ tmp[len-1] = '\0';
74143+ *name = tmp;
74144+
74145+ return 0;
74146+}
74147+
74148+static int
74149+copy_user_glob(struct acl_object_label *obj)
74150+{
74151+ struct acl_object_label *g_tmp, **guser;
74152+ int error;
74153+
74154+ if (obj->globbed == NULL)
74155+ return 0;
74156+
74157+ guser = &obj->globbed;
74158+ while (*guser) {
74159+ g_tmp = (struct acl_object_label *)
74160+ acl_alloc(sizeof (struct acl_object_label));
74161+ if (g_tmp == NULL)
74162+ return -ENOMEM;
74163+
74164+ if (copy_acl_object_label(g_tmp, *guser))
74165+ return -EFAULT;
74166+
74167+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74168+ if (error)
74169+ return error;
74170+
74171+ *guser = g_tmp;
74172+ guser = &(g_tmp->next);
74173+ }
74174+
74175+ return 0;
74176+}
74177+
74178+static int
74179+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74180+ struct acl_role_label *role)
74181+{
74182+ struct acl_object_label *o_tmp;
74183+ int ret;
74184+
74185+ while (userp) {
74186+ if ((o_tmp = (struct acl_object_label *)
74187+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74188+ return -ENOMEM;
74189+
74190+ if (copy_acl_object_label(o_tmp, userp))
74191+ return -EFAULT;
74192+
74193+ userp = o_tmp->prev;
74194+
74195+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74196+ if (ret)
74197+ return ret;
74198+
74199+ insert_acl_obj_label(o_tmp, subj);
74200+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74201+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74202+ return -ENOMEM;
74203+
74204+ ret = copy_user_glob(o_tmp);
74205+ if (ret)
74206+ return ret;
74207+
74208+ if (o_tmp->nested) {
74209+ int already_copied;
74210+
74211+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74212+ if (IS_ERR(o_tmp->nested))
74213+ return PTR_ERR(o_tmp->nested);
74214+
74215+ /* insert into nested subject list if we haven't copied this one yet
74216+ to prevent duplicate entries */
74217+ if (!already_copied) {
74218+ o_tmp->nested->next = role->hash->first;
74219+ role->hash->first = o_tmp->nested;
74220+ }
74221+ }
74222+ }
74223+
74224+ return 0;
74225+}
74226+
74227+static __u32
74228+count_user_subjs(struct acl_subject_label *userp)
74229+{
74230+ struct acl_subject_label s_tmp;
74231+ __u32 num = 0;
74232+
74233+ while (userp) {
74234+ if (copy_acl_subject_label(&s_tmp, userp))
74235+ break;
74236+
74237+ userp = s_tmp.prev;
74238+ }
74239+
74240+ return num;
74241+}
74242+
74243+static int
74244+copy_user_allowedips(struct acl_role_label *rolep)
74245+{
74246+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74247+
74248+ ruserip = rolep->allowed_ips;
74249+
74250+ while (ruserip) {
74251+ rlast = rtmp;
74252+
74253+ if ((rtmp = (struct role_allowed_ip *)
74254+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74255+ return -ENOMEM;
74256+
74257+ if (copy_role_allowed_ip(rtmp, ruserip))
74258+ return -EFAULT;
74259+
74260+ ruserip = rtmp->prev;
74261+
74262+ if (!rlast) {
74263+ rtmp->prev = NULL;
74264+ rolep->allowed_ips = rtmp;
74265+ } else {
74266+ rlast->next = rtmp;
74267+ rtmp->prev = rlast;
74268+ }
74269+
74270+ if (!ruserip)
74271+ rtmp->next = NULL;
74272+ }
74273+
74274+ return 0;
74275+}
74276+
74277+static int
74278+copy_user_transitions(struct acl_role_label *rolep)
74279+{
74280+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74281+ int error;
74282+
74283+ rusertp = rolep->transitions;
74284+
74285+ while (rusertp) {
74286+ rlast = rtmp;
74287+
74288+ if ((rtmp = (struct role_transition *)
74289+ acl_alloc(sizeof (struct role_transition))) == NULL)
74290+ return -ENOMEM;
74291+
74292+ if (copy_role_transition(rtmp, rusertp))
74293+ return -EFAULT;
74294+
74295+ rusertp = rtmp->prev;
74296+
74297+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74298+ if (error)
74299+ return error;
74300+
74301+ if (!rlast) {
74302+ rtmp->prev = NULL;
74303+ rolep->transitions = rtmp;
74304+ } else {
74305+ rlast->next = rtmp;
74306+ rtmp->prev = rlast;
74307+ }
74308+
74309+ if (!rusertp)
74310+ rtmp->next = NULL;
74311+ }
74312+
74313+ return 0;
74314+}
74315+
74316+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74317+{
74318+ struct acl_object_label o_tmp;
74319+ __u32 num = 0;
74320+
74321+ while (userp) {
74322+ if (copy_acl_object_label(&o_tmp, userp))
74323+ break;
74324+
74325+ userp = o_tmp.prev;
74326+ num++;
74327+ }
74328+
74329+ return num;
74330+}
74331+
74332+static struct acl_subject_label *
74333+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74334+{
74335+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74336+ __u32 num_objs;
74337+ struct acl_ip_label **i_tmp, *i_utmp2;
74338+ struct gr_hash_struct ghash;
74339+ struct subject_map *subjmap;
74340+ unsigned int i_num;
74341+ int err;
74342+
74343+ if (already_copied != NULL)
74344+ *already_copied = 0;
74345+
74346+ s_tmp = lookup_subject_map(userp);
74347+
74348+ /* we've already copied this subject into the kernel, just return
74349+ the reference to it, and don't copy it over again
74350+ */
74351+ if (s_tmp) {
74352+ if (already_copied != NULL)
74353+ *already_copied = 1;
74354+ return(s_tmp);
74355+ }
74356+
74357+ if ((s_tmp = (struct acl_subject_label *)
74358+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74359+ return ERR_PTR(-ENOMEM);
74360+
74361+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74362+ if (subjmap == NULL)
74363+ return ERR_PTR(-ENOMEM);
74364+
74365+ subjmap->user = userp;
74366+ subjmap->kernel = s_tmp;
74367+ insert_subj_map_entry(subjmap);
74368+
74369+ if (copy_acl_subject_label(s_tmp, userp))
74370+ return ERR_PTR(-EFAULT);
74371+
74372+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74373+ if (err)
74374+ return ERR_PTR(err);
74375+
74376+ if (!strcmp(s_tmp->filename, "/"))
74377+ role->root_label = s_tmp;
74378+
74379+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74380+ return ERR_PTR(-EFAULT);
74381+
74382+ /* copy user and group transition tables */
74383+
74384+ if (s_tmp->user_trans_num) {
74385+ uid_t *uidlist;
74386+
74387+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74388+ if (uidlist == NULL)
74389+ return ERR_PTR(-ENOMEM);
74390+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74391+ return ERR_PTR(-EFAULT);
74392+
74393+ s_tmp->user_transitions = uidlist;
74394+ }
74395+
74396+ if (s_tmp->group_trans_num) {
74397+ gid_t *gidlist;
74398+
74399+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74400+ if (gidlist == NULL)
74401+ return ERR_PTR(-ENOMEM);
74402+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74403+ return ERR_PTR(-EFAULT);
74404+
74405+ s_tmp->group_transitions = gidlist;
74406+ }
74407+
74408+ /* set up object hash table */
74409+ num_objs = count_user_objs(ghash.first);
74410+
74411+ s_tmp->obj_hash_size = num_objs;
74412+ s_tmp->obj_hash =
74413+ (struct acl_object_label **)
74414+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74415+
74416+ if (!s_tmp->obj_hash)
74417+ return ERR_PTR(-ENOMEM);
74418+
74419+ memset(s_tmp->obj_hash, 0,
74420+ s_tmp->obj_hash_size *
74421+ sizeof (struct acl_object_label *));
74422+
74423+ /* add in objects */
74424+ err = copy_user_objs(ghash.first, s_tmp, role);
74425+
74426+ if (err)
74427+ return ERR_PTR(err);
74428+
74429+ /* set pointer for parent subject */
74430+ if (s_tmp->parent_subject) {
74431+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74432+
74433+ if (IS_ERR(s_tmp2))
74434+ return s_tmp2;
74435+
74436+ s_tmp->parent_subject = s_tmp2;
74437+ }
74438+
74439+ /* add in ip acls */
74440+
74441+ if (!s_tmp->ip_num) {
74442+ s_tmp->ips = NULL;
74443+ goto insert;
74444+ }
74445+
74446+ i_tmp =
74447+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74448+ sizeof (struct acl_ip_label *));
74449+
74450+ if (!i_tmp)
74451+ return ERR_PTR(-ENOMEM);
74452+
74453+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74454+ *(i_tmp + i_num) =
74455+ (struct acl_ip_label *)
74456+ acl_alloc(sizeof (struct acl_ip_label));
74457+ if (!*(i_tmp + i_num))
74458+ return ERR_PTR(-ENOMEM);
74459+
74460+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74461+ return ERR_PTR(-EFAULT);
74462+
74463+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74464+ return ERR_PTR(-EFAULT);
74465+
74466+ if ((*(i_tmp + i_num))->iface == NULL)
74467+ continue;
74468+
74469+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74470+ if (err)
74471+ return ERR_PTR(err);
74472+ }
74473+
74474+ s_tmp->ips = i_tmp;
74475+
74476+insert:
74477+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74478+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74479+ return ERR_PTR(-ENOMEM);
74480+
74481+ return s_tmp;
74482+}
74483+
74484+static int
74485+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74486+{
74487+ struct acl_subject_label s_pre;
74488+ struct acl_subject_label * ret;
74489+ int err;
74490+
74491+ while (userp) {
74492+ if (copy_acl_subject_label(&s_pre, userp))
74493+ return -EFAULT;
74494+
74495+ ret = do_copy_user_subj(userp, role, NULL);
74496+
74497+ err = PTR_ERR(ret);
74498+ if (IS_ERR(ret))
74499+ return err;
74500+
74501+ insert_acl_subj_label(ret, role);
74502+
74503+ userp = s_pre.prev;
74504+ }
74505+
74506+ return 0;
74507+}
74508+
74509+static int
74510+copy_user_acl(struct gr_arg *arg)
74511+{
74512+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74513+ struct acl_subject_label *subj_list;
74514+ struct sprole_pw *sptmp;
74515+ struct gr_hash_struct *ghash;
74516+ uid_t *domainlist;
74517+ unsigned int r_num;
74518+ int err = 0;
74519+ __u16 i;
74520+ __u32 num_subjs;
74521+
74522+ /* we need a default and kernel role */
74523+ if (arg->role_db.num_roles < 2)
74524+ return -EINVAL;
74525+
74526+ /* copy special role authentication info from userspace */
74527+
74528+ polstate->num_sprole_pws = arg->num_sprole_pws;
74529+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74530+
74531+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74532+ return -ENOMEM;
74533+
74534+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74535+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74536+ if (!sptmp)
74537+ return -ENOMEM;
74538+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74539+ return -EFAULT;
74540+
74541+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74542+ if (err)
74543+ return err;
74544+
74545+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74546+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74547+#endif
74548+
74549+ polstate->acl_special_roles[i] = sptmp;
74550+ }
74551+
74552+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74553+
74554+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74555+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74556+
74557+ if (!r_tmp)
74558+ return -ENOMEM;
74559+
74560+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74561+ return -EFAULT;
74562+
74563+ if (copy_acl_role_label(r_tmp, r_utmp2))
74564+ return -EFAULT;
74565+
74566+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74567+ if (err)
74568+ return err;
74569+
74570+ if (!strcmp(r_tmp->rolename, "default")
74571+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74572+ polstate->default_role = r_tmp;
74573+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74574+ polstate->kernel_role = r_tmp;
74575+ }
74576+
74577+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74578+ return -ENOMEM;
74579+
74580+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74581+ return -EFAULT;
74582+
74583+ r_tmp->hash = ghash;
74584+
74585+ num_subjs = count_user_subjs(r_tmp->hash->first);
74586+
74587+ r_tmp->subj_hash_size = num_subjs;
74588+ r_tmp->subj_hash =
74589+ (struct acl_subject_label **)
74590+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74591+
74592+ if (!r_tmp->subj_hash)
74593+ return -ENOMEM;
74594+
74595+ err = copy_user_allowedips(r_tmp);
74596+ if (err)
74597+ return err;
74598+
74599+ /* copy domain info */
74600+ if (r_tmp->domain_children != NULL) {
74601+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74602+ if (domainlist == NULL)
74603+ return -ENOMEM;
74604+
74605+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74606+ return -EFAULT;
74607+
74608+ r_tmp->domain_children = domainlist;
74609+ }
74610+
74611+ err = copy_user_transitions(r_tmp);
74612+ if (err)
74613+ return err;
74614+
74615+ memset(r_tmp->subj_hash, 0,
74616+ r_tmp->subj_hash_size *
74617+ sizeof (struct acl_subject_label *));
74618+
74619+ /* acquire the list of subjects, then NULL out
74620+ the list prior to parsing the subjects for this role,
74621+ as during this parsing the list is replaced with a list
74622+ of *nested* subjects for the role
74623+ */
74624+ subj_list = r_tmp->hash->first;
74625+
74626+ /* set nested subject list to null */
74627+ r_tmp->hash->first = NULL;
74628+
74629+ err = copy_user_subjs(subj_list, r_tmp);
74630+
74631+ if (err)
74632+ return err;
74633+
74634+ insert_acl_role_label(r_tmp);
74635+ }
74636+
74637+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74638+ return -EINVAL;
74639+
74640+ return err;
74641+}
74642+
74643+static int gracl_reload_apply_policies(void *reload)
74644+{
74645+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74646+ struct task_struct *task, *task2;
74647+ struct acl_role_label *role, *rtmp;
74648+ struct acl_subject_label *subj;
74649+ const struct cred *cred;
74650+ int role_applied;
74651+ int ret = 0;
74652+
74653+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74654+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74655+
74656+ /* first make sure we'll be able to apply the new policy cleanly */
74657+ do_each_thread(task2, task) {
74658+ if (task->exec_file == NULL)
74659+ continue;
74660+ role_applied = 0;
74661+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74662+ /* preserve special roles */
74663+ FOR_EACH_ROLE_START(role)
74664+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74665+ rtmp = task->role;
74666+ task->role = role;
74667+ role_applied = 1;
74668+ break;
74669+ }
74670+ FOR_EACH_ROLE_END(role)
74671+ }
74672+ if (!role_applied) {
74673+ cred = __task_cred(task);
74674+ rtmp = task->role;
74675+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74676+ }
74677+ /* this handles non-nested inherited subjects, nested subjects will still
74678+ be dropped currently */
74679+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74680+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74681+ /* change the role back so that we've made no modifications to the policy */
74682+ task->role = rtmp;
74683+
74684+ if (subj == NULL || task->tmpacl == NULL) {
74685+ ret = -EINVAL;
74686+ goto out;
74687+ }
74688+ } while_each_thread(task2, task);
74689+
74690+ /* now actually apply the policy */
74691+
74692+ do_each_thread(task2, task) {
74693+ if (task->exec_file) {
74694+ role_applied = 0;
74695+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74696+ /* preserve special roles */
74697+ FOR_EACH_ROLE_START(role)
74698+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74699+ task->role = role;
74700+ role_applied = 1;
74701+ break;
74702+ }
74703+ FOR_EACH_ROLE_END(role)
74704+ }
74705+ if (!role_applied) {
74706+ cred = __task_cred(task);
74707+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74708+ }
74709+ /* this handles non-nested inherited subjects, nested subjects will still
74710+ be dropped currently */
74711+ if (!reload_state->oldmode && task->inherited)
74712+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74713+ else {
74714+ /* looked up and tagged to the task previously */
74715+ subj = task->tmpacl;
74716+ }
74717+ /* subj will be non-null */
74718+ __gr_apply_subject_to_task(polstate, task, subj);
74719+ if (reload_state->oldmode) {
74720+ task->acl_role_id = 0;
74721+ task->acl_sp_role = 0;
74722+ task->inherited = 0;
74723+ }
74724+ } else {
74725+ // it's a kernel process
74726+ task->role = polstate->kernel_role;
74727+ task->acl = polstate->kernel_role->root_label;
74728+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
74729+ task->acl->mode &= ~GR_PROCFIND;
74730+#endif
74731+ }
74732+ } while_each_thread(task2, task);
74733+
74734+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
74735+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
74736+
74737+out:
74738+
74739+ return ret;
74740+}
74741+
74742+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
74743+{
74744+ struct gr_reload_state new_reload_state = { };
74745+ int err;
74746+
74747+ new_reload_state.oldpolicy_ptr = polstate;
74748+ new_reload_state.oldalloc_ptr = current_alloc_state;
74749+ new_reload_state.oldmode = oldmode;
74750+
74751+ current_alloc_state = &new_reload_state.newalloc;
74752+ polstate = &new_reload_state.newpolicy;
74753+
74754+ /* everything relevant is now saved off, copy in the new policy */
74755+ if (init_variables(args, true)) {
74756+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74757+ err = -ENOMEM;
74758+ goto error;
74759+ }
74760+
74761+ err = copy_user_acl(args);
74762+ free_init_variables();
74763+ if (err)
74764+ goto error;
74765+ /* the new policy is copied in, with the old policy available via saved_state
74766+ first go through applying roles, making sure to preserve special roles
74767+ then apply new subjects, making sure to preserve inherited and nested subjects,
74768+ though currently only inherited subjects will be preserved
74769+ */
74770+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
74771+ if (err)
74772+ goto error;
74773+
74774+ /* we've now applied the new policy, so restore the old policy state to free it */
74775+ polstate = &new_reload_state.oldpolicy;
74776+ current_alloc_state = &new_reload_state.oldalloc;
74777+ free_variables(true);
74778+
74779+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
74780+ to running_polstate/current_alloc_state inside stop_machine
74781+ */
74782+ err = 0;
74783+ goto out;
74784+error:
74785+ /* on error of loading the new policy, we'll just keep the previous
74786+ policy set around
74787+ */
74788+ free_variables(true);
74789+
74790+ /* doesn't affect runtime, but maintains consistent state */
74791+out:
74792+ polstate = new_reload_state.oldpolicy_ptr;
74793+ current_alloc_state = new_reload_state.oldalloc_ptr;
74794+
74795+ return err;
74796+}
74797+
74798+static int
74799+gracl_init(struct gr_arg *args)
74800+{
74801+ int error = 0;
74802+
74803+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
74804+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
74805+
74806+ if (init_variables(args, false)) {
74807+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74808+ error = -ENOMEM;
74809+ goto out;
74810+ }
74811+
74812+ error = copy_user_acl(args);
74813+ free_init_variables();
74814+ if (error)
74815+ goto out;
74816+
74817+ error = gr_set_acls(0);
74818+ if (error)
74819+ goto out;
74820+
74821+ gr_enable_rbac_system();
74822+
74823+ return 0;
74824+
74825+out:
74826+ free_variables(false);
74827+ return error;
74828+}
74829+
74830+static int
74831+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
74832+ unsigned char **sum)
74833+{
74834+ struct acl_role_label *r;
74835+ struct role_allowed_ip *ipp;
74836+ struct role_transition *trans;
74837+ unsigned int i;
74838+ int found = 0;
74839+ u32 curr_ip = current->signal->curr_ip;
74840+
74841+ current->signal->saved_ip = curr_ip;
74842+
74843+ /* check transition table */
74844+
74845+ for (trans = current->role->transitions; trans; trans = trans->next) {
74846+ if (!strcmp(rolename, trans->rolename)) {
74847+ found = 1;
74848+ break;
74849+ }
74850+ }
74851+
74852+ if (!found)
74853+ return 0;
74854+
74855+ /* handle special roles that do not require authentication
74856+ and check ip */
74857+
74858+ FOR_EACH_ROLE_START(r)
74859+ if (!strcmp(rolename, r->rolename) &&
74860+ (r->roletype & GR_ROLE_SPECIAL)) {
74861+ found = 0;
74862+ if (r->allowed_ips != NULL) {
74863+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
74864+ if ((ntohl(curr_ip) & ipp->netmask) ==
74865+ (ntohl(ipp->addr) & ipp->netmask))
74866+ found = 1;
74867+ }
74868+ } else
74869+ found = 2;
74870+ if (!found)
74871+ return 0;
74872+
74873+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
74874+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
74875+ *salt = NULL;
74876+ *sum = NULL;
74877+ return 1;
74878+ }
74879+ }
74880+ FOR_EACH_ROLE_END(r)
74881+
74882+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74883+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
74884+ *salt = polstate->acl_special_roles[i]->salt;
74885+ *sum = polstate->acl_special_roles[i]->sum;
74886+ return 1;
74887+ }
74888+ }
74889+
74890+ return 0;
74891+}
74892+
74893+int gr_check_secure_terminal(struct task_struct *task)
74894+{
74895+ struct task_struct *p, *p2, *p3;
74896+ struct files_struct *files;
74897+ struct fdtable *fdt;
74898+ struct file *our_file = NULL, *file;
74899+ int i;
74900+
74901+ if (task->signal->tty == NULL)
74902+ return 1;
74903+
74904+ files = get_files_struct(task);
74905+ if (files != NULL) {
74906+ rcu_read_lock();
74907+ fdt = files_fdtable(files);
74908+ for (i=0; i < fdt->max_fds; i++) {
74909+ file = fcheck_files(files, i);
74910+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
74911+ get_file(file);
74912+ our_file = file;
74913+ }
74914+ }
74915+ rcu_read_unlock();
74916+ put_files_struct(files);
74917+ }
74918+
74919+ if (our_file == NULL)
74920+ return 1;
74921+
74922+ read_lock(&tasklist_lock);
74923+ do_each_thread(p2, p) {
74924+ files = get_files_struct(p);
74925+ if (files == NULL ||
74926+ (p->signal && p->signal->tty == task->signal->tty)) {
74927+ if (files != NULL)
74928+ put_files_struct(files);
74929+ continue;
74930+ }
74931+ rcu_read_lock();
74932+ fdt = files_fdtable(files);
74933+ for (i=0; i < fdt->max_fds; i++) {
74934+ file = fcheck_files(files, i);
74935+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
74936+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
74937+ p3 = task;
74938+ while (task_pid_nr(p3) > 0) {
74939+ if (p3 == p)
74940+ break;
74941+ p3 = p3->real_parent;
74942+ }
74943+ if (p3 == p)
74944+ break;
74945+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
74946+ gr_handle_alertkill(p);
74947+ rcu_read_unlock();
74948+ put_files_struct(files);
74949+ read_unlock(&tasklist_lock);
74950+ fput(our_file);
74951+ return 0;
74952+ }
74953+ }
74954+ rcu_read_unlock();
74955+ put_files_struct(files);
74956+ } while_each_thread(p2, p);
74957+ read_unlock(&tasklist_lock);
74958+
74959+ fput(our_file);
74960+ return 1;
74961+}
74962+
74963+ssize_t
74964+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
74965+{
74966+ struct gr_arg_wrapper uwrap;
74967+ unsigned char *sprole_salt = NULL;
74968+ unsigned char *sprole_sum = NULL;
74969+ int error = 0;
74970+ int error2 = 0;
74971+ size_t req_count = 0;
74972+ unsigned char oldmode = 0;
74973+
74974+ mutex_lock(&gr_dev_mutex);
74975+
74976+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
74977+ error = -EPERM;
74978+ goto out;
74979+ }
74980+
74981+#ifdef CONFIG_COMPAT
74982+ pax_open_kernel();
74983+ if (is_compat_task()) {
74984+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
74985+ copy_gr_arg = &copy_gr_arg_compat;
74986+ copy_acl_object_label = &copy_acl_object_label_compat;
74987+ copy_acl_subject_label = &copy_acl_subject_label_compat;
74988+ copy_acl_role_label = &copy_acl_role_label_compat;
74989+ copy_acl_ip_label = &copy_acl_ip_label_compat;
74990+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
74991+ copy_role_transition = &copy_role_transition_compat;
74992+ copy_sprole_pw = &copy_sprole_pw_compat;
74993+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
74994+ copy_pointer_from_array = &copy_pointer_from_array_compat;
74995+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
74996+ } else {
74997+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
74998+ copy_gr_arg = &copy_gr_arg_normal;
74999+ copy_acl_object_label = &copy_acl_object_label_normal;
75000+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75001+ copy_acl_role_label = &copy_acl_role_label_normal;
75002+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75003+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75004+ copy_role_transition = &copy_role_transition_normal;
75005+ copy_sprole_pw = &copy_sprole_pw_normal;
75006+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75007+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75008+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75009+ }
75010+ pax_close_kernel();
75011+#endif
75012+
75013+ req_count = get_gr_arg_wrapper_size();
75014+
75015+ if (count != req_count) {
75016+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75017+ error = -EINVAL;
75018+ goto out;
75019+ }
75020+
75021+
75022+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75023+ gr_auth_expires = 0;
75024+ gr_auth_attempts = 0;
75025+ }
75026+
75027+ error = copy_gr_arg_wrapper(buf, &uwrap);
75028+ if (error)
75029+ goto out;
75030+
75031+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75032+ if (error)
75033+ goto out;
75034+
75035+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75036+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75037+ time_after(gr_auth_expires, get_seconds())) {
75038+ error = -EBUSY;
75039+ goto out;
75040+ }
75041+
75042+ /* if non-root trying to do anything other than use a special role,
75043+ do not attempt authentication, do not count towards authentication
75044+ locking
75045+ */
75046+
75047+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75048+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75049+ gr_is_global_nonroot(current_uid())) {
75050+ error = -EPERM;
75051+ goto out;
75052+ }
75053+
75054+ /* ensure pw and special role name are null terminated */
75055+
75056+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75057+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75058+
75059+ /* Okay.
75060+ * We have our enough of the argument structure..(we have yet
75061+ * to copy_from_user the tables themselves) . Copy the tables
75062+ * only if we need them, i.e. for loading operations. */
75063+
75064+ switch (gr_usermode->mode) {
75065+ case GR_STATUS:
75066+ if (gr_acl_is_enabled()) {
75067+ error = 1;
75068+ if (!gr_check_secure_terminal(current))
75069+ error = 3;
75070+ } else
75071+ error = 2;
75072+ goto out;
75073+ case GR_SHUTDOWN:
75074+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75075+ stop_machine(gr_rbac_disable, NULL, NULL);
75076+ free_variables(false);
75077+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75078+ memset(gr_system_salt, 0, GR_SALT_LEN);
75079+ memset(gr_system_sum, 0, GR_SHA_LEN);
75080+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75081+ } else if (gr_acl_is_enabled()) {
75082+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75083+ error = -EPERM;
75084+ } else {
75085+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75086+ error = -EAGAIN;
75087+ }
75088+ break;
75089+ case GR_ENABLE:
75090+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75091+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75092+ else {
75093+ if (gr_acl_is_enabled())
75094+ error = -EAGAIN;
75095+ else
75096+ error = error2;
75097+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75098+ }
75099+ break;
75100+ case GR_OLDRELOAD:
75101+ oldmode = 1;
75102+ case GR_RELOAD:
75103+ if (!gr_acl_is_enabled()) {
75104+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75105+ error = -EAGAIN;
75106+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75107+ error2 = gracl_reload(gr_usermode, oldmode);
75108+ if (!error2)
75109+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75110+ else {
75111+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75112+ error = error2;
75113+ }
75114+ } else {
75115+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75116+ error = -EPERM;
75117+ }
75118+ break;
75119+ case GR_SEGVMOD:
75120+ if (unlikely(!gr_acl_is_enabled())) {
75121+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75122+ error = -EAGAIN;
75123+ break;
75124+ }
75125+
75126+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75127+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75128+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75129+ struct acl_subject_label *segvacl;
75130+ segvacl =
75131+ lookup_acl_subj_label(gr_usermode->segv_inode,
75132+ gr_usermode->segv_device,
75133+ current->role);
75134+ if (segvacl) {
75135+ segvacl->crashes = 0;
75136+ segvacl->expires = 0;
75137+ }
75138+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75139+ gr_remove_uid(gr_usermode->segv_uid);
75140+ }
75141+ } else {
75142+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75143+ error = -EPERM;
75144+ }
75145+ break;
75146+ case GR_SPROLE:
75147+ case GR_SPROLEPAM:
75148+ if (unlikely(!gr_acl_is_enabled())) {
75149+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75150+ error = -EAGAIN;
75151+ break;
75152+ }
75153+
75154+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75155+ current->role->expires = 0;
75156+ current->role->auth_attempts = 0;
75157+ }
75158+
75159+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75160+ time_after(current->role->expires, get_seconds())) {
75161+ error = -EBUSY;
75162+ goto out;
75163+ }
75164+
75165+ if (lookup_special_role_auth
75166+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75167+ && ((!sprole_salt && !sprole_sum)
75168+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75169+ char *p = "";
75170+ assign_special_role(gr_usermode->sp_role);
75171+ read_lock(&tasklist_lock);
75172+ if (current->real_parent)
75173+ p = current->real_parent->role->rolename;
75174+ read_unlock(&tasklist_lock);
75175+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75176+ p, acl_sp_role_value);
75177+ } else {
75178+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75179+ error = -EPERM;
75180+ if(!(current->role->auth_attempts++))
75181+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75182+
75183+ goto out;
75184+ }
75185+ break;
75186+ case GR_UNSPROLE:
75187+ if (unlikely(!gr_acl_is_enabled())) {
75188+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75189+ error = -EAGAIN;
75190+ break;
75191+ }
75192+
75193+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75194+ char *p = "";
75195+ int i = 0;
75196+
75197+ read_lock(&tasklist_lock);
75198+ if (current->real_parent) {
75199+ p = current->real_parent->role->rolename;
75200+ i = current->real_parent->acl_role_id;
75201+ }
75202+ read_unlock(&tasklist_lock);
75203+
75204+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75205+ gr_set_acls(1);
75206+ } else {
75207+ error = -EPERM;
75208+ goto out;
75209+ }
75210+ break;
75211+ default:
75212+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75213+ error = -EINVAL;
75214+ break;
75215+ }
75216+
75217+ if (error != -EPERM)
75218+ goto out;
75219+
75220+ if(!(gr_auth_attempts++))
75221+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75222+
75223+ out:
75224+ mutex_unlock(&gr_dev_mutex);
75225+
75226+ if (!error)
75227+ error = req_count;
75228+
75229+ return error;
75230+}
75231+
75232+int
75233+gr_set_acls(const int type)
75234+{
75235+ struct task_struct *task, *task2;
75236+ struct acl_role_label *role = current->role;
75237+ struct acl_subject_label *subj;
75238+ __u16 acl_role_id = current->acl_role_id;
75239+ const struct cred *cred;
75240+ int ret;
75241+
75242+ rcu_read_lock();
75243+ read_lock(&tasklist_lock);
75244+ read_lock(&grsec_exec_file_lock);
75245+ do_each_thread(task2, task) {
75246+ /* check to see if we're called from the exit handler,
75247+ if so, only replace ACLs that have inherited the admin
75248+ ACL */
75249+
75250+ if (type && (task->role != role ||
75251+ task->acl_role_id != acl_role_id))
75252+ continue;
75253+
75254+ task->acl_role_id = 0;
75255+ task->acl_sp_role = 0;
75256+ task->inherited = 0;
75257+
75258+ if (task->exec_file) {
75259+ cred = __task_cred(task);
75260+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75261+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75262+ if (subj == NULL) {
75263+ ret = -EINVAL;
75264+ read_unlock(&grsec_exec_file_lock);
75265+ read_unlock(&tasklist_lock);
75266+ rcu_read_unlock();
75267+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75268+ return ret;
75269+ }
75270+ __gr_apply_subject_to_task(polstate, task, subj);
75271+ } else {
75272+ // it's a kernel process
75273+ task->role = polstate->kernel_role;
75274+ task->acl = polstate->kernel_role->root_label;
75275+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75276+ task->acl->mode &= ~GR_PROCFIND;
75277+#endif
75278+ }
75279+ } while_each_thread(task2, task);
75280+ read_unlock(&grsec_exec_file_lock);
75281+ read_unlock(&tasklist_lock);
75282+ rcu_read_unlock();
75283+
75284+ return 0;
75285+}
75286diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75287new file mode 100644
75288index 0000000..39645c9
75289--- /dev/null
75290+++ b/grsecurity/gracl_res.c
75291@@ -0,0 +1,68 @@
75292+#include <linux/kernel.h>
75293+#include <linux/sched.h>
75294+#include <linux/gracl.h>
75295+#include <linux/grinternal.h>
75296+
75297+static const char *restab_log[] = {
75298+ [RLIMIT_CPU] = "RLIMIT_CPU",
75299+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75300+ [RLIMIT_DATA] = "RLIMIT_DATA",
75301+ [RLIMIT_STACK] = "RLIMIT_STACK",
75302+ [RLIMIT_CORE] = "RLIMIT_CORE",
75303+ [RLIMIT_RSS] = "RLIMIT_RSS",
75304+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75305+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75306+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75307+ [RLIMIT_AS] = "RLIMIT_AS",
75308+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75309+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75310+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75311+ [RLIMIT_NICE] = "RLIMIT_NICE",
75312+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75313+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75314+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75315+};
75316+
75317+void
75318+gr_log_resource(const struct task_struct *task,
75319+ const int res, const unsigned long wanted, const int gt)
75320+{
75321+ const struct cred *cred;
75322+ unsigned long rlim;
75323+
75324+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75325+ return;
75326+
75327+ // not yet supported resource
75328+ if (unlikely(!restab_log[res]))
75329+ return;
75330+
75331+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75332+ rlim = task_rlimit_max(task, res);
75333+ else
75334+ rlim = task_rlimit(task, res);
75335+
75336+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75337+ return;
75338+
75339+ rcu_read_lock();
75340+ cred = __task_cred(task);
75341+
75342+ if (res == RLIMIT_NPROC &&
75343+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75344+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75345+ goto out_rcu_unlock;
75346+ else if (res == RLIMIT_MEMLOCK &&
75347+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75348+ goto out_rcu_unlock;
75349+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75350+ goto out_rcu_unlock;
75351+ rcu_read_unlock();
75352+
75353+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75354+
75355+ return;
75356+out_rcu_unlock:
75357+ rcu_read_unlock();
75358+ return;
75359+}
75360diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75361new file mode 100644
75362index 0000000..218b66b
75363--- /dev/null
75364+++ b/grsecurity/gracl_segv.c
75365@@ -0,0 +1,324 @@
75366+#include <linux/kernel.h>
75367+#include <linux/mm.h>
75368+#include <asm/uaccess.h>
75369+#include <asm/errno.h>
75370+#include <asm/mman.h>
75371+#include <net/sock.h>
75372+#include <linux/file.h>
75373+#include <linux/fs.h>
75374+#include <linux/net.h>
75375+#include <linux/in.h>
75376+#include <linux/slab.h>
75377+#include <linux/types.h>
75378+#include <linux/sched.h>
75379+#include <linux/timer.h>
75380+#include <linux/gracl.h>
75381+#include <linux/grsecurity.h>
75382+#include <linux/grinternal.h>
75383+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75384+#include <linux/magic.h>
75385+#include <linux/pagemap.h>
75386+#include "../fs/btrfs/async-thread.h"
75387+#include "../fs/btrfs/ctree.h"
75388+#include "../fs/btrfs/btrfs_inode.h"
75389+#endif
75390+
75391+static struct crash_uid *uid_set;
75392+static unsigned short uid_used;
75393+static DEFINE_SPINLOCK(gr_uid_lock);
75394+extern rwlock_t gr_inode_lock;
75395+extern struct acl_subject_label *
75396+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75397+ struct acl_role_label *role);
75398+
75399+static inline dev_t __get_dev(const struct dentry *dentry)
75400+{
75401+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75402+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75403+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75404+ else
75405+#endif
75406+ return dentry->d_sb->s_dev;
75407+}
75408+
75409+static inline u64 __get_ino(const struct dentry *dentry)
75410+{
75411+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75412+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75413+ return btrfs_ino(dentry->d_inode);
75414+ else
75415+#endif
75416+ return dentry->d_inode->i_ino;
75417+}
75418+
75419+int
75420+gr_init_uidset(void)
75421+{
75422+ uid_set =
75423+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75424+ uid_used = 0;
75425+
75426+ return uid_set ? 1 : 0;
75427+}
75428+
75429+void
75430+gr_free_uidset(void)
75431+{
75432+ if (uid_set) {
75433+ struct crash_uid *tmpset;
75434+ spin_lock(&gr_uid_lock);
75435+ tmpset = uid_set;
75436+ uid_set = NULL;
75437+ uid_used = 0;
75438+ spin_unlock(&gr_uid_lock);
75439+ if (tmpset)
75440+ kfree(tmpset);
75441+ }
75442+
75443+ return;
75444+}
75445+
75446+int
75447+gr_find_uid(const uid_t uid)
75448+{
75449+ struct crash_uid *tmp = uid_set;
75450+ uid_t buid;
75451+ int low = 0, high = uid_used - 1, mid;
75452+
75453+ while (high >= low) {
75454+ mid = (low + high) >> 1;
75455+ buid = tmp[mid].uid;
75456+ if (buid == uid)
75457+ return mid;
75458+ if (buid > uid)
75459+ high = mid - 1;
75460+ if (buid < uid)
75461+ low = mid + 1;
75462+ }
75463+
75464+ return -1;
75465+}
75466+
75467+static __inline__ void
75468+gr_insertsort(void)
75469+{
75470+ unsigned short i, j;
75471+ struct crash_uid index;
75472+
75473+ for (i = 1; i < uid_used; i++) {
75474+ index = uid_set[i];
75475+ j = i;
75476+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75477+ uid_set[j] = uid_set[j - 1];
75478+ j--;
75479+ }
75480+ uid_set[j] = index;
75481+ }
75482+
75483+ return;
75484+}
75485+
75486+static __inline__ void
75487+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75488+{
75489+ int loc;
75490+ uid_t uid = GR_GLOBAL_UID(kuid);
75491+
75492+ if (uid_used == GR_UIDTABLE_MAX)
75493+ return;
75494+
75495+ loc = gr_find_uid(uid);
75496+
75497+ if (loc >= 0) {
75498+ uid_set[loc].expires = expires;
75499+ return;
75500+ }
75501+
75502+ uid_set[uid_used].uid = uid;
75503+ uid_set[uid_used].expires = expires;
75504+ uid_used++;
75505+
75506+ gr_insertsort();
75507+
75508+ return;
75509+}
75510+
75511+void
75512+gr_remove_uid(const unsigned short loc)
75513+{
75514+ unsigned short i;
75515+
75516+ for (i = loc + 1; i < uid_used; i++)
75517+ uid_set[i - 1] = uid_set[i];
75518+
75519+ uid_used--;
75520+
75521+ return;
75522+}
75523+
75524+int
75525+gr_check_crash_uid(const kuid_t kuid)
75526+{
75527+ int loc;
75528+ int ret = 0;
75529+ uid_t uid;
75530+
75531+ if (unlikely(!gr_acl_is_enabled()))
75532+ return 0;
75533+
75534+ uid = GR_GLOBAL_UID(kuid);
75535+
75536+ spin_lock(&gr_uid_lock);
75537+ loc = gr_find_uid(uid);
75538+
75539+ if (loc < 0)
75540+ goto out_unlock;
75541+
75542+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75543+ gr_remove_uid(loc);
75544+ else
75545+ ret = 1;
75546+
75547+out_unlock:
75548+ spin_unlock(&gr_uid_lock);
75549+ return ret;
75550+}
75551+
75552+static __inline__ int
75553+proc_is_setxid(const struct cred *cred)
75554+{
75555+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75556+ !uid_eq(cred->uid, cred->fsuid))
75557+ return 1;
75558+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75559+ !gid_eq(cred->gid, cred->fsgid))
75560+ return 1;
75561+
75562+ return 0;
75563+}
75564+
75565+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75566+
75567+void
75568+gr_handle_crash(struct task_struct *task, const int sig)
75569+{
75570+ struct acl_subject_label *curr;
75571+ struct task_struct *tsk, *tsk2;
75572+ const struct cred *cred;
75573+ const struct cred *cred2;
75574+
75575+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75576+ return;
75577+
75578+ if (unlikely(!gr_acl_is_enabled()))
75579+ return;
75580+
75581+ curr = task->acl;
75582+
75583+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75584+ return;
75585+
75586+ if (time_before_eq(curr->expires, get_seconds())) {
75587+ curr->expires = 0;
75588+ curr->crashes = 0;
75589+ }
75590+
75591+ curr->crashes++;
75592+
75593+ if (!curr->expires)
75594+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75595+
75596+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75597+ time_after(curr->expires, get_seconds())) {
75598+ rcu_read_lock();
75599+ cred = __task_cred(task);
75600+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75601+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75602+ spin_lock(&gr_uid_lock);
75603+ gr_insert_uid(cred->uid, curr->expires);
75604+ spin_unlock(&gr_uid_lock);
75605+ curr->expires = 0;
75606+ curr->crashes = 0;
75607+ read_lock(&tasklist_lock);
75608+ do_each_thread(tsk2, tsk) {
75609+ cred2 = __task_cred(tsk);
75610+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75611+ gr_fake_force_sig(SIGKILL, tsk);
75612+ } while_each_thread(tsk2, tsk);
75613+ read_unlock(&tasklist_lock);
75614+ } else {
75615+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75616+ read_lock(&tasklist_lock);
75617+ read_lock(&grsec_exec_file_lock);
75618+ do_each_thread(tsk2, tsk) {
75619+ if (likely(tsk != task)) {
75620+ // if this thread has the same subject as the one that triggered
75621+ // RES_CRASH and it's the same binary, kill it
75622+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75623+ gr_fake_force_sig(SIGKILL, tsk);
75624+ }
75625+ } while_each_thread(tsk2, tsk);
75626+ read_unlock(&grsec_exec_file_lock);
75627+ read_unlock(&tasklist_lock);
75628+ }
75629+ rcu_read_unlock();
75630+ }
75631+
75632+ return;
75633+}
75634+
75635+int
75636+gr_check_crash_exec(const struct file *filp)
75637+{
75638+ struct acl_subject_label *curr;
75639+ struct dentry *dentry;
75640+
75641+ if (unlikely(!gr_acl_is_enabled()))
75642+ return 0;
75643+
75644+ read_lock(&gr_inode_lock);
75645+ dentry = filp->f_path.dentry;
75646+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75647+ current->role);
75648+ read_unlock(&gr_inode_lock);
75649+
75650+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75651+ (!curr->crashes && !curr->expires))
75652+ return 0;
75653+
75654+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75655+ time_after(curr->expires, get_seconds()))
75656+ return 1;
75657+ else if (time_before_eq(curr->expires, get_seconds())) {
75658+ curr->crashes = 0;
75659+ curr->expires = 0;
75660+ }
75661+
75662+ return 0;
75663+}
75664+
75665+void
75666+gr_handle_alertkill(struct task_struct *task)
75667+{
75668+ struct acl_subject_label *curracl;
75669+ __u32 curr_ip;
75670+ struct task_struct *p, *p2;
75671+
75672+ if (unlikely(!gr_acl_is_enabled()))
75673+ return;
75674+
75675+ curracl = task->acl;
75676+ curr_ip = task->signal->curr_ip;
75677+
75678+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75679+ read_lock(&tasklist_lock);
75680+ do_each_thread(p2, p) {
75681+ if (p->signal->curr_ip == curr_ip)
75682+ gr_fake_force_sig(SIGKILL, p);
75683+ } while_each_thread(p2, p);
75684+ read_unlock(&tasklist_lock);
75685+ } else if (curracl->mode & GR_KILLPROC)
75686+ gr_fake_force_sig(SIGKILL, task);
75687+
75688+ return;
75689+}
75690diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75691new file mode 100644
75692index 0000000..6b0c9cc
75693--- /dev/null
75694+++ b/grsecurity/gracl_shm.c
75695@@ -0,0 +1,40 @@
75696+#include <linux/kernel.h>
75697+#include <linux/mm.h>
75698+#include <linux/sched.h>
75699+#include <linux/file.h>
75700+#include <linux/ipc.h>
75701+#include <linux/gracl.h>
75702+#include <linux/grsecurity.h>
75703+#include <linux/grinternal.h>
75704+
75705+int
75706+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
75707+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
75708+{
75709+ struct task_struct *task;
75710+
75711+ if (!gr_acl_is_enabled())
75712+ return 1;
75713+
75714+ rcu_read_lock();
75715+ read_lock(&tasklist_lock);
75716+
75717+ task = find_task_by_vpid(shm_cprid);
75718+
75719+ if (unlikely(!task))
75720+ task = find_task_by_vpid(shm_lapid);
75721+
75722+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
75723+ (task_pid_nr(task) == shm_lapid)) &&
75724+ (task->acl->mode & GR_PROTSHM) &&
75725+ (task->acl != current->acl))) {
75726+ read_unlock(&tasklist_lock);
75727+ rcu_read_unlock();
75728+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
75729+ return 0;
75730+ }
75731+ read_unlock(&tasklist_lock);
75732+ rcu_read_unlock();
75733+
75734+ return 1;
75735+}
75736diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
75737new file mode 100644
75738index 0000000..bc0be01
75739--- /dev/null
75740+++ b/grsecurity/grsec_chdir.c
75741@@ -0,0 +1,19 @@
75742+#include <linux/kernel.h>
75743+#include <linux/sched.h>
75744+#include <linux/fs.h>
75745+#include <linux/file.h>
75746+#include <linux/grsecurity.h>
75747+#include <linux/grinternal.h>
75748+
75749+void
75750+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
75751+{
75752+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75753+ if ((grsec_enable_chdir && grsec_enable_group &&
75754+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
75755+ !grsec_enable_group)) {
75756+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
75757+ }
75758+#endif
75759+ return;
75760+}
75761diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
75762new file mode 100644
75763index 0000000..114ea4f
75764--- /dev/null
75765+++ b/grsecurity/grsec_chroot.c
75766@@ -0,0 +1,467 @@
75767+#include <linux/kernel.h>
75768+#include <linux/module.h>
75769+#include <linux/sched.h>
75770+#include <linux/file.h>
75771+#include <linux/fs.h>
75772+#include <linux/mount.h>
75773+#include <linux/types.h>
75774+#include "../fs/mount.h"
75775+#include <linux/grsecurity.h>
75776+#include <linux/grinternal.h>
75777+
75778+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75779+int gr_init_ran;
75780+#endif
75781+
75782+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75783+{
75784+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75785+ struct dentry *tmpd = dentry;
75786+
75787+ read_seqlock_excl(&mount_lock);
75788+ write_seqlock(&rename_lock);
75789+
75790+ while (tmpd != mnt->mnt_root) {
75791+ atomic_inc(&tmpd->chroot_refcnt);
75792+ tmpd = tmpd->d_parent;
75793+ }
75794+ atomic_inc(&tmpd->chroot_refcnt);
75795+
75796+ write_sequnlock(&rename_lock);
75797+ read_sequnlock_excl(&mount_lock);
75798+#endif
75799+}
75800+
75801+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75802+{
75803+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75804+ struct dentry *tmpd = dentry;
75805+
75806+ read_seqlock_excl(&mount_lock);
75807+ write_seqlock(&rename_lock);
75808+
75809+ while (tmpd != mnt->mnt_root) {
75810+ atomic_dec(&tmpd->chroot_refcnt);
75811+ tmpd = tmpd->d_parent;
75812+ }
75813+ atomic_dec(&tmpd->chroot_refcnt);
75814+
75815+ write_sequnlock(&rename_lock);
75816+ read_sequnlock_excl(&mount_lock);
75817+#endif
75818+}
75819+
75820+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75821+static struct dentry *get_closest_chroot(struct dentry *dentry)
75822+{
75823+ write_seqlock(&rename_lock);
75824+ do {
75825+ if (atomic_read(&dentry->chroot_refcnt)) {
75826+ write_sequnlock(&rename_lock);
75827+ return dentry;
75828+ }
75829+ dentry = dentry->d_parent;
75830+ } while (!IS_ROOT(dentry));
75831+ write_sequnlock(&rename_lock);
75832+ return NULL;
75833+}
75834+#endif
75835+
75836+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
75837+ struct dentry *newdentry, struct vfsmount *newmnt)
75838+{
75839+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75840+ struct dentry *chroot;
75841+
75842+ if (unlikely(!grsec_enable_chroot_rename))
75843+ return 0;
75844+
75845+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
75846+ return 0;
75847+
75848+ chroot = get_closest_chroot(olddentry);
75849+
75850+ if (chroot == NULL)
75851+ return 0;
75852+
75853+ if (is_subdir(newdentry, chroot))
75854+ return 0;
75855+
75856+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
75857+
75858+ return 1;
75859+#else
75860+ return 0;
75861+#endif
75862+}
75863+
75864+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
75865+{
75866+#ifdef CONFIG_GRKERNSEC
75867+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
75868+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
75869+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75870+ && gr_init_ran
75871+#endif
75872+ )
75873+ task->gr_is_chrooted = 1;
75874+ else {
75875+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75876+ if (task_pid_nr(task) == 1 && !gr_init_ran)
75877+ gr_init_ran = 1;
75878+#endif
75879+ task->gr_is_chrooted = 0;
75880+ }
75881+
75882+ task->gr_chroot_dentry = path->dentry;
75883+#endif
75884+ return;
75885+}
75886+
75887+void gr_clear_chroot_entries(struct task_struct *task)
75888+{
75889+#ifdef CONFIG_GRKERNSEC
75890+ task->gr_is_chrooted = 0;
75891+ task->gr_chroot_dentry = NULL;
75892+#endif
75893+ return;
75894+}
75895+
75896+int
75897+gr_handle_chroot_unix(const pid_t pid)
75898+{
75899+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75900+ struct task_struct *p;
75901+
75902+ if (unlikely(!grsec_enable_chroot_unix))
75903+ return 1;
75904+
75905+ if (likely(!proc_is_chrooted(current)))
75906+ return 1;
75907+
75908+ rcu_read_lock();
75909+ read_lock(&tasklist_lock);
75910+ p = find_task_by_vpid_unrestricted(pid);
75911+ if (unlikely(p && !have_same_root(current, p))) {
75912+ read_unlock(&tasklist_lock);
75913+ rcu_read_unlock();
75914+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
75915+ return 0;
75916+ }
75917+ read_unlock(&tasklist_lock);
75918+ rcu_read_unlock();
75919+#endif
75920+ return 1;
75921+}
75922+
75923+int
75924+gr_handle_chroot_nice(void)
75925+{
75926+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75927+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
75928+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
75929+ return -EPERM;
75930+ }
75931+#endif
75932+ return 0;
75933+}
75934+
75935+int
75936+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
75937+{
75938+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75939+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
75940+ && proc_is_chrooted(current)) {
75941+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
75942+ return -EACCES;
75943+ }
75944+#endif
75945+ return 0;
75946+}
75947+
75948+int
75949+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
75950+{
75951+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75952+ struct task_struct *p;
75953+ int ret = 0;
75954+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
75955+ return ret;
75956+
75957+ read_lock(&tasklist_lock);
75958+ do_each_pid_task(pid, type, p) {
75959+ if (!have_same_root(current, p)) {
75960+ ret = 1;
75961+ goto out;
75962+ }
75963+ } while_each_pid_task(pid, type, p);
75964+out:
75965+ read_unlock(&tasklist_lock);
75966+ return ret;
75967+#endif
75968+ return 0;
75969+}
75970+
75971+int
75972+gr_pid_is_chrooted(struct task_struct *p)
75973+{
75974+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75975+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
75976+ return 0;
75977+
75978+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
75979+ !have_same_root(current, p)) {
75980+ return 1;
75981+ }
75982+#endif
75983+ return 0;
75984+}
75985+
75986+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
75987+
75988+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
75989+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
75990+{
75991+ struct path path, currentroot;
75992+ int ret = 0;
75993+
75994+ path.dentry = (struct dentry *)u_dentry;
75995+ path.mnt = (struct vfsmount *)u_mnt;
75996+ get_fs_root(current->fs, &currentroot);
75997+ if (path_is_under(&path, &currentroot))
75998+ ret = 1;
75999+ path_put(&currentroot);
76000+
76001+ return ret;
76002+}
76003+#endif
76004+
76005+int
76006+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76007+{
76008+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76009+ if (!grsec_enable_chroot_fchdir)
76010+ return 1;
76011+
76012+ if (!proc_is_chrooted(current))
76013+ return 1;
76014+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76015+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76016+ return 0;
76017+ }
76018+#endif
76019+ return 1;
76020+}
76021+
76022+int
76023+gr_chroot_fhandle(void)
76024+{
76025+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76026+ if (!grsec_enable_chroot_fchdir)
76027+ return 1;
76028+
76029+ if (!proc_is_chrooted(current))
76030+ return 1;
76031+ else {
76032+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76033+ return 0;
76034+ }
76035+#endif
76036+ return 1;
76037+}
76038+
76039+int
76040+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76041+ const u64 shm_createtime)
76042+{
76043+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76044+ struct task_struct *p;
76045+
76046+ if (unlikely(!grsec_enable_chroot_shmat))
76047+ return 1;
76048+
76049+ if (likely(!proc_is_chrooted(current)))
76050+ return 1;
76051+
76052+ rcu_read_lock();
76053+ read_lock(&tasklist_lock);
76054+
76055+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76056+ if (time_before_eq64(p->start_time, shm_createtime)) {
76057+ if (have_same_root(current, p)) {
76058+ goto allow;
76059+ } else {
76060+ read_unlock(&tasklist_lock);
76061+ rcu_read_unlock();
76062+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76063+ return 0;
76064+ }
76065+ }
76066+ /* creator exited, pid reuse, fall through to next check */
76067+ }
76068+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76069+ if (unlikely(!have_same_root(current, p))) {
76070+ read_unlock(&tasklist_lock);
76071+ rcu_read_unlock();
76072+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76073+ return 0;
76074+ }
76075+ }
76076+
76077+allow:
76078+ read_unlock(&tasklist_lock);
76079+ rcu_read_unlock();
76080+#endif
76081+ return 1;
76082+}
76083+
76084+void
76085+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76086+{
76087+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76088+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76089+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76090+#endif
76091+ return;
76092+}
76093+
76094+int
76095+gr_handle_chroot_mknod(const struct dentry *dentry,
76096+ const struct vfsmount *mnt, const int mode)
76097+{
76098+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76099+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76100+ proc_is_chrooted(current)) {
76101+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76102+ return -EPERM;
76103+ }
76104+#endif
76105+ return 0;
76106+}
76107+
76108+int
76109+gr_handle_chroot_mount(const struct dentry *dentry,
76110+ const struct vfsmount *mnt, const char *dev_name)
76111+{
76112+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76113+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76114+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76115+ return -EPERM;
76116+ }
76117+#endif
76118+ return 0;
76119+}
76120+
76121+int
76122+gr_handle_chroot_pivot(void)
76123+{
76124+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76125+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76126+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76127+ return -EPERM;
76128+ }
76129+#endif
76130+ return 0;
76131+}
76132+
76133+int
76134+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76135+{
76136+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76137+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76138+ !gr_is_outside_chroot(dentry, mnt)) {
76139+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76140+ return -EPERM;
76141+ }
76142+#endif
76143+ return 0;
76144+}
76145+
76146+extern const char *captab_log[];
76147+extern int captab_log_entries;
76148+
76149+int
76150+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76151+{
76152+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76153+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76154+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76155+ if (cap_raised(chroot_caps, cap)) {
76156+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76157+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76158+ }
76159+ return 0;
76160+ }
76161+ }
76162+#endif
76163+ return 1;
76164+}
76165+
76166+int
76167+gr_chroot_is_capable(const int cap)
76168+{
76169+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76170+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76171+#endif
76172+ return 1;
76173+}
76174+
76175+int
76176+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76177+{
76178+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76179+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76180+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76181+ if (cap_raised(chroot_caps, cap)) {
76182+ return 0;
76183+ }
76184+ }
76185+#endif
76186+ return 1;
76187+}
76188+
76189+int
76190+gr_chroot_is_capable_nolog(const int cap)
76191+{
76192+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76193+ return gr_task_chroot_is_capable_nolog(current, cap);
76194+#endif
76195+ return 1;
76196+}
76197+
76198+int
76199+gr_handle_chroot_sysctl(const int op)
76200+{
76201+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76202+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76203+ proc_is_chrooted(current))
76204+ return -EACCES;
76205+#endif
76206+ return 0;
76207+}
76208+
76209+void
76210+gr_handle_chroot_chdir(const struct path *path)
76211+{
76212+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76213+ if (grsec_enable_chroot_chdir)
76214+ set_fs_pwd(current->fs, path);
76215+#endif
76216+ return;
76217+}
76218+
76219+int
76220+gr_handle_chroot_chmod(const struct dentry *dentry,
76221+ const struct vfsmount *mnt, const int mode)
76222+{
76223+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76224+ /* allow chmod +s on directories, but not files */
76225+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76226+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76227+ proc_is_chrooted(current)) {
76228+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76229+ return -EPERM;
76230+ }
76231+#endif
76232+ return 0;
76233+}
76234diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76235new file mode 100644
76236index 0000000..946f750
76237--- /dev/null
76238+++ b/grsecurity/grsec_disabled.c
76239@@ -0,0 +1,445 @@
76240+#include <linux/kernel.h>
76241+#include <linux/module.h>
76242+#include <linux/sched.h>
76243+#include <linux/file.h>
76244+#include <linux/fs.h>
76245+#include <linux/kdev_t.h>
76246+#include <linux/net.h>
76247+#include <linux/in.h>
76248+#include <linux/ip.h>
76249+#include <linux/skbuff.h>
76250+#include <linux/sysctl.h>
76251+
76252+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76253+void
76254+pax_set_initial_flags(struct linux_binprm *bprm)
76255+{
76256+ return;
76257+}
76258+#endif
76259+
76260+#ifdef CONFIG_SYSCTL
76261+__u32
76262+gr_handle_sysctl(const struct ctl_table * table, const int op)
76263+{
76264+ return 0;
76265+}
76266+#endif
76267+
76268+#ifdef CONFIG_TASKSTATS
76269+int gr_is_taskstats_denied(int pid)
76270+{
76271+ return 0;
76272+}
76273+#endif
76274+
76275+int
76276+gr_acl_is_enabled(void)
76277+{
76278+ return 0;
76279+}
76280+
76281+int
76282+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76283+{
76284+ return 0;
76285+}
76286+
76287+void
76288+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76289+{
76290+ return;
76291+}
76292+
76293+int
76294+gr_handle_rawio(const struct inode *inode)
76295+{
76296+ return 0;
76297+}
76298+
76299+void
76300+gr_acl_handle_psacct(struct task_struct *task, const long code)
76301+{
76302+ return;
76303+}
76304+
76305+int
76306+gr_handle_ptrace(struct task_struct *task, const long request)
76307+{
76308+ return 0;
76309+}
76310+
76311+int
76312+gr_handle_proc_ptrace(struct task_struct *task)
76313+{
76314+ return 0;
76315+}
76316+
76317+int
76318+gr_set_acls(const int type)
76319+{
76320+ return 0;
76321+}
76322+
76323+int
76324+gr_check_hidden_task(const struct task_struct *tsk)
76325+{
76326+ return 0;
76327+}
76328+
76329+int
76330+gr_check_protected_task(const struct task_struct *task)
76331+{
76332+ return 0;
76333+}
76334+
76335+int
76336+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76337+{
76338+ return 0;
76339+}
76340+
76341+void
76342+gr_copy_label(struct task_struct *tsk)
76343+{
76344+ return;
76345+}
76346+
76347+void
76348+gr_set_pax_flags(struct task_struct *task)
76349+{
76350+ return;
76351+}
76352+
76353+int
76354+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76355+ const int unsafe_share)
76356+{
76357+ return 0;
76358+}
76359+
76360+void
76361+gr_handle_delete(const u64 ino, const dev_t dev)
76362+{
76363+ return;
76364+}
76365+
76366+void
76367+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76368+{
76369+ return;
76370+}
76371+
76372+void
76373+gr_handle_crash(struct task_struct *task, const int sig)
76374+{
76375+ return;
76376+}
76377+
76378+int
76379+gr_check_crash_exec(const struct file *filp)
76380+{
76381+ return 0;
76382+}
76383+
76384+int
76385+gr_check_crash_uid(const kuid_t uid)
76386+{
76387+ return 0;
76388+}
76389+
76390+void
76391+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76392+ struct dentry *old_dentry,
76393+ struct dentry *new_dentry,
76394+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76395+{
76396+ return;
76397+}
76398+
76399+int
76400+gr_search_socket(const int family, const int type, const int protocol)
76401+{
76402+ return 1;
76403+}
76404+
76405+int
76406+gr_search_connectbind(const int mode, const struct socket *sock,
76407+ const struct sockaddr_in *addr)
76408+{
76409+ return 0;
76410+}
76411+
76412+void
76413+gr_handle_alertkill(struct task_struct *task)
76414+{
76415+ return;
76416+}
76417+
76418+__u32
76419+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76420+{
76421+ return 1;
76422+}
76423+
76424+__u32
76425+gr_acl_handle_hidden_file(const struct dentry * dentry,
76426+ const struct vfsmount * mnt)
76427+{
76428+ return 1;
76429+}
76430+
76431+__u32
76432+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76433+ int acc_mode)
76434+{
76435+ return 1;
76436+}
76437+
76438+__u32
76439+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76440+{
76441+ return 1;
76442+}
76443+
76444+__u32
76445+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76446+{
76447+ return 1;
76448+}
76449+
76450+int
76451+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76452+ unsigned int *vm_flags)
76453+{
76454+ return 1;
76455+}
76456+
76457+__u32
76458+gr_acl_handle_truncate(const struct dentry * dentry,
76459+ const struct vfsmount * mnt)
76460+{
76461+ return 1;
76462+}
76463+
76464+__u32
76465+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76466+{
76467+ return 1;
76468+}
76469+
76470+__u32
76471+gr_acl_handle_access(const struct dentry * dentry,
76472+ const struct vfsmount * mnt, const int fmode)
76473+{
76474+ return 1;
76475+}
76476+
76477+__u32
76478+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76479+ umode_t *mode)
76480+{
76481+ return 1;
76482+}
76483+
76484+__u32
76485+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76486+{
76487+ return 1;
76488+}
76489+
76490+__u32
76491+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76492+{
76493+ return 1;
76494+}
76495+
76496+__u32
76497+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76498+{
76499+ return 1;
76500+}
76501+
76502+void
76503+grsecurity_init(void)
76504+{
76505+ return;
76506+}
76507+
76508+umode_t gr_acl_umask(void)
76509+{
76510+ return 0;
76511+}
76512+
76513+__u32
76514+gr_acl_handle_mknod(const struct dentry * new_dentry,
76515+ const struct dentry * parent_dentry,
76516+ const struct vfsmount * parent_mnt,
76517+ const int mode)
76518+{
76519+ return 1;
76520+}
76521+
76522+__u32
76523+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76524+ const struct dentry * parent_dentry,
76525+ const struct vfsmount * parent_mnt)
76526+{
76527+ return 1;
76528+}
76529+
76530+__u32
76531+gr_acl_handle_symlink(const struct dentry * new_dentry,
76532+ const struct dentry * parent_dentry,
76533+ const struct vfsmount * parent_mnt, const struct filename *from)
76534+{
76535+ return 1;
76536+}
76537+
76538+__u32
76539+gr_acl_handle_link(const struct dentry * new_dentry,
76540+ const struct dentry * parent_dentry,
76541+ const struct vfsmount * parent_mnt,
76542+ const struct dentry * old_dentry,
76543+ const struct vfsmount * old_mnt, const struct filename *to)
76544+{
76545+ return 1;
76546+}
76547+
76548+int
76549+gr_acl_handle_rename(const struct dentry *new_dentry,
76550+ const struct dentry *parent_dentry,
76551+ const struct vfsmount *parent_mnt,
76552+ const struct dentry *old_dentry,
76553+ const struct inode *old_parent_inode,
76554+ const struct vfsmount *old_mnt, const struct filename *newname,
76555+ unsigned int flags)
76556+{
76557+ return 0;
76558+}
76559+
76560+int
76561+gr_acl_handle_filldir(const struct file *file, const char *name,
76562+ const int namelen, const u64 ino)
76563+{
76564+ return 1;
76565+}
76566+
76567+int
76568+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76569+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76570+{
76571+ return 1;
76572+}
76573+
76574+int
76575+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76576+{
76577+ return 0;
76578+}
76579+
76580+int
76581+gr_search_accept(const struct socket *sock)
76582+{
76583+ return 0;
76584+}
76585+
76586+int
76587+gr_search_listen(const struct socket *sock)
76588+{
76589+ return 0;
76590+}
76591+
76592+int
76593+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76594+{
76595+ return 0;
76596+}
76597+
76598+__u32
76599+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76600+{
76601+ return 1;
76602+}
76603+
76604+__u32
76605+gr_acl_handle_creat(const struct dentry * dentry,
76606+ const struct dentry * p_dentry,
76607+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76608+ const int imode)
76609+{
76610+ return 1;
76611+}
76612+
76613+void
76614+gr_acl_handle_exit(void)
76615+{
76616+ return;
76617+}
76618+
76619+int
76620+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76621+{
76622+ return 1;
76623+}
76624+
76625+void
76626+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76627+{
76628+ return;
76629+}
76630+
76631+int
76632+gr_acl_handle_procpidmem(const struct task_struct *task)
76633+{
76634+ return 0;
76635+}
76636+
76637+int
76638+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76639+{
76640+ return 0;
76641+}
76642+
76643+int
76644+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76645+{
76646+ return 0;
76647+}
76648+
76649+int
76650+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76651+{
76652+ return 0;
76653+}
76654+
76655+int
76656+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76657+{
76658+ return 0;
76659+}
76660+
76661+int gr_acl_enable_at_secure(void)
76662+{
76663+ return 0;
76664+}
76665+
76666+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76667+{
76668+ return dentry->d_sb->s_dev;
76669+}
76670+
76671+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76672+{
76673+ return dentry->d_inode->i_ino;
76674+}
76675+
76676+void gr_put_exec_file(struct task_struct *task)
76677+{
76678+ return;
76679+}
76680+
76681+#ifdef CONFIG_SECURITY
76682+EXPORT_SYMBOL_GPL(gr_check_user_change);
76683+EXPORT_SYMBOL_GPL(gr_check_group_change);
76684+#endif
76685diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76686new file mode 100644
76687index 0000000..fb7531e
76688--- /dev/null
76689+++ b/grsecurity/grsec_exec.c
76690@@ -0,0 +1,189 @@
76691+#include <linux/kernel.h>
76692+#include <linux/sched.h>
76693+#include <linux/file.h>
76694+#include <linux/binfmts.h>
76695+#include <linux/fs.h>
76696+#include <linux/types.h>
76697+#include <linux/grdefs.h>
76698+#include <linux/grsecurity.h>
76699+#include <linux/grinternal.h>
76700+#include <linux/capability.h>
76701+#include <linux/module.h>
76702+#include <linux/compat.h>
76703+
76704+#include <asm/uaccess.h>
76705+
76706+#ifdef CONFIG_GRKERNSEC_EXECLOG
76707+static char gr_exec_arg_buf[132];
76708+static DEFINE_MUTEX(gr_exec_arg_mutex);
76709+#endif
76710+
76711+struct user_arg_ptr {
76712+#ifdef CONFIG_COMPAT
76713+ bool is_compat;
76714+#endif
76715+ union {
76716+ const char __user *const __user *native;
76717+#ifdef CONFIG_COMPAT
76718+ const compat_uptr_t __user *compat;
76719+#endif
76720+ } ptr;
76721+};
76722+
76723+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76724+
76725+void
76726+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76727+{
76728+#ifdef CONFIG_GRKERNSEC_EXECLOG
76729+ char *grarg = gr_exec_arg_buf;
76730+ unsigned int i, x, execlen = 0;
76731+ char c;
76732+
76733+ if (!((grsec_enable_execlog && grsec_enable_group &&
76734+ in_group_p(grsec_audit_gid))
76735+ || (grsec_enable_execlog && !grsec_enable_group)))
76736+ return;
76737+
76738+ mutex_lock(&gr_exec_arg_mutex);
76739+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76740+
76741+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76742+ const char __user *p;
76743+ unsigned int len;
76744+
76745+ p = get_user_arg_ptr(argv, i);
76746+ if (IS_ERR(p))
76747+ goto log;
76748+
76749+ len = strnlen_user(p, 128 - execlen);
76750+ if (len > 128 - execlen)
76751+ len = 128 - execlen;
76752+ else if (len > 0)
76753+ len--;
76754+ if (copy_from_user(grarg + execlen, p, len))
76755+ goto log;
76756+
76757+ /* rewrite unprintable characters */
76758+ for (x = 0; x < len; x++) {
76759+ c = *(grarg + execlen + x);
76760+ if (c < 32 || c > 126)
76761+ *(grarg + execlen + x) = ' ';
76762+ }
76763+
76764+ execlen += len;
76765+ *(grarg + execlen) = ' ';
76766+ *(grarg + execlen + 1) = '\0';
76767+ execlen++;
76768+ }
76769+
76770+ log:
76771+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76772+ bprm->file->f_path.mnt, grarg);
76773+ mutex_unlock(&gr_exec_arg_mutex);
76774+#endif
76775+ return;
76776+}
76777+
76778+#ifdef CONFIG_GRKERNSEC
76779+extern int gr_acl_is_capable(const int cap);
76780+extern int gr_acl_is_capable_nolog(const int cap);
76781+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76782+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76783+extern int gr_chroot_is_capable(const int cap);
76784+extern int gr_chroot_is_capable_nolog(const int cap);
76785+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76786+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
76787+#endif
76788+
76789+const char *captab_log[] = {
76790+ "CAP_CHOWN",
76791+ "CAP_DAC_OVERRIDE",
76792+ "CAP_DAC_READ_SEARCH",
76793+ "CAP_FOWNER",
76794+ "CAP_FSETID",
76795+ "CAP_KILL",
76796+ "CAP_SETGID",
76797+ "CAP_SETUID",
76798+ "CAP_SETPCAP",
76799+ "CAP_LINUX_IMMUTABLE",
76800+ "CAP_NET_BIND_SERVICE",
76801+ "CAP_NET_BROADCAST",
76802+ "CAP_NET_ADMIN",
76803+ "CAP_NET_RAW",
76804+ "CAP_IPC_LOCK",
76805+ "CAP_IPC_OWNER",
76806+ "CAP_SYS_MODULE",
76807+ "CAP_SYS_RAWIO",
76808+ "CAP_SYS_CHROOT",
76809+ "CAP_SYS_PTRACE",
76810+ "CAP_SYS_PACCT",
76811+ "CAP_SYS_ADMIN",
76812+ "CAP_SYS_BOOT",
76813+ "CAP_SYS_NICE",
76814+ "CAP_SYS_RESOURCE",
76815+ "CAP_SYS_TIME",
76816+ "CAP_SYS_TTY_CONFIG",
76817+ "CAP_MKNOD",
76818+ "CAP_LEASE",
76819+ "CAP_AUDIT_WRITE",
76820+ "CAP_AUDIT_CONTROL",
76821+ "CAP_SETFCAP",
76822+ "CAP_MAC_OVERRIDE",
76823+ "CAP_MAC_ADMIN",
76824+ "CAP_SYSLOG",
76825+ "CAP_WAKE_ALARM",
76826+ "CAP_BLOCK_SUSPEND",
76827+ "CAP_AUDIT_READ"
76828+};
76829+
76830+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
76831+
76832+int gr_is_capable(const int cap)
76833+{
76834+#ifdef CONFIG_GRKERNSEC
76835+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
76836+ return 1;
76837+ return 0;
76838+#else
76839+ return 1;
76840+#endif
76841+}
76842+
76843+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76844+{
76845+#ifdef CONFIG_GRKERNSEC
76846+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
76847+ return 1;
76848+ return 0;
76849+#else
76850+ return 1;
76851+#endif
76852+}
76853+
76854+int gr_is_capable_nolog(const int cap)
76855+{
76856+#ifdef CONFIG_GRKERNSEC
76857+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
76858+ return 1;
76859+ return 0;
76860+#else
76861+ return 1;
76862+#endif
76863+}
76864+
76865+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
76866+{
76867+#ifdef CONFIG_GRKERNSEC
76868+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
76869+ return 1;
76870+ return 0;
76871+#else
76872+ return 1;
76873+#endif
76874+}
76875+
76876+EXPORT_SYMBOL_GPL(gr_is_capable);
76877+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
76878+EXPORT_SYMBOL_GPL(gr_task_is_capable);
76879+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
76880diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
76881new file mode 100644
76882index 0000000..06cc6ea
76883--- /dev/null
76884+++ b/grsecurity/grsec_fifo.c
76885@@ -0,0 +1,24 @@
76886+#include <linux/kernel.h>
76887+#include <linux/sched.h>
76888+#include <linux/fs.h>
76889+#include <linux/file.h>
76890+#include <linux/grinternal.h>
76891+
76892+int
76893+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
76894+ const struct dentry *dir, const int flag, const int acc_mode)
76895+{
76896+#ifdef CONFIG_GRKERNSEC_FIFO
76897+ const struct cred *cred = current_cred();
76898+
76899+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
76900+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
76901+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
76902+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
76903+ if (!inode_permission(dentry->d_inode, acc_mode))
76904+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
76905+ return -EACCES;
76906+ }
76907+#endif
76908+ return 0;
76909+}
76910diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
76911new file mode 100644
76912index 0000000..8ca18bf
76913--- /dev/null
76914+++ b/grsecurity/grsec_fork.c
76915@@ -0,0 +1,23 @@
76916+#include <linux/kernel.h>
76917+#include <linux/sched.h>
76918+#include <linux/grsecurity.h>
76919+#include <linux/grinternal.h>
76920+#include <linux/errno.h>
76921+
76922+void
76923+gr_log_forkfail(const int retval)
76924+{
76925+#ifdef CONFIG_GRKERNSEC_FORKFAIL
76926+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
76927+ switch (retval) {
76928+ case -EAGAIN:
76929+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
76930+ break;
76931+ case -ENOMEM:
76932+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
76933+ break;
76934+ }
76935+ }
76936+#endif
76937+ return;
76938+}
76939diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
76940new file mode 100644
76941index 0000000..4ed9e7d
76942--- /dev/null
76943+++ b/grsecurity/grsec_init.c
76944@@ -0,0 +1,290 @@
76945+#include <linux/kernel.h>
76946+#include <linux/sched.h>
76947+#include <linux/mm.h>
76948+#include <linux/gracl.h>
76949+#include <linux/slab.h>
76950+#include <linux/vmalloc.h>
76951+#include <linux/percpu.h>
76952+#include <linux/module.h>
76953+
76954+int grsec_enable_ptrace_readexec;
76955+int grsec_enable_setxid;
76956+int grsec_enable_symlinkown;
76957+kgid_t grsec_symlinkown_gid;
76958+int grsec_enable_brute;
76959+int grsec_enable_link;
76960+int grsec_enable_dmesg;
76961+int grsec_enable_harden_ptrace;
76962+int grsec_enable_harden_ipc;
76963+int grsec_enable_fifo;
76964+int grsec_enable_execlog;
76965+int grsec_enable_signal;
76966+int grsec_enable_forkfail;
76967+int grsec_enable_audit_ptrace;
76968+int grsec_enable_time;
76969+int grsec_enable_group;
76970+kgid_t grsec_audit_gid;
76971+int grsec_enable_chdir;
76972+int grsec_enable_mount;
76973+int grsec_enable_rofs;
76974+int grsec_deny_new_usb;
76975+int grsec_enable_chroot_findtask;
76976+int grsec_enable_chroot_mount;
76977+int grsec_enable_chroot_shmat;
76978+int grsec_enable_chroot_fchdir;
76979+int grsec_enable_chroot_double;
76980+int grsec_enable_chroot_pivot;
76981+int grsec_enable_chroot_chdir;
76982+int grsec_enable_chroot_chmod;
76983+int grsec_enable_chroot_mknod;
76984+int grsec_enable_chroot_nice;
76985+int grsec_enable_chroot_execlog;
76986+int grsec_enable_chroot_caps;
76987+int grsec_enable_chroot_rename;
76988+int grsec_enable_chroot_sysctl;
76989+int grsec_enable_chroot_unix;
76990+int grsec_enable_tpe;
76991+kgid_t grsec_tpe_gid;
76992+int grsec_enable_blackhole;
76993+#ifdef CONFIG_IPV6_MODULE
76994+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
76995+#endif
76996+int grsec_lastack_retries;
76997+int grsec_enable_tpe_all;
76998+int grsec_enable_tpe_invert;
76999+int grsec_enable_socket_all;
77000+kgid_t grsec_socket_all_gid;
77001+int grsec_enable_socket_client;
77002+kgid_t grsec_socket_client_gid;
77003+int grsec_enable_socket_server;
77004+kgid_t grsec_socket_server_gid;
77005+int grsec_resource_logging;
77006+int grsec_disable_privio;
77007+int grsec_enable_log_rwxmaps;
77008+int grsec_lock;
77009+
77010+DEFINE_SPINLOCK(grsec_alert_lock);
77011+unsigned long grsec_alert_wtime = 0;
77012+unsigned long grsec_alert_fyet = 0;
77013+
77014+DEFINE_SPINLOCK(grsec_audit_lock);
77015+
77016+DEFINE_RWLOCK(grsec_exec_file_lock);
77017+
77018+char *gr_shared_page[4];
77019+
77020+char *gr_alert_log_fmt;
77021+char *gr_audit_log_fmt;
77022+char *gr_alert_log_buf;
77023+char *gr_audit_log_buf;
77024+
77025+extern struct gr_arg *gr_usermode;
77026+extern unsigned char *gr_system_salt;
77027+extern unsigned char *gr_system_sum;
77028+
77029+void __init
77030+grsecurity_init(void)
77031+{
77032+ int j;
77033+ /* create the per-cpu shared pages */
77034+
77035+#ifdef CONFIG_X86
77036+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77037+#endif
77038+
77039+ for (j = 0; j < 4; j++) {
77040+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77041+ if (gr_shared_page[j] == NULL) {
77042+ panic("Unable to allocate grsecurity shared page");
77043+ return;
77044+ }
77045+ }
77046+
77047+ /* allocate log buffers */
77048+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77049+ if (!gr_alert_log_fmt) {
77050+ panic("Unable to allocate grsecurity alert log format buffer");
77051+ return;
77052+ }
77053+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77054+ if (!gr_audit_log_fmt) {
77055+ panic("Unable to allocate grsecurity audit log format buffer");
77056+ return;
77057+ }
77058+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77059+ if (!gr_alert_log_buf) {
77060+ panic("Unable to allocate grsecurity alert log buffer");
77061+ return;
77062+ }
77063+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77064+ if (!gr_audit_log_buf) {
77065+ panic("Unable to allocate grsecurity audit log buffer");
77066+ return;
77067+ }
77068+
77069+ /* allocate memory for authentication structure */
77070+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77071+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77072+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77073+
77074+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77075+ panic("Unable to allocate grsecurity authentication structure");
77076+ return;
77077+ }
77078+
77079+#ifdef CONFIG_GRKERNSEC_IO
77080+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77081+ grsec_disable_privio = 1;
77082+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77083+ grsec_disable_privio = 1;
77084+#else
77085+ grsec_disable_privio = 0;
77086+#endif
77087+#endif
77088+
77089+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77090+ /* for backward compatibility, tpe_invert always defaults to on if
77091+ enabled in the kernel
77092+ */
77093+ grsec_enable_tpe_invert = 1;
77094+#endif
77095+
77096+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77097+#ifndef CONFIG_GRKERNSEC_SYSCTL
77098+ grsec_lock = 1;
77099+#endif
77100+
77101+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77102+ grsec_enable_log_rwxmaps = 1;
77103+#endif
77104+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77105+ grsec_enable_group = 1;
77106+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77107+#endif
77108+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77109+ grsec_enable_ptrace_readexec = 1;
77110+#endif
77111+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77112+ grsec_enable_chdir = 1;
77113+#endif
77114+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77115+ grsec_enable_harden_ptrace = 1;
77116+#endif
77117+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77118+ grsec_enable_harden_ipc = 1;
77119+#endif
77120+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77121+ grsec_enable_mount = 1;
77122+#endif
77123+#ifdef CONFIG_GRKERNSEC_LINK
77124+ grsec_enable_link = 1;
77125+#endif
77126+#ifdef CONFIG_GRKERNSEC_BRUTE
77127+ grsec_enable_brute = 1;
77128+#endif
77129+#ifdef CONFIG_GRKERNSEC_DMESG
77130+ grsec_enable_dmesg = 1;
77131+#endif
77132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77133+ grsec_enable_blackhole = 1;
77134+ grsec_lastack_retries = 4;
77135+#endif
77136+#ifdef CONFIG_GRKERNSEC_FIFO
77137+ grsec_enable_fifo = 1;
77138+#endif
77139+#ifdef CONFIG_GRKERNSEC_EXECLOG
77140+ grsec_enable_execlog = 1;
77141+#endif
77142+#ifdef CONFIG_GRKERNSEC_SETXID
77143+ grsec_enable_setxid = 1;
77144+#endif
77145+#ifdef CONFIG_GRKERNSEC_SIGNAL
77146+ grsec_enable_signal = 1;
77147+#endif
77148+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77149+ grsec_enable_forkfail = 1;
77150+#endif
77151+#ifdef CONFIG_GRKERNSEC_TIME
77152+ grsec_enable_time = 1;
77153+#endif
77154+#ifdef CONFIG_GRKERNSEC_RESLOG
77155+ grsec_resource_logging = 1;
77156+#endif
77157+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77158+ grsec_enable_chroot_findtask = 1;
77159+#endif
77160+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77161+ grsec_enable_chroot_unix = 1;
77162+#endif
77163+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77164+ grsec_enable_chroot_mount = 1;
77165+#endif
77166+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77167+ grsec_enable_chroot_fchdir = 1;
77168+#endif
77169+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77170+ grsec_enable_chroot_shmat = 1;
77171+#endif
77172+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77173+ grsec_enable_audit_ptrace = 1;
77174+#endif
77175+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77176+ grsec_enable_chroot_double = 1;
77177+#endif
77178+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77179+ grsec_enable_chroot_pivot = 1;
77180+#endif
77181+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77182+ grsec_enable_chroot_chdir = 1;
77183+#endif
77184+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77185+ grsec_enable_chroot_chmod = 1;
77186+#endif
77187+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77188+ grsec_enable_chroot_mknod = 1;
77189+#endif
77190+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77191+ grsec_enable_chroot_nice = 1;
77192+#endif
77193+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77194+ grsec_enable_chroot_execlog = 1;
77195+#endif
77196+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77197+ grsec_enable_chroot_caps = 1;
77198+#endif
77199+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77200+ grsec_enable_chroot_rename = 1;
77201+#endif
77202+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77203+ grsec_enable_chroot_sysctl = 1;
77204+#endif
77205+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77206+ grsec_enable_symlinkown = 1;
77207+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77208+#endif
77209+#ifdef CONFIG_GRKERNSEC_TPE
77210+ grsec_enable_tpe = 1;
77211+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77212+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77213+ grsec_enable_tpe_all = 1;
77214+#endif
77215+#endif
77216+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77217+ grsec_enable_socket_all = 1;
77218+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77219+#endif
77220+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77221+ grsec_enable_socket_client = 1;
77222+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77223+#endif
77224+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77225+ grsec_enable_socket_server = 1;
77226+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77227+#endif
77228+#endif
77229+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77230+ grsec_deny_new_usb = 1;
77231+#endif
77232+
77233+ return;
77234+}
77235diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77236new file mode 100644
77237index 0000000..1773300
77238--- /dev/null
77239+++ b/grsecurity/grsec_ipc.c
77240@@ -0,0 +1,48 @@
77241+#include <linux/kernel.h>
77242+#include <linux/mm.h>
77243+#include <linux/sched.h>
77244+#include <linux/file.h>
77245+#include <linux/ipc.h>
77246+#include <linux/ipc_namespace.h>
77247+#include <linux/grsecurity.h>
77248+#include <linux/grinternal.h>
77249+
77250+int
77251+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77252+{
77253+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77254+ int write;
77255+ int orig_granted_mode;
77256+ kuid_t euid;
77257+ kgid_t egid;
77258+
77259+ if (!grsec_enable_harden_ipc)
77260+ return 1;
77261+
77262+ euid = current_euid();
77263+ egid = current_egid();
77264+
77265+ write = requested_mode & 00002;
77266+ orig_granted_mode = ipcp->mode;
77267+
77268+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77269+ orig_granted_mode >>= 6;
77270+ else {
77271+ /* if likely wrong permissions, lock to user */
77272+ if (orig_granted_mode & 0007)
77273+ orig_granted_mode = 0;
77274+ /* otherwise do a egid-only check */
77275+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77276+ orig_granted_mode >>= 3;
77277+ /* otherwise, no access */
77278+ else
77279+ orig_granted_mode = 0;
77280+ }
77281+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77282+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77283+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77284+ return 0;
77285+ }
77286+#endif
77287+ return 1;
77288+}
77289diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77290new file mode 100644
77291index 0000000..5e05e20
77292--- /dev/null
77293+++ b/grsecurity/grsec_link.c
77294@@ -0,0 +1,58 @@
77295+#include <linux/kernel.h>
77296+#include <linux/sched.h>
77297+#include <linux/fs.h>
77298+#include <linux/file.h>
77299+#include <linux/grinternal.h>
77300+
77301+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77302+{
77303+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77304+ const struct inode *link_inode = link->dentry->d_inode;
77305+
77306+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77307+ /* ignore root-owned links, e.g. /proc/self */
77308+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77309+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77310+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77311+ return 1;
77312+ }
77313+#endif
77314+ return 0;
77315+}
77316+
77317+int
77318+gr_handle_follow_link(const struct inode *parent,
77319+ const struct inode *inode,
77320+ const struct dentry *dentry, const struct vfsmount *mnt)
77321+{
77322+#ifdef CONFIG_GRKERNSEC_LINK
77323+ const struct cred *cred = current_cred();
77324+
77325+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77326+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77327+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77328+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77329+ return -EACCES;
77330+ }
77331+#endif
77332+ return 0;
77333+}
77334+
77335+int
77336+gr_handle_hardlink(const struct dentry *dentry,
77337+ const struct vfsmount *mnt,
77338+ struct inode *inode, const int mode, const struct filename *to)
77339+{
77340+#ifdef CONFIG_GRKERNSEC_LINK
77341+ const struct cred *cred = current_cred();
77342+
77343+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77344+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77345+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77346+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77347+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77348+ return -EPERM;
77349+ }
77350+#endif
77351+ return 0;
77352+}
77353diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77354new file mode 100644
77355index 0000000..dbe0a6b
77356--- /dev/null
77357+++ b/grsecurity/grsec_log.c
77358@@ -0,0 +1,341 @@
77359+#include <linux/kernel.h>
77360+#include <linux/sched.h>
77361+#include <linux/file.h>
77362+#include <linux/tty.h>
77363+#include <linux/fs.h>
77364+#include <linux/mm.h>
77365+#include <linux/grinternal.h>
77366+
77367+#ifdef CONFIG_TREE_PREEMPT_RCU
77368+#define DISABLE_PREEMPT() preempt_disable()
77369+#define ENABLE_PREEMPT() preempt_enable()
77370+#else
77371+#define DISABLE_PREEMPT()
77372+#define ENABLE_PREEMPT()
77373+#endif
77374+
77375+#define BEGIN_LOCKS(x) \
77376+ DISABLE_PREEMPT(); \
77377+ rcu_read_lock(); \
77378+ read_lock(&tasklist_lock); \
77379+ read_lock(&grsec_exec_file_lock); \
77380+ if (x != GR_DO_AUDIT) \
77381+ spin_lock(&grsec_alert_lock); \
77382+ else \
77383+ spin_lock(&grsec_audit_lock)
77384+
77385+#define END_LOCKS(x) \
77386+ if (x != GR_DO_AUDIT) \
77387+ spin_unlock(&grsec_alert_lock); \
77388+ else \
77389+ spin_unlock(&grsec_audit_lock); \
77390+ read_unlock(&grsec_exec_file_lock); \
77391+ read_unlock(&tasklist_lock); \
77392+ rcu_read_unlock(); \
77393+ ENABLE_PREEMPT(); \
77394+ if (x == GR_DONT_AUDIT) \
77395+ gr_handle_alertkill(current)
77396+
77397+enum {
77398+ FLOODING,
77399+ NO_FLOODING
77400+};
77401+
77402+extern char *gr_alert_log_fmt;
77403+extern char *gr_audit_log_fmt;
77404+extern char *gr_alert_log_buf;
77405+extern char *gr_audit_log_buf;
77406+
77407+static int gr_log_start(int audit)
77408+{
77409+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77410+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77411+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77412+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77413+ unsigned long curr_secs = get_seconds();
77414+
77415+ if (audit == GR_DO_AUDIT)
77416+ goto set_fmt;
77417+
77418+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77419+ grsec_alert_wtime = curr_secs;
77420+ grsec_alert_fyet = 0;
77421+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77422+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77423+ grsec_alert_fyet++;
77424+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77425+ grsec_alert_wtime = curr_secs;
77426+ grsec_alert_fyet++;
77427+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77428+ return FLOODING;
77429+ }
77430+ else return FLOODING;
77431+
77432+set_fmt:
77433+#endif
77434+ memset(buf, 0, PAGE_SIZE);
77435+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77436+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77437+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77438+ } else if (current->signal->curr_ip) {
77439+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77440+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77441+ } else if (gr_acl_is_enabled()) {
77442+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77443+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77444+ } else {
77445+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77446+ strcpy(buf, fmt);
77447+ }
77448+
77449+ return NO_FLOODING;
77450+}
77451+
77452+static void gr_log_middle(int audit, const char *msg, va_list ap)
77453+ __attribute__ ((format (printf, 2, 0)));
77454+
77455+static void gr_log_middle(int audit, const char *msg, va_list ap)
77456+{
77457+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77458+ unsigned int len = strlen(buf);
77459+
77460+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77461+
77462+ return;
77463+}
77464+
77465+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77466+ __attribute__ ((format (printf, 2, 3)));
77467+
77468+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77469+{
77470+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77471+ unsigned int len = strlen(buf);
77472+ va_list ap;
77473+
77474+ va_start(ap, msg);
77475+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77476+ va_end(ap);
77477+
77478+ return;
77479+}
77480+
77481+static void gr_log_end(int audit, int append_default)
77482+{
77483+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77484+ if (append_default) {
77485+ struct task_struct *task = current;
77486+ struct task_struct *parent = task->real_parent;
77487+ const struct cred *cred = __task_cred(task);
77488+ const struct cred *pcred = __task_cred(parent);
77489+ unsigned int len = strlen(buf);
77490+
77491+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77492+ }
77493+
77494+ printk("%s\n", buf);
77495+
77496+ return;
77497+}
77498+
77499+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77500+{
77501+ int logtype;
77502+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77503+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77504+ void *voidptr = NULL;
77505+ int num1 = 0, num2 = 0;
77506+ unsigned long ulong1 = 0, ulong2 = 0;
77507+ struct dentry *dentry = NULL;
77508+ struct vfsmount *mnt = NULL;
77509+ struct file *file = NULL;
77510+ struct task_struct *task = NULL;
77511+ struct vm_area_struct *vma = NULL;
77512+ const struct cred *cred, *pcred;
77513+ va_list ap;
77514+
77515+ BEGIN_LOCKS(audit);
77516+ logtype = gr_log_start(audit);
77517+ if (logtype == FLOODING) {
77518+ END_LOCKS(audit);
77519+ return;
77520+ }
77521+ va_start(ap, argtypes);
77522+ switch (argtypes) {
77523+ case GR_TTYSNIFF:
77524+ task = va_arg(ap, struct task_struct *);
77525+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77526+ break;
77527+ case GR_SYSCTL_HIDDEN:
77528+ str1 = va_arg(ap, char *);
77529+ gr_log_middle_varargs(audit, msg, result, str1);
77530+ break;
77531+ case GR_RBAC:
77532+ dentry = va_arg(ap, struct dentry *);
77533+ mnt = va_arg(ap, struct vfsmount *);
77534+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77535+ break;
77536+ case GR_RBAC_STR:
77537+ dentry = va_arg(ap, struct dentry *);
77538+ mnt = va_arg(ap, struct vfsmount *);
77539+ str1 = va_arg(ap, char *);
77540+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77541+ break;
77542+ case GR_STR_RBAC:
77543+ str1 = va_arg(ap, char *);
77544+ dentry = va_arg(ap, struct dentry *);
77545+ mnt = va_arg(ap, struct vfsmount *);
77546+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77547+ break;
77548+ case GR_RBAC_MODE2:
77549+ dentry = va_arg(ap, struct dentry *);
77550+ mnt = va_arg(ap, struct vfsmount *);
77551+ str1 = va_arg(ap, char *);
77552+ str2 = va_arg(ap, char *);
77553+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77554+ break;
77555+ case GR_RBAC_MODE3:
77556+ dentry = va_arg(ap, struct dentry *);
77557+ mnt = va_arg(ap, struct vfsmount *);
77558+ str1 = va_arg(ap, char *);
77559+ str2 = va_arg(ap, char *);
77560+ str3 = va_arg(ap, char *);
77561+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77562+ break;
77563+ case GR_FILENAME:
77564+ dentry = va_arg(ap, struct dentry *);
77565+ mnt = va_arg(ap, struct vfsmount *);
77566+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77567+ break;
77568+ case GR_STR_FILENAME:
77569+ str1 = va_arg(ap, char *);
77570+ dentry = va_arg(ap, struct dentry *);
77571+ mnt = va_arg(ap, struct vfsmount *);
77572+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77573+ break;
77574+ case GR_FILENAME_STR:
77575+ dentry = va_arg(ap, struct dentry *);
77576+ mnt = va_arg(ap, struct vfsmount *);
77577+ str1 = va_arg(ap, char *);
77578+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77579+ break;
77580+ case GR_FILENAME_TWO_INT:
77581+ dentry = va_arg(ap, struct dentry *);
77582+ mnt = va_arg(ap, struct vfsmount *);
77583+ num1 = va_arg(ap, int);
77584+ num2 = va_arg(ap, int);
77585+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77586+ break;
77587+ case GR_FILENAME_TWO_INT_STR:
77588+ dentry = va_arg(ap, struct dentry *);
77589+ mnt = va_arg(ap, struct vfsmount *);
77590+ num1 = va_arg(ap, int);
77591+ num2 = va_arg(ap, int);
77592+ str1 = va_arg(ap, char *);
77593+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77594+ break;
77595+ case GR_TEXTREL:
77596+ file = va_arg(ap, struct file *);
77597+ ulong1 = va_arg(ap, unsigned long);
77598+ ulong2 = va_arg(ap, unsigned long);
77599+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77600+ break;
77601+ case GR_PTRACE:
77602+ task = va_arg(ap, struct task_struct *);
77603+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77604+ break;
77605+ case GR_RESOURCE:
77606+ task = va_arg(ap, struct task_struct *);
77607+ cred = __task_cred(task);
77608+ pcred = __task_cred(task->real_parent);
77609+ ulong1 = va_arg(ap, unsigned long);
77610+ str1 = va_arg(ap, char *);
77611+ ulong2 = va_arg(ap, unsigned long);
77612+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77613+ break;
77614+ case GR_CAP:
77615+ task = va_arg(ap, struct task_struct *);
77616+ cred = __task_cred(task);
77617+ pcred = __task_cred(task->real_parent);
77618+ str1 = va_arg(ap, char *);
77619+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77620+ break;
77621+ case GR_SIG:
77622+ str1 = va_arg(ap, char *);
77623+ voidptr = va_arg(ap, void *);
77624+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77625+ break;
77626+ case GR_SIG2:
77627+ task = va_arg(ap, struct task_struct *);
77628+ cred = __task_cred(task);
77629+ pcred = __task_cred(task->real_parent);
77630+ num1 = va_arg(ap, int);
77631+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77632+ break;
77633+ case GR_CRASH1:
77634+ task = va_arg(ap, struct task_struct *);
77635+ cred = __task_cred(task);
77636+ pcred = __task_cred(task->real_parent);
77637+ ulong1 = va_arg(ap, unsigned long);
77638+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77639+ break;
77640+ case GR_CRASH2:
77641+ task = va_arg(ap, struct task_struct *);
77642+ cred = __task_cred(task);
77643+ pcred = __task_cred(task->real_parent);
77644+ ulong1 = va_arg(ap, unsigned long);
77645+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77646+ break;
77647+ case GR_RWXMAP:
77648+ file = va_arg(ap, struct file *);
77649+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77650+ break;
77651+ case GR_RWXMAPVMA:
77652+ vma = va_arg(ap, struct vm_area_struct *);
77653+ if (vma->vm_file)
77654+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77655+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77656+ str1 = "<stack>";
77657+ else if (vma->vm_start <= current->mm->brk &&
77658+ vma->vm_end >= current->mm->start_brk)
77659+ str1 = "<heap>";
77660+ else
77661+ str1 = "<anonymous mapping>";
77662+ gr_log_middle_varargs(audit, msg, str1);
77663+ break;
77664+ case GR_PSACCT:
77665+ {
77666+ unsigned int wday, cday;
77667+ __u8 whr, chr;
77668+ __u8 wmin, cmin;
77669+ __u8 wsec, csec;
77670+ char cur_tty[64] = { 0 };
77671+ char parent_tty[64] = { 0 };
77672+
77673+ task = va_arg(ap, struct task_struct *);
77674+ wday = va_arg(ap, unsigned int);
77675+ cday = va_arg(ap, unsigned int);
77676+ whr = va_arg(ap, int);
77677+ chr = va_arg(ap, int);
77678+ wmin = va_arg(ap, int);
77679+ cmin = va_arg(ap, int);
77680+ wsec = va_arg(ap, int);
77681+ csec = va_arg(ap, int);
77682+ ulong1 = va_arg(ap, unsigned long);
77683+ cred = __task_cred(task);
77684+ pcred = __task_cred(task->real_parent);
77685+
77686+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77687+ }
77688+ break;
77689+ default:
77690+ gr_log_middle(audit, msg, ap);
77691+ }
77692+ va_end(ap);
77693+ // these don't need DEFAULTSECARGS printed on the end
77694+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77695+ gr_log_end(audit, 0);
77696+ else
77697+ gr_log_end(audit, 1);
77698+ END_LOCKS(audit);
77699+}
77700diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77701new file mode 100644
77702index 0000000..0e39d8c
77703--- /dev/null
77704+++ b/grsecurity/grsec_mem.c
77705@@ -0,0 +1,48 @@
77706+#include <linux/kernel.h>
77707+#include <linux/sched.h>
77708+#include <linux/mm.h>
77709+#include <linux/mman.h>
77710+#include <linux/module.h>
77711+#include <linux/grinternal.h>
77712+
77713+void gr_handle_msr_write(void)
77714+{
77715+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77716+ return;
77717+}
77718+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77719+
77720+void
77721+gr_handle_ioperm(void)
77722+{
77723+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77724+ return;
77725+}
77726+
77727+void
77728+gr_handle_iopl(void)
77729+{
77730+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77731+ return;
77732+}
77733+
77734+void
77735+gr_handle_mem_readwrite(u64 from, u64 to)
77736+{
77737+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77738+ return;
77739+}
77740+
77741+void
77742+gr_handle_vm86(void)
77743+{
77744+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77745+ return;
77746+}
77747+
77748+void
77749+gr_log_badprocpid(const char *entry)
77750+{
77751+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77752+ return;
77753+}
77754diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77755new file mode 100644
77756index 0000000..6f9eb73
77757--- /dev/null
77758+++ b/grsecurity/grsec_mount.c
77759@@ -0,0 +1,65 @@
77760+#include <linux/kernel.h>
77761+#include <linux/sched.h>
77762+#include <linux/mount.h>
77763+#include <linux/major.h>
77764+#include <linux/grsecurity.h>
77765+#include <linux/grinternal.h>
77766+
77767+void
77768+gr_log_remount(const char *devname, const int retval)
77769+{
77770+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77771+ if (grsec_enable_mount && (retval >= 0))
77772+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77773+#endif
77774+ return;
77775+}
77776+
77777+void
77778+gr_log_unmount(const char *devname, const int retval)
77779+{
77780+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77781+ if (grsec_enable_mount && (retval >= 0))
77782+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77783+#endif
77784+ return;
77785+}
77786+
77787+void
77788+gr_log_mount(const char *from, struct path *to, const int retval)
77789+{
77790+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77791+ if (grsec_enable_mount && (retval >= 0))
77792+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
77793+#endif
77794+ return;
77795+}
77796+
77797+int
77798+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
77799+{
77800+#ifdef CONFIG_GRKERNSEC_ROFS
77801+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
77802+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
77803+ return -EPERM;
77804+ } else
77805+ return 0;
77806+#endif
77807+ return 0;
77808+}
77809+
77810+int
77811+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
77812+{
77813+#ifdef CONFIG_GRKERNSEC_ROFS
77814+ struct inode *inode = dentry->d_inode;
77815+
77816+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
77817+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
77818+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
77819+ return -EPERM;
77820+ } else
77821+ return 0;
77822+#endif
77823+ return 0;
77824+}
77825diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
77826new file mode 100644
77827index 0000000..6ee9d50
77828--- /dev/null
77829+++ b/grsecurity/grsec_pax.c
77830@@ -0,0 +1,45 @@
77831+#include <linux/kernel.h>
77832+#include <linux/sched.h>
77833+#include <linux/mm.h>
77834+#include <linux/file.h>
77835+#include <linux/grinternal.h>
77836+#include <linux/grsecurity.h>
77837+
77838+void
77839+gr_log_textrel(struct vm_area_struct * vma)
77840+{
77841+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77842+ if (grsec_enable_log_rwxmaps)
77843+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
77844+#endif
77845+ return;
77846+}
77847+
77848+void gr_log_ptgnustack(struct file *file)
77849+{
77850+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77851+ if (grsec_enable_log_rwxmaps)
77852+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
77853+#endif
77854+ return;
77855+}
77856+
77857+void
77858+gr_log_rwxmmap(struct file *file)
77859+{
77860+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77861+ if (grsec_enable_log_rwxmaps)
77862+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
77863+#endif
77864+ return;
77865+}
77866+
77867+void
77868+gr_log_rwxmprotect(struct vm_area_struct *vma)
77869+{
77870+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77871+ if (grsec_enable_log_rwxmaps)
77872+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
77873+#endif
77874+ return;
77875+}
77876diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
77877new file mode 100644
77878index 0000000..2005a3a
77879--- /dev/null
77880+++ b/grsecurity/grsec_proc.c
77881@@ -0,0 +1,20 @@
77882+#include <linux/kernel.h>
77883+#include <linux/sched.h>
77884+#include <linux/grsecurity.h>
77885+#include <linux/grinternal.h>
77886+
77887+int gr_proc_is_restricted(void)
77888+{
77889+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77890+ const struct cred *cred = current_cred();
77891+#endif
77892+
77893+#ifdef CONFIG_GRKERNSEC_PROC_USER
77894+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
77895+ return -EACCES;
77896+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77897+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
77898+ return -EACCES;
77899+#endif
77900+ return 0;
77901+}
77902diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
77903new file mode 100644
77904index 0000000..f7f29aa
77905--- /dev/null
77906+++ b/grsecurity/grsec_ptrace.c
77907@@ -0,0 +1,30 @@
77908+#include <linux/kernel.h>
77909+#include <linux/sched.h>
77910+#include <linux/grinternal.h>
77911+#include <linux/security.h>
77912+
77913+void
77914+gr_audit_ptrace(struct task_struct *task)
77915+{
77916+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77917+ if (grsec_enable_audit_ptrace)
77918+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
77919+#endif
77920+ return;
77921+}
77922+
77923+int
77924+gr_ptrace_readexec(struct file *file, int unsafe_flags)
77925+{
77926+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77927+ const struct dentry *dentry = file->f_path.dentry;
77928+ const struct vfsmount *mnt = file->f_path.mnt;
77929+
77930+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
77931+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
77932+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
77933+ return -EACCES;
77934+ }
77935+#endif
77936+ return 0;
77937+}
77938diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
77939new file mode 100644
77940index 0000000..3860c7e
77941--- /dev/null
77942+++ b/grsecurity/grsec_sig.c
77943@@ -0,0 +1,236 @@
77944+#include <linux/kernel.h>
77945+#include <linux/sched.h>
77946+#include <linux/fs.h>
77947+#include <linux/delay.h>
77948+#include <linux/grsecurity.h>
77949+#include <linux/grinternal.h>
77950+#include <linux/hardirq.h>
77951+
77952+char *signames[] = {
77953+ [SIGSEGV] = "Segmentation fault",
77954+ [SIGILL] = "Illegal instruction",
77955+ [SIGABRT] = "Abort",
77956+ [SIGBUS] = "Invalid alignment/Bus error"
77957+};
77958+
77959+void
77960+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
77961+{
77962+#ifdef CONFIG_GRKERNSEC_SIGNAL
77963+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
77964+ (sig == SIGABRT) || (sig == SIGBUS))) {
77965+ if (task_pid_nr(t) == task_pid_nr(current)) {
77966+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
77967+ } else {
77968+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
77969+ }
77970+ }
77971+#endif
77972+ return;
77973+}
77974+
77975+int
77976+gr_handle_signal(const struct task_struct *p, const int sig)
77977+{
77978+#ifdef CONFIG_GRKERNSEC
77979+ /* ignore the 0 signal for protected task checks */
77980+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
77981+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
77982+ return -EPERM;
77983+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
77984+ return -EPERM;
77985+ }
77986+#endif
77987+ return 0;
77988+}
77989+
77990+#ifdef CONFIG_GRKERNSEC
77991+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
77992+
77993+int gr_fake_force_sig(int sig, struct task_struct *t)
77994+{
77995+ unsigned long int flags;
77996+ int ret, blocked, ignored;
77997+ struct k_sigaction *action;
77998+
77999+ spin_lock_irqsave(&t->sighand->siglock, flags);
78000+ action = &t->sighand->action[sig-1];
78001+ ignored = action->sa.sa_handler == SIG_IGN;
78002+ blocked = sigismember(&t->blocked, sig);
78003+ if (blocked || ignored) {
78004+ action->sa.sa_handler = SIG_DFL;
78005+ if (blocked) {
78006+ sigdelset(&t->blocked, sig);
78007+ recalc_sigpending_and_wake(t);
78008+ }
78009+ }
78010+ if (action->sa.sa_handler == SIG_DFL)
78011+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78012+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78013+
78014+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78015+
78016+ return ret;
78017+}
78018+#endif
78019+
78020+#define GR_USER_BAN_TIME (15 * 60)
78021+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78022+
78023+void gr_handle_brute_attach(int dumpable)
78024+{
78025+#ifdef CONFIG_GRKERNSEC_BRUTE
78026+ struct task_struct *p = current;
78027+ kuid_t uid = GLOBAL_ROOT_UID;
78028+ int daemon = 0;
78029+
78030+ if (!grsec_enable_brute)
78031+ return;
78032+
78033+ rcu_read_lock();
78034+ read_lock(&tasklist_lock);
78035+ read_lock(&grsec_exec_file_lock);
78036+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78037+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78038+ p->real_parent->brute = 1;
78039+ daemon = 1;
78040+ } else {
78041+ const struct cred *cred = __task_cred(p), *cred2;
78042+ struct task_struct *tsk, *tsk2;
78043+
78044+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78045+ struct user_struct *user;
78046+
78047+ uid = cred->uid;
78048+
78049+ /* this is put upon execution past expiration */
78050+ user = find_user(uid);
78051+ if (user == NULL)
78052+ goto unlock;
78053+ user->suid_banned = 1;
78054+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78055+ if (user->suid_ban_expires == ~0UL)
78056+ user->suid_ban_expires--;
78057+
78058+ /* only kill other threads of the same binary, from the same user */
78059+ do_each_thread(tsk2, tsk) {
78060+ cred2 = __task_cred(tsk);
78061+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78062+ gr_fake_force_sig(SIGKILL, tsk);
78063+ } while_each_thread(tsk2, tsk);
78064+ }
78065+ }
78066+unlock:
78067+ read_unlock(&grsec_exec_file_lock);
78068+ read_unlock(&tasklist_lock);
78069+ rcu_read_unlock();
78070+
78071+ if (gr_is_global_nonroot(uid))
78072+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78073+ else if (daemon)
78074+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78075+
78076+#endif
78077+ return;
78078+}
78079+
78080+void gr_handle_brute_check(void)
78081+{
78082+#ifdef CONFIG_GRKERNSEC_BRUTE
78083+ struct task_struct *p = current;
78084+
78085+ if (unlikely(p->brute)) {
78086+ if (!grsec_enable_brute)
78087+ p->brute = 0;
78088+ else if (time_before(get_seconds(), p->brute_expires))
78089+ msleep(30 * 1000);
78090+ }
78091+#endif
78092+ return;
78093+}
78094+
78095+void gr_handle_kernel_exploit(void)
78096+{
78097+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78098+ const struct cred *cred;
78099+ struct task_struct *tsk, *tsk2;
78100+ struct user_struct *user;
78101+ kuid_t uid;
78102+
78103+ if (in_irq() || in_serving_softirq() || in_nmi())
78104+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78105+
78106+ uid = current_uid();
78107+
78108+ if (gr_is_global_root(uid))
78109+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78110+ else {
78111+ /* kill all the processes of this user, hold a reference
78112+ to their creds struct, and prevent them from creating
78113+ another process until system reset
78114+ */
78115+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78116+ GR_GLOBAL_UID(uid));
78117+ /* we intentionally leak this ref */
78118+ user = get_uid(current->cred->user);
78119+ if (user)
78120+ user->kernel_banned = 1;
78121+
78122+ /* kill all processes of this user */
78123+ read_lock(&tasklist_lock);
78124+ do_each_thread(tsk2, tsk) {
78125+ cred = __task_cred(tsk);
78126+ if (uid_eq(cred->uid, uid))
78127+ gr_fake_force_sig(SIGKILL, tsk);
78128+ } while_each_thread(tsk2, tsk);
78129+ read_unlock(&tasklist_lock);
78130+ }
78131+#endif
78132+}
78133+
78134+#ifdef CONFIG_GRKERNSEC_BRUTE
78135+static bool suid_ban_expired(struct user_struct *user)
78136+{
78137+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78138+ user->suid_banned = 0;
78139+ user->suid_ban_expires = 0;
78140+ free_uid(user);
78141+ return true;
78142+ }
78143+
78144+ return false;
78145+}
78146+#endif
78147+
78148+int gr_process_kernel_exec_ban(void)
78149+{
78150+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78151+ if (unlikely(current->cred->user->kernel_banned))
78152+ return -EPERM;
78153+#endif
78154+ return 0;
78155+}
78156+
78157+int gr_process_kernel_setuid_ban(struct user_struct *user)
78158+{
78159+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78160+ if (unlikely(user->kernel_banned))
78161+ gr_fake_force_sig(SIGKILL, current);
78162+#endif
78163+ return 0;
78164+}
78165+
78166+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78167+{
78168+#ifdef CONFIG_GRKERNSEC_BRUTE
78169+ struct user_struct *user = current->cred->user;
78170+ if (unlikely(user->suid_banned)) {
78171+ if (suid_ban_expired(user))
78172+ return 0;
78173+ /* disallow execution of suid binaries only */
78174+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78175+ return -EPERM;
78176+ }
78177+#endif
78178+ return 0;
78179+}
78180diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78181new file mode 100644
78182index 0000000..e3650b6
78183--- /dev/null
78184+++ b/grsecurity/grsec_sock.c
78185@@ -0,0 +1,244 @@
78186+#include <linux/kernel.h>
78187+#include <linux/module.h>
78188+#include <linux/sched.h>
78189+#include <linux/file.h>
78190+#include <linux/net.h>
78191+#include <linux/in.h>
78192+#include <linux/ip.h>
78193+#include <net/sock.h>
78194+#include <net/inet_sock.h>
78195+#include <linux/grsecurity.h>
78196+#include <linux/grinternal.h>
78197+#include <linux/gracl.h>
78198+
78199+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78200+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78201+
78202+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78203+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78204+
78205+#ifdef CONFIG_UNIX_MODULE
78206+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78207+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78208+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78209+EXPORT_SYMBOL_GPL(gr_handle_create);
78210+#endif
78211+
78212+#ifdef CONFIG_GRKERNSEC
78213+#define gr_conn_table_size 32749
78214+struct conn_table_entry {
78215+ struct conn_table_entry *next;
78216+ struct signal_struct *sig;
78217+};
78218+
78219+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78220+DEFINE_SPINLOCK(gr_conn_table_lock);
78221+
78222+extern const char * gr_socktype_to_name(unsigned char type);
78223+extern const char * gr_proto_to_name(unsigned char proto);
78224+extern const char * gr_sockfamily_to_name(unsigned char family);
78225+
78226+static __inline__ int
78227+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78228+{
78229+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78230+}
78231+
78232+static __inline__ int
78233+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78234+ __u16 sport, __u16 dport)
78235+{
78236+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78237+ sig->gr_sport == sport && sig->gr_dport == dport))
78238+ return 1;
78239+ else
78240+ return 0;
78241+}
78242+
78243+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78244+{
78245+ struct conn_table_entry **match;
78246+ unsigned int index;
78247+
78248+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78249+ sig->gr_sport, sig->gr_dport,
78250+ gr_conn_table_size);
78251+
78252+ newent->sig = sig;
78253+
78254+ match = &gr_conn_table[index];
78255+ newent->next = *match;
78256+ *match = newent;
78257+
78258+ return;
78259+}
78260+
78261+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78262+{
78263+ struct conn_table_entry *match, *last = NULL;
78264+ unsigned int index;
78265+
78266+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78267+ sig->gr_sport, sig->gr_dport,
78268+ gr_conn_table_size);
78269+
78270+ match = gr_conn_table[index];
78271+ while (match && !conn_match(match->sig,
78272+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78273+ sig->gr_dport)) {
78274+ last = match;
78275+ match = match->next;
78276+ }
78277+
78278+ if (match) {
78279+ if (last)
78280+ last->next = match->next;
78281+ else
78282+ gr_conn_table[index] = NULL;
78283+ kfree(match);
78284+ }
78285+
78286+ return;
78287+}
78288+
78289+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78290+ __u16 sport, __u16 dport)
78291+{
78292+ struct conn_table_entry *match;
78293+ unsigned int index;
78294+
78295+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78296+
78297+ match = gr_conn_table[index];
78298+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78299+ match = match->next;
78300+
78301+ if (match)
78302+ return match->sig;
78303+ else
78304+ return NULL;
78305+}
78306+
78307+#endif
78308+
78309+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78310+{
78311+#ifdef CONFIG_GRKERNSEC
78312+ struct signal_struct *sig = current->signal;
78313+ struct conn_table_entry *newent;
78314+
78315+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78316+ if (newent == NULL)
78317+ return;
78318+ /* no bh lock needed since we are called with bh disabled */
78319+ spin_lock(&gr_conn_table_lock);
78320+ gr_del_task_from_ip_table_nolock(sig);
78321+ sig->gr_saddr = inet->inet_rcv_saddr;
78322+ sig->gr_daddr = inet->inet_daddr;
78323+ sig->gr_sport = inet->inet_sport;
78324+ sig->gr_dport = inet->inet_dport;
78325+ gr_add_to_task_ip_table_nolock(sig, newent);
78326+ spin_unlock(&gr_conn_table_lock);
78327+#endif
78328+ return;
78329+}
78330+
78331+void gr_del_task_from_ip_table(struct task_struct *task)
78332+{
78333+#ifdef CONFIG_GRKERNSEC
78334+ spin_lock_bh(&gr_conn_table_lock);
78335+ gr_del_task_from_ip_table_nolock(task->signal);
78336+ spin_unlock_bh(&gr_conn_table_lock);
78337+#endif
78338+ return;
78339+}
78340+
78341+void
78342+gr_attach_curr_ip(const struct sock *sk)
78343+{
78344+#ifdef CONFIG_GRKERNSEC
78345+ struct signal_struct *p, *set;
78346+ const struct inet_sock *inet = inet_sk(sk);
78347+
78348+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78349+ return;
78350+
78351+ set = current->signal;
78352+
78353+ spin_lock_bh(&gr_conn_table_lock);
78354+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78355+ inet->inet_dport, inet->inet_sport);
78356+ if (unlikely(p != NULL)) {
78357+ set->curr_ip = p->curr_ip;
78358+ set->used_accept = 1;
78359+ gr_del_task_from_ip_table_nolock(p);
78360+ spin_unlock_bh(&gr_conn_table_lock);
78361+ return;
78362+ }
78363+ spin_unlock_bh(&gr_conn_table_lock);
78364+
78365+ set->curr_ip = inet->inet_daddr;
78366+ set->used_accept = 1;
78367+#endif
78368+ return;
78369+}
78370+
78371+int
78372+gr_handle_sock_all(const int family, const int type, const int protocol)
78373+{
78374+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78375+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78376+ (family != AF_UNIX)) {
78377+ if (family == AF_INET)
78378+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78379+ else
78380+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78381+ return -EACCES;
78382+ }
78383+#endif
78384+ return 0;
78385+}
78386+
78387+int
78388+gr_handle_sock_server(const struct sockaddr *sck)
78389+{
78390+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78391+ if (grsec_enable_socket_server &&
78392+ in_group_p(grsec_socket_server_gid) &&
78393+ sck && (sck->sa_family != AF_UNIX) &&
78394+ (sck->sa_family != AF_LOCAL)) {
78395+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78396+ return -EACCES;
78397+ }
78398+#endif
78399+ return 0;
78400+}
78401+
78402+int
78403+gr_handle_sock_server_other(const struct sock *sck)
78404+{
78405+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78406+ if (grsec_enable_socket_server &&
78407+ in_group_p(grsec_socket_server_gid) &&
78408+ sck && (sck->sk_family != AF_UNIX) &&
78409+ (sck->sk_family != AF_LOCAL)) {
78410+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78411+ return -EACCES;
78412+ }
78413+#endif
78414+ return 0;
78415+}
78416+
78417+int
78418+gr_handle_sock_client(const struct sockaddr *sck)
78419+{
78420+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78421+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78422+ sck && (sck->sa_family != AF_UNIX) &&
78423+ (sck->sa_family != AF_LOCAL)) {
78424+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78425+ return -EACCES;
78426+ }
78427+#endif
78428+ return 0;
78429+}
78430diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78431new file mode 100644
78432index 0000000..cce889e
78433--- /dev/null
78434+++ b/grsecurity/grsec_sysctl.c
78435@@ -0,0 +1,488 @@
78436+#include <linux/kernel.h>
78437+#include <linux/sched.h>
78438+#include <linux/sysctl.h>
78439+#include <linux/grsecurity.h>
78440+#include <linux/grinternal.h>
78441+
78442+int
78443+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78444+{
78445+#ifdef CONFIG_GRKERNSEC_SYSCTL
78446+ if (dirname == NULL || name == NULL)
78447+ return 0;
78448+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78449+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78450+ return -EACCES;
78451+ }
78452+#endif
78453+ return 0;
78454+}
78455+
78456+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78457+static int __maybe_unused __read_only one = 1;
78458+#endif
78459+
78460+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78461+ defined(CONFIG_GRKERNSEC_DENYUSB)
78462+struct ctl_table grsecurity_table[] = {
78463+#ifdef CONFIG_GRKERNSEC_SYSCTL
78464+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78465+#ifdef CONFIG_GRKERNSEC_IO
78466+ {
78467+ .procname = "disable_priv_io",
78468+ .data = &grsec_disable_privio,
78469+ .maxlen = sizeof(int),
78470+ .mode = 0600,
78471+ .proc_handler = &proc_dointvec,
78472+ },
78473+#endif
78474+#endif
78475+#ifdef CONFIG_GRKERNSEC_LINK
78476+ {
78477+ .procname = "linking_restrictions",
78478+ .data = &grsec_enable_link,
78479+ .maxlen = sizeof(int),
78480+ .mode = 0600,
78481+ .proc_handler = &proc_dointvec,
78482+ },
78483+#endif
78484+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78485+ {
78486+ .procname = "enforce_symlinksifowner",
78487+ .data = &grsec_enable_symlinkown,
78488+ .maxlen = sizeof(int),
78489+ .mode = 0600,
78490+ .proc_handler = &proc_dointvec,
78491+ },
78492+ {
78493+ .procname = "symlinkown_gid",
78494+ .data = &grsec_symlinkown_gid,
78495+ .maxlen = sizeof(int),
78496+ .mode = 0600,
78497+ .proc_handler = &proc_dointvec,
78498+ },
78499+#endif
78500+#ifdef CONFIG_GRKERNSEC_BRUTE
78501+ {
78502+ .procname = "deter_bruteforce",
78503+ .data = &grsec_enable_brute,
78504+ .maxlen = sizeof(int),
78505+ .mode = 0600,
78506+ .proc_handler = &proc_dointvec,
78507+ },
78508+#endif
78509+#ifdef CONFIG_GRKERNSEC_FIFO
78510+ {
78511+ .procname = "fifo_restrictions",
78512+ .data = &grsec_enable_fifo,
78513+ .maxlen = sizeof(int),
78514+ .mode = 0600,
78515+ .proc_handler = &proc_dointvec,
78516+ },
78517+#endif
78518+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78519+ {
78520+ .procname = "ptrace_readexec",
78521+ .data = &grsec_enable_ptrace_readexec,
78522+ .maxlen = sizeof(int),
78523+ .mode = 0600,
78524+ .proc_handler = &proc_dointvec,
78525+ },
78526+#endif
78527+#ifdef CONFIG_GRKERNSEC_SETXID
78528+ {
78529+ .procname = "consistent_setxid",
78530+ .data = &grsec_enable_setxid,
78531+ .maxlen = sizeof(int),
78532+ .mode = 0600,
78533+ .proc_handler = &proc_dointvec,
78534+ },
78535+#endif
78536+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78537+ {
78538+ .procname = "ip_blackhole",
78539+ .data = &grsec_enable_blackhole,
78540+ .maxlen = sizeof(int),
78541+ .mode = 0600,
78542+ .proc_handler = &proc_dointvec,
78543+ },
78544+ {
78545+ .procname = "lastack_retries",
78546+ .data = &grsec_lastack_retries,
78547+ .maxlen = sizeof(int),
78548+ .mode = 0600,
78549+ .proc_handler = &proc_dointvec,
78550+ },
78551+#endif
78552+#ifdef CONFIG_GRKERNSEC_EXECLOG
78553+ {
78554+ .procname = "exec_logging",
78555+ .data = &grsec_enable_execlog,
78556+ .maxlen = sizeof(int),
78557+ .mode = 0600,
78558+ .proc_handler = &proc_dointvec,
78559+ },
78560+#endif
78561+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78562+ {
78563+ .procname = "rwxmap_logging",
78564+ .data = &grsec_enable_log_rwxmaps,
78565+ .maxlen = sizeof(int),
78566+ .mode = 0600,
78567+ .proc_handler = &proc_dointvec,
78568+ },
78569+#endif
78570+#ifdef CONFIG_GRKERNSEC_SIGNAL
78571+ {
78572+ .procname = "signal_logging",
78573+ .data = &grsec_enable_signal,
78574+ .maxlen = sizeof(int),
78575+ .mode = 0600,
78576+ .proc_handler = &proc_dointvec,
78577+ },
78578+#endif
78579+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78580+ {
78581+ .procname = "forkfail_logging",
78582+ .data = &grsec_enable_forkfail,
78583+ .maxlen = sizeof(int),
78584+ .mode = 0600,
78585+ .proc_handler = &proc_dointvec,
78586+ },
78587+#endif
78588+#ifdef CONFIG_GRKERNSEC_TIME
78589+ {
78590+ .procname = "timechange_logging",
78591+ .data = &grsec_enable_time,
78592+ .maxlen = sizeof(int),
78593+ .mode = 0600,
78594+ .proc_handler = &proc_dointvec,
78595+ },
78596+#endif
78597+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78598+ {
78599+ .procname = "chroot_deny_shmat",
78600+ .data = &grsec_enable_chroot_shmat,
78601+ .maxlen = sizeof(int),
78602+ .mode = 0600,
78603+ .proc_handler = &proc_dointvec,
78604+ },
78605+#endif
78606+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78607+ {
78608+ .procname = "chroot_deny_unix",
78609+ .data = &grsec_enable_chroot_unix,
78610+ .maxlen = sizeof(int),
78611+ .mode = 0600,
78612+ .proc_handler = &proc_dointvec,
78613+ },
78614+#endif
78615+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78616+ {
78617+ .procname = "chroot_deny_mount",
78618+ .data = &grsec_enable_chroot_mount,
78619+ .maxlen = sizeof(int),
78620+ .mode = 0600,
78621+ .proc_handler = &proc_dointvec,
78622+ },
78623+#endif
78624+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78625+ {
78626+ .procname = "chroot_deny_fchdir",
78627+ .data = &grsec_enable_chroot_fchdir,
78628+ .maxlen = sizeof(int),
78629+ .mode = 0600,
78630+ .proc_handler = &proc_dointvec,
78631+ },
78632+#endif
78633+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78634+ {
78635+ .procname = "chroot_deny_chroot",
78636+ .data = &grsec_enable_chroot_double,
78637+ .maxlen = sizeof(int),
78638+ .mode = 0600,
78639+ .proc_handler = &proc_dointvec,
78640+ },
78641+#endif
78642+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78643+ {
78644+ .procname = "chroot_deny_pivot",
78645+ .data = &grsec_enable_chroot_pivot,
78646+ .maxlen = sizeof(int),
78647+ .mode = 0600,
78648+ .proc_handler = &proc_dointvec,
78649+ },
78650+#endif
78651+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78652+ {
78653+ .procname = "chroot_enforce_chdir",
78654+ .data = &grsec_enable_chroot_chdir,
78655+ .maxlen = sizeof(int),
78656+ .mode = 0600,
78657+ .proc_handler = &proc_dointvec,
78658+ },
78659+#endif
78660+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78661+ {
78662+ .procname = "chroot_deny_chmod",
78663+ .data = &grsec_enable_chroot_chmod,
78664+ .maxlen = sizeof(int),
78665+ .mode = 0600,
78666+ .proc_handler = &proc_dointvec,
78667+ },
78668+#endif
78669+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78670+ {
78671+ .procname = "chroot_deny_mknod",
78672+ .data = &grsec_enable_chroot_mknod,
78673+ .maxlen = sizeof(int),
78674+ .mode = 0600,
78675+ .proc_handler = &proc_dointvec,
78676+ },
78677+#endif
78678+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78679+ {
78680+ .procname = "chroot_restrict_nice",
78681+ .data = &grsec_enable_chroot_nice,
78682+ .maxlen = sizeof(int),
78683+ .mode = 0600,
78684+ .proc_handler = &proc_dointvec,
78685+ },
78686+#endif
78687+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78688+ {
78689+ .procname = "chroot_execlog",
78690+ .data = &grsec_enable_chroot_execlog,
78691+ .maxlen = sizeof(int),
78692+ .mode = 0600,
78693+ .proc_handler = &proc_dointvec,
78694+ },
78695+#endif
78696+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78697+ {
78698+ .procname = "chroot_caps",
78699+ .data = &grsec_enable_chroot_caps,
78700+ .maxlen = sizeof(int),
78701+ .mode = 0600,
78702+ .proc_handler = &proc_dointvec,
78703+ },
78704+#endif
78705+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78706+ {
78707+ .procname = "chroot_deny_bad_rename",
78708+ .data = &grsec_enable_chroot_rename,
78709+ .maxlen = sizeof(int),
78710+ .mode = 0600,
78711+ .proc_handler = &proc_dointvec,
78712+ },
78713+#endif
78714+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78715+ {
78716+ .procname = "chroot_deny_sysctl",
78717+ .data = &grsec_enable_chroot_sysctl,
78718+ .maxlen = sizeof(int),
78719+ .mode = 0600,
78720+ .proc_handler = &proc_dointvec,
78721+ },
78722+#endif
78723+#ifdef CONFIG_GRKERNSEC_TPE
78724+ {
78725+ .procname = "tpe",
78726+ .data = &grsec_enable_tpe,
78727+ .maxlen = sizeof(int),
78728+ .mode = 0600,
78729+ .proc_handler = &proc_dointvec,
78730+ },
78731+ {
78732+ .procname = "tpe_gid",
78733+ .data = &grsec_tpe_gid,
78734+ .maxlen = sizeof(int),
78735+ .mode = 0600,
78736+ .proc_handler = &proc_dointvec,
78737+ },
78738+#endif
78739+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78740+ {
78741+ .procname = "tpe_invert",
78742+ .data = &grsec_enable_tpe_invert,
78743+ .maxlen = sizeof(int),
78744+ .mode = 0600,
78745+ .proc_handler = &proc_dointvec,
78746+ },
78747+#endif
78748+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78749+ {
78750+ .procname = "tpe_restrict_all",
78751+ .data = &grsec_enable_tpe_all,
78752+ .maxlen = sizeof(int),
78753+ .mode = 0600,
78754+ .proc_handler = &proc_dointvec,
78755+ },
78756+#endif
78757+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78758+ {
78759+ .procname = "socket_all",
78760+ .data = &grsec_enable_socket_all,
78761+ .maxlen = sizeof(int),
78762+ .mode = 0600,
78763+ .proc_handler = &proc_dointvec,
78764+ },
78765+ {
78766+ .procname = "socket_all_gid",
78767+ .data = &grsec_socket_all_gid,
78768+ .maxlen = sizeof(int),
78769+ .mode = 0600,
78770+ .proc_handler = &proc_dointvec,
78771+ },
78772+#endif
78773+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78774+ {
78775+ .procname = "socket_client",
78776+ .data = &grsec_enable_socket_client,
78777+ .maxlen = sizeof(int),
78778+ .mode = 0600,
78779+ .proc_handler = &proc_dointvec,
78780+ },
78781+ {
78782+ .procname = "socket_client_gid",
78783+ .data = &grsec_socket_client_gid,
78784+ .maxlen = sizeof(int),
78785+ .mode = 0600,
78786+ .proc_handler = &proc_dointvec,
78787+ },
78788+#endif
78789+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78790+ {
78791+ .procname = "socket_server",
78792+ .data = &grsec_enable_socket_server,
78793+ .maxlen = sizeof(int),
78794+ .mode = 0600,
78795+ .proc_handler = &proc_dointvec,
78796+ },
78797+ {
78798+ .procname = "socket_server_gid",
78799+ .data = &grsec_socket_server_gid,
78800+ .maxlen = sizeof(int),
78801+ .mode = 0600,
78802+ .proc_handler = &proc_dointvec,
78803+ },
78804+#endif
78805+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78806+ {
78807+ .procname = "audit_group",
78808+ .data = &grsec_enable_group,
78809+ .maxlen = sizeof(int),
78810+ .mode = 0600,
78811+ .proc_handler = &proc_dointvec,
78812+ },
78813+ {
78814+ .procname = "audit_gid",
78815+ .data = &grsec_audit_gid,
78816+ .maxlen = sizeof(int),
78817+ .mode = 0600,
78818+ .proc_handler = &proc_dointvec,
78819+ },
78820+#endif
78821+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78822+ {
78823+ .procname = "audit_chdir",
78824+ .data = &grsec_enable_chdir,
78825+ .maxlen = sizeof(int),
78826+ .mode = 0600,
78827+ .proc_handler = &proc_dointvec,
78828+ },
78829+#endif
78830+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78831+ {
78832+ .procname = "audit_mount",
78833+ .data = &grsec_enable_mount,
78834+ .maxlen = sizeof(int),
78835+ .mode = 0600,
78836+ .proc_handler = &proc_dointvec,
78837+ },
78838+#endif
78839+#ifdef CONFIG_GRKERNSEC_DMESG
78840+ {
78841+ .procname = "dmesg",
78842+ .data = &grsec_enable_dmesg,
78843+ .maxlen = sizeof(int),
78844+ .mode = 0600,
78845+ .proc_handler = &proc_dointvec,
78846+ },
78847+#endif
78848+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78849+ {
78850+ .procname = "chroot_findtask",
78851+ .data = &grsec_enable_chroot_findtask,
78852+ .maxlen = sizeof(int),
78853+ .mode = 0600,
78854+ .proc_handler = &proc_dointvec,
78855+ },
78856+#endif
78857+#ifdef CONFIG_GRKERNSEC_RESLOG
78858+ {
78859+ .procname = "resource_logging",
78860+ .data = &grsec_resource_logging,
78861+ .maxlen = sizeof(int),
78862+ .mode = 0600,
78863+ .proc_handler = &proc_dointvec,
78864+ },
78865+#endif
78866+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78867+ {
78868+ .procname = "audit_ptrace",
78869+ .data = &grsec_enable_audit_ptrace,
78870+ .maxlen = sizeof(int),
78871+ .mode = 0600,
78872+ .proc_handler = &proc_dointvec,
78873+ },
78874+#endif
78875+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78876+ {
78877+ .procname = "harden_ptrace",
78878+ .data = &grsec_enable_harden_ptrace,
78879+ .maxlen = sizeof(int),
78880+ .mode = 0600,
78881+ .proc_handler = &proc_dointvec,
78882+ },
78883+#endif
78884+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78885+ {
78886+ .procname = "harden_ipc",
78887+ .data = &grsec_enable_harden_ipc,
78888+ .maxlen = sizeof(int),
78889+ .mode = 0600,
78890+ .proc_handler = &proc_dointvec,
78891+ },
78892+#endif
78893+ {
78894+ .procname = "grsec_lock",
78895+ .data = &grsec_lock,
78896+ .maxlen = sizeof(int),
78897+ .mode = 0600,
78898+ .proc_handler = &proc_dointvec,
78899+ },
78900+#endif
78901+#ifdef CONFIG_GRKERNSEC_ROFS
78902+ {
78903+ .procname = "romount_protect",
78904+ .data = &grsec_enable_rofs,
78905+ .maxlen = sizeof(int),
78906+ .mode = 0600,
78907+ .proc_handler = &proc_dointvec_minmax,
78908+ .extra1 = &one,
78909+ .extra2 = &one,
78910+ },
78911+#endif
78912+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
78913+ {
78914+ .procname = "deny_new_usb",
78915+ .data = &grsec_deny_new_usb,
78916+ .maxlen = sizeof(int),
78917+ .mode = 0600,
78918+ .proc_handler = &proc_dointvec,
78919+ },
78920+#endif
78921+ { }
78922+};
78923+#endif
78924diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
78925new file mode 100644
78926index 0000000..61b514e
78927--- /dev/null
78928+++ b/grsecurity/grsec_time.c
78929@@ -0,0 +1,16 @@
78930+#include <linux/kernel.h>
78931+#include <linux/sched.h>
78932+#include <linux/grinternal.h>
78933+#include <linux/module.h>
78934+
78935+void
78936+gr_log_timechange(void)
78937+{
78938+#ifdef CONFIG_GRKERNSEC_TIME
78939+ if (grsec_enable_time)
78940+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
78941+#endif
78942+ return;
78943+}
78944+
78945+EXPORT_SYMBOL_GPL(gr_log_timechange);
78946diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
78947new file mode 100644
78948index 0000000..d1953de
78949--- /dev/null
78950+++ b/grsecurity/grsec_tpe.c
78951@@ -0,0 +1,78 @@
78952+#include <linux/kernel.h>
78953+#include <linux/sched.h>
78954+#include <linux/file.h>
78955+#include <linux/fs.h>
78956+#include <linux/grinternal.h>
78957+
78958+extern int gr_acl_tpe_check(void);
78959+
78960+int
78961+gr_tpe_allow(const struct file *file)
78962+{
78963+#ifdef CONFIG_GRKERNSEC
78964+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
78965+ struct inode *file_inode = file->f_path.dentry->d_inode;
78966+ const struct cred *cred = current_cred();
78967+ char *msg = NULL;
78968+ char *msg2 = NULL;
78969+
78970+ // never restrict root
78971+ if (gr_is_global_root(cred->uid))
78972+ return 1;
78973+
78974+ if (grsec_enable_tpe) {
78975+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78976+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
78977+ msg = "not being in trusted group";
78978+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
78979+ msg = "being in untrusted group";
78980+#else
78981+ if (in_group_p(grsec_tpe_gid))
78982+ msg = "being in untrusted group";
78983+#endif
78984+ }
78985+ if (!msg && gr_acl_tpe_check())
78986+ msg = "being in untrusted role";
78987+
78988+ // not in any affected group/role
78989+ if (!msg)
78990+ goto next_check;
78991+
78992+ if (gr_is_global_nonroot(inode->i_uid))
78993+ msg2 = "file in non-root-owned directory";
78994+ else if (inode->i_mode & S_IWOTH)
78995+ msg2 = "file in world-writable directory";
78996+ else if (inode->i_mode & S_IWGRP)
78997+ msg2 = "file in group-writable directory";
78998+ else if (file_inode->i_mode & S_IWOTH)
78999+ msg2 = "file is world-writable";
79000+
79001+ if (msg && msg2) {
79002+ char fullmsg[70] = {0};
79003+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79004+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79005+ return 0;
79006+ }
79007+ msg = NULL;
79008+next_check:
79009+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79010+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79011+ return 1;
79012+
79013+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79014+ msg = "directory not owned by user";
79015+ else if (inode->i_mode & S_IWOTH)
79016+ msg = "file in world-writable directory";
79017+ else if (inode->i_mode & S_IWGRP)
79018+ msg = "file in group-writable directory";
79019+ else if (file_inode->i_mode & S_IWOTH)
79020+ msg = "file is world-writable";
79021+
79022+ if (msg) {
79023+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79024+ return 0;
79025+ }
79026+#endif
79027+#endif
79028+ return 1;
79029+}
79030diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79031new file mode 100644
79032index 0000000..ae02d8e
79033--- /dev/null
79034+++ b/grsecurity/grsec_usb.c
79035@@ -0,0 +1,15 @@
79036+#include <linux/kernel.h>
79037+#include <linux/grinternal.h>
79038+#include <linux/module.h>
79039+
79040+int gr_handle_new_usb(void)
79041+{
79042+#ifdef CONFIG_GRKERNSEC_DENYUSB
79043+ if (grsec_deny_new_usb) {
79044+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79045+ return 1;
79046+ }
79047+#endif
79048+ return 0;
79049+}
79050+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79051diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79052new file mode 100644
79053index 0000000..158b330
79054--- /dev/null
79055+++ b/grsecurity/grsum.c
79056@@ -0,0 +1,64 @@
79057+#include <linux/err.h>
79058+#include <linux/kernel.h>
79059+#include <linux/sched.h>
79060+#include <linux/mm.h>
79061+#include <linux/scatterlist.h>
79062+#include <linux/crypto.h>
79063+#include <linux/gracl.h>
79064+
79065+
79066+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79067+#error "crypto and sha256 must be built into the kernel"
79068+#endif
79069+
79070+int
79071+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79072+{
79073+ struct crypto_hash *tfm;
79074+ struct hash_desc desc;
79075+ struct scatterlist sg[2];
79076+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79077+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79078+ unsigned long *sumptr = (unsigned long *)sum;
79079+ int cryptres;
79080+ int retval = 1;
79081+ volatile int mismatched = 0;
79082+ volatile int dummy = 0;
79083+ unsigned int i;
79084+
79085+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79086+ if (IS_ERR(tfm)) {
79087+ /* should never happen, since sha256 should be built in */
79088+ memset(entry->pw, 0, GR_PW_LEN);
79089+ return 1;
79090+ }
79091+
79092+ sg_init_table(sg, 2);
79093+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79094+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79095+
79096+ desc.tfm = tfm;
79097+ desc.flags = 0;
79098+
79099+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79100+ temp_sum);
79101+
79102+ memset(entry->pw, 0, GR_PW_LEN);
79103+
79104+ if (cryptres)
79105+ goto out;
79106+
79107+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79108+ if (sumptr[i] != tmpsumptr[i])
79109+ mismatched = 1;
79110+ else
79111+ dummy = 1; // waste a cycle
79112+
79113+ if (!mismatched)
79114+ retval = dummy - 1;
79115+
79116+out:
79117+ crypto_free_hash(tfm);
79118+
79119+ return retval;
79120+}
79121diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79122index 77ff547..181834f 100644
79123--- a/include/asm-generic/4level-fixup.h
79124+++ b/include/asm-generic/4level-fixup.h
79125@@ -13,8 +13,10 @@
79126 #define pmd_alloc(mm, pud, address) \
79127 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79128 NULL: pmd_offset(pud, address))
79129+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79130
79131 #define pud_alloc(mm, pgd, address) (pgd)
79132+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79133 #define pud_offset(pgd, start) (pgd)
79134 #define pud_none(pud) 0
79135 #define pud_bad(pud) 0
79136diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79137index b7babf0..1e4b4f1 100644
79138--- a/include/asm-generic/atomic-long.h
79139+++ b/include/asm-generic/atomic-long.h
79140@@ -22,6 +22,12 @@
79141
79142 typedef atomic64_t atomic_long_t;
79143
79144+#ifdef CONFIG_PAX_REFCOUNT
79145+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79146+#else
79147+typedef atomic64_t atomic_long_unchecked_t;
79148+#endif
79149+
79150 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79151
79152 static inline long atomic_long_read(atomic_long_t *l)
79153@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79154 return (long)atomic64_read(v);
79155 }
79156
79157+#ifdef CONFIG_PAX_REFCOUNT
79158+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79159+{
79160+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79161+
79162+ return (long)atomic64_read_unchecked(v);
79163+}
79164+#endif
79165+
79166 static inline void atomic_long_set(atomic_long_t *l, long i)
79167 {
79168 atomic64_t *v = (atomic64_t *)l;
79169@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79170 atomic64_set(v, i);
79171 }
79172
79173+#ifdef CONFIG_PAX_REFCOUNT
79174+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79175+{
79176+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79177+
79178+ atomic64_set_unchecked(v, i);
79179+}
79180+#endif
79181+
79182 static inline void atomic_long_inc(atomic_long_t *l)
79183 {
79184 atomic64_t *v = (atomic64_t *)l;
79185@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79186 atomic64_inc(v);
79187 }
79188
79189+#ifdef CONFIG_PAX_REFCOUNT
79190+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79191+{
79192+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79193+
79194+ atomic64_inc_unchecked(v);
79195+}
79196+#endif
79197+
79198 static inline void atomic_long_dec(atomic_long_t *l)
79199 {
79200 atomic64_t *v = (atomic64_t *)l;
79201@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79202 atomic64_dec(v);
79203 }
79204
79205+#ifdef CONFIG_PAX_REFCOUNT
79206+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79207+{
79208+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79209+
79210+ atomic64_dec_unchecked(v);
79211+}
79212+#endif
79213+
79214 static inline void atomic_long_add(long i, atomic_long_t *l)
79215 {
79216 atomic64_t *v = (atomic64_t *)l;
79217@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79218 atomic64_add(i, v);
79219 }
79220
79221+#ifdef CONFIG_PAX_REFCOUNT
79222+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79223+{
79224+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79225+
79226+ atomic64_add_unchecked(i, v);
79227+}
79228+#endif
79229+
79230 static inline void atomic_long_sub(long i, atomic_long_t *l)
79231 {
79232 atomic64_t *v = (atomic64_t *)l;
79233@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79234 atomic64_sub(i, v);
79235 }
79236
79237+#ifdef CONFIG_PAX_REFCOUNT
79238+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79239+{
79240+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79241+
79242+ atomic64_sub_unchecked(i, v);
79243+}
79244+#endif
79245+
79246 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79247 {
79248 atomic64_t *v = (atomic64_t *)l;
79249@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79250 return atomic64_add_negative(i, v);
79251 }
79252
79253-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79254+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79255 {
79256 atomic64_t *v = (atomic64_t *)l;
79257
79258 return (long)atomic64_add_return(i, v);
79259 }
79260
79261+#ifdef CONFIG_PAX_REFCOUNT
79262+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79263+{
79264+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79265+
79266+ return (long)atomic64_add_return_unchecked(i, v);
79267+}
79268+#endif
79269+
79270 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79271 {
79272 atomic64_t *v = (atomic64_t *)l;
79273@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79274 return (long)atomic64_inc_return(v);
79275 }
79276
79277+#ifdef CONFIG_PAX_REFCOUNT
79278+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79279+{
79280+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79281+
79282+ return (long)atomic64_inc_return_unchecked(v);
79283+}
79284+#endif
79285+
79286 static inline long atomic_long_dec_return(atomic_long_t *l)
79287 {
79288 atomic64_t *v = (atomic64_t *)l;
79289@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79290
79291 typedef atomic_t atomic_long_t;
79292
79293+#ifdef CONFIG_PAX_REFCOUNT
79294+typedef atomic_unchecked_t atomic_long_unchecked_t;
79295+#else
79296+typedef atomic_t atomic_long_unchecked_t;
79297+#endif
79298+
79299 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79300 static inline long atomic_long_read(atomic_long_t *l)
79301 {
79302@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79303 return (long)atomic_read(v);
79304 }
79305
79306+#ifdef CONFIG_PAX_REFCOUNT
79307+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79308+{
79309+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79310+
79311+ return (long)atomic_read_unchecked(v);
79312+}
79313+#endif
79314+
79315 static inline void atomic_long_set(atomic_long_t *l, long i)
79316 {
79317 atomic_t *v = (atomic_t *)l;
79318@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79319 atomic_set(v, i);
79320 }
79321
79322+#ifdef CONFIG_PAX_REFCOUNT
79323+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79324+{
79325+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79326+
79327+ atomic_set_unchecked(v, i);
79328+}
79329+#endif
79330+
79331 static inline void atomic_long_inc(atomic_long_t *l)
79332 {
79333 atomic_t *v = (atomic_t *)l;
79334@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79335 atomic_inc(v);
79336 }
79337
79338+#ifdef CONFIG_PAX_REFCOUNT
79339+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79340+{
79341+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79342+
79343+ atomic_inc_unchecked(v);
79344+}
79345+#endif
79346+
79347 static inline void atomic_long_dec(atomic_long_t *l)
79348 {
79349 atomic_t *v = (atomic_t *)l;
79350@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79351 atomic_dec(v);
79352 }
79353
79354+#ifdef CONFIG_PAX_REFCOUNT
79355+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79356+{
79357+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79358+
79359+ atomic_dec_unchecked(v);
79360+}
79361+#endif
79362+
79363 static inline void atomic_long_add(long i, atomic_long_t *l)
79364 {
79365 atomic_t *v = (atomic_t *)l;
79366@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79367 atomic_add(i, v);
79368 }
79369
79370+#ifdef CONFIG_PAX_REFCOUNT
79371+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79372+{
79373+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79374+
79375+ atomic_add_unchecked(i, v);
79376+}
79377+#endif
79378+
79379 static inline void atomic_long_sub(long i, atomic_long_t *l)
79380 {
79381 atomic_t *v = (atomic_t *)l;
79382@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79383 atomic_sub(i, v);
79384 }
79385
79386+#ifdef CONFIG_PAX_REFCOUNT
79387+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79388+{
79389+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79390+
79391+ atomic_sub_unchecked(i, v);
79392+}
79393+#endif
79394+
79395 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79396 {
79397 atomic_t *v = (atomic_t *)l;
79398@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79399 return atomic_add_negative(i, v);
79400 }
79401
79402-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79403+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79404 {
79405 atomic_t *v = (atomic_t *)l;
79406
79407 return (long)atomic_add_return(i, v);
79408 }
79409
79410+#ifdef CONFIG_PAX_REFCOUNT
79411+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79412+{
79413+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79414+
79415+ return (long)atomic_add_return_unchecked(i, v);
79416+}
79417+
79418+#endif
79419+
79420 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79421 {
79422 atomic_t *v = (atomic_t *)l;
79423@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79424 return (long)atomic_inc_return(v);
79425 }
79426
79427+#ifdef CONFIG_PAX_REFCOUNT
79428+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79429+{
79430+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79431+
79432+ return (long)atomic_inc_return_unchecked(v);
79433+}
79434+#endif
79435+
79436 static inline long atomic_long_dec_return(atomic_long_t *l)
79437 {
79438 atomic_t *v = (atomic_t *)l;
79439@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79440
79441 #endif /* BITS_PER_LONG == 64 */
79442
79443+#ifdef CONFIG_PAX_REFCOUNT
79444+static inline void pax_refcount_needs_these_functions(void)
79445+{
79446+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79447+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79448+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79449+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79450+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79451+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79452+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79453+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79454+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79455+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79456+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79457+#ifdef CONFIG_X86
79458+ atomic_clear_mask_unchecked(0, NULL);
79459+ atomic_set_mask_unchecked(0, NULL);
79460+#endif
79461+
79462+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79463+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79464+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79465+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79466+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79467+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79468+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79469+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79470+}
79471+#else
79472+#define atomic_read_unchecked(v) atomic_read(v)
79473+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79474+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79475+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79476+#define atomic_inc_unchecked(v) atomic_inc(v)
79477+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79478+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79479+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79480+#define atomic_dec_unchecked(v) atomic_dec(v)
79481+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79482+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79483+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79484+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79485+
79486+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79487+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79488+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79489+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79490+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79491+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79492+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79493+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79494+#endif
79495+
79496 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
79497diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79498index 30ad9c8..c70c170 100644
79499--- a/include/asm-generic/atomic64.h
79500+++ b/include/asm-generic/atomic64.h
79501@@ -16,6 +16,8 @@ typedef struct {
79502 long long counter;
79503 } atomic64_t;
79504
79505+typedef atomic64_t atomic64_unchecked_t;
79506+
79507 #define ATOMIC64_INIT(i) { (i) }
79508
79509 extern long long atomic64_read(const atomic64_t *v);
79510@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79511 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79512 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79513
79514+#define atomic64_read_unchecked(v) atomic64_read(v)
79515+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79516+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79517+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79518+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79519+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79520+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79521+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79522+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79523+
79524 #endif /* _ASM_GENERIC_ATOMIC64_H */
79525diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79526index f5c40b0..e902f9d 100644
79527--- a/include/asm-generic/barrier.h
79528+++ b/include/asm-generic/barrier.h
79529@@ -82,7 +82,7 @@
79530 do { \
79531 compiletime_assert_atomic_type(*p); \
79532 smp_mb(); \
79533- ACCESS_ONCE(*p) = (v); \
79534+ ACCESS_ONCE_RW(*p) = (v); \
79535 } while (0)
79536
79537 #define smp_load_acquire(p) \
79538diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79539index a60a7cc..0fe12f2 100644
79540--- a/include/asm-generic/bitops/__fls.h
79541+++ b/include/asm-generic/bitops/__fls.h
79542@@ -9,7 +9,7 @@
79543 *
79544 * Undefined if no set bit exists, so code should check against 0 first.
79545 */
79546-static __always_inline unsigned long __fls(unsigned long word)
79547+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79548 {
79549 int num = BITS_PER_LONG - 1;
79550
79551diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79552index 0576d1f..dad6c71 100644
79553--- a/include/asm-generic/bitops/fls.h
79554+++ b/include/asm-generic/bitops/fls.h
79555@@ -9,7 +9,7 @@
79556 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79557 */
79558
79559-static __always_inline int fls(int x)
79560+static __always_inline int __intentional_overflow(-1) fls(int x)
79561 {
79562 int r = 32;
79563
79564diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79565index b097cf8..3d40e14 100644
79566--- a/include/asm-generic/bitops/fls64.h
79567+++ b/include/asm-generic/bitops/fls64.h
79568@@ -15,7 +15,7 @@
79569 * at position 64.
79570 */
79571 #if BITS_PER_LONG == 32
79572-static __always_inline int fls64(__u64 x)
79573+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79574 {
79575 __u32 h = x >> 32;
79576 if (h)
79577@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79578 return fls(x);
79579 }
79580 #elif BITS_PER_LONG == 64
79581-static __always_inline int fls64(__u64 x)
79582+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79583 {
79584 if (x == 0)
79585 return 0;
79586diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79587index 1bfcfe5..e04c5c9 100644
79588--- a/include/asm-generic/cache.h
79589+++ b/include/asm-generic/cache.h
79590@@ -6,7 +6,7 @@
79591 * cache lines need to provide their own cache.h.
79592 */
79593
79594-#define L1_CACHE_SHIFT 5
79595-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79596+#define L1_CACHE_SHIFT 5UL
79597+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79598
79599 #endif /* __ASM_GENERIC_CACHE_H */
79600diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79601index 0d68a1e..b74a761 100644
79602--- a/include/asm-generic/emergency-restart.h
79603+++ b/include/asm-generic/emergency-restart.h
79604@@ -1,7 +1,7 @@
79605 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79606 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79607
79608-static inline void machine_emergency_restart(void)
79609+static inline __noreturn void machine_emergency_restart(void)
79610 {
79611 machine_restart(NULL);
79612 }
79613diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79614index 90f99c7..00ce236 100644
79615--- a/include/asm-generic/kmap_types.h
79616+++ b/include/asm-generic/kmap_types.h
79617@@ -2,9 +2,9 @@
79618 #define _ASM_GENERIC_KMAP_TYPES_H
79619
79620 #ifdef __WITH_KM_FENCE
79621-# define KM_TYPE_NR 41
79622+# define KM_TYPE_NR 42
79623 #else
79624-# define KM_TYPE_NR 20
79625+# define KM_TYPE_NR 21
79626 #endif
79627
79628 #endif
79629diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79630index 9ceb03b..62b0b8f 100644
79631--- a/include/asm-generic/local.h
79632+++ b/include/asm-generic/local.h
79633@@ -23,24 +23,37 @@ typedef struct
79634 atomic_long_t a;
79635 } local_t;
79636
79637+typedef struct {
79638+ atomic_long_unchecked_t a;
79639+} local_unchecked_t;
79640+
79641 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79642
79643 #define local_read(l) atomic_long_read(&(l)->a)
79644+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79645 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79646+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79647 #define local_inc(l) atomic_long_inc(&(l)->a)
79648+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79649 #define local_dec(l) atomic_long_dec(&(l)->a)
79650+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79651 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79652+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79653 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79654+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79655
79656 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79657 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79658 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79659 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79660 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79661+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79662 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79663 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79664+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79665
79666 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79667+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79668 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79669 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79670 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
79671diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79672index 725612b..9cc513a 100644
79673--- a/include/asm-generic/pgtable-nopmd.h
79674+++ b/include/asm-generic/pgtable-nopmd.h
79675@@ -1,14 +1,19 @@
79676 #ifndef _PGTABLE_NOPMD_H
79677 #define _PGTABLE_NOPMD_H
79678
79679-#ifndef __ASSEMBLY__
79680-
79681 #include <asm-generic/pgtable-nopud.h>
79682
79683-struct mm_struct;
79684-
79685 #define __PAGETABLE_PMD_FOLDED
79686
79687+#define PMD_SHIFT PUD_SHIFT
79688+#define PTRS_PER_PMD 1
79689+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79690+#define PMD_MASK (~(PMD_SIZE-1))
79691+
79692+#ifndef __ASSEMBLY__
79693+
79694+struct mm_struct;
79695+
79696 /*
79697 * Having the pmd type consist of a pud gets the size right, and allows
79698 * us to conceptually access the pud entry that this pmd is folded into
79699@@ -16,11 +21,6 @@ struct mm_struct;
79700 */
79701 typedef struct { pud_t pud; } pmd_t;
79702
79703-#define PMD_SHIFT PUD_SHIFT
79704-#define PTRS_PER_PMD 1
79705-#define PMD_SIZE (1UL << PMD_SHIFT)
79706-#define PMD_MASK (~(PMD_SIZE-1))
79707-
79708 /*
79709 * The "pud_xxx()" functions here are trivial for a folded two-level
79710 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79711diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79712index 810431d..0ec4804f 100644
79713--- a/include/asm-generic/pgtable-nopud.h
79714+++ b/include/asm-generic/pgtable-nopud.h
79715@@ -1,10 +1,15 @@
79716 #ifndef _PGTABLE_NOPUD_H
79717 #define _PGTABLE_NOPUD_H
79718
79719-#ifndef __ASSEMBLY__
79720-
79721 #define __PAGETABLE_PUD_FOLDED
79722
79723+#define PUD_SHIFT PGDIR_SHIFT
79724+#define PTRS_PER_PUD 1
79725+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79726+#define PUD_MASK (~(PUD_SIZE-1))
79727+
79728+#ifndef __ASSEMBLY__
79729+
79730 /*
79731 * Having the pud type consist of a pgd gets the size right, and allows
79732 * us to conceptually access the pgd entry that this pud is folded into
79733@@ -12,11 +17,6 @@
79734 */
79735 typedef struct { pgd_t pgd; } pud_t;
79736
79737-#define PUD_SHIFT PGDIR_SHIFT
79738-#define PTRS_PER_PUD 1
79739-#define PUD_SIZE (1UL << PUD_SHIFT)
79740-#define PUD_MASK (~(PUD_SIZE-1))
79741-
79742 /*
79743 * The "pgd_xxx()" functions here are trivial for a folded two-level
79744 * setup: the pud is never bad, and a pud always exists (as it's folded
79745@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79746 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79747
79748 #define pgd_populate(mm, pgd, pud) do { } while (0)
79749+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79750 /*
79751 * (puds are folded into pgds so this doesn't get actually called,
79752 * but the define is needed for a generic inline function.)
79753diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79754index 177d597..2826237 100644
79755--- a/include/asm-generic/pgtable.h
79756+++ b/include/asm-generic/pgtable.h
79757@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79758 }
79759 #endif /* CONFIG_NUMA_BALANCING */
79760
79761+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79762+#ifdef CONFIG_PAX_KERNEXEC
79763+#error KERNEXEC requires pax_open_kernel
79764+#else
79765+static inline unsigned long pax_open_kernel(void) { return 0; }
79766+#endif
79767+#endif
79768+
79769+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79770+#ifdef CONFIG_PAX_KERNEXEC
79771+#error KERNEXEC requires pax_close_kernel
79772+#else
79773+static inline unsigned long pax_close_kernel(void) { return 0; }
79774+#endif
79775+#endif
79776+
79777 #endif /* CONFIG_MMU */
79778
79779 #endif /* !__ASSEMBLY__ */
79780diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
79781index 72d8803..cb9749c 100644
79782--- a/include/asm-generic/uaccess.h
79783+++ b/include/asm-generic/uaccess.h
79784@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
79785 return __clear_user(to, n);
79786 }
79787
79788+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
79789+#ifdef CONFIG_PAX_MEMORY_UDEREF
79790+#error UDEREF requires pax_open_userland
79791+#else
79792+static inline unsigned long pax_open_userland(void) { return 0; }
79793+#endif
79794+#endif
79795+
79796+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
79797+#ifdef CONFIG_PAX_MEMORY_UDEREF
79798+#error UDEREF requires pax_close_userland
79799+#else
79800+static inline unsigned long pax_close_userland(void) { return 0; }
79801+#endif
79802+#endif
79803+
79804 #endif /* __ASM_GENERIC_UACCESS_H */
79805diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
79806index bee5d68..8d362d1 100644
79807--- a/include/asm-generic/vmlinux.lds.h
79808+++ b/include/asm-generic/vmlinux.lds.h
79809@@ -234,6 +234,7 @@
79810 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
79811 VMLINUX_SYMBOL(__start_rodata) = .; \
79812 *(.rodata) *(.rodata.*) \
79813+ *(.data..read_only) \
79814 *(__vermagic) /* Kernel version magic */ \
79815 . = ALIGN(8); \
79816 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
79817@@ -726,17 +727,18 @@
79818 * section in the linker script will go there too. @phdr should have
79819 * a leading colon.
79820 *
79821- * Note that this macros defines __per_cpu_load as an absolute symbol.
79822+ * Note that this macros defines per_cpu_load as an absolute symbol.
79823 * If there is no need to put the percpu section at a predetermined
79824 * address, use PERCPU_SECTION.
79825 */
79826 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
79827- VMLINUX_SYMBOL(__per_cpu_load) = .; \
79828- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
79829+ per_cpu_load = .; \
79830+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
79831 - LOAD_OFFSET) { \
79832+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
79833 PERCPU_INPUT(cacheline) \
79834 } phdr \
79835- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
79836+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
79837
79838 /**
79839 * PERCPU_SECTION - define output section for percpu area, simple version
79840diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
79841index 623a59c..1e79ab9 100644
79842--- a/include/crypto/algapi.h
79843+++ b/include/crypto/algapi.h
79844@@ -34,7 +34,7 @@ struct crypto_type {
79845 unsigned int maskclear;
79846 unsigned int maskset;
79847 unsigned int tfmsize;
79848-};
79849+} __do_const;
79850
79851 struct crypto_instance {
79852 struct crypto_alg alg;
79853diff --git a/include/drm/drmP.h b/include/drm/drmP.h
79854index e1b2e8b..2697bd2 100644
79855--- a/include/drm/drmP.h
79856+++ b/include/drm/drmP.h
79857@@ -59,6 +59,7 @@
79858
79859 #include <asm/mman.h>
79860 #include <asm/pgalloc.h>
79861+#include <asm/local.h>
79862 #include <asm/uaccess.h>
79863
79864 #include <uapi/drm/drm.h>
79865@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
79866 * \param cmd command.
79867 * \param arg argument.
79868 */
79869-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
79870+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
79871+ struct drm_file *file_priv);
79872+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
79873 struct drm_file *file_priv);
79874
79875-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79876+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
79877 unsigned long arg);
79878
79879 #define DRM_IOCTL_NR(n) _IOC_NR(n)
79880@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
79881 struct drm_ioctl_desc {
79882 unsigned int cmd;
79883 int flags;
79884- drm_ioctl_t *func;
79885+ drm_ioctl_t func;
79886 unsigned int cmd_drv;
79887 const char *name;
79888-};
79889+} __do_const;
79890
79891 /**
79892 * Creates a driver or general drm_ioctl_desc array entry for the given
79893@@ -629,7 +632,8 @@ struct drm_info_list {
79894 int (*show)(struct seq_file*, void*); /** show callback */
79895 u32 driver_features; /**< Required driver features for this entry */
79896 void *data;
79897-};
79898+} __do_const;
79899+typedef struct drm_info_list __no_const drm_info_list_no_const;
79900
79901 /**
79902 * debugfs node structure. This structure represents a debugfs file.
79903@@ -713,7 +717,7 @@ struct drm_device {
79904
79905 /** \name Usage Counters */
79906 /*@{ */
79907- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79908+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
79909 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
79910 int buf_use; /**< Buffers in use -- cannot alloc */
79911 atomic_t buf_alloc; /**< Buffer allocation in progress */
79912diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
79913index 7adbb65..2a1eb1f 100644
79914--- a/include/drm/drm_crtc_helper.h
79915+++ b/include/drm/drm_crtc_helper.h
79916@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
79917 struct drm_connector *connector);
79918 /* disable encoder when not in use - more explicit than dpms off */
79919 void (*disable)(struct drm_encoder *encoder);
79920-};
79921+} __no_const;
79922
79923 /**
79924 * drm_connector_helper_funcs - helper operations for connectors
79925diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
79926index d016dc5..3951fe0 100644
79927--- a/include/drm/i915_pciids.h
79928+++ b/include/drm/i915_pciids.h
79929@@ -37,7 +37,7 @@
79930 */
79931 #define INTEL_VGA_DEVICE(id, info) { \
79932 0x8086, id, \
79933- ~0, ~0, \
79934+ PCI_ANY_ID, PCI_ANY_ID, \
79935 0x030000, 0xff0000, \
79936 (unsigned long) info }
79937
79938diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
79939index 72dcbe8..8db58d7 100644
79940--- a/include/drm/ttm/ttm_memory.h
79941+++ b/include/drm/ttm/ttm_memory.h
79942@@ -48,7 +48,7 @@
79943
79944 struct ttm_mem_shrink {
79945 int (*do_shrink) (struct ttm_mem_shrink *);
79946-};
79947+} __no_const;
79948
79949 /**
79950 * struct ttm_mem_global - Global memory accounting structure.
79951diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
79952index 49a8284..9643967 100644
79953--- a/include/drm/ttm/ttm_page_alloc.h
79954+++ b/include/drm/ttm/ttm_page_alloc.h
79955@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
79956 */
79957 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
79958
79959+struct device;
79960 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79961 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
79962
79963diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
79964index 4b840e8..155d235 100644
79965--- a/include/keys/asymmetric-subtype.h
79966+++ b/include/keys/asymmetric-subtype.h
79967@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
79968 /* Verify the signature on a key of this subtype (optional) */
79969 int (*verify_signature)(const struct key *key,
79970 const struct public_key_signature *sig);
79971-};
79972+} __do_const;
79973
79974 /**
79975 * asymmetric_key_subtype - Get the subtype from an asymmetric key
79976diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
79977index c1da539..1dcec55 100644
79978--- a/include/linux/atmdev.h
79979+++ b/include/linux/atmdev.h
79980@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
79981 #endif
79982
79983 struct k_atm_aal_stats {
79984-#define __HANDLE_ITEM(i) atomic_t i
79985+#define __HANDLE_ITEM(i) atomic_unchecked_t i
79986 __AAL_STAT_ITEMS
79987 #undef __HANDLE_ITEM
79988 };
79989@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
79990 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
79991 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
79992 struct module *owner;
79993-};
79994+} __do_const ;
79995
79996 struct atmphy_ops {
79997 int (*start)(struct atm_dev *dev);
79998diff --git a/include/linux/atomic.h b/include/linux/atomic.h
79999index 5b08a85..60922fb 100644
80000--- a/include/linux/atomic.h
80001+++ b/include/linux/atomic.h
80002@@ -12,7 +12,7 @@
80003 * Atomically adds @a to @v, so long as @v was not already @u.
80004 * Returns non-zero if @v was not @u, and zero otherwise.
80005 */
80006-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80007+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80008 {
80009 return __atomic_add_unless(v, a, u) != u;
80010 }
80011diff --git a/include/linux/audit.h b/include/linux/audit.h
80012index af84234..4177a40 100644
80013--- a/include/linux/audit.h
80014+++ b/include/linux/audit.h
80015@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
80016 extern unsigned int audit_serial(void);
80017 extern int auditsc_get_stamp(struct audit_context *ctx,
80018 struct timespec *t, unsigned int *serial);
80019-extern int audit_set_loginuid(kuid_t loginuid);
80020+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80021
80022 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80023 {
80024diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80025index 576e463..28fd926 100644
80026--- a/include/linux/binfmts.h
80027+++ b/include/linux/binfmts.h
80028@@ -44,7 +44,7 @@ struct linux_binprm {
80029 unsigned interp_flags;
80030 unsigned interp_data;
80031 unsigned long loader, exec;
80032-};
80033+} __randomize_layout;
80034
80035 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80036 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80037@@ -77,8 +77,10 @@ struct linux_binfmt {
80038 int (*load_binary)(struct linux_binprm *);
80039 int (*load_shlib)(struct file *);
80040 int (*core_dump)(struct coredump_params *cprm);
80041+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80042+ void (*handle_mmap)(struct file *);
80043 unsigned long min_coredump; /* minimal dump size */
80044-};
80045+} __do_const __randomize_layout;
80046
80047 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80048
80049diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80050index 202e403..16e6617 100644
80051--- a/include/linux/bitmap.h
80052+++ b/include/linux/bitmap.h
80053@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80054 return __bitmap_full(src, nbits);
80055 }
80056
80057-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80058+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80059 {
80060 if (small_const_nbits(nbits))
80061 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80062diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80063index 5d858e0..336c1d9 100644
80064--- a/include/linux/bitops.h
80065+++ b/include/linux/bitops.h
80066@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80067 * @word: value to rotate
80068 * @shift: bits to roll
80069 */
80070-static inline __u32 rol32(__u32 word, unsigned int shift)
80071+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80072 {
80073 return (word << shift) | (word >> (32 - shift));
80074 }
80075@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80076 * @word: value to rotate
80077 * @shift: bits to roll
80078 */
80079-static inline __u32 ror32(__u32 word, unsigned int shift)
80080+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80081 {
80082 return (word >> shift) | (word << (32 - shift));
80083 }
80084@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80085 return (__s32)(value << shift) >> shift;
80086 }
80087
80088-static inline unsigned fls_long(unsigned long l)
80089+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80090 {
80091 if (sizeof(l) == 4)
80092 return fls(l);
80093diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80094index 92f4b4b..483d537 100644
80095--- a/include/linux/blkdev.h
80096+++ b/include/linux/blkdev.h
80097@@ -1613,7 +1613,7 @@ struct block_device_operations {
80098 /* this callback is with swap_lock and sometimes page table lock held */
80099 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80100 struct module *owner;
80101-};
80102+} __do_const;
80103
80104 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80105 unsigned long);
80106diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80107index afc1343..9735539 100644
80108--- a/include/linux/blktrace_api.h
80109+++ b/include/linux/blktrace_api.h
80110@@ -25,7 +25,7 @@ struct blk_trace {
80111 struct dentry *dropped_file;
80112 struct dentry *msg_file;
80113 struct list_head running_list;
80114- atomic_t dropped;
80115+ atomic_unchecked_t dropped;
80116 };
80117
80118 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80119diff --git a/include/linux/cache.h b/include/linux/cache.h
80120index 17e7e82..1d7da26 100644
80121--- a/include/linux/cache.h
80122+++ b/include/linux/cache.h
80123@@ -16,6 +16,14 @@
80124 #define __read_mostly
80125 #endif
80126
80127+#ifndef __read_only
80128+#ifdef CONFIG_PAX_KERNEXEC
80129+#error KERNEXEC requires __read_only
80130+#else
80131+#define __read_only __read_mostly
80132+#endif
80133+#endif
80134+
80135 #ifndef ____cacheline_aligned
80136 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80137 #endif
80138diff --git a/include/linux/capability.h b/include/linux/capability.h
80139index aa93e5e..985a1b0 100644
80140--- a/include/linux/capability.h
80141+++ b/include/linux/capability.h
80142@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80143 extern bool capable(int cap);
80144 extern bool ns_capable(struct user_namespace *ns, int cap);
80145 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80146+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80147 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80148+extern bool capable_nolog(int cap);
80149+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80150
80151 /* audit system wants to get cap info from files as well */
80152 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80153
80154+extern int is_privileged_binary(const struct dentry *dentry);
80155+
80156 #endif /* !_LINUX_CAPABILITY_H */
80157diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80158index 8609d57..86e4d79 100644
80159--- a/include/linux/cdrom.h
80160+++ b/include/linux/cdrom.h
80161@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80162
80163 /* driver specifications */
80164 const int capability; /* capability flags */
80165- int n_minors; /* number of active minor devices */
80166 /* handle uniform packets for scsi type devices (scsi,atapi) */
80167 int (*generic_packet) (struct cdrom_device_info *,
80168 struct packet_command *);
80169diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80170index 4ce9056..86caac6 100644
80171--- a/include/linux/cleancache.h
80172+++ b/include/linux/cleancache.h
80173@@ -31,7 +31,7 @@ struct cleancache_ops {
80174 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80175 void (*invalidate_inode)(int, struct cleancache_filekey);
80176 void (*invalidate_fs)(int);
80177-};
80178+} __no_const;
80179
80180 extern struct cleancache_ops *
80181 cleancache_register_ops(struct cleancache_ops *ops);
80182diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80183index d936409..ce9f842 100644
80184--- a/include/linux/clk-provider.h
80185+++ b/include/linux/clk-provider.h
80186@@ -191,6 +191,7 @@ struct clk_ops {
80187 void (*init)(struct clk_hw *hw);
80188 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80189 };
80190+typedef struct clk_ops __no_const clk_ops_no_const;
80191
80192 /**
80193 * struct clk_init_data - holds init data that's common to all clocks and is
80194diff --git a/include/linux/compat.h b/include/linux/compat.h
80195index 7450ca2..a824b81 100644
80196--- a/include/linux/compat.h
80197+++ b/include/linux/compat.h
80198@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80199 compat_size_t __user *len_ptr);
80200
80201 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80202-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80203+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80204 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80205 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80206 compat_ssize_t msgsz, int msgflg);
80207@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80208 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80209 compat_ulong_t addr, compat_ulong_t data);
80210 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80211- compat_long_t addr, compat_long_t data);
80212+ compat_ulong_t addr, compat_ulong_t data);
80213
80214 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80215 /*
80216diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80217index d1a5582..4424efa 100644
80218--- a/include/linux/compiler-gcc4.h
80219+++ b/include/linux/compiler-gcc4.h
80220@@ -39,9 +39,34 @@
80221 # define __compiletime_warning(message) __attribute__((warning(message)))
80222 # define __compiletime_error(message) __attribute__((error(message)))
80223 #endif /* __CHECKER__ */
80224+
80225+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80226+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80227+#define __bos0(ptr) __bos((ptr), 0)
80228+#define __bos1(ptr) __bos((ptr), 1)
80229 #endif /* GCC_VERSION >= 40300 */
80230
80231 #if GCC_VERSION >= 40500
80232+
80233+#ifdef RANDSTRUCT_PLUGIN
80234+#define __randomize_layout __attribute__((randomize_layout))
80235+#define __no_randomize_layout __attribute__((no_randomize_layout))
80236+#endif
80237+
80238+#ifdef CONSTIFY_PLUGIN
80239+#define __no_const __attribute__((no_const))
80240+#define __do_const __attribute__((do_const))
80241+#endif
80242+
80243+#ifdef SIZE_OVERFLOW_PLUGIN
80244+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80245+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80246+#endif
80247+
80248+#ifdef LATENT_ENTROPY_PLUGIN
80249+#define __latent_entropy __attribute__((latent_entropy))
80250+#endif
80251+
80252 /*
80253 * Mark a position in code as unreachable. This can be used to
80254 * suppress control flow warnings after asm blocks that transfer
80255diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80256index c8c5659..d09f2ad 100644
80257--- a/include/linux/compiler-gcc5.h
80258+++ b/include/linux/compiler-gcc5.h
80259@@ -28,6 +28,28 @@
80260 # define __compiletime_error(message) __attribute__((error(message)))
80261 #endif /* __CHECKER__ */
80262
80263+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80264+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80265+#define __bos0(ptr) __bos((ptr), 0)
80266+#define __bos1(ptr) __bos((ptr), 1)
80267+
80268+#ifdef CONSTIFY_PLUGIN
80269+#error not yet
80270+#define __no_const __attribute__((no_const))
80271+#define __do_const __attribute__((do_const))
80272+#endif
80273+
80274+#ifdef SIZE_OVERFLOW_PLUGIN
80275+#error not yet
80276+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80277+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80278+#endif
80279+
80280+#ifdef LATENT_ENTROPY_PLUGIN
80281+#error not yet
80282+#define __latent_entropy __attribute__((latent_entropy))
80283+#endif
80284+
80285 /*
80286 * Mark a position in code as unreachable. This can be used to
80287 * suppress control flow warnings after asm blocks that transfer
80288diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80289index fa6a314..752a6ef 100644
80290--- a/include/linux/compiler.h
80291+++ b/include/linux/compiler.h
80292@@ -5,11 +5,14 @@
80293
80294 #ifdef __CHECKER__
80295 # define __user __attribute__((noderef, address_space(1)))
80296+# define __force_user __force __user
80297 # define __kernel __attribute__((address_space(0)))
80298+# define __force_kernel __force __kernel
80299 # define __safe __attribute__((safe))
80300 # define __force __attribute__((force))
80301 # define __nocast __attribute__((nocast))
80302 # define __iomem __attribute__((noderef, address_space(2)))
80303+# define __force_iomem __force __iomem
80304 # define __must_hold(x) __attribute__((context(x,1,1)))
80305 # define __acquires(x) __attribute__((context(x,0,1)))
80306 # define __releases(x) __attribute__((context(x,1,0)))
80307@@ -17,20 +20,37 @@
80308 # define __release(x) __context__(x,-1)
80309 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80310 # define __percpu __attribute__((noderef, address_space(3)))
80311+# define __force_percpu __force __percpu
80312 #ifdef CONFIG_SPARSE_RCU_POINTER
80313 # define __rcu __attribute__((noderef, address_space(4)))
80314+# define __force_rcu __force __rcu
80315 #else
80316 # define __rcu
80317+# define __force_rcu
80318 #endif
80319 extern void __chk_user_ptr(const volatile void __user *);
80320 extern void __chk_io_ptr(const volatile void __iomem *);
80321 #else
80322-# define __user
80323-# define __kernel
80324+# ifdef CHECKER_PLUGIN
80325+//# define __user
80326+//# define __force_user
80327+//# define __kernel
80328+//# define __force_kernel
80329+# else
80330+# ifdef STRUCTLEAK_PLUGIN
80331+# define __user __attribute__((user))
80332+# else
80333+# define __user
80334+# endif
80335+# define __force_user
80336+# define __kernel
80337+# define __force_kernel
80338+# endif
80339 # define __safe
80340 # define __force
80341 # define __nocast
80342 # define __iomem
80343+# define __force_iomem
80344 # define __chk_user_ptr(x) (void)0
80345 # define __chk_io_ptr(x) (void)0
80346 # define __builtin_warning(x, y...) (1)
80347@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80348 # define __release(x) (void)0
80349 # define __cond_lock(x,c) (c)
80350 # define __percpu
80351+# define __force_percpu
80352 # define __rcu
80353+# define __force_rcu
80354 #endif
80355
80356 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80357@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80358 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80359 {
80360 switch (size) {
80361- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80362- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80363- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80364+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80365+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80366+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80367 #ifdef CONFIG_64BIT
80368- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80369+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80370 #endif
80371 default:
80372 barrier();
80373- __builtin_memcpy((void *)res, (const void *)p, size);
80374+ __builtin_memcpy(res, (const void *)p, size);
80375 data_access_exceeds_word_size();
80376 barrier();
80377 }
80378 }
80379
80380-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80381+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80382 {
80383 switch (size) {
80384- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80385- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80386- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80387+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80388+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80389+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80390 #ifdef CONFIG_64BIT
80391- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80392+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80393 #endif
80394 default:
80395 barrier();
80396- __builtin_memcpy((void *)p, (const void *)res, size);
80397+ __builtin_memcpy((void *)p, res, size);
80398 data_access_exceeds_word_size();
80399 barrier();
80400 }
80401@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80402 # define __attribute_const__ /* unimplemented */
80403 #endif
80404
80405+#ifndef __randomize_layout
80406+# define __randomize_layout
80407+#endif
80408+
80409+#ifndef __no_randomize_layout
80410+# define __no_randomize_layout
80411+#endif
80412+
80413+#ifndef __no_const
80414+# define __no_const
80415+#endif
80416+
80417+#ifndef __do_const
80418+# define __do_const
80419+#endif
80420+
80421+#ifndef __size_overflow
80422+# define __size_overflow(...)
80423+#endif
80424+
80425+#ifndef __intentional_overflow
80426+# define __intentional_overflow(...)
80427+#endif
80428+
80429+#ifndef __latent_entropy
80430+# define __latent_entropy
80431+#endif
80432+
80433 /*
80434 * Tell gcc if a function is cold. The compiler will assume any path
80435 * directly leading to the call is unlikely.
80436@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80437 #define __cold
80438 #endif
80439
80440+#ifndef __alloc_size
80441+#define __alloc_size(...)
80442+#endif
80443+
80444+#ifndef __bos
80445+#define __bos(ptr, arg)
80446+#endif
80447+
80448+#ifndef __bos0
80449+#define __bos0(ptr)
80450+#endif
80451+
80452+#ifndef __bos1
80453+#define __bos1(ptr)
80454+#endif
80455+
80456 /* Simple shorthand for a section definition */
80457 #ifndef __section
80458 # define __section(S) __attribute__ ((__section__(#S)))
80459@@ -462,8 +528,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80460 */
80461 #define __ACCESS_ONCE(x) ({ \
80462 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80463- (volatile typeof(x) *)&(x); })
80464+ (volatile const typeof(x) *)&(x); })
80465 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80466+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80467
80468 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80469 #ifdef CONFIG_KPROBES
80470diff --git a/include/linux/completion.h b/include/linux/completion.h
80471index 5d5aaae..0ea9b84 100644
80472--- a/include/linux/completion.h
80473+++ b/include/linux/completion.h
80474@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80475
80476 extern void wait_for_completion(struct completion *);
80477 extern void wait_for_completion_io(struct completion *);
80478-extern int wait_for_completion_interruptible(struct completion *x);
80479-extern int wait_for_completion_killable(struct completion *x);
80480+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80481+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80482 extern unsigned long wait_for_completion_timeout(struct completion *x,
80483- unsigned long timeout);
80484+ unsigned long timeout) __intentional_overflow(-1);
80485 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80486- unsigned long timeout);
80487+ unsigned long timeout) __intentional_overflow(-1);
80488 extern long wait_for_completion_interruptible_timeout(
80489- struct completion *x, unsigned long timeout);
80490+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80491 extern long wait_for_completion_killable_timeout(
80492- struct completion *x, unsigned long timeout);
80493+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80494 extern bool try_wait_for_completion(struct completion *x);
80495 extern bool completion_done(struct completion *x);
80496
80497diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80498index 34025df..d94bbbc 100644
80499--- a/include/linux/configfs.h
80500+++ b/include/linux/configfs.h
80501@@ -125,7 +125,7 @@ struct configfs_attribute {
80502 const char *ca_name;
80503 struct module *ca_owner;
80504 umode_t ca_mode;
80505-};
80506+} __do_const;
80507
80508 /*
80509 * Users often need to create attribute structures for their configurable
80510diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80511index 4d078ce..c970f4d 100644
80512--- a/include/linux/cpufreq.h
80513+++ b/include/linux/cpufreq.h
80514@@ -206,6 +206,7 @@ struct global_attr {
80515 ssize_t (*store)(struct kobject *a, struct attribute *b,
80516 const char *c, size_t count);
80517 };
80518+typedef struct global_attr __no_const global_attr_no_const;
80519
80520 #define define_one_global_ro(_name) \
80521 static struct global_attr _name = \
80522@@ -277,7 +278,7 @@ struct cpufreq_driver {
80523 bool boost_supported;
80524 bool boost_enabled;
80525 int (*set_boost)(int state);
80526-};
80527+} __do_const;
80528
80529 /* flags */
80530 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80531diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80532index ab70f3b..3ef7771 100644
80533--- a/include/linux/cpuidle.h
80534+++ b/include/linux/cpuidle.h
80535@@ -50,7 +50,8 @@ struct cpuidle_state {
80536 int index);
80537
80538 int (*enter_dead) (struct cpuidle_device *dev, int index);
80539-};
80540+} __do_const;
80541+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80542
80543 /* Idle State Flags */
80544 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80545@@ -206,7 +207,7 @@ struct cpuidle_governor {
80546 void (*reflect) (struct cpuidle_device *dev, int index);
80547
80548 struct module *owner;
80549-};
80550+} __do_const;
80551
80552 #ifdef CONFIG_CPU_IDLE
80553 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80554diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80555index b950e9d..63810aa 100644
80556--- a/include/linux/cpumask.h
80557+++ b/include/linux/cpumask.h
80558@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80559 }
80560
80561 /* Valid inputs for n are -1 and 0. */
80562-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80563+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80564 {
80565 return n+1;
80566 }
80567
80568-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80569+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80570 {
80571 return n+1;
80572 }
80573
80574-static inline unsigned int cpumask_next_and(int n,
80575+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80576 const struct cpumask *srcp,
80577 const struct cpumask *andp)
80578 {
80579@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80580 *
80581 * Returns >= nr_cpu_ids if no further cpus set.
80582 */
80583-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80584+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80585 {
80586 /* -1 is a legal arg here. */
80587 if (n != -1)
80588@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80589 *
80590 * Returns >= nr_cpu_ids if no further cpus unset.
80591 */
80592-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80593+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80594 {
80595 /* -1 is a legal arg here. */
80596 if (n != -1)
80597@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80598 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80599 }
80600
80601-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80602+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80603 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80604 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80605
80606@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80607 * cpumask_weight - Count of bits in *srcp
80608 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80609 */
80610-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80611+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80612 {
80613 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80614 }
80615diff --git a/include/linux/cred.h b/include/linux/cred.h
80616index 2fb2ca2..d6a3340 100644
80617--- a/include/linux/cred.h
80618+++ b/include/linux/cred.h
80619@@ -35,7 +35,7 @@ struct group_info {
80620 int nblocks;
80621 kgid_t small_block[NGROUPS_SMALL];
80622 kgid_t *blocks[0];
80623-};
80624+} __randomize_layout;
80625
80626 /**
80627 * get_group_info - Get a reference to a group info structure
80628@@ -137,7 +137,7 @@ struct cred {
80629 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80630 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80631 struct rcu_head rcu; /* RCU deletion hook */
80632-};
80633+} __randomize_layout;
80634
80635 extern void __put_cred(struct cred *);
80636 extern void exit_creds(struct task_struct *);
80637@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80638 static inline void validate_process_creds(void)
80639 {
80640 }
80641+static inline void validate_task_creds(struct task_struct *task)
80642+{
80643+}
80644 #endif
80645
80646 /**
80647@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80648
80649 #define task_uid(task) (task_cred_xxx((task), uid))
80650 #define task_euid(task) (task_cred_xxx((task), euid))
80651+#define task_securebits(task) (task_cred_xxx((task), securebits))
80652
80653 #define current_cred_xxx(xxx) \
80654 ({ \
80655diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80656index 9c8776d..8c526c2 100644
80657--- a/include/linux/crypto.h
80658+++ b/include/linux/crypto.h
80659@@ -626,7 +626,7 @@ struct cipher_tfm {
80660 const u8 *key, unsigned int keylen);
80661 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80662 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80663-};
80664+} __no_const;
80665
80666 struct hash_tfm {
80667 int (*init)(struct hash_desc *desc);
80668@@ -647,13 +647,13 @@ struct compress_tfm {
80669 int (*cot_decompress)(struct crypto_tfm *tfm,
80670 const u8 *src, unsigned int slen,
80671 u8 *dst, unsigned int *dlen);
80672-};
80673+} __no_const;
80674
80675 struct rng_tfm {
80676 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80677 unsigned int dlen);
80678 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80679-};
80680+} __no_const;
80681
80682 #define crt_ablkcipher crt_u.ablkcipher
80683 #define crt_aead crt_u.aead
80684diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80685index 653589e..4ef254a 100644
80686--- a/include/linux/ctype.h
80687+++ b/include/linux/ctype.h
80688@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80689 * Fast implementation of tolower() for internal usage. Do not use in your
80690 * code.
80691 */
80692-static inline char _tolower(const char c)
80693+static inline unsigned char _tolower(const unsigned char c)
80694 {
80695 return c | 0x20;
80696 }
80697diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80698index 5a81398..6bbee30 100644
80699--- a/include/linux/dcache.h
80700+++ b/include/linux/dcache.h
80701@@ -123,6 +123,9 @@ struct dentry {
80702 unsigned long d_time; /* used by d_revalidate */
80703 void *d_fsdata; /* fs-specific data */
80704
80705+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80706+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
80707+#endif
80708 struct list_head d_lru; /* LRU list */
80709 struct list_head d_child; /* child of parent list */
80710 struct list_head d_subdirs; /* our children */
80711@@ -133,7 +136,7 @@ struct dentry {
80712 struct hlist_node d_alias; /* inode alias list */
80713 struct rcu_head d_rcu;
80714 } d_u;
80715-};
80716+} __randomize_layout;
80717
80718 /*
80719 * dentry->d_lock spinlock nesting subclasses:
80720diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80721index 7925bf0..d5143d2 100644
80722--- a/include/linux/decompress/mm.h
80723+++ b/include/linux/decompress/mm.h
80724@@ -77,7 +77,7 @@ static void free(void *where)
80725 * warnings when not needed (indeed large_malloc / large_free are not
80726 * needed by inflate */
80727
80728-#define malloc(a) kmalloc(a, GFP_KERNEL)
80729+#define malloc(a) kmalloc((a), GFP_KERNEL)
80730 #define free(a) kfree(a)
80731
80732 #define large_malloc(a) vmalloc(a)
80733diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80734index ce447f0..83c66bd 100644
80735--- a/include/linux/devfreq.h
80736+++ b/include/linux/devfreq.h
80737@@ -114,7 +114,7 @@ struct devfreq_governor {
80738 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80739 int (*event_handler)(struct devfreq *devfreq,
80740 unsigned int event, void *data);
80741-};
80742+} __do_const;
80743
80744 /**
80745 * struct devfreq - Device devfreq structure
80746diff --git a/include/linux/device.h b/include/linux/device.h
80747index fb50673..ec0b35b 100644
80748--- a/include/linux/device.h
80749+++ b/include/linux/device.h
80750@@ -311,7 +311,7 @@ struct subsys_interface {
80751 struct list_head node;
80752 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80753 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80754-};
80755+} __do_const;
80756
80757 int subsys_interface_register(struct subsys_interface *sif);
80758 void subsys_interface_unregister(struct subsys_interface *sif);
80759@@ -507,7 +507,7 @@ struct device_type {
80760 void (*release)(struct device *dev);
80761
80762 const struct dev_pm_ops *pm;
80763-};
80764+} __do_const;
80765
80766 /* interface for exporting device attributes */
80767 struct device_attribute {
80768@@ -517,11 +517,12 @@ struct device_attribute {
80769 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80770 const char *buf, size_t count);
80771 };
80772+typedef struct device_attribute __no_const device_attribute_no_const;
80773
80774 struct dev_ext_attribute {
80775 struct device_attribute attr;
80776 void *var;
80777-};
80778+} __do_const;
80779
80780 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80781 char *buf);
80782diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80783index c3007cb..43efc8c 100644
80784--- a/include/linux/dma-mapping.h
80785+++ b/include/linux/dma-mapping.h
80786@@ -60,7 +60,7 @@ struct dma_map_ops {
80787 u64 (*get_required_mask)(struct device *dev);
80788 #endif
80789 int is_phys;
80790-};
80791+} __do_const;
80792
80793 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80794
80795diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80796index 40cd75e..38572a9 100644
80797--- a/include/linux/dmaengine.h
80798+++ b/include/linux/dmaengine.h
80799@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
80800 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80801 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80802
80803-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80804+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80805 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80806-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80807+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80808 struct dma_pinned_list *pinned_list, struct page *page,
80809 unsigned int offset, size_t len);
80810
80811diff --git a/include/linux/efi.h b/include/linux/efi.h
80812index 0238d61..34a758f 100644
80813--- a/include/linux/efi.h
80814+++ b/include/linux/efi.h
80815@@ -1054,6 +1054,7 @@ struct efivar_operations {
80816 efi_set_variable_nonblocking_t *set_variable_nonblocking;
80817 efi_query_variable_store_t *query_variable_store;
80818 };
80819+typedef struct efivar_operations __no_const efivar_operations_no_const;
80820
80821 struct efivars {
80822 /*
80823diff --git a/include/linux/elf.h b/include/linux/elf.h
80824index 20fa8d8..3d0dd18 100644
80825--- a/include/linux/elf.h
80826+++ b/include/linux/elf.h
80827@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
80828 #define elf_note elf32_note
80829 #define elf_addr_t Elf32_Off
80830 #define Elf_Half Elf32_Half
80831+#define elf_dyn Elf32_Dyn
80832
80833 #else
80834
80835@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
80836 #define elf_note elf64_note
80837 #define elf_addr_t Elf64_Off
80838 #define Elf_Half Elf64_Half
80839+#define elf_dyn Elf64_Dyn
80840
80841 #endif
80842
80843diff --git a/include/linux/err.h b/include/linux/err.h
80844index a729120..6ede2c9 100644
80845--- a/include/linux/err.h
80846+++ b/include/linux/err.h
80847@@ -20,12 +20,12 @@
80848
80849 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80850
80851-static inline void * __must_check ERR_PTR(long error)
80852+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80853 {
80854 return (void *) error;
80855 }
80856
80857-static inline long __must_check PTR_ERR(__force const void *ptr)
80858+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80859 {
80860 return (long) ptr;
80861 }
80862diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80863index 36f49c4..a2a1f4c 100644
80864--- a/include/linux/extcon.h
80865+++ b/include/linux/extcon.h
80866@@ -135,7 +135,7 @@ struct extcon_dev {
80867 /* /sys/class/extcon/.../mutually_exclusive/... */
80868 struct attribute_group attr_g_muex;
80869 struct attribute **attrs_muex;
80870- struct device_attribute *d_attrs_muex;
80871+ device_attribute_no_const *d_attrs_muex;
80872 };
80873
80874 /**
80875diff --git a/include/linux/fb.h b/include/linux/fb.h
80876index 09bb7a1..d98870a 100644
80877--- a/include/linux/fb.h
80878+++ b/include/linux/fb.h
80879@@ -305,7 +305,7 @@ struct fb_ops {
80880 /* called at KDB enter and leave time to prepare the console */
80881 int (*fb_debug_enter)(struct fb_info *info);
80882 int (*fb_debug_leave)(struct fb_info *info);
80883-};
80884+} __do_const;
80885
80886 #ifdef CONFIG_FB_TILEBLITTING
80887 #define FB_TILE_CURSOR_NONE 0
80888diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
80889index 230f87b..1fd0485 100644
80890--- a/include/linux/fdtable.h
80891+++ b/include/linux/fdtable.h
80892@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
80893 void put_files_struct(struct files_struct *fs);
80894 void reset_files_struct(struct files_struct *);
80895 int unshare_files(struct files_struct **);
80896-struct files_struct *dup_fd(struct files_struct *, int *);
80897+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
80898 void do_close_on_exec(struct files_struct *);
80899 int iterate_fd(struct files_struct *, unsigned,
80900 int (*)(const void *, struct file *, unsigned),
80901diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
80902index 8293262..2b3b8bd 100644
80903--- a/include/linux/frontswap.h
80904+++ b/include/linux/frontswap.h
80905@@ -11,7 +11,7 @@ struct frontswap_ops {
80906 int (*load)(unsigned, pgoff_t, struct page *);
80907 void (*invalidate_page)(unsigned, pgoff_t);
80908 void (*invalidate_area)(unsigned);
80909-};
80910+} __no_const;
80911
80912 extern bool frontswap_enabled;
80913 extern struct frontswap_ops *
80914diff --git a/include/linux/fs.h b/include/linux/fs.h
80915index 42efe13..72d42ee 100644
80916--- a/include/linux/fs.h
80917+++ b/include/linux/fs.h
80918@@ -413,7 +413,7 @@ struct address_space {
80919 spinlock_t private_lock; /* for use by the address_space */
80920 struct list_head private_list; /* ditto */
80921 void *private_data; /* ditto */
80922-} __attribute__((aligned(sizeof(long))));
80923+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
80924 /*
80925 * On most architectures that alignment is already the case; but
80926 * must be enforced here for CRIS, to let the least significant bit
80927@@ -456,7 +456,7 @@ struct block_device {
80928 int bd_fsfreeze_count;
80929 /* Mutex for freeze */
80930 struct mutex bd_fsfreeze_mutex;
80931-};
80932+} __randomize_layout;
80933
80934 /*
80935 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
80936@@ -642,7 +642,7 @@ struct inode {
80937 #endif
80938
80939 void *i_private; /* fs or device private pointer */
80940-};
80941+} __randomize_layout;
80942
80943 static inline int inode_unhashed(struct inode *inode)
80944 {
80945@@ -837,7 +837,7 @@ struct file {
80946 struct list_head f_tfile_llink;
80947 #endif /* #ifdef CONFIG_EPOLL */
80948 struct address_space *f_mapping;
80949-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
80950+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
80951
80952 struct file_handle {
80953 __u32 handle_bytes;
80954@@ -962,7 +962,7 @@ struct file_lock {
80955 int state; /* state of grant or error if -ve */
80956 } afs;
80957 } fl_u;
80958-};
80959+} __randomize_layout;
80960
80961 /* The following constant reflects the upper bound of the file/locking space */
80962 #ifndef OFFSET_MAX
80963@@ -1305,7 +1305,7 @@ struct super_block {
80964 * Indicates how deep in a filesystem stack this SB is
80965 */
80966 int s_stack_depth;
80967-};
80968+} __randomize_layout;
80969
80970 extern struct timespec current_fs_time(struct super_block *sb);
80971
80972@@ -1536,7 +1536,8 @@ struct file_operations {
80973 long (*fallocate)(struct file *file, int mode, loff_t offset,
80974 loff_t len);
80975 void (*show_fdinfo)(struct seq_file *m, struct file *f);
80976-};
80977+} __do_const __randomize_layout;
80978+typedef struct file_operations __no_const file_operations_no_const;
80979
80980 struct inode_operations {
80981 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
80982@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
80983 return !IS_DEADDIR(inode);
80984 }
80985
80986+static inline bool is_sidechannel_device(const struct inode *inode)
80987+{
80988+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
80989+ umode_t mode = inode->i_mode;
80990+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
80991+#else
80992+ return false;
80993+#endif
80994+}
80995+
80996 #endif /* _LINUX_FS_H */
80997diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
80998index 0efc3e6..fd23610 100644
80999--- a/include/linux/fs_struct.h
81000+++ b/include/linux/fs_struct.h
81001@@ -6,13 +6,13 @@
81002 #include <linux/seqlock.h>
81003
81004 struct fs_struct {
81005- int users;
81006+ atomic_t users;
81007 spinlock_t lock;
81008 seqcount_t seq;
81009 int umask;
81010 int in_exec;
81011 struct path root, pwd;
81012-};
81013+} __randomize_layout;
81014
81015 extern struct kmem_cache *fs_cachep;
81016
81017diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81018index 7714849..a4a5c7a 100644
81019--- a/include/linux/fscache-cache.h
81020+++ b/include/linux/fscache-cache.h
81021@@ -113,7 +113,7 @@ struct fscache_operation {
81022 fscache_operation_release_t release;
81023 };
81024
81025-extern atomic_t fscache_op_debug_id;
81026+extern atomic_unchecked_t fscache_op_debug_id;
81027 extern void fscache_op_work_func(struct work_struct *work);
81028
81029 extern void fscache_enqueue_operation(struct fscache_operation *);
81030@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81031 INIT_WORK(&op->work, fscache_op_work_func);
81032 atomic_set(&op->usage, 1);
81033 op->state = FSCACHE_OP_ST_INITIALISED;
81034- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81035+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81036 op->processor = processor;
81037 op->release = release;
81038 INIT_LIST_HEAD(&op->pend_link);
81039diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81040index 115bb81..e7b812b 100644
81041--- a/include/linux/fscache.h
81042+++ b/include/linux/fscache.h
81043@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81044 * - this is mandatory for any object that may have data
81045 */
81046 void (*now_uncached)(void *cookie_netfs_data);
81047-};
81048+} __do_const;
81049
81050 /*
81051 * fscache cached network filesystem type
81052diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81053index 7ee1774..72505b8 100644
81054--- a/include/linux/fsnotify.h
81055+++ b/include/linux/fsnotify.h
81056@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81057 struct inode *inode = file_inode(file);
81058 __u32 mask = FS_ACCESS;
81059
81060+ if (is_sidechannel_device(inode))
81061+ return;
81062+
81063 if (S_ISDIR(inode->i_mode))
81064 mask |= FS_ISDIR;
81065
81066@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81067 struct inode *inode = file_inode(file);
81068 __u32 mask = FS_MODIFY;
81069
81070+ if (is_sidechannel_device(inode))
81071+ return;
81072+
81073 if (S_ISDIR(inode->i_mode))
81074 mask |= FS_ISDIR;
81075
81076@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81077 */
81078 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81079 {
81080- return kstrdup(name, GFP_KERNEL);
81081+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81082 }
81083
81084 /*
81085diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81086index ec274e0..e678159 100644
81087--- a/include/linux/genhd.h
81088+++ b/include/linux/genhd.h
81089@@ -194,7 +194,7 @@ struct gendisk {
81090 struct kobject *slave_dir;
81091
81092 struct timer_rand_state *random;
81093- atomic_t sync_io; /* RAID */
81094+ atomic_unchecked_t sync_io; /* RAID */
81095 struct disk_events *ev;
81096 #ifdef CONFIG_BLK_DEV_INTEGRITY
81097 struct blk_integrity *integrity;
81098@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81099 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81100
81101 /* drivers/char/random.c */
81102-extern void add_disk_randomness(struct gendisk *disk);
81103+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81104 extern void rand_initialize_disk(struct gendisk *disk);
81105
81106 static inline sector_t get_start_sect(struct block_device *bdev)
81107diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81108index 667c311..abac2a7 100644
81109--- a/include/linux/genl_magic_func.h
81110+++ b/include/linux/genl_magic_func.h
81111@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81112 },
81113
81114 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81115-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81116+static struct genl_ops ZZZ_genl_ops[] = {
81117 #include GENL_MAGIC_INCLUDE_FILE
81118 };
81119
81120diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81121index b840e3b..aeaeef9 100644
81122--- a/include/linux/gfp.h
81123+++ b/include/linux/gfp.h
81124@@ -34,6 +34,13 @@ struct vm_area_struct;
81125 #define ___GFP_NO_KSWAPD 0x400000u
81126 #define ___GFP_OTHER_NODE 0x800000u
81127 #define ___GFP_WRITE 0x1000000u
81128+
81129+#ifdef CONFIG_PAX_USERCOPY_SLABS
81130+#define ___GFP_USERCOPY 0x2000000u
81131+#else
81132+#define ___GFP_USERCOPY 0
81133+#endif
81134+
81135 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81136
81137 /*
81138@@ -90,6 +97,7 @@ struct vm_area_struct;
81139 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81140 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81141 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81142+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81143
81144 /*
81145 * This may seem redundant, but it's a way of annotating false positives vs.
81146@@ -97,7 +105,7 @@ struct vm_area_struct;
81147 */
81148 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81149
81150-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81151+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81152 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81153
81154 /* This equals 0, but use constants in case they ever change */
81155@@ -152,6 +160,8 @@ struct vm_area_struct;
81156 /* 4GB DMA on some platforms */
81157 #define GFP_DMA32 __GFP_DMA32
81158
81159+#define GFP_USERCOPY __GFP_USERCOPY
81160+
81161 /* Convert GFP flags to their corresponding migrate type */
81162 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81163 {
81164diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81165new file mode 100644
81166index 0000000..91858e4
81167--- /dev/null
81168+++ b/include/linux/gracl.h
81169@@ -0,0 +1,342 @@
81170+#ifndef GR_ACL_H
81171+#define GR_ACL_H
81172+
81173+#include <linux/grdefs.h>
81174+#include <linux/resource.h>
81175+#include <linux/capability.h>
81176+#include <linux/dcache.h>
81177+#include <asm/resource.h>
81178+
81179+/* Major status information */
81180+
81181+#define GR_VERSION "grsecurity 3.1"
81182+#define GRSECURITY_VERSION 0x3100
81183+
81184+enum {
81185+ GR_SHUTDOWN = 0,
81186+ GR_ENABLE = 1,
81187+ GR_SPROLE = 2,
81188+ GR_OLDRELOAD = 3,
81189+ GR_SEGVMOD = 4,
81190+ GR_STATUS = 5,
81191+ GR_UNSPROLE = 6,
81192+ GR_PASSSET = 7,
81193+ GR_SPROLEPAM = 8,
81194+ GR_RELOAD = 9,
81195+};
81196+
81197+/* Password setup definitions
81198+ * kernel/grhash.c */
81199+enum {
81200+ GR_PW_LEN = 128,
81201+ GR_SALT_LEN = 16,
81202+ GR_SHA_LEN = 32,
81203+};
81204+
81205+enum {
81206+ GR_SPROLE_LEN = 64,
81207+};
81208+
81209+enum {
81210+ GR_NO_GLOB = 0,
81211+ GR_REG_GLOB,
81212+ GR_CREATE_GLOB
81213+};
81214+
81215+#define GR_NLIMITS 32
81216+
81217+/* Begin Data Structures */
81218+
81219+struct sprole_pw {
81220+ unsigned char *rolename;
81221+ unsigned char salt[GR_SALT_LEN];
81222+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81223+};
81224+
81225+struct name_entry {
81226+ __u32 key;
81227+ u64 inode;
81228+ dev_t device;
81229+ char *name;
81230+ __u16 len;
81231+ __u8 deleted;
81232+ struct name_entry *prev;
81233+ struct name_entry *next;
81234+};
81235+
81236+struct inodev_entry {
81237+ struct name_entry *nentry;
81238+ struct inodev_entry *prev;
81239+ struct inodev_entry *next;
81240+};
81241+
81242+struct acl_role_db {
81243+ struct acl_role_label **r_hash;
81244+ __u32 r_size;
81245+};
81246+
81247+struct inodev_db {
81248+ struct inodev_entry **i_hash;
81249+ __u32 i_size;
81250+};
81251+
81252+struct name_db {
81253+ struct name_entry **n_hash;
81254+ __u32 n_size;
81255+};
81256+
81257+struct crash_uid {
81258+ uid_t uid;
81259+ unsigned long expires;
81260+};
81261+
81262+struct gr_hash_struct {
81263+ void **table;
81264+ void **nametable;
81265+ void *first;
81266+ __u32 table_size;
81267+ __u32 used_size;
81268+ int type;
81269+};
81270+
81271+/* Userspace Grsecurity ACL data structures */
81272+
81273+struct acl_subject_label {
81274+ char *filename;
81275+ u64 inode;
81276+ dev_t device;
81277+ __u32 mode;
81278+ kernel_cap_t cap_mask;
81279+ kernel_cap_t cap_lower;
81280+ kernel_cap_t cap_invert_audit;
81281+
81282+ struct rlimit res[GR_NLIMITS];
81283+ __u32 resmask;
81284+
81285+ __u8 user_trans_type;
81286+ __u8 group_trans_type;
81287+ uid_t *user_transitions;
81288+ gid_t *group_transitions;
81289+ __u16 user_trans_num;
81290+ __u16 group_trans_num;
81291+
81292+ __u32 sock_families[2];
81293+ __u32 ip_proto[8];
81294+ __u32 ip_type;
81295+ struct acl_ip_label **ips;
81296+ __u32 ip_num;
81297+ __u32 inaddr_any_override;
81298+
81299+ __u32 crashes;
81300+ unsigned long expires;
81301+
81302+ struct acl_subject_label *parent_subject;
81303+ struct gr_hash_struct *hash;
81304+ struct acl_subject_label *prev;
81305+ struct acl_subject_label *next;
81306+
81307+ struct acl_object_label **obj_hash;
81308+ __u32 obj_hash_size;
81309+ __u16 pax_flags;
81310+};
81311+
81312+struct role_allowed_ip {
81313+ __u32 addr;
81314+ __u32 netmask;
81315+
81316+ struct role_allowed_ip *prev;
81317+ struct role_allowed_ip *next;
81318+};
81319+
81320+struct role_transition {
81321+ char *rolename;
81322+
81323+ struct role_transition *prev;
81324+ struct role_transition *next;
81325+};
81326+
81327+struct acl_role_label {
81328+ char *rolename;
81329+ uid_t uidgid;
81330+ __u16 roletype;
81331+
81332+ __u16 auth_attempts;
81333+ unsigned long expires;
81334+
81335+ struct acl_subject_label *root_label;
81336+ struct gr_hash_struct *hash;
81337+
81338+ struct acl_role_label *prev;
81339+ struct acl_role_label *next;
81340+
81341+ struct role_transition *transitions;
81342+ struct role_allowed_ip *allowed_ips;
81343+ uid_t *domain_children;
81344+ __u16 domain_child_num;
81345+
81346+ umode_t umask;
81347+
81348+ struct acl_subject_label **subj_hash;
81349+ __u32 subj_hash_size;
81350+};
81351+
81352+struct user_acl_role_db {
81353+ struct acl_role_label **r_table;
81354+ __u32 num_pointers; /* Number of allocations to track */
81355+ __u32 num_roles; /* Number of roles */
81356+ __u32 num_domain_children; /* Number of domain children */
81357+ __u32 num_subjects; /* Number of subjects */
81358+ __u32 num_objects; /* Number of objects */
81359+};
81360+
81361+struct acl_object_label {
81362+ char *filename;
81363+ u64 inode;
81364+ dev_t device;
81365+ __u32 mode;
81366+
81367+ struct acl_subject_label *nested;
81368+ struct acl_object_label *globbed;
81369+
81370+ /* next two structures not used */
81371+
81372+ struct acl_object_label *prev;
81373+ struct acl_object_label *next;
81374+};
81375+
81376+struct acl_ip_label {
81377+ char *iface;
81378+ __u32 addr;
81379+ __u32 netmask;
81380+ __u16 low, high;
81381+ __u8 mode;
81382+ __u32 type;
81383+ __u32 proto[8];
81384+
81385+ /* next two structures not used */
81386+
81387+ struct acl_ip_label *prev;
81388+ struct acl_ip_label *next;
81389+};
81390+
81391+struct gr_arg {
81392+ struct user_acl_role_db role_db;
81393+ unsigned char pw[GR_PW_LEN];
81394+ unsigned char salt[GR_SALT_LEN];
81395+ unsigned char sum[GR_SHA_LEN];
81396+ unsigned char sp_role[GR_SPROLE_LEN];
81397+ struct sprole_pw *sprole_pws;
81398+ dev_t segv_device;
81399+ u64 segv_inode;
81400+ uid_t segv_uid;
81401+ __u16 num_sprole_pws;
81402+ __u16 mode;
81403+};
81404+
81405+struct gr_arg_wrapper {
81406+ struct gr_arg *arg;
81407+ __u32 version;
81408+ __u32 size;
81409+};
81410+
81411+struct subject_map {
81412+ struct acl_subject_label *user;
81413+ struct acl_subject_label *kernel;
81414+ struct subject_map *prev;
81415+ struct subject_map *next;
81416+};
81417+
81418+struct acl_subj_map_db {
81419+ struct subject_map **s_hash;
81420+ __u32 s_size;
81421+};
81422+
81423+struct gr_policy_state {
81424+ struct sprole_pw **acl_special_roles;
81425+ __u16 num_sprole_pws;
81426+ struct acl_role_label *kernel_role;
81427+ struct acl_role_label *role_list;
81428+ struct acl_role_label *default_role;
81429+ struct acl_role_db acl_role_set;
81430+ struct acl_subj_map_db subj_map_set;
81431+ struct name_db name_set;
81432+ struct inodev_db inodev_set;
81433+};
81434+
81435+struct gr_alloc_state {
81436+ unsigned long alloc_stack_next;
81437+ unsigned long alloc_stack_size;
81438+ void **alloc_stack;
81439+};
81440+
81441+struct gr_reload_state {
81442+ struct gr_policy_state oldpolicy;
81443+ struct gr_alloc_state oldalloc;
81444+ struct gr_policy_state newpolicy;
81445+ struct gr_alloc_state newalloc;
81446+ struct gr_policy_state *oldpolicy_ptr;
81447+ struct gr_alloc_state *oldalloc_ptr;
81448+ unsigned char oldmode;
81449+};
81450+
81451+/* End Data Structures Section */
81452+
81453+/* Hash functions generated by empirical testing by Brad Spengler
81454+ Makes good use of the low bits of the inode. Generally 0-1 times
81455+ in loop for successful match. 0-3 for unsuccessful match.
81456+ Shift/add algorithm with modulus of table size and an XOR*/
81457+
81458+static __inline__ unsigned int
81459+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81460+{
81461+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81462+}
81463+
81464+ static __inline__ unsigned int
81465+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81466+{
81467+ return ((const unsigned long)userp % sz);
81468+}
81469+
81470+static __inline__ unsigned int
81471+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81472+{
81473+ unsigned int rem;
81474+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81475+ return rem;
81476+}
81477+
81478+static __inline__ unsigned int
81479+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81480+{
81481+ return full_name_hash((const unsigned char *)name, len) % sz;
81482+}
81483+
81484+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81485+ subj = NULL; \
81486+ iter = 0; \
81487+ while (iter < role->subj_hash_size) { \
81488+ if (subj == NULL) \
81489+ subj = role->subj_hash[iter]; \
81490+ if (subj == NULL) { \
81491+ iter++; \
81492+ continue; \
81493+ }
81494+
81495+#define FOR_EACH_SUBJECT_END(subj,iter) \
81496+ subj = subj->next; \
81497+ if (subj == NULL) \
81498+ iter++; \
81499+ }
81500+
81501+
81502+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81503+ subj = role->hash->first; \
81504+ while (subj != NULL) {
81505+
81506+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81507+ subj = subj->next; \
81508+ }
81509+
81510+#endif
81511+
81512diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81513new file mode 100644
81514index 0000000..af64092
81515--- /dev/null
81516+++ b/include/linux/gracl_compat.h
81517@@ -0,0 +1,156 @@
81518+#ifndef GR_ACL_COMPAT_H
81519+#define GR_ACL_COMPAT_H
81520+
81521+#include <linux/resource.h>
81522+#include <asm/resource.h>
81523+
81524+struct sprole_pw_compat {
81525+ compat_uptr_t rolename;
81526+ unsigned char salt[GR_SALT_LEN];
81527+ unsigned char sum[GR_SHA_LEN];
81528+};
81529+
81530+struct gr_hash_struct_compat {
81531+ compat_uptr_t table;
81532+ compat_uptr_t nametable;
81533+ compat_uptr_t first;
81534+ __u32 table_size;
81535+ __u32 used_size;
81536+ int type;
81537+};
81538+
81539+struct acl_subject_label_compat {
81540+ compat_uptr_t filename;
81541+ compat_u64 inode;
81542+ __u32 device;
81543+ __u32 mode;
81544+ kernel_cap_t cap_mask;
81545+ kernel_cap_t cap_lower;
81546+ kernel_cap_t cap_invert_audit;
81547+
81548+ struct compat_rlimit res[GR_NLIMITS];
81549+ __u32 resmask;
81550+
81551+ __u8 user_trans_type;
81552+ __u8 group_trans_type;
81553+ compat_uptr_t user_transitions;
81554+ compat_uptr_t group_transitions;
81555+ __u16 user_trans_num;
81556+ __u16 group_trans_num;
81557+
81558+ __u32 sock_families[2];
81559+ __u32 ip_proto[8];
81560+ __u32 ip_type;
81561+ compat_uptr_t ips;
81562+ __u32 ip_num;
81563+ __u32 inaddr_any_override;
81564+
81565+ __u32 crashes;
81566+ compat_ulong_t expires;
81567+
81568+ compat_uptr_t parent_subject;
81569+ compat_uptr_t hash;
81570+ compat_uptr_t prev;
81571+ compat_uptr_t next;
81572+
81573+ compat_uptr_t obj_hash;
81574+ __u32 obj_hash_size;
81575+ __u16 pax_flags;
81576+};
81577+
81578+struct role_allowed_ip_compat {
81579+ __u32 addr;
81580+ __u32 netmask;
81581+
81582+ compat_uptr_t prev;
81583+ compat_uptr_t next;
81584+};
81585+
81586+struct role_transition_compat {
81587+ compat_uptr_t rolename;
81588+
81589+ compat_uptr_t prev;
81590+ compat_uptr_t next;
81591+};
81592+
81593+struct acl_role_label_compat {
81594+ compat_uptr_t rolename;
81595+ uid_t uidgid;
81596+ __u16 roletype;
81597+
81598+ __u16 auth_attempts;
81599+ compat_ulong_t expires;
81600+
81601+ compat_uptr_t root_label;
81602+ compat_uptr_t hash;
81603+
81604+ compat_uptr_t prev;
81605+ compat_uptr_t next;
81606+
81607+ compat_uptr_t transitions;
81608+ compat_uptr_t allowed_ips;
81609+ compat_uptr_t domain_children;
81610+ __u16 domain_child_num;
81611+
81612+ umode_t umask;
81613+
81614+ compat_uptr_t subj_hash;
81615+ __u32 subj_hash_size;
81616+};
81617+
81618+struct user_acl_role_db_compat {
81619+ compat_uptr_t r_table;
81620+ __u32 num_pointers;
81621+ __u32 num_roles;
81622+ __u32 num_domain_children;
81623+ __u32 num_subjects;
81624+ __u32 num_objects;
81625+};
81626+
81627+struct acl_object_label_compat {
81628+ compat_uptr_t filename;
81629+ compat_u64 inode;
81630+ __u32 device;
81631+ __u32 mode;
81632+
81633+ compat_uptr_t nested;
81634+ compat_uptr_t globbed;
81635+
81636+ compat_uptr_t prev;
81637+ compat_uptr_t next;
81638+};
81639+
81640+struct acl_ip_label_compat {
81641+ compat_uptr_t iface;
81642+ __u32 addr;
81643+ __u32 netmask;
81644+ __u16 low, high;
81645+ __u8 mode;
81646+ __u32 type;
81647+ __u32 proto[8];
81648+
81649+ compat_uptr_t prev;
81650+ compat_uptr_t next;
81651+};
81652+
81653+struct gr_arg_compat {
81654+ struct user_acl_role_db_compat role_db;
81655+ unsigned char pw[GR_PW_LEN];
81656+ unsigned char salt[GR_SALT_LEN];
81657+ unsigned char sum[GR_SHA_LEN];
81658+ unsigned char sp_role[GR_SPROLE_LEN];
81659+ compat_uptr_t sprole_pws;
81660+ __u32 segv_device;
81661+ compat_u64 segv_inode;
81662+ uid_t segv_uid;
81663+ __u16 num_sprole_pws;
81664+ __u16 mode;
81665+};
81666+
81667+struct gr_arg_wrapper_compat {
81668+ compat_uptr_t arg;
81669+ __u32 version;
81670+ __u32 size;
81671+};
81672+
81673+#endif
81674diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81675new file mode 100644
81676index 0000000..323ecf2
81677--- /dev/null
81678+++ b/include/linux/gralloc.h
81679@@ -0,0 +1,9 @@
81680+#ifndef __GRALLOC_H
81681+#define __GRALLOC_H
81682+
81683+void acl_free_all(void);
81684+int acl_alloc_stack_init(unsigned long size);
81685+void *acl_alloc(unsigned long len);
81686+void *acl_alloc_num(unsigned long num, unsigned long len);
81687+
81688+#endif
81689diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81690new file mode 100644
81691index 0000000..be66033
81692--- /dev/null
81693+++ b/include/linux/grdefs.h
81694@@ -0,0 +1,140 @@
81695+#ifndef GRDEFS_H
81696+#define GRDEFS_H
81697+
81698+/* Begin grsecurity status declarations */
81699+
81700+enum {
81701+ GR_READY = 0x01,
81702+ GR_STATUS_INIT = 0x00 // disabled state
81703+};
81704+
81705+/* Begin ACL declarations */
81706+
81707+/* Role flags */
81708+
81709+enum {
81710+ GR_ROLE_USER = 0x0001,
81711+ GR_ROLE_GROUP = 0x0002,
81712+ GR_ROLE_DEFAULT = 0x0004,
81713+ GR_ROLE_SPECIAL = 0x0008,
81714+ GR_ROLE_AUTH = 0x0010,
81715+ GR_ROLE_NOPW = 0x0020,
81716+ GR_ROLE_GOD = 0x0040,
81717+ GR_ROLE_LEARN = 0x0080,
81718+ GR_ROLE_TPE = 0x0100,
81719+ GR_ROLE_DOMAIN = 0x0200,
81720+ GR_ROLE_PAM = 0x0400,
81721+ GR_ROLE_PERSIST = 0x0800
81722+};
81723+
81724+/* ACL Subject and Object mode flags */
81725+enum {
81726+ GR_DELETED = 0x80000000
81727+};
81728+
81729+/* ACL Object-only mode flags */
81730+enum {
81731+ GR_READ = 0x00000001,
81732+ GR_APPEND = 0x00000002,
81733+ GR_WRITE = 0x00000004,
81734+ GR_EXEC = 0x00000008,
81735+ GR_FIND = 0x00000010,
81736+ GR_INHERIT = 0x00000020,
81737+ GR_SETID = 0x00000040,
81738+ GR_CREATE = 0x00000080,
81739+ GR_DELETE = 0x00000100,
81740+ GR_LINK = 0x00000200,
81741+ GR_AUDIT_READ = 0x00000400,
81742+ GR_AUDIT_APPEND = 0x00000800,
81743+ GR_AUDIT_WRITE = 0x00001000,
81744+ GR_AUDIT_EXEC = 0x00002000,
81745+ GR_AUDIT_FIND = 0x00004000,
81746+ GR_AUDIT_INHERIT= 0x00008000,
81747+ GR_AUDIT_SETID = 0x00010000,
81748+ GR_AUDIT_CREATE = 0x00020000,
81749+ GR_AUDIT_DELETE = 0x00040000,
81750+ GR_AUDIT_LINK = 0x00080000,
81751+ GR_PTRACERD = 0x00100000,
81752+ GR_NOPTRACE = 0x00200000,
81753+ GR_SUPPRESS = 0x00400000,
81754+ GR_NOLEARN = 0x00800000,
81755+ GR_INIT_TRANSFER= 0x01000000
81756+};
81757+
81758+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81759+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81760+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81761+
81762+/* ACL subject-only mode flags */
81763+enum {
81764+ GR_KILL = 0x00000001,
81765+ GR_VIEW = 0x00000002,
81766+ GR_PROTECTED = 0x00000004,
81767+ GR_LEARN = 0x00000008,
81768+ GR_OVERRIDE = 0x00000010,
81769+ /* just a placeholder, this mode is only used in userspace */
81770+ GR_DUMMY = 0x00000020,
81771+ GR_PROTSHM = 0x00000040,
81772+ GR_KILLPROC = 0x00000080,
81773+ GR_KILLIPPROC = 0x00000100,
81774+ /* just a placeholder, this mode is only used in userspace */
81775+ GR_NOTROJAN = 0x00000200,
81776+ GR_PROTPROCFD = 0x00000400,
81777+ GR_PROCACCT = 0x00000800,
81778+ GR_RELAXPTRACE = 0x00001000,
81779+ //GR_NESTED = 0x00002000,
81780+ GR_INHERITLEARN = 0x00004000,
81781+ GR_PROCFIND = 0x00008000,
81782+ GR_POVERRIDE = 0x00010000,
81783+ GR_KERNELAUTH = 0x00020000,
81784+ GR_ATSECURE = 0x00040000,
81785+ GR_SHMEXEC = 0x00080000
81786+};
81787+
81788+enum {
81789+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81790+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81791+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81792+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
81793+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
81794+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
81795+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
81796+ GR_PAX_DISABLE_MPROTECT = 0x0400,
81797+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
81798+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
81799+};
81800+
81801+enum {
81802+ GR_ID_USER = 0x01,
81803+ GR_ID_GROUP = 0x02,
81804+};
81805+
81806+enum {
81807+ GR_ID_ALLOW = 0x01,
81808+ GR_ID_DENY = 0x02,
81809+};
81810+
81811+#define GR_CRASH_RES 31
81812+#define GR_UIDTABLE_MAX 500
81813+
81814+/* begin resource learning section */
81815+enum {
81816+ GR_RLIM_CPU_BUMP = 60,
81817+ GR_RLIM_FSIZE_BUMP = 50000,
81818+ GR_RLIM_DATA_BUMP = 10000,
81819+ GR_RLIM_STACK_BUMP = 1000,
81820+ GR_RLIM_CORE_BUMP = 10000,
81821+ GR_RLIM_RSS_BUMP = 500000,
81822+ GR_RLIM_NPROC_BUMP = 1,
81823+ GR_RLIM_NOFILE_BUMP = 5,
81824+ GR_RLIM_MEMLOCK_BUMP = 50000,
81825+ GR_RLIM_AS_BUMP = 500000,
81826+ GR_RLIM_LOCKS_BUMP = 2,
81827+ GR_RLIM_SIGPENDING_BUMP = 5,
81828+ GR_RLIM_MSGQUEUE_BUMP = 10000,
81829+ GR_RLIM_NICE_BUMP = 1,
81830+ GR_RLIM_RTPRIO_BUMP = 1,
81831+ GR_RLIM_RTTIME_BUMP = 1000000
81832+};
81833+
81834+#endif
81835diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
81836new file mode 100644
81837index 0000000..fb1de5d
81838--- /dev/null
81839+++ b/include/linux/grinternal.h
81840@@ -0,0 +1,230 @@
81841+#ifndef __GRINTERNAL_H
81842+#define __GRINTERNAL_H
81843+
81844+#ifdef CONFIG_GRKERNSEC
81845+
81846+#include <linux/fs.h>
81847+#include <linux/mnt_namespace.h>
81848+#include <linux/nsproxy.h>
81849+#include <linux/gracl.h>
81850+#include <linux/grdefs.h>
81851+#include <linux/grmsg.h>
81852+
81853+void gr_add_learn_entry(const char *fmt, ...)
81854+ __attribute__ ((format (printf, 1, 2)));
81855+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
81856+ const struct vfsmount *mnt);
81857+__u32 gr_check_create(const struct dentry *new_dentry,
81858+ const struct dentry *parent,
81859+ const struct vfsmount *mnt, const __u32 mode);
81860+int gr_check_protected_task(const struct task_struct *task);
81861+__u32 to_gr_audit(const __u32 reqmode);
81862+int gr_set_acls(const int type);
81863+int gr_acl_is_enabled(void);
81864+char gr_roletype_to_char(void);
81865+
81866+void gr_handle_alertkill(struct task_struct *task);
81867+char *gr_to_filename(const struct dentry *dentry,
81868+ const struct vfsmount *mnt);
81869+char *gr_to_filename1(const struct dentry *dentry,
81870+ const struct vfsmount *mnt);
81871+char *gr_to_filename2(const struct dentry *dentry,
81872+ const struct vfsmount *mnt);
81873+char *gr_to_filename3(const struct dentry *dentry,
81874+ const struct vfsmount *mnt);
81875+
81876+extern int grsec_enable_ptrace_readexec;
81877+extern int grsec_enable_harden_ptrace;
81878+extern int grsec_enable_link;
81879+extern int grsec_enable_fifo;
81880+extern int grsec_enable_execve;
81881+extern int grsec_enable_shm;
81882+extern int grsec_enable_execlog;
81883+extern int grsec_enable_signal;
81884+extern int grsec_enable_audit_ptrace;
81885+extern int grsec_enable_forkfail;
81886+extern int grsec_enable_time;
81887+extern int grsec_enable_rofs;
81888+extern int grsec_deny_new_usb;
81889+extern int grsec_enable_chroot_shmat;
81890+extern int grsec_enable_chroot_mount;
81891+extern int grsec_enable_chroot_double;
81892+extern int grsec_enable_chroot_pivot;
81893+extern int grsec_enable_chroot_chdir;
81894+extern int grsec_enable_chroot_chmod;
81895+extern int grsec_enable_chroot_mknod;
81896+extern int grsec_enable_chroot_fchdir;
81897+extern int grsec_enable_chroot_nice;
81898+extern int grsec_enable_chroot_execlog;
81899+extern int grsec_enable_chroot_caps;
81900+extern int grsec_enable_chroot_rename;
81901+extern int grsec_enable_chroot_sysctl;
81902+extern int grsec_enable_chroot_unix;
81903+extern int grsec_enable_symlinkown;
81904+extern kgid_t grsec_symlinkown_gid;
81905+extern int grsec_enable_tpe;
81906+extern kgid_t grsec_tpe_gid;
81907+extern int grsec_enable_tpe_all;
81908+extern int grsec_enable_tpe_invert;
81909+extern int grsec_enable_socket_all;
81910+extern kgid_t grsec_socket_all_gid;
81911+extern int grsec_enable_socket_client;
81912+extern kgid_t grsec_socket_client_gid;
81913+extern int grsec_enable_socket_server;
81914+extern kgid_t grsec_socket_server_gid;
81915+extern kgid_t grsec_audit_gid;
81916+extern int grsec_enable_group;
81917+extern int grsec_enable_log_rwxmaps;
81918+extern int grsec_enable_mount;
81919+extern int grsec_enable_chdir;
81920+extern int grsec_resource_logging;
81921+extern int grsec_enable_blackhole;
81922+extern int grsec_lastack_retries;
81923+extern int grsec_enable_brute;
81924+extern int grsec_enable_harden_ipc;
81925+extern int grsec_lock;
81926+
81927+extern spinlock_t grsec_alert_lock;
81928+extern unsigned long grsec_alert_wtime;
81929+extern unsigned long grsec_alert_fyet;
81930+
81931+extern spinlock_t grsec_audit_lock;
81932+
81933+extern rwlock_t grsec_exec_file_lock;
81934+
81935+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
81936+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
81937+ (tsk)->exec_file->f_path.mnt) : "/")
81938+
81939+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
81940+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
81941+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81942+
81943+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
81944+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
81945+ (tsk)->exec_file->f_path.mnt) : "/")
81946+
81947+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
81948+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
81949+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
81950+
81951+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
81952+
81953+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
81954+
81955+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
81956+{
81957+ if (file1 && file2) {
81958+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
81959+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
81960+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
81961+ return true;
81962+ }
81963+
81964+ return false;
81965+}
81966+
81967+#define GR_CHROOT_CAPS {{ \
81968+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
81969+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
81970+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
81971+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
81972+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
81973+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
81974+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
81975+
81976+#define security_learn(normal_msg,args...) \
81977+({ \
81978+ read_lock(&grsec_exec_file_lock); \
81979+ gr_add_learn_entry(normal_msg "\n", ## args); \
81980+ read_unlock(&grsec_exec_file_lock); \
81981+})
81982+
81983+enum {
81984+ GR_DO_AUDIT,
81985+ GR_DONT_AUDIT,
81986+ /* used for non-audit messages that we shouldn't kill the task on */
81987+ GR_DONT_AUDIT_GOOD
81988+};
81989+
81990+enum {
81991+ GR_TTYSNIFF,
81992+ GR_RBAC,
81993+ GR_RBAC_STR,
81994+ GR_STR_RBAC,
81995+ GR_RBAC_MODE2,
81996+ GR_RBAC_MODE3,
81997+ GR_FILENAME,
81998+ GR_SYSCTL_HIDDEN,
81999+ GR_NOARGS,
82000+ GR_ONE_INT,
82001+ GR_ONE_INT_TWO_STR,
82002+ GR_ONE_STR,
82003+ GR_STR_INT,
82004+ GR_TWO_STR_INT,
82005+ GR_TWO_INT,
82006+ GR_TWO_U64,
82007+ GR_THREE_INT,
82008+ GR_FIVE_INT_TWO_STR,
82009+ GR_TWO_STR,
82010+ GR_THREE_STR,
82011+ GR_FOUR_STR,
82012+ GR_STR_FILENAME,
82013+ GR_FILENAME_STR,
82014+ GR_FILENAME_TWO_INT,
82015+ GR_FILENAME_TWO_INT_STR,
82016+ GR_TEXTREL,
82017+ GR_PTRACE,
82018+ GR_RESOURCE,
82019+ GR_CAP,
82020+ GR_SIG,
82021+ GR_SIG2,
82022+ GR_CRASH1,
82023+ GR_CRASH2,
82024+ GR_PSACCT,
82025+ GR_RWXMAP,
82026+ GR_RWXMAPVMA
82027+};
82028+
82029+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82030+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82031+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82032+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82033+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82034+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82035+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82036+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82037+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82038+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82039+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82040+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82041+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82042+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82043+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82044+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82045+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82046+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82047+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82048+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82049+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82050+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82051+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82052+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82053+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82054+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82055+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82056+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82057+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82058+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82059+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82060+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82061+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82062+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82063+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82064+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82065+
82066+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82067+
82068+#endif
82069+
82070+#endif
82071diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82072new file mode 100644
82073index 0000000..26ef560
82074--- /dev/null
82075+++ b/include/linux/grmsg.h
82076@@ -0,0 +1,118 @@
82077+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82078+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82079+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82080+#define GR_STOPMOD_MSG "denied modification of module state by "
82081+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82082+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82083+#define GR_IOPERM_MSG "denied use of ioperm() by "
82084+#define GR_IOPL_MSG "denied use of iopl() by "
82085+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82086+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82087+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82088+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82089+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82090+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82091+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82092+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82093+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82094+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82095+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82096+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82097+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82098+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82099+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82100+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82101+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82102+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82103+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82104+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82105+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82106+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82107+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82108+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82109+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82110+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82111+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82112+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82113+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82114+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82115+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82116+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82117+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82118+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82119+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82120+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82121+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82122+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82123+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82124+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82125+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82126+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82127+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82128+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82129+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82130+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82131+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82132+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82133+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82134+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82135+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82136+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82137+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82138+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82139+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82140+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82141+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82142+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82143+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82144+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82145+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82146+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82147+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82148+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82149+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82150+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82151+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82152+#define GR_NICE_CHROOT_MSG "denied priority change by "
82153+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82154+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82155+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82156+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82157+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82158+#define GR_TIME_MSG "time set by "
82159+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82160+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82161+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82162+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82163+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82164+#define GR_BIND_MSG "denied bind() by "
82165+#define GR_CONNECT_MSG "denied connect() by "
82166+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82167+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82168+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82169+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82170+#define GR_CAP_ACL_MSG "use of %s denied for "
82171+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82172+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82173+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82174+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82175+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82176+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82177+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82178+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82179+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82180+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82181+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82182+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82183+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82184+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82185+#define GR_VM86_MSG "denied use of vm86 by "
82186+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82187+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82188+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82189+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82190+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82191+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82192+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82193+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82194+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
82195diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82196new file mode 100644
82197index 0000000..63c1850
82198--- /dev/null
82199+++ b/include/linux/grsecurity.h
82200@@ -0,0 +1,250 @@
82201+#ifndef GR_SECURITY_H
82202+#define GR_SECURITY_H
82203+#include <linux/fs.h>
82204+#include <linux/fs_struct.h>
82205+#include <linux/binfmts.h>
82206+#include <linux/gracl.h>
82207+
82208+/* notify of brain-dead configs */
82209+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82210+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82211+#endif
82212+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82213+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82214+#endif
82215+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82216+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82217+#endif
82218+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82219+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82220+#endif
82221+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82222+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82223+#endif
82224+
82225+int gr_handle_new_usb(void);
82226+
82227+void gr_handle_brute_attach(int dumpable);
82228+void gr_handle_brute_check(void);
82229+void gr_handle_kernel_exploit(void);
82230+
82231+char gr_roletype_to_char(void);
82232+
82233+int gr_proc_is_restricted(void);
82234+
82235+int gr_acl_enable_at_secure(void);
82236+
82237+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82238+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82239+
82240+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82241+
82242+void gr_del_task_from_ip_table(struct task_struct *p);
82243+
82244+int gr_pid_is_chrooted(struct task_struct *p);
82245+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82246+int gr_handle_chroot_nice(void);
82247+int gr_handle_chroot_sysctl(const int op);
82248+int gr_handle_chroot_setpriority(struct task_struct *p,
82249+ const int niceval);
82250+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82251+int gr_chroot_fhandle(void);
82252+int gr_handle_chroot_chroot(const struct dentry *dentry,
82253+ const struct vfsmount *mnt);
82254+void gr_handle_chroot_chdir(const struct path *path);
82255+int gr_handle_chroot_chmod(const struct dentry *dentry,
82256+ const struct vfsmount *mnt, const int mode);
82257+int gr_handle_chroot_mknod(const struct dentry *dentry,
82258+ const struct vfsmount *mnt, const int mode);
82259+int gr_handle_chroot_mount(const struct dentry *dentry,
82260+ const struct vfsmount *mnt,
82261+ const char *dev_name);
82262+int gr_handle_chroot_pivot(void);
82263+int gr_handle_chroot_unix(const pid_t pid);
82264+
82265+int gr_handle_rawio(const struct inode *inode);
82266+
82267+void gr_handle_ioperm(void);
82268+void gr_handle_iopl(void);
82269+void gr_handle_msr_write(void);
82270+
82271+umode_t gr_acl_umask(void);
82272+
82273+int gr_tpe_allow(const struct file *file);
82274+
82275+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82276+void gr_clear_chroot_entries(struct task_struct *task);
82277+
82278+void gr_log_forkfail(const int retval);
82279+void gr_log_timechange(void);
82280+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82281+void gr_log_chdir(const struct dentry *dentry,
82282+ const struct vfsmount *mnt);
82283+void gr_log_chroot_exec(const struct dentry *dentry,
82284+ const struct vfsmount *mnt);
82285+void gr_log_remount(const char *devname, const int retval);
82286+void gr_log_unmount(const char *devname, const int retval);
82287+void gr_log_mount(const char *from, struct path *to, const int retval);
82288+void gr_log_textrel(struct vm_area_struct *vma);
82289+void gr_log_ptgnustack(struct file *file);
82290+void gr_log_rwxmmap(struct file *file);
82291+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82292+
82293+int gr_handle_follow_link(const struct inode *parent,
82294+ const struct inode *inode,
82295+ const struct dentry *dentry,
82296+ const struct vfsmount *mnt);
82297+int gr_handle_fifo(const struct dentry *dentry,
82298+ const struct vfsmount *mnt,
82299+ const struct dentry *dir, const int flag,
82300+ const int acc_mode);
82301+int gr_handle_hardlink(const struct dentry *dentry,
82302+ const struct vfsmount *mnt,
82303+ struct inode *inode,
82304+ const int mode, const struct filename *to);
82305+
82306+int gr_is_capable(const int cap);
82307+int gr_is_capable_nolog(const int cap);
82308+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82309+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82310+
82311+void gr_copy_label(struct task_struct *tsk);
82312+void gr_handle_crash(struct task_struct *task, const int sig);
82313+int gr_handle_signal(const struct task_struct *p, const int sig);
82314+int gr_check_crash_uid(const kuid_t uid);
82315+int gr_check_protected_task(const struct task_struct *task);
82316+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82317+int gr_acl_handle_mmap(const struct file *file,
82318+ const unsigned long prot);
82319+int gr_acl_handle_mprotect(const struct file *file,
82320+ const unsigned long prot);
82321+int gr_check_hidden_task(const struct task_struct *tsk);
82322+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82323+ const struct vfsmount *mnt);
82324+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82325+ const struct vfsmount *mnt);
82326+__u32 gr_acl_handle_access(const struct dentry *dentry,
82327+ const struct vfsmount *mnt, const int fmode);
82328+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82329+ const struct vfsmount *mnt, umode_t *mode);
82330+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82331+ const struct vfsmount *mnt);
82332+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82333+ const struct vfsmount *mnt);
82334+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82335+ const struct vfsmount *mnt);
82336+int gr_handle_ptrace(struct task_struct *task, const long request);
82337+int gr_handle_proc_ptrace(struct task_struct *task);
82338+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82339+ const struct vfsmount *mnt);
82340+int gr_check_crash_exec(const struct file *filp);
82341+int gr_acl_is_enabled(void);
82342+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82343+ const kgid_t gid);
82344+int gr_set_proc_label(const struct dentry *dentry,
82345+ const struct vfsmount *mnt,
82346+ const int unsafe_flags);
82347+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82348+ const struct vfsmount *mnt);
82349+__u32 gr_acl_handle_open(const struct dentry *dentry,
82350+ const struct vfsmount *mnt, int acc_mode);
82351+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82352+ const struct dentry *p_dentry,
82353+ const struct vfsmount *p_mnt,
82354+ int open_flags, int acc_mode, const int imode);
82355+void gr_handle_create(const struct dentry *dentry,
82356+ const struct vfsmount *mnt);
82357+void gr_handle_proc_create(const struct dentry *dentry,
82358+ const struct inode *inode);
82359+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82360+ const struct dentry *parent_dentry,
82361+ const struct vfsmount *parent_mnt,
82362+ const int mode);
82363+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82364+ const struct dentry *parent_dentry,
82365+ const struct vfsmount *parent_mnt);
82366+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82367+ const struct vfsmount *mnt);
82368+void gr_handle_delete(const u64 ino, const dev_t dev);
82369+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82370+ const struct vfsmount *mnt);
82371+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82372+ const struct dentry *parent_dentry,
82373+ const struct vfsmount *parent_mnt,
82374+ const struct filename *from);
82375+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82376+ const struct dentry *parent_dentry,
82377+ const struct vfsmount *parent_mnt,
82378+ const struct dentry *old_dentry,
82379+ const struct vfsmount *old_mnt, const struct filename *to);
82380+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82381+int gr_acl_handle_rename(struct dentry *new_dentry,
82382+ struct dentry *parent_dentry,
82383+ const struct vfsmount *parent_mnt,
82384+ struct dentry *old_dentry,
82385+ struct inode *old_parent_inode,
82386+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82387+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82388+ struct dentry *old_dentry,
82389+ struct dentry *new_dentry,
82390+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82391+__u32 gr_check_link(const struct dentry *new_dentry,
82392+ const struct dentry *parent_dentry,
82393+ const struct vfsmount *parent_mnt,
82394+ const struct dentry *old_dentry,
82395+ const struct vfsmount *old_mnt);
82396+int gr_acl_handle_filldir(const struct file *file, const char *name,
82397+ const unsigned int namelen, const u64 ino);
82398+
82399+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82400+ const struct vfsmount *mnt);
82401+void gr_acl_handle_exit(void);
82402+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82403+int gr_acl_handle_procpidmem(const struct task_struct *task);
82404+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82405+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82406+void gr_audit_ptrace(struct task_struct *task);
82407+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82408+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82409+void gr_put_exec_file(struct task_struct *task);
82410+
82411+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82412+
82413+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82414+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82415+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82416+ struct dentry *newdentry, struct vfsmount *newmnt);
82417+
82418+#ifdef CONFIG_GRKERNSEC_RESLOG
82419+extern void gr_log_resource(const struct task_struct *task, const int res,
82420+ const unsigned long wanted, const int gt);
82421+#else
82422+static inline void gr_log_resource(const struct task_struct *task, const int res,
82423+ const unsigned long wanted, const int gt)
82424+{
82425+}
82426+#endif
82427+
82428+#ifdef CONFIG_GRKERNSEC
82429+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82430+void gr_handle_vm86(void);
82431+void gr_handle_mem_readwrite(u64 from, u64 to);
82432+
82433+void gr_log_badprocpid(const char *entry);
82434+
82435+extern int grsec_enable_dmesg;
82436+extern int grsec_disable_privio;
82437+
82438+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82439+extern kgid_t grsec_proc_gid;
82440+#endif
82441+
82442+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82443+extern int grsec_enable_chroot_findtask;
82444+#endif
82445+#ifdef CONFIG_GRKERNSEC_SETXID
82446+extern int grsec_enable_setxid;
82447+#endif
82448+#endif
82449+
82450+#endif
82451diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82452new file mode 100644
82453index 0000000..e7ffaaf
82454--- /dev/null
82455+++ b/include/linux/grsock.h
82456@@ -0,0 +1,19 @@
82457+#ifndef __GRSOCK_H
82458+#define __GRSOCK_H
82459+
82460+extern void gr_attach_curr_ip(const struct sock *sk);
82461+extern int gr_handle_sock_all(const int family, const int type,
82462+ const int protocol);
82463+extern int gr_handle_sock_server(const struct sockaddr *sck);
82464+extern int gr_handle_sock_server_other(const struct sock *sck);
82465+extern int gr_handle_sock_client(const struct sockaddr *sck);
82466+extern int gr_search_connect(struct socket * sock,
82467+ struct sockaddr_in * addr);
82468+extern int gr_search_bind(struct socket * sock,
82469+ struct sockaddr_in * addr);
82470+extern int gr_search_listen(struct socket * sock);
82471+extern int gr_search_accept(struct socket * sock);
82472+extern int gr_search_socket(const int domain, const int type,
82473+ const int protocol);
82474+
82475+#endif
82476diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82477index 9286a46..373f27f 100644
82478--- a/include/linux/highmem.h
82479+++ b/include/linux/highmem.h
82480@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82481 kunmap_atomic(kaddr);
82482 }
82483
82484+static inline void sanitize_highpage(struct page *page)
82485+{
82486+ void *kaddr;
82487+ unsigned long flags;
82488+
82489+ local_irq_save(flags);
82490+ kaddr = kmap_atomic(page);
82491+ clear_page(kaddr);
82492+ kunmap_atomic(kaddr);
82493+ local_irq_restore(flags);
82494+}
82495+
82496 static inline void zero_user_segments(struct page *page,
82497 unsigned start1, unsigned end1,
82498 unsigned start2, unsigned end2)
82499diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82500index 1c7b89a..7dda400 100644
82501--- a/include/linux/hwmon-sysfs.h
82502+++ b/include/linux/hwmon-sysfs.h
82503@@ -25,7 +25,8 @@
82504 struct sensor_device_attribute{
82505 struct device_attribute dev_attr;
82506 int index;
82507-};
82508+} __do_const;
82509+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82510 #define to_sensor_dev_attr(_dev_attr) \
82511 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82512
82513@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82514 struct device_attribute dev_attr;
82515 u8 index;
82516 u8 nr;
82517-};
82518+} __do_const;
82519+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82520 #define to_sensor_dev_attr_2(_dev_attr) \
82521 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82522
82523diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82524index 7c76959..153e597 100644
82525--- a/include/linux/i2c.h
82526+++ b/include/linux/i2c.h
82527@@ -413,6 +413,7 @@ struct i2c_algorithm {
82528 int (*unreg_slave)(struct i2c_client *client);
82529 #endif
82530 };
82531+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82532
82533 /**
82534 * struct i2c_bus_recovery_info - I2C bus recovery information
82535diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82536index d23c3c2..eb63c81 100644
82537--- a/include/linux/i2o.h
82538+++ b/include/linux/i2o.h
82539@@ -565,7 +565,7 @@ struct i2o_controller {
82540 struct i2o_device *exec; /* Executive */
82541 #if BITS_PER_LONG == 64
82542 spinlock_t context_list_lock; /* lock for context_list */
82543- atomic_t context_list_counter; /* needed for unique contexts */
82544+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82545 struct list_head context_list; /* list of context id's
82546 and pointers */
82547 #endif
82548diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82549index aff7ad8..3942bbd 100644
82550--- a/include/linux/if_pppox.h
82551+++ b/include/linux/if_pppox.h
82552@@ -76,7 +76,7 @@ struct pppox_proto {
82553 int (*ioctl)(struct socket *sock, unsigned int cmd,
82554 unsigned long arg);
82555 struct module *owner;
82556-};
82557+} __do_const;
82558
82559 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82560 extern void unregister_pppox_proto(int proto_num);
82561diff --git a/include/linux/init.h b/include/linux/init.h
82562index 2df8e8d..3e1280d 100644
82563--- a/include/linux/init.h
82564+++ b/include/linux/init.h
82565@@ -37,9 +37,17 @@
82566 * section.
82567 */
82568
82569+#define add_init_latent_entropy __latent_entropy
82570+
82571+#ifdef CONFIG_MEMORY_HOTPLUG
82572+#define add_meminit_latent_entropy
82573+#else
82574+#define add_meminit_latent_entropy __latent_entropy
82575+#endif
82576+
82577 /* These are for everybody (although not all archs will actually
82578 discard it in modules) */
82579-#define __init __section(.init.text) __cold notrace
82580+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82581 #define __initdata __section(.init.data)
82582 #define __initconst __constsection(.init.rodata)
82583 #define __exitdata __section(.exit.data)
82584@@ -100,7 +108,7 @@
82585 #define __cpuexitconst
82586
82587 /* Used for MEMORY_HOTPLUG */
82588-#define __meminit __section(.meminit.text) __cold notrace
82589+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82590 #define __meminitdata __section(.meminit.data)
82591 #define __meminitconst __constsection(.meminit.rodata)
82592 #define __memexit __section(.memexit.text) __exitused __cold notrace
82593diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82594index 3037fc0..c6527ce 100644
82595--- a/include/linux/init_task.h
82596+++ b/include/linux/init_task.h
82597@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82598
82599 #define INIT_TASK_COMM "swapper"
82600
82601+#ifdef CONFIG_X86
82602+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82603+#else
82604+#define INIT_TASK_THREAD_INFO
82605+#endif
82606+
82607 #ifdef CONFIG_RT_MUTEXES
82608 # define INIT_RT_MUTEXES(tsk) \
82609 .pi_waiters = RB_ROOT, \
82610@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82611 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82612 .comm = INIT_TASK_COMM, \
82613 .thread = INIT_THREAD, \
82614+ INIT_TASK_THREAD_INFO \
82615 .fs = &init_fs, \
82616 .files = &init_files, \
82617 .signal = &init_signals, \
82618diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82619index d9b05b5..e5f5b7b 100644
82620--- a/include/linux/interrupt.h
82621+++ b/include/linux/interrupt.h
82622@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82623
82624 struct softirq_action
82625 {
82626- void (*action)(struct softirq_action *);
82627-};
82628+ void (*action)(void);
82629+} __no_const;
82630
82631 asmlinkage void do_softirq(void);
82632 asmlinkage void __do_softirq(void);
82633@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82634 }
82635 #endif
82636
82637-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82638+extern void open_softirq(int nr, void (*action)(void));
82639 extern void softirq_init(void);
82640 extern void __raise_softirq_irqoff(unsigned int nr);
82641
82642diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82643index 38daa45..4de4317 100644
82644--- a/include/linux/iommu.h
82645+++ b/include/linux/iommu.h
82646@@ -147,7 +147,7 @@ struct iommu_ops {
82647
82648 unsigned long pgsize_bitmap;
82649 void *priv;
82650-};
82651+} __do_const;
82652
82653 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82654 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82655diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82656index 2c525022..345b106 100644
82657--- a/include/linux/ioport.h
82658+++ b/include/linux/ioport.h
82659@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82660 int adjust_resource(struct resource *res, resource_size_t start,
82661 resource_size_t size);
82662 resource_size_t resource_alignment(struct resource *res);
82663-static inline resource_size_t resource_size(const struct resource *res)
82664+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82665 {
82666 return res->end - res->start + 1;
82667 }
82668diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82669index 1eee6bc..9cf4912 100644
82670--- a/include/linux/ipc_namespace.h
82671+++ b/include/linux/ipc_namespace.h
82672@@ -60,7 +60,7 @@ struct ipc_namespace {
82673 struct user_namespace *user_ns;
82674
82675 struct ns_common ns;
82676-};
82677+} __randomize_layout;
82678
82679 extern struct ipc_namespace init_ipc_ns;
82680 extern atomic_t nr_ipc_ns;
82681diff --git a/include/linux/irq.h b/include/linux/irq.h
82682index d09ec7a..f373eb5 100644
82683--- a/include/linux/irq.h
82684+++ b/include/linux/irq.h
82685@@ -364,7 +364,8 @@ struct irq_chip {
82686 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82687
82688 unsigned long flags;
82689-};
82690+} __do_const;
82691+typedef struct irq_chip __no_const irq_chip_no_const;
82692
82693 /*
82694 * irq_chip specific flags
82695diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82696index 71d706d..817cdec 100644
82697--- a/include/linux/irqchip/arm-gic.h
82698+++ b/include/linux/irqchip/arm-gic.h
82699@@ -95,7 +95,7 @@
82700
82701 struct device_node;
82702
82703-extern struct irq_chip gic_arch_extn;
82704+extern irq_chip_no_const gic_arch_extn;
82705
82706 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82707 u32 offset, struct device_node *);
82708diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
82709index faf433a..7dcb186 100644
82710--- a/include/linux/irqdesc.h
82711+++ b/include/linux/irqdesc.h
82712@@ -61,7 +61,7 @@ struct irq_desc {
82713 unsigned int irq_count; /* For detecting broken IRQs */
82714 unsigned long last_unhandled; /* Aging timer for unhandled count */
82715 unsigned int irqs_unhandled;
82716- atomic_t threads_handled;
82717+ atomic_unchecked_t threads_handled;
82718 int threads_handled_last;
82719 raw_spinlock_t lock;
82720 struct cpumask *percpu_enabled;
82721diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82722index c367cbd..c9b79e6 100644
82723--- a/include/linux/jiffies.h
82724+++ b/include/linux/jiffies.h
82725@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82726 /*
82727 * Convert various time units to each other:
82728 */
82729-extern unsigned int jiffies_to_msecs(const unsigned long j);
82730-extern unsigned int jiffies_to_usecs(const unsigned long j);
82731+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82732+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82733
82734-static inline u64 jiffies_to_nsecs(const unsigned long j)
82735+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82736 {
82737 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82738 }
82739
82740-extern unsigned long msecs_to_jiffies(const unsigned int m);
82741-extern unsigned long usecs_to_jiffies(const unsigned int u);
82742+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82743+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82744 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82745 extern void jiffies_to_timespec(const unsigned long jiffies,
82746- struct timespec *value);
82747-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82748+ struct timespec *value) __intentional_overflow(-1);
82749+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82750 extern void jiffies_to_timeval(const unsigned long jiffies,
82751 struct timeval *value);
82752
82753diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82754index 6883e19..e854fcb 100644
82755--- a/include/linux/kallsyms.h
82756+++ b/include/linux/kallsyms.h
82757@@ -15,7 +15,8 @@
82758
82759 struct module;
82760
82761-#ifdef CONFIG_KALLSYMS
82762+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82763+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82764 /* Lookup the address for a symbol. Returns 0 if not found. */
82765 unsigned long kallsyms_lookup_name(const char *name);
82766
82767@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82768 /* Stupid that this does nothing, but I didn't create this mess. */
82769 #define __print_symbol(fmt, addr)
82770 #endif /*CONFIG_KALLSYMS*/
82771+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82772+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82773+extern unsigned long kallsyms_lookup_name(const char *name);
82774+extern void __print_symbol(const char *fmt, unsigned long address);
82775+extern int sprint_backtrace(char *buffer, unsigned long address);
82776+extern int sprint_symbol(char *buffer, unsigned long address);
82777+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82778+const char *kallsyms_lookup(unsigned long addr,
82779+ unsigned long *symbolsize,
82780+ unsigned long *offset,
82781+ char **modname, char *namebuf);
82782+extern int kallsyms_lookup_size_offset(unsigned long addr,
82783+ unsigned long *symbolsize,
82784+ unsigned long *offset);
82785+#endif
82786
82787 /* This macro allows us to keep printk typechecking */
82788 static __printf(1, 2)
82789diff --git a/include/linux/kernel.h b/include/linux/kernel.h
82790index 64ce58b..6bcdbfa 100644
82791--- a/include/linux/kernel.h
82792+++ b/include/linux/kernel.h
82793@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
82794 /* Obsolete, do not use. Use kstrto<foo> instead */
82795
82796 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
82797-extern long simple_strtol(const char *,char **,unsigned int);
82798+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
82799 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
82800 extern long long simple_strtoll(const char *,char **,unsigned int);
82801
82802diff --git a/include/linux/key-type.h b/include/linux/key-type.h
82803index ff9f1d3..6712be5 100644
82804--- a/include/linux/key-type.h
82805+++ b/include/linux/key-type.h
82806@@ -152,7 +152,7 @@ struct key_type {
82807 /* internal fields */
82808 struct list_head link; /* link in types list */
82809 struct lock_class_key lock_class; /* key->sem lock class */
82810-};
82811+} __do_const;
82812
82813 extern struct key_type key_type_keyring;
82814
82815diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
82816index e465bb1..19f605fd 100644
82817--- a/include/linux/kgdb.h
82818+++ b/include/linux/kgdb.h
82819@@ -52,7 +52,7 @@ extern int kgdb_connected;
82820 extern int kgdb_io_module_registered;
82821
82822 extern atomic_t kgdb_setting_breakpoint;
82823-extern atomic_t kgdb_cpu_doing_single_step;
82824+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
82825
82826 extern struct task_struct *kgdb_usethread;
82827 extern struct task_struct *kgdb_contthread;
82828@@ -254,7 +254,7 @@ struct kgdb_arch {
82829 void (*correct_hw_break)(void);
82830
82831 void (*enable_nmi)(bool on);
82832-};
82833+} __do_const;
82834
82835 /**
82836 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
82837@@ -279,7 +279,7 @@ struct kgdb_io {
82838 void (*pre_exception) (void);
82839 void (*post_exception) (void);
82840 int is_console;
82841-};
82842+} __do_const;
82843
82844 extern struct kgdb_arch arch_kgdb_ops;
82845
82846diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
82847index e705467..a92471d 100644
82848--- a/include/linux/kmemleak.h
82849+++ b/include/linux/kmemleak.h
82850@@ -27,7 +27,7 @@
82851
82852 extern void kmemleak_init(void) __ref;
82853 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82854- gfp_t gfp) __ref;
82855+ gfp_t gfp) __ref __size_overflow(2);
82856 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
82857 extern void kmemleak_free(const void *ptr) __ref;
82858 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
82859@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
82860 static inline void kmemleak_init(void)
82861 {
82862 }
82863-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
82864+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
82865 gfp_t gfp)
82866 {
82867 }
82868diff --git a/include/linux/kmod.h b/include/linux/kmod.h
82869index 0555cc6..40116ce 100644
82870--- a/include/linux/kmod.h
82871+++ b/include/linux/kmod.h
82872@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
82873 * usually useless though. */
82874 extern __printf(2, 3)
82875 int __request_module(bool wait, const char *name, ...);
82876+extern __printf(3, 4)
82877+int ___request_module(bool wait, char *param_name, const char *name, ...);
82878 #define request_module(mod...) __request_module(true, mod)
82879 #define request_module_nowait(mod...) __request_module(false, mod)
82880 #define try_then_request_module(x, mod...) \
82881@@ -57,6 +59,9 @@ struct subprocess_info {
82882 struct work_struct work;
82883 struct completion *complete;
82884 char *path;
82885+#ifdef CONFIG_GRKERNSEC
82886+ char *origpath;
82887+#endif
82888 char **argv;
82889 char **envp;
82890 int wait;
82891diff --git a/include/linux/kobject.h b/include/linux/kobject.h
82892index 2d61b90..a1d0a13 100644
82893--- a/include/linux/kobject.h
82894+++ b/include/linux/kobject.h
82895@@ -118,7 +118,7 @@ struct kobj_type {
82896 struct attribute **default_attrs;
82897 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
82898 const void *(*namespace)(struct kobject *kobj);
82899-};
82900+} __do_const;
82901
82902 struct kobj_uevent_env {
82903 char *argv[3];
82904@@ -142,6 +142,7 @@ struct kobj_attribute {
82905 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
82906 const char *buf, size_t count);
82907 };
82908+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
82909
82910 extern const struct sysfs_ops kobj_sysfs_ops;
82911
82912@@ -169,7 +170,7 @@ struct kset {
82913 spinlock_t list_lock;
82914 struct kobject kobj;
82915 const struct kset_uevent_ops *uevent_ops;
82916-};
82917+} __randomize_layout;
82918
82919 extern void kset_init(struct kset *kset);
82920 extern int __must_check kset_register(struct kset *kset);
82921diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
82922index df32d25..fb52e27 100644
82923--- a/include/linux/kobject_ns.h
82924+++ b/include/linux/kobject_ns.h
82925@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
82926 const void *(*netlink_ns)(struct sock *sk);
82927 const void *(*initial_ns)(void);
82928 void (*drop_ns)(void *);
82929-};
82930+} __do_const;
82931
82932 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
82933 int kobj_ns_type_registered(enum kobj_ns_type type);
82934diff --git a/include/linux/kref.h b/include/linux/kref.h
82935index 484604d..0f6c5b6 100644
82936--- a/include/linux/kref.h
82937+++ b/include/linux/kref.h
82938@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
82939 static inline int kref_sub(struct kref *kref, unsigned int count,
82940 void (*release)(struct kref *kref))
82941 {
82942- WARN_ON(release == NULL);
82943+ BUG_ON(release == NULL);
82944
82945 if (atomic_sub_and_test((int) count, &kref->refcount)) {
82946 release(kref);
82947diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
82948index 26f1060..bafc04a 100644
82949--- a/include/linux/kvm_host.h
82950+++ b/include/linux/kvm_host.h
82951@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
82952 {
82953 }
82954 #endif
82955-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82956+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
82957 struct module *module);
82958 void kvm_exit(void);
82959
82960@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
82961 struct kvm_guest_debug *dbg);
82962 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
82963
82964-int kvm_arch_init(void *opaque);
82965+int kvm_arch_init(const void *opaque);
82966 void kvm_arch_exit(void);
82967
82968 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
82969diff --git a/include/linux/libata.h b/include/linux/libata.h
82970index 91f705d..24be831 100644
82971--- a/include/linux/libata.h
82972+++ b/include/linux/libata.h
82973@@ -979,7 +979,7 @@ struct ata_port_operations {
82974 * fields must be pointers.
82975 */
82976 const struct ata_port_operations *inherits;
82977-};
82978+} __do_const;
82979
82980 struct ata_port_info {
82981 unsigned long flags;
82982diff --git a/include/linux/linkage.h b/include/linux/linkage.h
82983index a6a42dd..6c5ebce 100644
82984--- a/include/linux/linkage.h
82985+++ b/include/linux/linkage.h
82986@@ -36,6 +36,7 @@
82987 #endif
82988
82989 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
82990+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
82991 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
82992
82993 /*
82994diff --git a/include/linux/list.h b/include/linux/list.h
82995index feb773c..98f3075 100644
82996--- a/include/linux/list.h
82997+++ b/include/linux/list.h
82998@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
82999 extern void list_del(struct list_head *entry);
83000 #endif
83001
83002+extern void __pax_list_add(struct list_head *new,
83003+ struct list_head *prev,
83004+ struct list_head *next);
83005+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83006+{
83007+ __pax_list_add(new, head, head->next);
83008+}
83009+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83010+{
83011+ __pax_list_add(new, head->prev, head);
83012+}
83013+extern void pax_list_del(struct list_head *entry);
83014+
83015 /**
83016 * list_replace - replace old entry by new one
83017 * @old : the element to be replaced
83018@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83019 INIT_LIST_HEAD(entry);
83020 }
83021
83022+extern void pax_list_del_init(struct list_head *entry);
83023+
83024 /**
83025 * list_move - delete from one list and add as another's head
83026 * @list: the entry to move
83027diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83028index 4bfde0e..d6e2e09 100644
83029--- a/include/linux/lockref.h
83030+++ b/include/linux/lockref.h
83031@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83032 return ((int)l->count < 0);
83033 }
83034
83035+static inline unsigned int __lockref_read(struct lockref *lockref)
83036+{
83037+ return lockref->count;
83038+}
83039+
83040+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83041+{
83042+ lockref->count = count;
83043+}
83044+
83045+static inline void __lockref_inc(struct lockref *lockref)
83046+{
83047+
83048+#ifdef CONFIG_PAX_REFCOUNT
83049+ atomic_inc((atomic_t *)&lockref->count);
83050+#else
83051+ lockref->count++;
83052+#endif
83053+
83054+}
83055+
83056+static inline void __lockref_dec(struct lockref *lockref)
83057+{
83058+
83059+#ifdef CONFIG_PAX_REFCOUNT
83060+ atomic_dec((atomic_t *)&lockref->count);
83061+#else
83062+ lockref->count--;
83063+#endif
83064+
83065+}
83066+
83067 #endif /* __LINUX_LOCKREF_H */
83068diff --git a/include/linux/math64.h b/include/linux/math64.h
83069index c45c089..298841c 100644
83070--- a/include/linux/math64.h
83071+++ b/include/linux/math64.h
83072@@ -15,7 +15,7 @@
83073 * This is commonly provided by 32bit archs to provide an optimized 64bit
83074 * divide.
83075 */
83076-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83077+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83078 {
83079 *remainder = dividend % divisor;
83080 return dividend / divisor;
83081@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83082 /**
83083 * div64_u64 - unsigned 64bit divide with 64bit divisor
83084 */
83085-static inline u64 div64_u64(u64 dividend, u64 divisor)
83086+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83087 {
83088 return dividend / divisor;
83089 }
83090@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83091 #define div64_ul(x, y) div_u64((x), (y))
83092
83093 #ifndef div_u64_rem
83094-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83095+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83096 {
83097 *remainder = do_div(dividend, divisor);
83098 return dividend;
83099@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83100 #endif
83101
83102 #ifndef div64_u64
83103-extern u64 div64_u64(u64 dividend, u64 divisor);
83104+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83105 #endif
83106
83107 #ifndef div64_s64
83108@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83109 * divide.
83110 */
83111 #ifndef div_u64
83112-static inline u64 div_u64(u64 dividend, u32 divisor)
83113+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83114 {
83115 u32 remainder;
83116 return div_u64_rem(dividend, divisor, &remainder);
83117diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83118index 3d385c8..deacb6a 100644
83119--- a/include/linux/mempolicy.h
83120+++ b/include/linux/mempolicy.h
83121@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83122 }
83123
83124 #define vma_policy(vma) ((vma)->vm_policy)
83125+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83126+{
83127+ vma->vm_policy = pol;
83128+}
83129
83130 static inline void mpol_get(struct mempolicy *pol)
83131 {
83132@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83133 }
83134
83135 #define vma_policy(vma) NULL
83136+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83137+{
83138+}
83139
83140 static inline int
83141 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83142diff --git a/include/linux/mm.h b/include/linux/mm.h
83143index dd5ea30..cf81cd1 100644
83144--- a/include/linux/mm.h
83145+++ b/include/linux/mm.h
83146@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83147
83148 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83149 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83150+
83151+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83152+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83153+#endif
83154+
83155 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83156 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83157 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83158@@ -256,8 +261,8 @@ struct vm_operations_struct {
83159 /* called by access_process_vm when get_user_pages() fails, typically
83160 * for use by special VMAs that can switch between memory and hardware
83161 */
83162- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83163- void *buf, int len, int write);
83164+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83165+ void *buf, size_t len, int write);
83166
83167 /* Called by the /proc/PID/maps code to ask the vma whether it
83168 * has a special name. Returning non-NULL will also cause this
83169@@ -291,6 +296,7 @@ struct vm_operations_struct {
83170 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83171 unsigned long size, pgoff_t pgoff);
83172 };
83173+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83174
83175 struct mmu_gather;
83176 struct inode;
83177@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83178 unsigned long *pfn);
83179 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83180 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83181-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83182- void *buf, int len, int write);
83183+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83184+ void *buf, size_t len, int write);
83185
83186 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83187 loff_t const holebegin, loff_t const holelen)
83188@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83189 }
83190 #endif
83191
83192-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83193-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83194- void *buf, int len, int write);
83195+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83196+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83197+ void *buf, size_t len, int write);
83198
83199 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83200 unsigned long start, unsigned long nr_pages,
83201@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83202 int clear_page_dirty_for_io(struct page *page);
83203 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83204
83205-/* Is the vma a continuation of the stack vma above it? */
83206-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83207-{
83208- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83209-}
83210-
83211-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83212- unsigned long addr)
83213-{
83214- return (vma->vm_flags & VM_GROWSDOWN) &&
83215- (vma->vm_start == addr) &&
83216- !vma_growsdown(vma->vm_prev, addr);
83217-}
83218-
83219-/* Is the vma a continuation of the stack vma below it? */
83220-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83221-{
83222- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83223-}
83224-
83225-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83226- unsigned long addr)
83227-{
83228- return (vma->vm_flags & VM_GROWSUP) &&
83229- (vma->vm_end == addr) &&
83230- !vma_growsup(vma->vm_next, addr);
83231-}
83232-
83233 extern struct task_struct *task_of_stack(struct task_struct *task,
83234 struct vm_area_struct *vma, bool in_group);
83235
83236@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83237 {
83238 return 0;
83239 }
83240+
83241+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83242+ unsigned long address)
83243+{
83244+ return 0;
83245+}
83246 #else
83247 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83248+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83249 #endif
83250
83251 #ifdef __PAGETABLE_PMD_FOLDED
83252@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83253 {
83254 return 0;
83255 }
83256+
83257+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83258+ unsigned long address)
83259+{
83260+ return 0;
83261+}
83262 #else
83263 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83264+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83265 #endif
83266
83267 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83268@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83269 NULL: pud_offset(pgd, address);
83270 }
83271
83272+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83273+{
83274+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83275+ NULL: pud_offset(pgd, address);
83276+}
83277+
83278 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83279 {
83280 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83281 NULL: pmd_offset(pud, address);
83282 }
83283+
83284+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83285+{
83286+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83287+ NULL: pmd_offset(pud, address);
83288+}
83289 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83290
83291 #if USE_SPLIT_PTE_PTLOCKS
83292@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83293 bool *need_rmap_locks);
83294 extern void exit_mmap(struct mm_struct *);
83295
83296+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83297+extern void gr_learn_resource(const struct task_struct *task, const int res,
83298+ const unsigned long wanted, const int gt);
83299+#else
83300+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83301+ const unsigned long wanted, const int gt)
83302+{
83303+}
83304+#endif
83305+
83306 static inline int check_data_rlimit(unsigned long rlim,
83307 unsigned long new,
83308 unsigned long start,
83309 unsigned long end_data,
83310 unsigned long start_data)
83311 {
83312+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83313 if (rlim < RLIM_INFINITY) {
83314 if (((new - start) + (end_data - start_data)) > rlim)
83315 return -ENOSPC;
83316@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83317 unsigned long addr, unsigned long len,
83318 unsigned long flags, struct page **pages);
83319
83320-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83321+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83322
83323 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83324 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83325@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83326 unsigned long len, unsigned long prot, unsigned long flags,
83327 unsigned long pgoff, unsigned long *populate);
83328 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83329+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83330
83331 #ifdef CONFIG_MMU
83332 extern int __mm_populate(unsigned long addr, unsigned long len,
83333@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83334 unsigned long high_limit;
83335 unsigned long align_mask;
83336 unsigned long align_offset;
83337+ unsigned long threadstack_offset;
83338 };
83339
83340-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83341-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83342+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83343+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83344
83345 /*
83346 * Search for an unmapped address range.
83347@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83348 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83349 */
83350 static inline unsigned long
83351-vm_unmapped_area(struct vm_unmapped_area_info *info)
83352+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83353 {
83354 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83355 return unmapped_area(info);
83356@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83357 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83358 struct vm_area_struct **pprev);
83359
83360+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83361+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83362+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83363+
83364 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83365 NULL if none. Assume start_addr < end_addr. */
83366 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83367@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83368 }
83369
83370 #ifdef CONFIG_MMU
83371-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83372+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83373 void vma_set_page_prot(struct vm_area_struct *vma);
83374 #else
83375-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83376+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83377 {
83378 return __pgprot(0);
83379 }
83380@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83381 static inline void vm_stat_account(struct mm_struct *mm,
83382 unsigned long flags, struct file *file, long pages)
83383 {
83384+
83385+#ifdef CONFIG_PAX_RANDMMAP
83386+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83387+#endif
83388+
83389 mm->total_vm += pages;
83390 }
83391 #endif /* CONFIG_PROC_FS */
83392@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83393 extern int sysctl_memory_failure_early_kill;
83394 extern int sysctl_memory_failure_recovery;
83395 extern void shake_page(struct page *p, int access);
83396-extern atomic_long_t num_poisoned_pages;
83397+extern atomic_long_unchecked_t num_poisoned_pages;
83398 extern int soft_offline_page(struct page *page, int flags);
83399
83400 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83401@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83402 static inline void setup_nr_node_ids(void) {}
83403 #endif
83404
83405+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83406+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83407+#else
83408+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83409+#endif
83410+
83411 #endif /* __KERNEL__ */
83412 #endif /* _LINUX_MM_H */
83413diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83414index 6d34aa2..d73d848 100644
83415--- a/include/linux/mm_types.h
83416+++ b/include/linux/mm_types.h
83417@@ -309,7 +309,9 @@ struct vm_area_struct {
83418 #ifdef CONFIG_NUMA
83419 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83420 #endif
83421-};
83422+
83423+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83424+} __randomize_layout;
83425
83426 struct core_thread {
83427 struct task_struct *task;
83428@@ -459,7 +461,25 @@ struct mm_struct {
83429 /* address of the bounds directory */
83430 void __user *bd_addr;
83431 #endif
83432-};
83433+
83434+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83435+ unsigned long pax_flags;
83436+#endif
83437+
83438+#ifdef CONFIG_PAX_DLRESOLVE
83439+ unsigned long call_dl_resolve;
83440+#endif
83441+
83442+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83443+ unsigned long call_syscall;
83444+#endif
83445+
83446+#ifdef CONFIG_PAX_ASLR
83447+ unsigned long delta_mmap; /* randomized offset */
83448+ unsigned long delta_stack; /* randomized offset */
83449+#endif
83450+
83451+} __randomize_layout;
83452
83453 static inline void mm_init_cpumask(struct mm_struct *mm)
83454 {
83455diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83456index c5d5278..f0b68c8 100644
83457--- a/include/linux/mmiotrace.h
83458+++ b/include/linux/mmiotrace.h
83459@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83460 /* Called from ioremap.c */
83461 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83462 void __iomem *addr);
83463-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83464+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83465
83466 /* For anyone to insert markers. Remember trailing newline. */
83467 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83468@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83469 {
83470 }
83471
83472-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83473+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83474 {
83475 }
83476
83477diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83478index 2f0856d..5a4bc1e 100644
83479--- a/include/linux/mmzone.h
83480+++ b/include/linux/mmzone.h
83481@@ -527,7 +527,7 @@ struct zone {
83482
83483 ZONE_PADDING(_pad3_)
83484 /* Zone statistics */
83485- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83486+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83487 } ____cacheline_internodealigned_in_smp;
83488
83489 enum zone_flags {
83490diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83491index 745def8..08a820b 100644
83492--- a/include/linux/mod_devicetable.h
83493+++ b/include/linux/mod_devicetable.h
83494@@ -139,7 +139,7 @@ struct usb_device_id {
83495 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83496 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83497
83498-#define HID_ANY_ID (~0)
83499+#define HID_ANY_ID (~0U)
83500 #define HID_BUS_ANY 0xffff
83501 #define HID_GROUP_ANY 0x0000
83502
83503@@ -475,7 +475,7 @@ struct dmi_system_id {
83504 const char *ident;
83505 struct dmi_strmatch matches[4];
83506 void *driver_data;
83507-};
83508+} __do_const;
83509 /*
83510 * struct dmi_device_id appears during expansion of
83511 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83512diff --git a/include/linux/module.h b/include/linux/module.h
83513index b653d7c..22a238f 100644
83514--- a/include/linux/module.h
83515+++ b/include/linux/module.h
83516@@ -17,9 +17,11 @@
83517 #include <linux/moduleparam.h>
83518 #include <linux/jump_label.h>
83519 #include <linux/export.h>
83520+#include <linux/fs.h>
83521
83522 #include <linux/percpu.h>
83523 #include <asm/module.h>
83524+#include <asm/pgtable.h>
83525
83526 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83527 #define MODULE_SIG_STRING "~Module signature appended~\n"
83528@@ -42,7 +44,7 @@ struct module_kobject {
83529 struct kobject *drivers_dir;
83530 struct module_param_attrs *mp;
83531 struct completion *kobj_completion;
83532-};
83533+} __randomize_layout;
83534
83535 struct module_attribute {
83536 struct attribute attr;
83537@@ -54,12 +56,13 @@ struct module_attribute {
83538 int (*test)(struct module *);
83539 void (*free)(struct module *);
83540 };
83541+typedef struct module_attribute __no_const module_attribute_no_const;
83542
83543 struct module_version_attribute {
83544 struct module_attribute mattr;
83545 const char *module_name;
83546 const char *version;
83547-} __attribute__ ((__aligned__(sizeof(void *))));
83548+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83549
83550 extern ssize_t __modver_version_show(struct module_attribute *,
83551 struct module_kobject *, char *);
83552@@ -221,7 +224,7 @@ struct module {
83553
83554 /* Sysfs stuff. */
83555 struct module_kobject mkobj;
83556- struct module_attribute *modinfo_attrs;
83557+ module_attribute_no_const *modinfo_attrs;
83558 const char *version;
83559 const char *srcversion;
83560 struct kobject *holders_dir;
83561@@ -270,19 +273,16 @@ struct module {
83562 int (*init)(void);
83563
83564 /* If this is non-NULL, vfree after init() returns */
83565- void *module_init;
83566+ void *module_init_rx, *module_init_rw;
83567
83568 /* Here is the actual code + data, vfree'd on unload. */
83569- void *module_core;
83570+ void *module_core_rx, *module_core_rw;
83571
83572 /* Here are the sizes of the init and core sections */
83573- unsigned int init_size, core_size;
83574+ unsigned int init_size_rw, core_size_rw;
83575
83576 /* The size of the executable code in each section. */
83577- unsigned int init_text_size, core_text_size;
83578-
83579- /* Size of RO sections of the module (text+rodata) */
83580- unsigned int init_ro_size, core_ro_size;
83581+ unsigned int init_size_rx, core_size_rx;
83582
83583 /* Arch-specific module values */
83584 struct mod_arch_specific arch;
83585@@ -338,6 +338,10 @@ struct module {
83586 #ifdef CONFIG_EVENT_TRACING
83587 struct ftrace_event_call **trace_events;
83588 unsigned int num_trace_events;
83589+ struct file_operations trace_id;
83590+ struct file_operations trace_enable;
83591+ struct file_operations trace_format;
83592+ struct file_operations trace_filter;
83593 #endif
83594 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83595 unsigned int num_ftrace_callsites;
83596@@ -361,7 +365,7 @@ struct module {
83597 ctor_fn_t *ctors;
83598 unsigned int num_ctors;
83599 #endif
83600-};
83601+} __randomize_layout;
83602 #ifndef MODULE_ARCH_INIT
83603 #define MODULE_ARCH_INIT {}
83604 #endif
83605@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83606 bool is_module_percpu_address(unsigned long addr);
83607 bool is_module_text_address(unsigned long addr);
83608
83609+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83610+{
83611+
83612+#ifdef CONFIG_PAX_KERNEXEC
83613+ if (ktla_ktva(addr) >= (unsigned long)start &&
83614+ ktla_ktva(addr) < (unsigned long)start + size)
83615+ return 1;
83616+#endif
83617+
83618+ return ((void *)addr >= start && (void *)addr < start + size);
83619+}
83620+
83621+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83622+{
83623+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83624+}
83625+
83626+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83627+{
83628+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83629+}
83630+
83631+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83632+{
83633+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83634+}
83635+
83636+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83637+{
83638+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83639+}
83640+
83641 static inline bool within_module_core(unsigned long addr,
83642 const struct module *mod)
83643 {
83644- return (unsigned long)mod->module_core <= addr &&
83645- addr < (unsigned long)mod->module_core + mod->core_size;
83646+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83647 }
83648
83649 static inline bool within_module_init(unsigned long addr,
83650 const struct module *mod)
83651 {
83652- return (unsigned long)mod->module_init <= addr &&
83653- addr < (unsigned long)mod->module_init + mod->init_size;
83654+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83655 }
83656
83657 static inline bool within_module(unsigned long addr, const struct module *mod)
83658diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83659index f755626..641f822 100644
83660--- a/include/linux/moduleloader.h
83661+++ b/include/linux/moduleloader.h
83662@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83663 sections. Returns NULL on failure. */
83664 void *module_alloc(unsigned long size);
83665
83666+#ifdef CONFIG_PAX_KERNEXEC
83667+void *module_alloc_exec(unsigned long size);
83668+#else
83669+#define module_alloc_exec(x) module_alloc(x)
83670+#endif
83671+
83672 /* Free memory returned from module_alloc. */
83673 void module_memfree(void *module_region);
83674
83675+#ifdef CONFIG_PAX_KERNEXEC
83676+void module_memfree_exec(void *module_region);
83677+#else
83678+#define module_memfree_exec(x) module_memfree((x))
83679+#endif
83680+
83681 /*
83682 * Apply the given relocation to the (simplified) ELF. Return -error
83683 * or 0.
83684@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83685 unsigned int relsec,
83686 struct module *me)
83687 {
83688+#ifdef CONFIG_MODULES
83689 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83690 module_name(me));
83691+#endif
83692 return -ENOEXEC;
83693 }
83694 #endif
83695@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83696 unsigned int relsec,
83697 struct module *me)
83698 {
83699+#ifdef CONFIG_MODULES
83700 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83701 module_name(me));
83702+#endif
83703 return -ENOEXEC;
83704 }
83705 #endif
83706diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83707index 1c9effa..1160bdd 100644
83708--- a/include/linux/moduleparam.h
83709+++ b/include/linux/moduleparam.h
83710@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
83711 * @len is usually just sizeof(string).
83712 */
83713 #define module_param_string(name, string, len, perm) \
83714- static const struct kparam_string __param_string_##name \
83715+ static const struct kparam_string __param_string_##name __used \
83716 = { len, string }; \
83717 __module_param_call(MODULE_PARAM_PREFIX, name, \
83718 &param_ops_string, \
83719@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83720 */
83721 #define module_param_array_named(name, array, type, nump, perm) \
83722 param_check_##type(name, &(array)[0]); \
83723- static const struct kparam_array __param_arr_##name \
83724+ static const struct kparam_array __param_arr_##name __used \
83725 = { .max = ARRAY_SIZE(array), .num = nump, \
83726 .ops = &param_ops_##type, \
83727 .elemsize = sizeof(array[0]), .elem = array }; \
83728diff --git a/include/linux/mount.h b/include/linux/mount.h
83729index c2c561d..a5f2a8c 100644
83730--- a/include/linux/mount.h
83731+++ b/include/linux/mount.h
83732@@ -66,7 +66,7 @@ struct vfsmount {
83733 struct dentry *mnt_root; /* root of the mounted tree */
83734 struct super_block *mnt_sb; /* pointer to superblock */
83735 int mnt_flags;
83736-};
83737+} __randomize_layout;
83738
83739 struct file; /* forward dec */
83740 struct path;
83741diff --git a/include/linux/namei.h b/include/linux/namei.h
83742index c899077..b9a2010 100644
83743--- a/include/linux/namei.h
83744+++ b/include/linux/namei.h
83745@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
83746 extern void unlock_rename(struct dentry *, struct dentry *);
83747
83748 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83749-extern void nd_set_link(struct nameidata *nd, char *path);
83750-extern char *nd_get_link(struct nameidata *nd);
83751+extern void nd_set_link(struct nameidata *nd, const char *path);
83752+extern const char *nd_get_link(const struct nameidata *nd);
83753
83754 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
83755 {
83756diff --git a/include/linux/net.h b/include/linux/net.h
83757index 17d8339..81656c0 100644
83758--- a/include/linux/net.h
83759+++ b/include/linux/net.h
83760@@ -192,7 +192,7 @@ struct net_proto_family {
83761 int (*create)(struct net *net, struct socket *sock,
83762 int protocol, int kern);
83763 struct module *owner;
83764-};
83765+} __do_const;
83766
83767 struct iovec;
83768 struct kvec;
83769diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83770index 52fd8e8..19430a1 100644
83771--- a/include/linux/netdevice.h
83772+++ b/include/linux/netdevice.h
83773@@ -1191,6 +1191,7 @@ struct net_device_ops {
83774 u8 state);
83775 #endif
83776 };
83777+typedef struct net_device_ops __no_const net_device_ops_no_const;
83778
83779 /**
83780 * enum net_device_priv_flags - &struct net_device priv_flags
83781@@ -1537,10 +1538,10 @@ struct net_device {
83782
83783 struct net_device_stats stats;
83784
83785- atomic_long_t rx_dropped;
83786- atomic_long_t tx_dropped;
83787+ atomic_long_unchecked_t rx_dropped;
83788+ atomic_long_unchecked_t tx_dropped;
83789
83790- atomic_t carrier_changes;
83791+ atomic_unchecked_t carrier_changes;
83792
83793 #ifdef CONFIG_WIRELESS_EXT
83794 const struct iw_handler_def * wireless_handlers;
83795diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83796index 2517ece..0bbfcfb 100644
83797--- a/include/linux/netfilter.h
83798+++ b/include/linux/netfilter.h
83799@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83800 #endif
83801 /* Use the module struct to lock set/get code in place */
83802 struct module *owner;
83803-};
83804+} __do_const;
83805
83806 /* Function to register/unregister hook points. */
83807 int nf_register_hook(struct nf_hook_ops *reg);
83808diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83809index e955d47..04a5338 100644
83810--- a/include/linux/netfilter/nfnetlink.h
83811+++ b/include/linux/netfilter/nfnetlink.h
83812@@ -19,7 +19,7 @@ struct nfnl_callback {
83813 const struct nlattr * const cda[]);
83814 const struct nla_policy *policy; /* netlink attribute policy */
83815 const u_int16_t attr_count; /* number of nlattr's */
83816-};
83817+} __do_const;
83818
83819 struct nfnetlink_subsystem {
83820 const char *name;
83821diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
83822new file mode 100644
83823index 0000000..33f4af8
83824--- /dev/null
83825+++ b/include/linux/netfilter/xt_gradm.h
83826@@ -0,0 +1,9 @@
83827+#ifndef _LINUX_NETFILTER_XT_GRADM_H
83828+#define _LINUX_NETFILTER_XT_GRADM_H 1
83829+
83830+struct xt_gradm_mtinfo {
83831+ __u16 flags;
83832+ __u16 invflags;
83833+};
83834+
83835+#endif
83836diff --git a/include/linux/nls.h b/include/linux/nls.h
83837index 520681b..2b7fabb 100644
83838--- a/include/linux/nls.h
83839+++ b/include/linux/nls.h
83840@@ -31,7 +31,7 @@ struct nls_table {
83841 const unsigned char *charset2upper;
83842 struct module *owner;
83843 struct nls_table *next;
83844-};
83845+} __do_const;
83846
83847 /* this value hold the maximum octet of charset */
83848 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
83849@@ -46,7 +46,7 @@ enum utf16_endian {
83850 /* nls_base.c */
83851 extern int __register_nls(struct nls_table *, struct module *);
83852 extern int unregister_nls(struct nls_table *);
83853-extern struct nls_table *load_nls(char *);
83854+extern struct nls_table *load_nls(const char *);
83855 extern void unload_nls(struct nls_table *);
83856 extern struct nls_table *load_nls_default(void);
83857 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
83858diff --git a/include/linux/notifier.h b/include/linux/notifier.h
83859index d14a4c3..a078786 100644
83860--- a/include/linux/notifier.h
83861+++ b/include/linux/notifier.h
83862@@ -54,7 +54,8 @@ struct notifier_block {
83863 notifier_fn_t notifier_call;
83864 struct notifier_block __rcu *next;
83865 int priority;
83866-};
83867+} __do_const;
83868+typedef struct notifier_block __no_const notifier_block_no_const;
83869
83870 struct atomic_notifier_head {
83871 spinlock_t lock;
83872diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
83873index b2a0f15..4d7da32 100644
83874--- a/include/linux/oprofile.h
83875+++ b/include/linux/oprofile.h
83876@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
83877 int oprofilefs_create_ro_ulong(struct dentry * root,
83878 char const * name, ulong * val);
83879
83880-/** Create a file for read-only access to an atomic_t. */
83881+/** Create a file for read-only access to an atomic_unchecked_t. */
83882 int oprofilefs_create_ro_atomic(struct dentry * root,
83883- char const * name, atomic_t * val);
83884+ char const * name, atomic_unchecked_t * val);
83885
83886 /** create a directory */
83887 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
83888diff --git a/include/linux/padata.h b/include/linux/padata.h
83889index 4386946..f50c615 100644
83890--- a/include/linux/padata.h
83891+++ b/include/linux/padata.h
83892@@ -129,7 +129,7 @@ struct parallel_data {
83893 struct padata_serial_queue __percpu *squeue;
83894 atomic_t reorder_objects;
83895 atomic_t refcnt;
83896- atomic_t seq_nr;
83897+ atomic_unchecked_t seq_nr;
83898 struct padata_cpumask cpumask;
83899 spinlock_t lock ____cacheline_aligned;
83900 unsigned int processed;
83901diff --git a/include/linux/path.h b/include/linux/path.h
83902index d137218..be0c176 100644
83903--- a/include/linux/path.h
83904+++ b/include/linux/path.h
83905@@ -1,13 +1,15 @@
83906 #ifndef _LINUX_PATH_H
83907 #define _LINUX_PATH_H
83908
83909+#include <linux/compiler.h>
83910+
83911 struct dentry;
83912 struct vfsmount;
83913
83914 struct path {
83915 struct vfsmount *mnt;
83916 struct dentry *dentry;
83917-};
83918+} __randomize_layout;
83919
83920 extern void path_get(const struct path *);
83921 extern void path_put(const struct path *);
83922diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
83923index 8c78950..0d74ed9 100644
83924--- a/include/linux/pci_hotplug.h
83925+++ b/include/linux/pci_hotplug.h
83926@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
83927 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
83928 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
83929 int (*reset_slot) (struct hotplug_slot *slot, int probe);
83930-};
83931+} __do_const;
83932+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
83933
83934 /**
83935 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
83936diff --git a/include/linux/percpu.h b/include/linux/percpu.h
83937index caebf2a..4c3ae9d 100644
83938--- a/include/linux/percpu.h
83939+++ b/include/linux/percpu.h
83940@@ -34,7 +34,7 @@
83941 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
83942 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
83943 */
83944-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
83945+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
83946 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
83947
83948 /*
83949diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
83950index 664de5a..b3e1bf4 100644
83951--- a/include/linux/perf_event.h
83952+++ b/include/linux/perf_event.h
83953@@ -336,8 +336,8 @@ struct perf_event {
83954
83955 enum perf_event_active_state state;
83956 unsigned int attach_state;
83957- local64_t count;
83958- atomic64_t child_count;
83959+ local64_t count; /* PaX: fix it one day */
83960+ atomic64_unchecked_t child_count;
83961
83962 /*
83963 * These are the total time in nanoseconds that the event
83964@@ -388,8 +388,8 @@ struct perf_event {
83965 * These accumulate total time (in nanoseconds) that children
83966 * events have been enabled and running, respectively.
83967 */
83968- atomic64_t child_total_time_enabled;
83969- atomic64_t child_total_time_running;
83970+ atomic64_unchecked_t child_total_time_enabled;
83971+ atomic64_unchecked_t child_total_time_running;
83972
83973 /*
83974 * Protect attach/detach and child_list:
83975@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
83976 entry->ip[entry->nr++] = ip;
83977 }
83978
83979-extern int sysctl_perf_event_paranoid;
83980+extern int sysctl_perf_event_legitimately_concerned;
83981 extern int sysctl_perf_event_mlock;
83982 extern int sysctl_perf_event_sample_rate;
83983 extern int sysctl_perf_cpu_time_max_percent;
83984@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
83985 loff_t *ppos);
83986
83987
83988+static inline bool perf_paranoid_any(void)
83989+{
83990+ return sysctl_perf_event_legitimately_concerned > 2;
83991+}
83992+
83993 static inline bool perf_paranoid_tracepoint_raw(void)
83994 {
83995- return sysctl_perf_event_paranoid > -1;
83996+ return sysctl_perf_event_legitimately_concerned > -1;
83997 }
83998
83999 static inline bool perf_paranoid_cpu(void)
84000 {
84001- return sysctl_perf_event_paranoid > 0;
84002+ return sysctl_perf_event_legitimately_concerned > 0;
84003 }
84004
84005 static inline bool perf_paranoid_kernel(void)
84006 {
84007- return sysctl_perf_event_paranoid > 1;
84008+ return sysctl_perf_event_legitimately_concerned > 1;
84009 }
84010
84011 extern void perf_event_init(void);
84012@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
84013 struct device_attribute attr;
84014 u64 id;
84015 const char *event_str;
84016-};
84017+} __do_const;
84018
84019 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84020 static struct perf_pmu_events_attr _var = { \
84021diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84022index b9cf6c5..5462472 100644
84023--- a/include/linux/pid_namespace.h
84024+++ b/include/linux/pid_namespace.h
84025@@ -45,7 +45,7 @@ struct pid_namespace {
84026 int hide_pid;
84027 int reboot; /* group exit code if this pidns was rebooted */
84028 struct ns_common ns;
84029-};
84030+} __randomize_layout;
84031
84032 extern struct pid_namespace init_pid_ns;
84033
84034diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84035index eb8b8ac..62649e1 100644
84036--- a/include/linux/pipe_fs_i.h
84037+++ b/include/linux/pipe_fs_i.h
84038@@ -47,10 +47,10 @@ struct pipe_inode_info {
84039 struct mutex mutex;
84040 wait_queue_head_t wait;
84041 unsigned int nrbufs, curbuf, buffers;
84042- unsigned int readers;
84043- unsigned int writers;
84044- unsigned int files;
84045- unsigned int waiting_writers;
84046+ atomic_t readers;
84047+ atomic_t writers;
84048+ atomic_t files;
84049+ atomic_t waiting_writers;
84050 unsigned int r_counter;
84051 unsigned int w_counter;
84052 struct page *tmp_page;
84053diff --git a/include/linux/pm.h b/include/linux/pm.h
84054index 8b59763..8a05939 100644
84055--- a/include/linux/pm.h
84056+++ b/include/linux/pm.h
84057@@ -608,6 +608,7 @@ struct dev_pm_domain {
84058 struct dev_pm_ops ops;
84059 void (*detach)(struct device *dev, bool power_off);
84060 };
84061+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84062
84063 /*
84064 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84065diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84066index a9edab2..8bada56 100644
84067--- a/include/linux/pm_domain.h
84068+++ b/include/linux/pm_domain.h
84069@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84070 int (*save_state)(struct device *dev);
84071 int (*restore_state)(struct device *dev);
84072 bool (*active_wakeup)(struct device *dev);
84073-};
84074+} __no_const;
84075
84076 struct gpd_cpuidle_data {
84077 unsigned int saved_exit_latency;
84078- struct cpuidle_state *idle_state;
84079+ cpuidle_state_no_const *idle_state;
84080 };
84081
84082 struct generic_pm_domain {
84083diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84084index 30e84d4..22278b4 100644
84085--- a/include/linux/pm_runtime.h
84086+++ b/include/linux/pm_runtime.h
84087@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84088
84089 static inline void pm_runtime_mark_last_busy(struct device *dev)
84090 {
84091- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84092+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84093 }
84094
84095 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84096diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84097index 195aafc..49a7bc2 100644
84098--- a/include/linux/pnp.h
84099+++ b/include/linux/pnp.h
84100@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84101 struct pnp_fixup {
84102 char id[7];
84103 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84104-};
84105+} __do_const;
84106
84107 /* config parameters */
84108 #define PNP_CONFIG_NORMAL 0x0001
84109diff --git a/include/linux/poison.h b/include/linux/poison.h
84110index 2110a81..13a11bb 100644
84111--- a/include/linux/poison.h
84112+++ b/include/linux/poison.h
84113@@ -19,8 +19,8 @@
84114 * under normal circumstances, used to verify that nobody uses
84115 * non-initialized list entries.
84116 */
84117-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84118-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84119+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84120+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84121
84122 /********** include/linux/timer.h **********/
84123 /*
84124diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84125index d8b187c3..9a9257a 100644
84126--- a/include/linux/power/smartreflex.h
84127+++ b/include/linux/power/smartreflex.h
84128@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84129 int (*notify)(struct omap_sr *sr, u32 status);
84130 u8 notify_flags;
84131 u8 class_type;
84132-};
84133+} __do_const;
84134
84135 /**
84136 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84137diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84138index 4ea1d37..80f4b33 100644
84139--- a/include/linux/ppp-comp.h
84140+++ b/include/linux/ppp-comp.h
84141@@ -84,7 +84,7 @@ struct compressor {
84142 struct module *owner;
84143 /* Extra skb space needed by the compressor algorithm */
84144 unsigned int comp_extra;
84145-};
84146+} __do_const;
84147
84148 /*
84149 * The return value from decompress routine is the length of the
84150diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84151index de83b4e..c4b997d 100644
84152--- a/include/linux/preempt.h
84153+++ b/include/linux/preempt.h
84154@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84155 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84156 #endif
84157
84158+#define raw_preempt_count_add(val) __preempt_count_add(val)
84159+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84160+
84161 #define __preempt_count_inc() __preempt_count_add(1)
84162 #define __preempt_count_dec() __preempt_count_sub(1)
84163
84164 #define preempt_count_inc() preempt_count_add(1)
84165+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84166 #define preempt_count_dec() preempt_count_sub(1)
84167+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84168
84169 #ifdef CONFIG_PREEMPT_COUNT
84170
84171@@ -41,6 +46,12 @@ do { \
84172 barrier(); \
84173 } while (0)
84174
84175+#define raw_preempt_disable() \
84176+do { \
84177+ raw_preempt_count_inc(); \
84178+ barrier(); \
84179+} while (0)
84180+
84181 #define sched_preempt_enable_no_resched() \
84182 do { \
84183 barrier(); \
84184@@ -49,6 +60,12 @@ do { \
84185
84186 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84187
84188+#define raw_preempt_enable_no_resched() \
84189+do { \
84190+ barrier(); \
84191+ raw_preempt_count_dec(); \
84192+} while (0)
84193+
84194 #ifdef CONFIG_PREEMPT
84195 #define preempt_enable() \
84196 do { \
84197@@ -113,8 +130,10 @@ do { \
84198 * region.
84199 */
84200 #define preempt_disable() barrier()
84201+#define raw_preempt_disable() barrier()
84202 #define sched_preempt_enable_no_resched() barrier()
84203 #define preempt_enable_no_resched() barrier()
84204+#define raw_preempt_enable_no_resched() barrier()
84205 #define preempt_enable() barrier()
84206 #define preempt_check_resched() do { } while (0)
84207
84208@@ -128,11 +147,13 @@ do { \
84209 /*
84210 * Modules have no business playing preemption tricks.
84211 */
84212+#ifndef CONFIG_PAX_KERNEXEC
84213 #undef sched_preempt_enable_no_resched
84214 #undef preempt_enable_no_resched
84215 #undef preempt_enable_no_resched_notrace
84216 #undef preempt_check_resched
84217 #endif
84218+#endif
84219
84220 #define preempt_set_need_resched() \
84221 do { \
84222diff --git a/include/linux/printk.h b/include/linux/printk.h
84223index 4d5bf57..d94eccf 100644
84224--- a/include/linux/printk.h
84225+++ b/include/linux/printk.h
84226@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84227 #endif
84228
84229 typedef int(*printk_func_t)(const char *fmt, va_list args);
84230+extern int kptr_restrict;
84231
84232 #ifdef CONFIG_PRINTK
84233 asmlinkage __printf(5, 0)
84234@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84235
84236 extern int printk_delay_msec;
84237 extern int dmesg_restrict;
84238-extern int kptr_restrict;
84239
84240 extern void wake_up_klogd(void);
84241
84242diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84243index b97bf2e..f14c92d4 100644
84244--- a/include/linux/proc_fs.h
84245+++ b/include/linux/proc_fs.h
84246@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84247 extern struct proc_dir_entry *proc_symlink(const char *,
84248 struct proc_dir_entry *, const char *);
84249 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84250+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84251 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84252 struct proc_dir_entry *, void *);
84253+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84254+ struct proc_dir_entry *, void *);
84255 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84256 struct proc_dir_entry *);
84257
84258@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84259 return proc_create_data(name, mode, parent, proc_fops, NULL);
84260 }
84261
84262+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84263+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84264+{
84265+#ifdef CONFIG_GRKERNSEC_PROC_USER
84266+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84267+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84268+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84269+#else
84270+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84271+#endif
84272+}
84273+
84274+
84275 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84276 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84277 extern void *PDE_DATA(const struct inode *);
84278@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84279 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84280 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84281 struct proc_dir_entry *parent) {return NULL;}
84282+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84283+ struct proc_dir_entry *parent) { return NULL; }
84284 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84285 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84286+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84287+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84288 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84289 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84290 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84291@@ -79,7 +99,7 @@ struct net;
84292 static inline struct proc_dir_entry *proc_net_mkdir(
84293 struct net *net, const char *name, struct proc_dir_entry *parent)
84294 {
84295- return proc_mkdir_data(name, 0, parent, net);
84296+ return proc_mkdir_data_restrict(name, 0, parent, net);
84297 }
84298
84299 #endif /* _LINUX_PROC_FS_H */
84300diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84301index 42dfc61..8113a99 100644
84302--- a/include/linux/proc_ns.h
84303+++ b/include/linux/proc_ns.h
84304@@ -16,7 +16,7 @@ struct proc_ns_operations {
84305 struct ns_common *(*get)(struct task_struct *task);
84306 void (*put)(struct ns_common *ns);
84307 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84308-};
84309+} __do_const __randomize_layout;
84310
84311 extern const struct proc_ns_operations netns_operations;
84312 extern const struct proc_ns_operations utsns_operations;
84313diff --git a/include/linux/quota.h b/include/linux/quota.h
84314index b86df49..8002997 100644
84315--- a/include/linux/quota.h
84316+++ b/include/linux/quota.h
84317@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84318
84319 extern bool qid_eq(struct kqid left, struct kqid right);
84320 extern bool qid_lt(struct kqid left, struct kqid right);
84321-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84322+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84323 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84324 extern bool qid_valid(struct kqid qid);
84325
84326diff --git a/include/linux/random.h b/include/linux/random.h
84327index b05856e..0a9f14e 100644
84328--- a/include/linux/random.h
84329+++ b/include/linux/random.h
84330@@ -9,9 +9,19 @@
84331 #include <uapi/linux/random.h>
84332
84333 extern void add_device_randomness(const void *, unsigned int);
84334+
84335+static inline void add_latent_entropy(void)
84336+{
84337+
84338+#ifdef LATENT_ENTROPY_PLUGIN
84339+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84340+#endif
84341+
84342+}
84343+
84344 extern void add_input_randomness(unsigned int type, unsigned int code,
84345- unsigned int value);
84346-extern void add_interrupt_randomness(int irq, int irq_flags);
84347+ unsigned int value) __latent_entropy;
84348+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84349
84350 extern void get_random_bytes(void *buf, int nbytes);
84351 extern void get_random_bytes_arch(void *buf, int nbytes);
84352@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84353 extern const struct file_operations random_fops, urandom_fops;
84354 #endif
84355
84356-unsigned int get_random_int(void);
84357+unsigned int __intentional_overflow(-1) get_random_int(void);
84358 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84359
84360-u32 prandom_u32(void);
84361+u32 prandom_u32(void) __intentional_overflow(-1);
84362 void prandom_bytes(void *buf, size_t nbytes);
84363 void prandom_seed(u32 seed);
84364 void prandom_reseed_late(void);
84365@@ -37,6 +47,11 @@ struct rnd_state {
84366 u32 prandom_u32_state(struct rnd_state *state);
84367 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84368
84369+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84370+{
84371+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84372+}
84373+
84374 /**
84375 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84376 * @ep_ro: right open interval endpoint
84377@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84378 *
84379 * Returns: pseudo-random number in interval [0, ep_ro)
84380 */
84381-static inline u32 prandom_u32_max(u32 ep_ro)
84382+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84383 {
84384 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84385 }
84386diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84387index 378c5ee..aa84a47 100644
84388--- a/include/linux/rbtree_augmented.h
84389+++ b/include/linux/rbtree_augmented.h
84390@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84391 old->rbaugmented = rbcompute(old); \
84392 } \
84393 rbstatic const struct rb_augment_callbacks rbname = { \
84394- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84395+ .propagate = rbname ## _propagate, \
84396+ .copy = rbname ## _copy, \
84397+ .rotate = rbname ## _rotate \
84398 };
84399
84400
84401diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84402index 529bc94..82ce778 100644
84403--- a/include/linux/rculist.h
84404+++ b/include/linux/rculist.h
84405@@ -29,8 +29,8 @@
84406 */
84407 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84408 {
84409- ACCESS_ONCE(list->next) = list;
84410- ACCESS_ONCE(list->prev) = list;
84411+ ACCESS_ONCE_RW(list->next) = list;
84412+ ACCESS_ONCE_RW(list->prev) = list;
84413 }
84414
84415 /*
84416@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84417 struct list_head *prev, struct list_head *next);
84418 #endif
84419
84420+void __pax_list_add_rcu(struct list_head *new,
84421+ struct list_head *prev, struct list_head *next);
84422+
84423 /**
84424 * list_add_rcu - add a new entry to rcu-protected list
84425 * @new: new entry to be added
84426@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84427 __list_add_rcu(new, head, head->next);
84428 }
84429
84430+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84431+{
84432+ __pax_list_add_rcu(new, head, head->next);
84433+}
84434+
84435 /**
84436 * list_add_tail_rcu - add a new entry to rcu-protected list
84437 * @new: new entry to be added
84438@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84439 __list_add_rcu(new, head->prev, head);
84440 }
84441
84442+static inline void pax_list_add_tail_rcu(struct list_head *new,
84443+ struct list_head *head)
84444+{
84445+ __pax_list_add_rcu(new, head->prev, head);
84446+}
84447+
84448 /**
84449 * list_del_rcu - deletes entry from list without re-initialization
84450 * @entry: the element to delete from the list.
84451@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84452 entry->prev = LIST_POISON2;
84453 }
84454
84455+extern void pax_list_del_rcu(struct list_head *entry);
84456+
84457 /**
84458 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84459 * @n: the element to delete from the hash list.
84460diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
84461index ed4f593..8a51501 100644
84462--- a/include/linux/rcupdate.h
84463+++ b/include/linux/rcupdate.h
84464@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84465 #define rcu_note_voluntary_context_switch(t) \
84466 do { \
84467 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84468- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84469+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84470 } while (0)
84471 #else /* #ifdef CONFIG_TASKS_RCU */
84472 #define TASKS_RCU(x) do { } while (0)
84473diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84474index 67fc8fc..a90f7d8 100644
84475--- a/include/linux/reboot.h
84476+++ b/include/linux/reboot.h
84477@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84478 */
84479
84480 extern void migrate_to_reboot_cpu(void);
84481-extern void machine_restart(char *cmd);
84482-extern void machine_halt(void);
84483-extern void machine_power_off(void);
84484+extern void machine_restart(char *cmd) __noreturn;
84485+extern void machine_halt(void) __noreturn;
84486+extern void machine_power_off(void) __noreturn;
84487
84488 extern void machine_shutdown(void);
84489 struct pt_regs;
84490@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84491 */
84492
84493 extern void kernel_restart_prepare(char *cmd);
84494-extern void kernel_restart(char *cmd);
84495-extern void kernel_halt(void);
84496-extern void kernel_power_off(void);
84497+extern void kernel_restart(char *cmd) __noreturn;
84498+extern void kernel_halt(void) __noreturn;
84499+extern void kernel_power_off(void) __noreturn;
84500
84501 extern int C_A_D; /* for sysctl */
84502 void ctrl_alt_del(void);
84503@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84504 * Emergency restart, callable from an interrupt handler.
84505 */
84506
84507-extern void emergency_restart(void);
84508+extern void emergency_restart(void) __noreturn;
84509 #include <asm/emergency-restart.h>
84510
84511 #endif /* _LINUX_REBOOT_H */
84512diff --git a/include/linux/regset.h b/include/linux/regset.h
84513index 8e0c9fe..ac4d221 100644
84514--- a/include/linux/regset.h
84515+++ b/include/linux/regset.h
84516@@ -161,7 +161,8 @@ struct user_regset {
84517 unsigned int align;
84518 unsigned int bias;
84519 unsigned int core_note_type;
84520-};
84521+} __do_const;
84522+typedef struct user_regset __no_const user_regset_no_const;
84523
84524 /**
84525 * struct user_regset_view - available regsets
84526diff --git a/include/linux/relay.h b/include/linux/relay.h
84527index d7c8359..818daf5 100644
84528--- a/include/linux/relay.h
84529+++ b/include/linux/relay.h
84530@@ -157,7 +157,7 @@ struct rchan_callbacks
84531 * The callback should return 0 if successful, negative if not.
84532 */
84533 int (*remove_buf_file)(struct dentry *dentry);
84534-};
84535+} __no_const;
84536
84537 /*
84538 * CONFIG_RELAY kernel API, kernel/relay.c
84539diff --git a/include/linux/rio.h b/include/linux/rio.h
84540index 6bda06f..bf39a9b 100644
84541--- a/include/linux/rio.h
84542+++ b/include/linux/rio.h
84543@@ -358,7 +358,7 @@ struct rio_ops {
84544 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84545 u64 rstart, u32 size, u32 flags);
84546 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84547-};
84548+} __no_const;
84549
84550 #define RIO_RESOURCE_MEM 0x00000100
84551 #define RIO_RESOURCE_DOORBELL 0x00000200
84552diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84553index d9d7e7e..86f47ac 100644
84554--- a/include/linux/rmap.h
84555+++ b/include/linux/rmap.h
84556@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84557 void anon_vma_init(void); /* create anon_vma_cachep */
84558 int anon_vma_prepare(struct vm_area_struct *);
84559 void unlink_anon_vmas(struct vm_area_struct *);
84560-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84561-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84562+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84563+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84564
84565 static inline void anon_vma_merge(struct vm_area_struct *vma,
84566 struct vm_area_struct *next)
84567diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84568index ed8f9e7..999bc96 100644
84569--- a/include/linux/scatterlist.h
84570+++ b/include/linux/scatterlist.h
84571@@ -1,6 +1,7 @@
84572 #ifndef _LINUX_SCATTERLIST_H
84573 #define _LINUX_SCATTERLIST_H
84574
84575+#include <linux/sched.h>
84576 #include <linux/string.h>
84577 #include <linux/bug.h>
84578 #include <linux/mm.h>
84579@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84580 #ifdef CONFIG_DEBUG_SG
84581 BUG_ON(!virt_addr_valid(buf));
84582 #endif
84583+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84584+ if (object_starts_on_stack(buf)) {
84585+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84586+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84587+ } else
84588+#endif
84589 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84590 }
84591
84592diff --git a/include/linux/sched.h b/include/linux/sched.h
84593index 8db31ef..0af1f81 100644
84594--- a/include/linux/sched.h
84595+++ b/include/linux/sched.h
84596@@ -133,6 +133,7 @@ struct fs_struct;
84597 struct perf_event_context;
84598 struct blk_plug;
84599 struct filename;
84600+struct linux_binprm;
84601
84602 #define VMACACHE_BITS 2
84603 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84604@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84605 extern int in_sched_functions(unsigned long addr);
84606
84607 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84608-extern signed long schedule_timeout(signed long timeout);
84609+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84610 extern signed long schedule_timeout_interruptible(signed long timeout);
84611 extern signed long schedule_timeout_killable(signed long timeout);
84612 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84613@@ -426,6 +427,19 @@ struct nsproxy;
84614 struct user_namespace;
84615
84616 #ifdef CONFIG_MMU
84617+
84618+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84619+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84620+#else
84621+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84622+{
84623+ return 0;
84624+}
84625+#endif
84626+
84627+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84628+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84629+
84630 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84631 extern unsigned long
84632 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84633@@ -724,6 +738,17 @@ struct signal_struct {
84634 #ifdef CONFIG_TASKSTATS
84635 struct taskstats *stats;
84636 #endif
84637+
84638+#ifdef CONFIG_GRKERNSEC
84639+ u32 curr_ip;
84640+ u32 saved_ip;
84641+ u32 gr_saddr;
84642+ u32 gr_daddr;
84643+ u16 gr_sport;
84644+ u16 gr_dport;
84645+ u8 used_accept:1;
84646+#endif
84647+
84648 #ifdef CONFIG_AUDIT
84649 unsigned audit_tty;
84650 unsigned audit_tty_log_passwd;
84651@@ -750,7 +775,7 @@ struct signal_struct {
84652 struct mutex cred_guard_mutex; /* guard against foreign influences on
84653 * credential calculations
84654 * (notably. ptrace) */
84655-};
84656+} __randomize_layout;
84657
84658 /*
84659 * Bits in flags field of signal_struct.
84660@@ -803,6 +828,14 @@ struct user_struct {
84661 struct key *session_keyring; /* UID's default session keyring */
84662 #endif
84663
84664+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84665+ unsigned char kernel_banned;
84666+#endif
84667+#ifdef CONFIG_GRKERNSEC_BRUTE
84668+ unsigned char suid_banned;
84669+ unsigned long suid_ban_expires;
84670+#endif
84671+
84672 /* Hash table maintenance information */
84673 struct hlist_node uidhash_node;
84674 kuid_t uid;
84675@@ -810,7 +843,7 @@ struct user_struct {
84676 #ifdef CONFIG_PERF_EVENTS
84677 atomic_long_t locked_vm;
84678 #endif
84679-};
84680+} __randomize_layout;
84681
84682 extern int uids_sysfs_init(void);
84683
84684@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84685 struct task_struct {
84686 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84687 void *stack;
84688+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84689+ void *lowmem_stack;
84690+#endif
84691 atomic_t usage;
84692 unsigned int flags; /* per process flags, defined below */
84693 unsigned int ptrace;
84694@@ -1405,8 +1441,8 @@ struct task_struct {
84695 struct list_head thread_node;
84696
84697 struct completion *vfork_done; /* for vfork() */
84698- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84699- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84700+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84701+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84702
84703 cputime_t utime, stime, utimescaled, stimescaled;
84704 cputime_t gtime;
84705@@ -1431,11 +1467,6 @@ struct task_struct {
84706 struct task_cputime cputime_expires;
84707 struct list_head cpu_timers[3];
84708
84709-/* process credentials */
84710- const struct cred __rcu *real_cred; /* objective and real subjective task
84711- * credentials (COW) */
84712- const struct cred __rcu *cred; /* effective (overridable) subjective task
84713- * credentials (COW) */
84714 char comm[TASK_COMM_LEN]; /* executable name excluding path
84715 - access with [gs]et_task_comm (which lock
84716 it with task_lock())
84717@@ -1453,6 +1484,10 @@ struct task_struct {
84718 #endif
84719 /* CPU-specific state of this task */
84720 struct thread_struct thread;
84721+/* thread_info moved to task_struct */
84722+#ifdef CONFIG_X86
84723+ struct thread_info tinfo;
84724+#endif
84725 /* filesystem information */
84726 struct fs_struct *fs;
84727 /* open file information */
84728@@ -1527,6 +1562,10 @@ struct task_struct {
84729 gfp_t lockdep_reclaim_gfp;
84730 #endif
84731
84732+/* process credentials */
84733+ const struct cred __rcu *real_cred; /* objective and real subjective task
84734+ * credentials (COW) */
84735+
84736 /* journalling filesystem info */
84737 void *journal_info;
84738
84739@@ -1565,6 +1604,10 @@ struct task_struct {
84740 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84741 struct list_head cg_list;
84742 #endif
84743+
84744+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84745+ * credentials (COW) */
84746+
84747 #ifdef CONFIG_FUTEX
84748 struct robust_list_head __user *robust_list;
84749 #ifdef CONFIG_COMPAT
84750@@ -1673,7 +1716,7 @@ struct task_struct {
84751 * Number of functions that haven't been traced
84752 * because of depth overrun.
84753 */
84754- atomic_t trace_overrun;
84755+ atomic_unchecked_t trace_overrun;
84756 /* Pause for the tracing */
84757 atomic_t tracing_graph_pause;
84758 #endif
84759@@ -1701,7 +1744,78 @@ struct task_struct {
84760 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
84761 unsigned long task_state_change;
84762 #endif
84763-};
84764+
84765+#ifdef CONFIG_GRKERNSEC
84766+ /* grsecurity */
84767+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84768+ u64 exec_id;
84769+#endif
84770+#ifdef CONFIG_GRKERNSEC_SETXID
84771+ const struct cred *delayed_cred;
84772+#endif
84773+ struct dentry *gr_chroot_dentry;
84774+ struct acl_subject_label *acl;
84775+ struct acl_subject_label *tmpacl;
84776+ struct acl_role_label *role;
84777+ struct file *exec_file;
84778+ unsigned long brute_expires;
84779+ u16 acl_role_id;
84780+ u8 inherited;
84781+ /* is this the task that authenticated to the special role */
84782+ u8 acl_sp_role;
84783+ u8 is_writable;
84784+ u8 brute;
84785+ u8 gr_is_chrooted;
84786+#endif
84787+
84788+} __randomize_layout;
84789+
84790+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84791+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84792+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84793+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84794+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84795+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84796+
84797+#ifdef CONFIG_PAX_SOFTMODE
84798+extern int pax_softmode;
84799+#endif
84800+
84801+extern int pax_check_flags(unsigned long *);
84802+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84803+
84804+/* if tsk != current then task_lock must be held on it */
84805+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84806+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84807+{
84808+ if (likely(tsk->mm))
84809+ return tsk->mm->pax_flags;
84810+ else
84811+ return 0UL;
84812+}
84813+
84814+/* if tsk != current then task_lock must be held on it */
84815+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84816+{
84817+ if (likely(tsk->mm)) {
84818+ tsk->mm->pax_flags = flags;
84819+ return 0;
84820+ }
84821+ return -EINVAL;
84822+}
84823+#endif
84824+
84825+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84826+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84827+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84828+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84829+#endif
84830+
84831+struct path;
84832+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84833+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84834+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84835+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84836
84837 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84838 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84839@@ -1783,7 +1897,7 @@ struct pid_namespace;
84840 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
84841 struct pid_namespace *ns);
84842
84843-static inline pid_t task_pid_nr(struct task_struct *tsk)
84844+static inline pid_t task_pid_nr(const struct task_struct *tsk)
84845 {
84846 return tsk->pid;
84847 }
84848@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
84849
84850 extern void sched_clock_init(void);
84851
84852+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84853+static inline void populate_stack(void)
84854+{
84855+ struct task_struct *curtask = current;
84856+ int c;
84857+ int *ptr = curtask->stack;
84858+ int *end = curtask->stack + THREAD_SIZE;
84859+
84860+ while (ptr < end) {
84861+ c = *(volatile int *)ptr;
84862+ ptr += PAGE_SIZE/sizeof(int);
84863+ }
84864+}
84865+#else
84866+static inline void populate_stack(void)
84867+{
84868+}
84869+#endif
84870+
84871 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
84872 static inline void sched_clock_tick(void)
84873 {
84874@@ -2283,7 +2416,9 @@ void yield(void);
84875 extern struct exec_domain default_exec_domain;
84876
84877 union thread_union {
84878+#ifndef CONFIG_X86
84879 struct thread_info thread_info;
84880+#endif
84881 unsigned long stack[THREAD_SIZE/sizeof(long)];
84882 };
84883
84884@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
84885 */
84886
84887 extern struct task_struct *find_task_by_vpid(pid_t nr);
84888+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
84889 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
84890 struct pid_namespace *ns);
84891
84892@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
84893 extern void exit_itimers(struct signal_struct *);
84894 extern void flush_itimer_signals(void);
84895
84896-extern void do_group_exit(int);
84897+extern __noreturn void do_group_exit(int);
84898
84899 extern int do_execve(struct filename *,
84900 const char __user * const __user *,
84901@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
84902 #define task_stack_end_corrupted(task) \
84903 (*(end_of_stack(task)) != STACK_END_MAGIC)
84904
84905-static inline int object_is_on_stack(void *obj)
84906+static inline int object_starts_on_stack(const void *obj)
84907 {
84908- void *stack = task_stack_page(current);
84909+ const void *stack = task_stack_page(current);
84910
84911 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
84912 }
84913diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
84914index 596a0e0..bea77ec 100644
84915--- a/include/linux/sched/sysctl.h
84916+++ b/include/linux/sched/sysctl.h
84917@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
84918 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
84919
84920 extern int sysctl_max_map_count;
84921+extern unsigned long sysctl_heap_stack_gap;
84922
84923 extern unsigned int sysctl_sched_latency;
84924 extern unsigned int sysctl_sched_min_granularity;
84925diff --git a/include/linux/security.h b/include/linux/security.h
84926index ba96471..74fb3f6 100644
84927--- a/include/linux/security.h
84928+++ b/include/linux/security.h
84929@@ -27,6 +27,7 @@
84930 #include <linux/slab.h>
84931 #include <linux/err.h>
84932 #include <linux/string.h>
84933+#include <linux/grsecurity.h>
84934
84935 struct linux_binprm;
84936 struct cred;
84937@@ -116,8 +117,6 @@ struct seq_file;
84938
84939 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
84940
84941-void reset_security_ops(void);
84942-
84943 #ifdef CONFIG_MMU
84944 extern unsigned long mmap_min_addr;
84945 extern unsigned long dac_mmap_min_addr;
84946@@ -1729,7 +1728,7 @@ struct security_operations {
84947 struct audit_context *actx);
84948 void (*audit_rule_free) (void *lsmrule);
84949 #endif /* CONFIG_AUDIT */
84950-};
84951+} __randomize_layout;
84952
84953 /* prototypes */
84954 extern int security_init(void);
84955diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
84956index dc368b8..e895209 100644
84957--- a/include/linux/semaphore.h
84958+++ b/include/linux/semaphore.h
84959@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
84960 }
84961
84962 extern void down(struct semaphore *sem);
84963-extern int __must_check down_interruptible(struct semaphore *sem);
84964+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
84965 extern int __must_check down_killable(struct semaphore *sem);
84966 extern int __must_check down_trylock(struct semaphore *sem);
84967 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
84968diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
84969index cf6a9da..bd86b1f 100644
84970--- a/include/linux/seq_file.h
84971+++ b/include/linux/seq_file.h
84972@@ -27,6 +27,9 @@ struct seq_file {
84973 struct mutex lock;
84974 const struct seq_operations *op;
84975 int poll_event;
84976+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84977+ u64 exec_id;
84978+#endif
84979 #ifdef CONFIG_USER_NS
84980 struct user_namespace *user_ns;
84981 #endif
84982@@ -39,6 +42,7 @@ struct seq_operations {
84983 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
84984 int (*show) (struct seq_file *m, void *v);
84985 };
84986+typedef struct seq_operations __no_const seq_operations_no_const;
84987
84988 #define SEQ_SKIP 1
84989
84990@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
84991
84992 char *mangle_path(char *s, const char *p, const char *esc);
84993 int seq_open(struct file *, const struct seq_operations *);
84994+int seq_open_restrict(struct file *, const struct seq_operations *);
84995 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
84996 loff_t seq_lseek(struct file *, loff_t, int);
84997 int seq_release(struct inode *, struct file *);
84998@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
84999 }
85000
85001 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85002+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85003 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85004 int single_release(struct inode *, struct file *);
85005 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85006diff --git a/include/linux/shm.h b/include/linux/shm.h
85007index 6fb8016..ab4465e 100644
85008--- a/include/linux/shm.h
85009+++ b/include/linux/shm.h
85010@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85011 /* The task created the shm object. NULL if the task is dead. */
85012 struct task_struct *shm_creator;
85013 struct list_head shm_clist; /* list by creator */
85014+#ifdef CONFIG_GRKERNSEC
85015+ u64 shm_createtime;
85016+ pid_t shm_lapid;
85017+#endif
85018 };
85019
85020 /* shm_mode upper byte flags */
85021diff --git a/include/linux/signal.h b/include/linux/signal.h
85022index ab1e039..ad4229e 100644
85023--- a/include/linux/signal.h
85024+++ b/include/linux/signal.h
85025@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85026 * know it'll be handled, so that they don't get converted to
85027 * SIGKILL or just silently dropped.
85028 */
85029- kernel_sigaction(sig, (__force __sighandler_t)2);
85030+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85031 }
85032
85033 static inline void disallow_signal(int sig)
85034diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85035index 85ab7d7..eb1585a 100644
85036--- a/include/linux/skbuff.h
85037+++ b/include/linux/skbuff.h
85038@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85039 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85040 int node);
85041 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85042-static inline struct sk_buff *alloc_skb(unsigned int size,
85043+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85044 gfp_t priority)
85045 {
85046 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85047@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85048 return skb->inner_transport_header - skb->inner_network_header;
85049 }
85050
85051-static inline int skb_network_offset(const struct sk_buff *skb)
85052+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85053 {
85054 return skb_network_header(skb) - skb->data;
85055 }
85056@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85057 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85058 */
85059 #ifndef NET_SKB_PAD
85060-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85061+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85062 #endif
85063
85064 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85065@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85066 int *err);
85067 unsigned int datagram_poll(struct file *file, struct socket *sock,
85068 struct poll_table_struct *wait);
85069-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85070+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85071 struct iov_iter *to, int size);
85072-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85073+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85074 struct msghdr *msg, int size)
85075 {
85076 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85077@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85078 nf_bridge_put(skb->nf_bridge);
85079 skb->nf_bridge = NULL;
85080 #endif
85081+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85082+ skb->nf_trace = 0;
85083+#endif
85084 }
85085
85086 static inline void nf_reset_trace(struct sk_buff *skb)
85087diff --git a/include/linux/slab.h b/include/linux/slab.h
85088index 9a139b6..aab37b4 100644
85089--- a/include/linux/slab.h
85090+++ b/include/linux/slab.h
85091@@ -14,15 +14,29 @@
85092 #include <linux/gfp.h>
85093 #include <linux/types.h>
85094 #include <linux/workqueue.h>
85095-
85096+#include <linux/err.h>
85097
85098 /*
85099 * Flags to pass to kmem_cache_create().
85100 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85101 */
85102 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85103+
85104+#ifdef CONFIG_PAX_USERCOPY_SLABS
85105+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85106+#else
85107+#define SLAB_USERCOPY 0x00000000UL
85108+#endif
85109+
85110 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85111 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85112+
85113+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85114+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85115+#else
85116+#define SLAB_NO_SANITIZE 0x00000000UL
85117+#endif
85118+
85119 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85120 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85121 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85122@@ -98,10 +112,13 @@
85123 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85124 * Both make kfree a no-op.
85125 */
85126-#define ZERO_SIZE_PTR ((void *)16)
85127+#define ZERO_SIZE_PTR \
85128+({ \
85129+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85130+ (void *)(-MAX_ERRNO-1L); \
85131+})
85132
85133-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85134- (unsigned long)ZERO_SIZE_PTR)
85135+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85136
85137 #include <linux/kmemleak.h>
85138
85139@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85140 void kfree(const void *);
85141 void kzfree(const void *);
85142 size_t ksize(const void *);
85143+const char *check_heap_object(const void *ptr, unsigned long n);
85144+bool is_usercopy_object(const void *ptr);
85145
85146 /*
85147 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85148@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85149 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85150 #endif
85151
85152+#ifdef CONFIG_PAX_USERCOPY_SLABS
85153+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85154+#endif
85155+
85156 /*
85157 * Figure out which kmalloc slab an allocation of a certain size
85158 * belongs to.
85159@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85160 * 2 = 120 .. 192 bytes
85161 * n = 2^(n-1) .. 2^n -1
85162 */
85163-static __always_inline int kmalloc_index(size_t size)
85164+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85165 {
85166 if (!size)
85167 return 0;
85168@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85169 }
85170 #endif /* !CONFIG_SLOB */
85171
85172-void *__kmalloc(size_t size, gfp_t flags);
85173+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85174 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85175
85176 #ifdef CONFIG_NUMA
85177-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85178+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85179 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85180 #else
85181-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85182+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85183 {
85184 return __kmalloc(size, flags);
85185 }
85186diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85187index b869d16..1453c73 100644
85188--- a/include/linux/slab_def.h
85189+++ b/include/linux/slab_def.h
85190@@ -40,7 +40,7 @@ struct kmem_cache {
85191 /* 4) cache creation/removal */
85192 const char *name;
85193 struct list_head list;
85194- int refcount;
85195+ atomic_t refcount;
85196 int object_size;
85197 int align;
85198
85199@@ -56,10 +56,14 @@ struct kmem_cache {
85200 unsigned long node_allocs;
85201 unsigned long node_frees;
85202 unsigned long node_overflow;
85203- atomic_t allochit;
85204- atomic_t allocmiss;
85205- atomic_t freehit;
85206- atomic_t freemiss;
85207+ atomic_unchecked_t allochit;
85208+ atomic_unchecked_t allocmiss;
85209+ atomic_unchecked_t freehit;
85210+ atomic_unchecked_t freemiss;
85211+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85212+ atomic_unchecked_t sanitized;
85213+ atomic_unchecked_t not_sanitized;
85214+#endif
85215
85216 /*
85217 * If debugging is enabled, then the allocator can add additional
85218diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85219index d82abd4..408c3a0 100644
85220--- a/include/linux/slub_def.h
85221+++ b/include/linux/slub_def.h
85222@@ -74,7 +74,7 @@ struct kmem_cache {
85223 struct kmem_cache_order_objects max;
85224 struct kmem_cache_order_objects min;
85225 gfp_t allocflags; /* gfp flags to use on each alloc */
85226- int refcount; /* Refcount for slab cache destroy */
85227+ atomic_t refcount; /* Refcount for slab cache destroy */
85228 void (*ctor)(void *);
85229 int inuse; /* Offset to metadata */
85230 int align; /* Alignment */
85231diff --git a/include/linux/smp.h b/include/linux/smp.h
85232index 93dff5f..933c561 100644
85233--- a/include/linux/smp.h
85234+++ b/include/linux/smp.h
85235@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85236 #endif
85237
85238 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85239+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85240 #define put_cpu() preempt_enable()
85241+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85242
85243 /*
85244 * Callback to arch code if there's nosmp or maxcpus=0 on the
85245diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85246index 46cca4c..3323536 100644
85247--- a/include/linux/sock_diag.h
85248+++ b/include/linux/sock_diag.h
85249@@ -11,7 +11,7 @@ struct sock;
85250 struct sock_diag_handler {
85251 __u8 family;
85252 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85253-};
85254+} __do_const;
85255
85256 int sock_diag_register(const struct sock_diag_handler *h);
85257 void sock_diag_unregister(const struct sock_diag_handler *h);
85258diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85259index 680f9a3..f13aeb0 100644
85260--- a/include/linux/sonet.h
85261+++ b/include/linux/sonet.h
85262@@ -7,7 +7,7 @@
85263 #include <uapi/linux/sonet.h>
85264
85265 struct k_sonet_stats {
85266-#define __HANDLE_ITEM(i) atomic_t i
85267+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85268 __SONET_ITEMS
85269 #undef __HANDLE_ITEM
85270 };
85271diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85272index 07d8e53..dc934c9 100644
85273--- a/include/linux/sunrpc/addr.h
85274+++ b/include/linux/sunrpc/addr.h
85275@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85276 {
85277 switch (sap->sa_family) {
85278 case AF_INET:
85279- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85280+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85281 case AF_INET6:
85282- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85283+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85284 }
85285 return 0;
85286 }
85287@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85288 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85289 const struct sockaddr *src)
85290 {
85291- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85292+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85293 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85294
85295 dsin->sin_family = ssin->sin_family;
85296@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85297 if (sa->sa_family != AF_INET6)
85298 return 0;
85299
85300- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85301+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85302 }
85303
85304 #endif /* _LINUX_SUNRPC_ADDR_H */
85305diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85306index 598ba80..d90cba6 100644
85307--- a/include/linux/sunrpc/clnt.h
85308+++ b/include/linux/sunrpc/clnt.h
85309@@ -100,7 +100,7 @@ struct rpc_procinfo {
85310 unsigned int p_timer; /* Which RTT timer to use */
85311 u32 p_statidx; /* Which procedure to account */
85312 const char * p_name; /* name of procedure */
85313-};
85314+} __do_const;
85315
85316 #ifdef __KERNEL__
85317
85318diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85319index 6f22cfe..9fd0909 100644
85320--- a/include/linux/sunrpc/svc.h
85321+++ b/include/linux/sunrpc/svc.h
85322@@ -420,7 +420,7 @@ struct svc_procedure {
85323 unsigned int pc_count; /* call count */
85324 unsigned int pc_cachetype; /* cache info (NFS) */
85325 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85326-};
85327+} __do_const;
85328
85329 /*
85330 * Function prototypes.
85331diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85332index 975da75..318c083 100644
85333--- a/include/linux/sunrpc/svc_rdma.h
85334+++ b/include/linux/sunrpc/svc_rdma.h
85335@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85336 extern unsigned int svcrdma_max_requests;
85337 extern unsigned int svcrdma_max_req_size;
85338
85339-extern atomic_t rdma_stat_recv;
85340-extern atomic_t rdma_stat_read;
85341-extern atomic_t rdma_stat_write;
85342-extern atomic_t rdma_stat_sq_starve;
85343-extern atomic_t rdma_stat_rq_starve;
85344-extern atomic_t rdma_stat_rq_poll;
85345-extern atomic_t rdma_stat_rq_prod;
85346-extern atomic_t rdma_stat_sq_poll;
85347-extern atomic_t rdma_stat_sq_prod;
85348+extern atomic_unchecked_t rdma_stat_recv;
85349+extern atomic_unchecked_t rdma_stat_read;
85350+extern atomic_unchecked_t rdma_stat_write;
85351+extern atomic_unchecked_t rdma_stat_sq_starve;
85352+extern atomic_unchecked_t rdma_stat_rq_starve;
85353+extern atomic_unchecked_t rdma_stat_rq_poll;
85354+extern atomic_unchecked_t rdma_stat_rq_prod;
85355+extern atomic_unchecked_t rdma_stat_sq_poll;
85356+extern atomic_unchecked_t rdma_stat_sq_prod;
85357
85358 #define RPCRDMA_VERSION 1
85359
85360diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85361index 8d71d65..f79586e 100644
85362--- a/include/linux/sunrpc/svcauth.h
85363+++ b/include/linux/sunrpc/svcauth.h
85364@@ -120,7 +120,7 @@ struct auth_ops {
85365 int (*release)(struct svc_rqst *rq);
85366 void (*domain_release)(struct auth_domain *);
85367 int (*set_client)(struct svc_rqst *rq);
85368-};
85369+} __do_const;
85370
85371 #define SVC_GARBAGE 1
85372 #define SVC_SYSERR 2
85373diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85374index e7a018e..49f8b17 100644
85375--- a/include/linux/swiotlb.h
85376+++ b/include/linux/swiotlb.h
85377@@ -60,7 +60,8 @@ extern void
85378
85379 extern void
85380 swiotlb_free_coherent(struct device *hwdev, size_t size,
85381- void *vaddr, dma_addr_t dma_handle);
85382+ void *vaddr, dma_addr_t dma_handle,
85383+ struct dma_attrs *attrs);
85384
85385 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85386 unsigned long offset, size_t size,
85387diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85388index 85893d7..4923581 100644
85389--- a/include/linux/syscalls.h
85390+++ b/include/linux/syscalls.h
85391@@ -99,10 +99,16 @@ union bpf_attr;
85392 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85393
85394 #define __SC_DECL(t, a) t a
85395+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85396 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85397 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85398 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85399-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85400+#define __SC_LONG(t, a) __typeof( \
85401+ __builtin_choose_expr( \
85402+ sizeof(t) > sizeof(int), \
85403+ (t) 0, \
85404+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85405+ )) a
85406 #define __SC_CAST(t, a) (t) a
85407 #define __SC_ARGS(t, a) a
85408 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85409@@ -384,11 +390,11 @@ asmlinkage long sys_sync(void);
85410 asmlinkage long sys_fsync(unsigned int fd);
85411 asmlinkage long sys_fdatasync(unsigned int fd);
85412 asmlinkage long sys_bdflush(int func, long data);
85413-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85414- char __user *type, unsigned long flags,
85415+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85416+ const char __user *type, unsigned long flags,
85417 void __user *data);
85418-asmlinkage long sys_umount(char __user *name, int flags);
85419-asmlinkage long sys_oldumount(char __user *name);
85420+asmlinkage long sys_umount(const char __user *name, int flags);
85421+asmlinkage long sys_oldumount(const char __user *name);
85422 asmlinkage long sys_truncate(const char __user *path, long length);
85423 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85424 asmlinkage long sys_stat(const char __user *filename,
85425@@ -600,7 +606,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85426 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85427 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85428 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85429- struct sockaddr __user *, int);
85430+ struct sockaddr __user *, int) __intentional_overflow(0);
85431 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85432 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85433 unsigned int vlen, unsigned flags);
85434diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85435index 27b3b0b..e093dd9 100644
85436--- a/include/linux/syscore_ops.h
85437+++ b/include/linux/syscore_ops.h
85438@@ -16,7 +16,7 @@ struct syscore_ops {
85439 int (*suspend)(void);
85440 void (*resume)(void);
85441 void (*shutdown)(void);
85442-};
85443+} __do_const;
85444
85445 extern void register_syscore_ops(struct syscore_ops *ops);
85446 extern void unregister_syscore_ops(struct syscore_ops *ops);
85447diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85448index b7361f8..341a15a 100644
85449--- a/include/linux/sysctl.h
85450+++ b/include/linux/sysctl.h
85451@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85452
85453 extern int proc_dostring(struct ctl_table *, int,
85454 void __user *, size_t *, loff_t *);
85455+extern int proc_dostring_modpriv(struct ctl_table *, int,
85456+ void __user *, size_t *, loff_t *);
85457 extern int proc_dointvec(struct ctl_table *, int,
85458 void __user *, size_t *, loff_t *);
85459 extern int proc_dointvec_minmax(struct ctl_table *, int,
85460@@ -113,7 +115,8 @@ struct ctl_table
85461 struct ctl_table_poll *poll;
85462 void *extra1;
85463 void *extra2;
85464-};
85465+} __do_const __randomize_layout;
85466+typedef struct ctl_table __no_const ctl_table_no_const;
85467
85468 struct ctl_node {
85469 struct rb_node node;
85470diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85471index ddad161..a3efd26 100644
85472--- a/include/linux/sysfs.h
85473+++ b/include/linux/sysfs.h
85474@@ -34,7 +34,8 @@ struct attribute {
85475 struct lock_class_key *key;
85476 struct lock_class_key skey;
85477 #endif
85478-};
85479+} __do_const;
85480+typedef struct attribute __no_const attribute_no_const;
85481
85482 /**
85483 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85484@@ -63,7 +64,8 @@ struct attribute_group {
85485 struct attribute *, int);
85486 struct attribute **attrs;
85487 struct bin_attribute **bin_attrs;
85488-};
85489+} __do_const;
85490+typedef struct attribute_group __no_const attribute_group_no_const;
85491
85492 /**
85493 * Use these macros to make defining attributes easier. See include/linux/device.h
85494@@ -137,7 +139,8 @@ struct bin_attribute {
85495 char *, loff_t, size_t);
85496 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85497 struct vm_area_struct *vma);
85498-};
85499+} __do_const;
85500+typedef struct bin_attribute __no_const bin_attribute_no_const;
85501
85502 /**
85503 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85504diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85505index 387fa7d..3fcde6b 100644
85506--- a/include/linux/sysrq.h
85507+++ b/include/linux/sysrq.h
85508@@ -16,6 +16,7 @@
85509
85510 #include <linux/errno.h>
85511 #include <linux/types.h>
85512+#include <linux/compiler.h>
85513
85514 /* Possible values of bitmask for enabling sysrq functions */
85515 /* 0x0001 is reserved for enable everything */
85516@@ -33,7 +34,7 @@ struct sysrq_key_op {
85517 char *help_msg;
85518 char *action_msg;
85519 int enable_mask;
85520-};
85521+} __do_const;
85522
85523 #ifdef CONFIG_MAGIC_SYSRQ
85524
85525diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85526index ff307b5..f1a4468 100644
85527--- a/include/linux/thread_info.h
85528+++ b/include/linux/thread_info.h
85529@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85530 #error "no set_restore_sigmask() provided and default one won't work"
85531 #endif
85532
85533+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85534+
85535+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85536+{
85537+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85538+}
85539+
85540 #endif /* __KERNEL__ */
85541
85542 #endif /* _LINUX_THREAD_INFO_H */
85543diff --git a/include/linux/tty.h b/include/linux/tty.h
85544index 7d66ae5..0327149 100644
85545--- a/include/linux/tty.h
85546+++ b/include/linux/tty.h
85547@@ -202,7 +202,7 @@ struct tty_port {
85548 const struct tty_port_operations *ops; /* Port operations */
85549 spinlock_t lock; /* Lock protecting tty field */
85550 int blocked_open; /* Waiting to open */
85551- int count; /* Usage count */
85552+ atomic_t count; /* Usage count */
85553 wait_queue_head_t open_wait; /* Open waiters */
85554 wait_queue_head_t close_wait; /* Close waiters */
85555 wait_queue_head_t delta_msr_wait; /* Modem status change */
85556@@ -290,7 +290,7 @@ struct tty_struct {
85557 /* If the tty has a pending do_SAK, queue it here - akpm */
85558 struct work_struct SAK_work;
85559 struct tty_port *port;
85560-};
85561+} __randomize_layout;
85562
85563 /* Each of a tty's open files has private_data pointing to tty_file_private */
85564 struct tty_file_private {
85565@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85566 struct tty_struct *tty, struct file *filp);
85567 static inline int tty_port_users(struct tty_port *port)
85568 {
85569- return port->count + port->blocked_open;
85570+ return atomic_read(&port->count) + port->blocked_open;
85571 }
85572
85573 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
85574diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85575index 92e337c..f46757b 100644
85576--- a/include/linux/tty_driver.h
85577+++ b/include/linux/tty_driver.h
85578@@ -291,7 +291,7 @@ struct tty_operations {
85579 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85580 #endif
85581 const struct file_operations *proc_fops;
85582-};
85583+} __do_const __randomize_layout;
85584
85585 struct tty_driver {
85586 int magic; /* magic number for this structure */
85587@@ -325,7 +325,7 @@ struct tty_driver {
85588
85589 const struct tty_operations *ops;
85590 struct list_head tty_drivers;
85591-};
85592+} __randomize_layout;
85593
85594 extern struct list_head tty_drivers;
85595
85596diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85597index 00c9d68..bc0188b 100644
85598--- a/include/linux/tty_ldisc.h
85599+++ b/include/linux/tty_ldisc.h
85600@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85601
85602 struct module *owner;
85603
85604- int refcount;
85605+ atomic_t refcount;
85606 };
85607
85608 struct tty_ldisc {
85609diff --git a/include/linux/types.h b/include/linux/types.h
85610index a0bb704..f511c77 100644
85611--- a/include/linux/types.h
85612+++ b/include/linux/types.h
85613@@ -177,10 +177,26 @@ typedef struct {
85614 int counter;
85615 } atomic_t;
85616
85617+#ifdef CONFIG_PAX_REFCOUNT
85618+typedef struct {
85619+ int counter;
85620+} atomic_unchecked_t;
85621+#else
85622+typedef atomic_t atomic_unchecked_t;
85623+#endif
85624+
85625 #ifdef CONFIG_64BIT
85626 typedef struct {
85627 long counter;
85628 } atomic64_t;
85629+
85630+#ifdef CONFIG_PAX_REFCOUNT
85631+typedef struct {
85632+ long counter;
85633+} atomic64_unchecked_t;
85634+#else
85635+typedef atomic64_t atomic64_unchecked_t;
85636+#endif
85637 #endif
85638
85639 struct list_head {
85640diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85641index ecd3319..8a36ded 100644
85642--- a/include/linux/uaccess.h
85643+++ b/include/linux/uaccess.h
85644@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85645 long ret; \
85646 mm_segment_t old_fs = get_fs(); \
85647 \
85648- set_fs(KERNEL_DS); \
85649 pagefault_disable(); \
85650- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85651- pagefault_enable(); \
85652+ set_fs(KERNEL_DS); \
85653+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85654 set_fs(old_fs); \
85655+ pagefault_enable(); \
85656 ret; \
85657 })
85658
85659diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85660index 2d1f9b6..d7a9fce 100644
85661--- a/include/linux/uidgid.h
85662+++ b/include/linux/uidgid.h
85663@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85664
85665 #endif /* CONFIG_USER_NS */
85666
85667+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85668+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85669+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85670+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85671+
85672 #endif /* _LINUX_UIDGID_H */
85673diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85674index 32c0e83..671eb35 100644
85675--- a/include/linux/uio_driver.h
85676+++ b/include/linux/uio_driver.h
85677@@ -67,7 +67,7 @@ struct uio_device {
85678 struct module *owner;
85679 struct device *dev;
85680 int minor;
85681- atomic_t event;
85682+ atomic_unchecked_t event;
85683 struct fasync_struct *async_queue;
85684 wait_queue_head_t wait;
85685 struct uio_info *info;
85686diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85687index 99c1b4d..562e6f3 100644
85688--- a/include/linux/unaligned/access_ok.h
85689+++ b/include/linux/unaligned/access_ok.h
85690@@ -4,34 +4,34 @@
85691 #include <linux/kernel.h>
85692 #include <asm/byteorder.h>
85693
85694-static inline u16 get_unaligned_le16(const void *p)
85695+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85696 {
85697- return le16_to_cpup((__le16 *)p);
85698+ return le16_to_cpup((const __le16 *)p);
85699 }
85700
85701-static inline u32 get_unaligned_le32(const void *p)
85702+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85703 {
85704- return le32_to_cpup((__le32 *)p);
85705+ return le32_to_cpup((const __le32 *)p);
85706 }
85707
85708-static inline u64 get_unaligned_le64(const void *p)
85709+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85710 {
85711- return le64_to_cpup((__le64 *)p);
85712+ return le64_to_cpup((const __le64 *)p);
85713 }
85714
85715-static inline u16 get_unaligned_be16(const void *p)
85716+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85717 {
85718- return be16_to_cpup((__be16 *)p);
85719+ return be16_to_cpup((const __be16 *)p);
85720 }
85721
85722-static inline u32 get_unaligned_be32(const void *p)
85723+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85724 {
85725- return be32_to_cpup((__be32 *)p);
85726+ return be32_to_cpup((const __be32 *)p);
85727 }
85728
85729-static inline u64 get_unaligned_be64(const void *p)
85730+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85731 {
85732- return be64_to_cpup((__be64 *)p);
85733+ return be64_to_cpup((const __be64 *)p);
85734 }
85735
85736 static inline void put_unaligned_le16(u16 val, void *p)
85737diff --git a/include/linux/usb.h b/include/linux/usb.h
85738index 058a769..c17a1c2c 100644
85739--- a/include/linux/usb.h
85740+++ b/include/linux/usb.h
85741@@ -566,7 +566,7 @@ struct usb_device {
85742 int maxchild;
85743
85744 u32 quirks;
85745- atomic_t urbnum;
85746+ atomic_unchecked_t urbnum;
85747
85748 unsigned long active_duration;
85749
85750@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85751
85752 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85753 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85754- void *data, __u16 size, int timeout);
85755+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85756 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85757 void *data, int len, int *actual_length, int timeout);
85758 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85759diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85760index 9fd9e48..e2c5f35 100644
85761--- a/include/linux/usb/renesas_usbhs.h
85762+++ b/include/linux/usb/renesas_usbhs.h
85763@@ -39,7 +39,7 @@ enum {
85764 */
85765 struct renesas_usbhs_driver_callback {
85766 int (*notify_hotplug)(struct platform_device *pdev);
85767-};
85768+} __no_const;
85769
85770 /*
85771 * callback functions for platform
85772diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85773index 8297e5b..0dfae27 100644
85774--- a/include/linux/user_namespace.h
85775+++ b/include/linux/user_namespace.h
85776@@ -39,7 +39,7 @@ struct user_namespace {
85777 struct key *persistent_keyring_register;
85778 struct rw_semaphore persistent_keyring_register_sem;
85779 #endif
85780-};
85781+} __randomize_layout;
85782
85783 extern struct user_namespace init_user_ns;
85784
85785diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85786index 5093f58..c103e58 100644
85787--- a/include/linux/utsname.h
85788+++ b/include/linux/utsname.h
85789@@ -25,7 +25,7 @@ struct uts_namespace {
85790 struct new_utsname name;
85791 struct user_namespace *user_ns;
85792 struct ns_common ns;
85793-};
85794+} __randomize_layout;
85795 extern struct uts_namespace init_uts_ns;
85796
85797 #ifdef CONFIG_UTS_NS
85798diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85799index 6f8fbcf..4efc177 100644
85800--- a/include/linux/vermagic.h
85801+++ b/include/linux/vermagic.h
85802@@ -25,9 +25,42 @@
85803 #define MODULE_ARCH_VERMAGIC ""
85804 #endif
85805
85806+#ifdef CONFIG_PAX_REFCOUNT
85807+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85808+#else
85809+#define MODULE_PAX_REFCOUNT ""
85810+#endif
85811+
85812+#ifdef CONSTIFY_PLUGIN
85813+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85814+#else
85815+#define MODULE_CONSTIFY_PLUGIN ""
85816+#endif
85817+
85818+#ifdef STACKLEAK_PLUGIN
85819+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85820+#else
85821+#define MODULE_STACKLEAK_PLUGIN ""
85822+#endif
85823+
85824+#ifdef RANDSTRUCT_PLUGIN
85825+#include <generated/randomize_layout_hash.h>
85826+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
85827+#else
85828+#define MODULE_RANDSTRUCT_PLUGIN
85829+#endif
85830+
85831+#ifdef CONFIG_GRKERNSEC
85832+#define MODULE_GRSEC "GRSEC "
85833+#else
85834+#define MODULE_GRSEC ""
85835+#endif
85836+
85837 #define VERMAGIC_STRING \
85838 UTS_RELEASE " " \
85839 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
85840 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
85841- MODULE_ARCH_VERMAGIC
85842+ MODULE_ARCH_VERMAGIC \
85843+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
85844+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
85845
85846diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
85847index b483abd..af305ad 100644
85848--- a/include/linux/vga_switcheroo.h
85849+++ b/include/linux/vga_switcheroo.h
85850@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
85851
85852 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
85853
85854-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
85855+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
85856 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
85857-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
85858+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
85859 #else
85860
85861 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
85862@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
85863
85864 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
85865
85866-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85867+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85868 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
85869-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
85870+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
85871
85872 #endif
85873 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
85874diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
85875index b87696f..1d11de7 100644
85876--- a/include/linux/vmalloc.h
85877+++ b/include/linux/vmalloc.h
85878@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
85879 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
85880 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
85881 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
85882+
85883+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85884+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
85885+#endif
85886+
85887 /* bits [20..32] reserved for arch specific ioremap internals */
85888
85889 /*
85890@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
85891 unsigned long flags, pgprot_t prot);
85892 extern void vunmap(const void *addr);
85893
85894+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85895+extern void unmap_process_stacks(struct task_struct *task);
85896+#endif
85897+
85898 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
85899 unsigned long uaddr, void *kaddr,
85900 unsigned long size);
85901@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
85902
85903 /* for /dev/kmem */
85904 extern long vread(char *buf, char *addr, unsigned long count);
85905-extern long vwrite(char *buf, char *addr, unsigned long count);
85906+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
85907
85908 /*
85909 * Internals. Dont't use..
85910diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
85911index 82e7db7..f8ce3d0 100644
85912--- a/include/linux/vmstat.h
85913+++ b/include/linux/vmstat.h
85914@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
85915 /*
85916 * Zone based page accounting with per cpu differentials.
85917 */
85918-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85919+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85920
85921 static inline void zone_page_state_add(long x, struct zone *zone,
85922 enum zone_stat_item item)
85923 {
85924- atomic_long_add(x, &zone->vm_stat[item]);
85925- atomic_long_add(x, &vm_stat[item]);
85926+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
85927+ atomic_long_add_unchecked(x, &vm_stat[item]);
85928 }
85929
85930-static inline unsigned long global_page_state(enum zone_stat_item item)
85931+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
85932 {
85933- long x = atomic_long_read(&vm_stat[item]);
85934+ long x = atomic_long_read_unchecked(&vm_stat[item]);
85935 #ifdef CONFIG_SMP
85936 if (x < 0)
85937 x = 0;
85938@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
85939 return x;
85940 }
85941
85942-static inline unsigned long zone_page_state(struct zone *zone,
85943+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
85944 enum zone_stat_item item)
85945 {
85946- long x = atomic_long_read(&zone->vm_stat[item]);
85947+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85948 #ifdef CONFIG_SMP
85949 if (x < 0)
85950 x = 0;
85951@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
85952 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
85953 enum zone_stat_item item)
85954 {
85955- long x = atomic_long_read(&zone->vm_stat[item]);
85956+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
85957
85958 #ifdef CONFIG_SMP
85959 int cpu;
85960@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
85961
85962 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
85963 {
85964- atomic_long_inc(&zone->vm_stat[item]);
85965- atomic_long_inc(&vm_stat[item]);
85966+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
85967+ atomic_long_inc_unchecked(&vm_stat[item]);
85968 }
85969
85970 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
85971 {
85972- atomic_long_dec(&zone->vm_stat[item]);
85973- atomic_long_dec(&vm_stat[item]);
85974+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
85975+ atomic_long_dec_unchecked(&vm_stat[item]);
85976 }
85977
85978 static inline void __inc_zone_page_state(struct page *page,
85979diff --git a/include/linux/xattr.h b/include/linux/xattr.h
85980index 91b0a68..0e9adf6 100644
85981--- a/include/linux/xattr.h
85982+++ b/include/linux/xattr.h
85983@@ -28,7 +28,7 @@ struct xattr_handler {
85984 size_t size, int handler_flags);
85985 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
85986 size_t size, int flags, int handler_flags);
85987-};
85988+} __do_const;
85989
85990 struct xattr {
85991 const char *name;
85992@@ -37,6 +37,9 @@ struct xattr {
85993 };
85994
85995 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
85996+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
85997+ssize_t pax_getxattr(struct dentry *, void *, size_t);
85998+#endif
85999 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86000 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86001 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86002diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86003index 92dbbd3..13ab0b3 100644
86004--- a/include/linux/zlib.h
86005+++ b/include/linux/zlib.h
86006@@ -31,6 +31,7 @@
86007 #define _ZLIB_H
86008
86009 #include <linux/zconf.h>
86010+#include <linux/compiler.h>
86011
86012 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86013 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86014@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86015
86016 /* basic functions */
86017
86018-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86019+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86020 /*
86021 Returns the number of bytes that needs to be allocated for a per-
86022 stream workspace with the specified parameters. A pointer to this
86023diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86024index eb76cfd..9fd0e7c 100644
86025--- a/include/media/v4l2-dev.h
86026+++ b/include/media/v4l2-dev.h
86027@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86028 int (*mmap) (struct file *, struct vm_area_struct *);
86029 int (*open) (struct file *);
86030 int (*release) (struct file *);
86031-};
86032+} __do_const;
86033
86034 /*
86035 * Newer version of video_device, handled by videodev2.c
86036diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86037index ffb69da..040393e 100644
86038--- a/include/media/v4l2-device.h
86039+++ b/include/media/v4l2-device.h
86040@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86041 this function returns 0. If the name ends with a digit (e.g. cx18),
86042 then the name will be set to cx18-0 since cx180 looks really odd. */
86043 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86044- atomic_t *instance);
86045+ atomic_unchecked_t *instance);
86046
86047 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86048 Since the parent disappears this ensures that v4l2_dev doesn't have an
86049diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86050index 2a25dec..bf6dd8a 100644
86051--- a/include/net/9p/transport.h
86052+++ b/include/net/9p/transport.h
86053@@ -62,7 +62,7 @@ struct p9_trans_module {
86054 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86055 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86056 char *, char *, int , int, int, int);
86057-};
86058+} __do_const;
86059
86060 void v9fs_register_trans(struct p9_trans_module *m);
86061 void v9fs_unregister_trans(struct p9_trans_module *m);
86062diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86063index a175ba4..196eb8242 100644
86064--- a/include/net/af_unix.h
86065+++ b/include/net/af_unix.h
86066@@ -36,7 +36,7 @@ struct unix_skb_parms {
86067 u32 secid; /* Security ID */
86068 #endif
86069 u32 consumed;
86070-};
86071+} __randomize_layout;
86072
86073 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86074 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86075diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86076index d1bb342..e12f7d2 100644
86077--- a/include/net/bluetooth/l2cap.h
86078+++ b/include/net/bluetooth/l2cap.h
86079@@ -608,7 +608,7 @@ struct l2cap_ops {
86080 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86081 unsigned long hdr_len,
86082 unsigned long len, int nb);
86083-};
86084+} __do_const;
86085
86086 struct l2cap_conn {
86087 struct hci_conn *hcon;
86088diff --git a/include/net/bonding.h b/include/net/bonding.h
86089index 983a94b..7aa9b16 100644
86090--- a/include/net/bonding.h
86091+++ b/include/net/bonding.h
86092@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86093
86094 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86095 {
86096- atomic_long_inc(&dev->tx_dropped);
86097+ atomic_long_inc_unchecked(&dev->tx_dropped);
86098 dev_kfree_skb_any(skb);
86099 }
86100
86101diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86102index f2ae33d..c457cf0 100644
86103--- a/include/net/caif/cfctrl.h
86104+++ b/include/net/caif/cfctrl.h
86105@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86106 void (*radioset_rsp)(void);
86107 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86108 struct cflayer *client_layer);
86109-};
86110+} __no_const;
86111
86112 /* Link Setup Parameters for CAIF-Links. */
86113 struct cfctrl_link_param {
86114@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86115 struct cfctrl {
86116 struct cfsrvl serv;
86117 struct cfctrl_rsp res;
86118- atomic_t req_seq_no;
86119- atomic_t rsp_seq_no;
86120+ atomic_unchecked_t req_seq_no;
86121+ atomic_unchecked_t rsp_seq_no;
86122 struct list_head list;
86123 /* Protects from simultaneous access to first_req list */
86124 spinlock_t info_list_lock;
86125diff --git a/include/net/flow.h b/include/net/flow.h
86126index 8109a15..504466d 100644
86127--- a/include/net/flow.h
86128+++ b/include/net/flow.h
86129@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86130
86131 void flow_cache_flush(struct net *net);
86132 void flow_cache_flush_deferred(struct net *net);
86133-extern atomic_t flow_cache_genid;
86134+extern atomic_unchecked_t flow_cache_genid;
86135
86136 #endif
86137diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86138index 6c92415..3a352d8 100644
86139--- a/include/net/genetlink.h
86140+++ b/include/net/genetlink.h
86141@@ -130,7 +130,7 @@ struct genl_ops {
86142 u8 cmd;
86143 u8 internal_flags;
86144 u8 flags;
86145-};
86146+} __do_const;
86147
86148 int __genl_register_family(struct genl_family *family);
86149
86150diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86151index 734d9b5..48a9a4b 100644
86152--- a/include/net/gro_cells.h
86153+++ b/include/net/gro_cells.h
86154@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86155 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86156
86157 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86158- atomic_long_inc(&dev->rx_dropped);
86159+ atomic_long_inc_unchecked(&dev->rx_dropped);
86160 kfree_skb(skb);
86161 return;
86162 }
86163diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86164index 848e85c..051c7de 100644
86165--- a/include/net/inet_connection_sock.h
86166+++ b/include/net/inet_connection_sock.h
86167@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86168 int (*bind_conflict)(const struct sock *sk,
86169 const struct inet_bind_bucket *tb, bool relax);
86170 void (*mtu_reduced)(struct sock *sk);
86171-};
86172+} __do_const;
86173
86174 /** inet_connection_sock - INET connection oriented sock
86175 *
86176diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86177index 80479ab..0c3f647 100644
86178--- a/include/net/inetpeer.h
86179+++ b/include/net/inetpeer.h
86180@@ -47,7 +47,7 @@ struct inet_peer {
86181 */
86182 union {
86183 struct {
86184- atomic_t rid; /* Frag reception counter */
86185+ atomic_unchecked_t rid; /* Frag reception counter */
86186 };
86187 struct rcu_head rcu;
86188 struct inet_peer *gc_next;
86189diff --git a/include/net/ip.h b/include/net/ip.h
86190index 09cf5ae..ab62fcf 100644
86191--- a/include/net/ip.h
86192+++ b/include/net/ip.h
86193@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86194 }
86195 }
86196
86197-u32 ip_idents_reserve(u32 hash, int segs);
86198+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86199 void __ip_select_ident(struct iphdr *iph, int segs);
86200
86201 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86202diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86203index 09a819e..3ab9e14 100644
86204--- a/include/net/ip_fib.h
86205+++ b/include/net/ip_fib.h
86206@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86207
86208 #define FIB_RES_SADDR(net, res) \
86209 ((FIB_RES_NH(res).nh_saddr_genid == \
86210- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86211+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86212 FIB_RES_NH(res).nh_saddr : \
86213 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86214 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86215diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86216index 615b20b..fd4cbd8 100644
86217--- a/include/net/ip_vs.h
86218+++ b/include/net/ip_vs.h
86219@@ -534,7 +534,7 @@ struct ip_vs_conn {
86220 struct ip_vs_conn *control; /* Master control connection */
86221 atomic_t n_control; /* Number of controlled ones */
86222 struct ip_vs_dest *dest; /* real server */
86223- atomic_t in_pkts; /* incoming packet counter */
86224+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86225
86226 /* Packet transmitter for different forwarding methods. If it
86227 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86228@@ -682,7 +682,7 @@ struct ip_vs_dest {
86229 __be16 port; /* port number of the server */
86230 union nf_inet_addr addr; /* IP address of the server */
86231 volatile unsigned int flags; /* dest status flags */
86232- atomic_t conn_flags; /* flags to copy to conn */
86233+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86234 atomic_t weight; /* server weight */
86235
86236 atomic_t refcnt; /* reference counter */
86237@@ -928,11 +928,11 @@ struct netns_ipvs {
86238 /* ip_vs_lblc */
86239 int sysctl_lblc_expiration;
86240 struct ctl_table_header *lblc_ctl_header;
86241- struct ctl_table *lblc_ctl_table;
86242+ ctl_table_no_const *lblc_ctl_table;
86243 /* ip_vs_lblcr */
86244 int sysctl_lblcr_expiration;
86245 struct ctl_table_header *lblcr_ctl_header;
86246- struct ctl_table *lblcr_ctl_table;
86247+ ctl_table_no_const *lblcr_ctl_table;
86248 /* ip_vs_est */
86249 struct list_head est_list; /* estimator list */
86250 spinlock_t est_lock;
86251diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86252index 8d4f588..2e37ad2 100644
86253--- a/include/net/irda/ircomm_tty.h
86254+++ b/include/net/irda/ircomm_tty.h
86255@@ -33,6 +33,7 @@
86256 #include <linux/termios.h>
86257 #include <linux/timer.h>
86258 #include <linux/tty.h> /* struct tty_struct */
86259+#include <asm/local.h>
86260
86261 #include <net/irda/irias_object.h>
86262 #include <net/irda/ircomm_core.h>
86263diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86264index 714cc9a..ea05f3e 100644
86265--- a/include/net/iucv/af_iucv.h
86266+++ b/include/net/iucv/af_iucv.h
86267@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86268 struct iucv_sock_list {
86269 struct hlist_head head;
86270 rwlock_t lock;
86271- atomic_t autobind_name;
86272+ atomic_unchecked_t autobind_name;
86273 };
86274
86275 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86276diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86277index f3be818..bf46196 100644
86278--- a/include/net/llc_c_ac.h
86279+++ b/include/net/llc_c_ac.h
86280@@ -87,7 +87,7 @@
86281 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86282 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86283
86284-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86285+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86286
86287 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86288 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86289diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86290index 3948cf1..83b28c4 100644
86291--- a/include/net/llc_c_ev.h
86292+++ b/include/net/llc_c_ev.h
86293@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86294 return (struct llc_conn_state_ev *)skb->cb;
86295 }
86296
86297-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86298-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86299+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86300+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86301
86302 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86303 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86304diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86305index 48f3f89..0e92c50 100644
86306--- a/include/net/llc_c_st.h
86307+++ b/include/net/llc_c_st.h
86308@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86309 u8 next_state;
86310 const llc_conn_ev_qfyr_t *ev_qualifiers;
86311 const llc_conn_action_t *ev_actions;
86312-};
86313+} __do_const;
86314
86315 struct llc_conn_state {
86316 u8 current_state;
86317diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86318index a61b98c..aade1eb 100644
86319--- a/include/net/llc_s_ac.h
86320+++ b/include/net/llc_s_ac.h
86321@@ -23,7 +23,7 @@
86322 #define SAP_ACT_TEST_IND 9
86323
86324 /* All action functions must look like this */
86325-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86326+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86327
86328 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86329 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86330diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86331index c4359e2..76dbc4a 100644
86332--- a/include/net/llc_s_st.h
86333+++ b/include/net/llc_s_st.h
86334@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86335 llc_sap_ev_t ev;
86336 u8 next_state;
86337 const llc_sap_action_t *ev_actions;
86338-};
86339+} __do_const;
86340
86341 struct llc_sap_state {
86342 u8 curr_state;
86343diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86344index 29c7be8..746bd73 100644
86345--- a/include/net/mac80211.h
86346+++ b/include/net/mac80211.h
86347@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86348 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86349
86350 u32 (*get_expected_throughput)(void *priv_sta);
86351-};
86352+} __do_const;
86353
86354 static inline int rate_supported(struct ieee80211_sta *sta,
86355 enum ieee80211_band band,
86356diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86357index 76f7084..8f36e39 100644
86358--- a/include/net/neighbour.h
86359+++ b/include/net/neighbour.h
86360@@ -163,7 +163,7 @@ struct neigh_ops {
86361 void (*error_report)(struct neighbour *, struct sk_buff *);
86362 int (*output)(struct neighbour *, struct sk_buff *);
86363 int (*connected_output)(struct neighbour *, struct sk_buff *);
86364-};
86365+} __do_const;
86366
86367 struct pneigh_entry {
86368 struct pneigh_entry *next;
86369@@ -217,7 +217,7 @@ struct neigh_table {
86370 struct neigh_statistics __percpu *stats;
86371 struct neigh_hash_table __rcu *nht;
86372 struct pneigh_entry **phash_buckets;
86373-};
86374+} __randomize_layout;
86375
86376 enum {
86377 NEIGH_ARP_TABLE = 0,
86378diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86379index 2e8756b8..0bd0083 100644
86380--- a/include/net/net_namespace.h
86381+++ b/include/net/net_namespace.h
86382@@ -130,8 +130,8 @@ struct net {
86383 struct netns_ipvs *ipvs;
86384 #endif
86385 struct sock *diag_nlsk;
86386- atomic_t fnhe_genid;
86387-};
86388+ atomic_unchecked_t fnhe_genid;
86389+} __randomize_layout;
86390
86391 #include <linux/seq_file_net.h>
86392
86393@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86394 #define __net_init __init
86395 #define __net_exit __exit_refok
86396 #define __net_initdata __initdata
86397+#ifdef CONSTIFY_PLUGIN
86398 #define __net_initconst __initconst
86399+#else
86400+#define __net_initconst __initdata
86401+#endif
86402 #endif
86403
86404 struct pernet_operations {
86405@@ -297,7 +301,7 @@ struct pernet_operations {
86406 void (*exit_batch)(struct list_head *net_exit_list);
86407 int *id;
86408 size_t size;
86409-};
86410+} __do_const;
86411
86412 /*
86413 * Use these carefully. If you implement a network device and it
86414@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86415
86416 static inline int rt_genid_ipv4(struct net *net)
86417 {
86418- return atomic_read(&net->ipv4.rt_genid);
86419+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86420 }
86421
86422 static inline void rt_genid_bump_ipv4(struct net *net)
86423 {
86424- atomic_inc(&net->ipv4.rt_genid);
86425+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86426 }
86427
86428 extern void (*__fib6_flush_trees)(struct net *net);
86429@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86430
86431 static inline int fnhe_genid(struct net *net)
86432 {
86433- return atomic_read(&net->fnhe_genid);
86434+ return atomic_read_unchecked(&net->fnhe_genid);
86435 }
86436
86437 static inline void fnhe_genid_bump(struct net *net)
86438 {
86439- atomic_inc(&net->fnhe_genid);
86440+ atomic_inc_unchecked(&net->fnhe_genid);
86441 }
86442
86443 #endif /* __NET_NET_NAMESPACE_H */
86444diff --git a/include/net/netlink.h b/include/net/netlink.h
86445index 6415835..ab96d87 100644
86446--- a/include/net/netlink.h
86447+++ b/include/net/netlink.h
86448@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86449 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86450 {
86451 if (mark)
86452- skb_trim(skb, (unsigned char *) mark - skb->data);
86453+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86454 }
86455
86456 /**
86457diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86458index 29d6a94..235d3d84 100644
86459--- a/include/net/netns/conntrack.h
86460+++ b/include/net/netns/conntrack.h
86461@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86462 struct nf_proto_net {
86463 #ifdef CONFIG_SYSCTL
86464 struct ctl_table_header *ctl_table_header;
86465- struct ctl_table *ctl_table;
86466+ ctl_table_no_const *ctl_table;
86467 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86468 struct ctl_table_header *ctl_compat_header;
86469- struct ctl_table *ctl_compat_table;
86470+ ctl_table_no_const *ctl_compat_table;
86471 #endif
86472 #endif
86473 unsigned int users;
86474@@ -60,7 +60,7 @@ struct nf_ip_net {
86475 struct nf_icmp_net icmpv6;
86476 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86477 struct ctl_table_header *ctl_table_header;
86478- struct ctl_table *ctl_table;
86479+ ctl_table_no_const *ctl_table;
86480 #endif
86481 };
86482
86483diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86484index 0ffef1a..2ce1ceb 100644
86485--- a/include/net/netns/ipv4.h
86486+++ b/include/net/netns/ipv4.h
86487@@ -84,7 +84,7 @@ struct netns_ipv4 {
86488
86489 struct ping_group_range ping_group_range;
86490
86491- atomic_t dev_addr_genid;
86492+ atomic_unchecked_t dev_addr_genid;
86493
86494 #ifdef CONFIG_SYSCTL
86495 unsigned long *sysctl_local_reserved_ports;
86496@@ -98,6 +98,6 @@ struct netns_ipv4 {
86497 struct fib_rules_ops *mr_rules_ops;
86498 #endif
86499 #endif
86500- atomic_t rt_genid;
86501+ atomic_unchecked_t rt_genid;
86502 };
86503 #endif
86504diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86505index 69ae41f..4f94868 100644
86506--- a/include/net/netns/ipv6.h
86507+++ b/include/net/netns/ipv6.h
86508@@ -75,8 +75,8 @@ struct netns_ipv6 {
86509 struct fib_rules_ops *mr6_rules_ops;
86510 #endif
86511 #endif
86512- atomic_t dev_addr_genid;
86513- atomic_t fib6_sernum;
86514+ atomic_unchecked_t dev_addr_genid;
86515+ atomic_unchecked_t fib6_sernum;
86516 };
86517
86518 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86519diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86520index 730d82a..045f2c4 100644
86521--- a/include/net/netns/xfrm.h
86522+++ b/include/net/netns/xfrm.h
86523@@ -78,7 +78,7 @@ struct netns_xfrm {
86524
86525 /* flow cache part */
86526 struct flow_cache flow_cache_global;
86527- atomic_t flow_cache_genid;
86528+ atomic_unchecked_t flow_cache_genid;
86529 struct list_head flow_cache_gc_list;
86530 spinlock_t flow_cache_gc_lock;
86531 struct work_struct flow_cache_gc_work;
86532diff --git a/include/net/ping.h b/include/net/ping.h
86533index f074060..830fba0 100644
86534--- a/include/net/ping.h
86535+++ b/include/net/ping.h
86536@@ -54,7 +54,7 @@ struct ping_iter_state {
86537
86538 extern struct proto ping_prot;
86539 #if IS_ENABLED(CONFIG_IPV6)
86540-extern struct pingv6_ops pingv6_ops;
86541+extern struct pingv6_ops *pingv6_ops;
86542 #endif
86543
86544 struct pingfakehdr {
86545diff --git a/include/net/protocol.h b/include/net/protocol.h
86546index d6fcc1f..ca277058 100644
86547--- a/include/net/protocol.h
86548+++ b/include/net/protocol.h
86549@@ -49,7 +49,7 @@ struct net_protocol {
86550 * socket lookup?
86551 */
86552 icmp_strict_tag_validation:1;
86553-};
86554+} __do_const;
86555
86556 #if IS_ENABLED(CONFIG_IPV6)
86557 struct inet6_protocol {
86558@@ -62,7 +62,7 @@ struct inet6_protocol {
86559 u8 type, u8 code, int offset,
86560 __be32 info);
86561 unsigned int flags; /* INET6_PROTO_xxx */
86562-};
86563+} __do_const;
86564
86565 #define INET6_PROTO_NOPOLICY 0x1
86566 #define INET6_PROTO_FINAL 0x2
86567diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86568index e21b9f9..0191ef0 100644
86569--- a/include/net/rtnetlink.h
86570+++ b/include/net/rtnetlink.h
86571@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86572 int (*fill_slave_info)(struct sk_buff *skb,
86573 const struct net_device *dev,
86574 const struct net_device *slave_dev);
86575-};
86576+} __do_const;
86577
86578 int __rtnl_link_register(struct rtnl_link_ops *ops);
86579 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86580diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86581index 4a5b9a3..ca27d73 100644
86582--- a/include/net/sctp/checksum.h
86583+++ b/include/net/sctp/checksum.h
86584@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86585 unsigned int offset)
86586 {
86587 struct sctphdr *sh = sctp_hdr(skb);
86588- __le32 ret, old = sh->checksum;
86589- const struct skb_checksum_ops ops = {
86590+ __le32 ret, old = sh->checksum;
86591+ static const struct skb_checksum_ops ops = {
86592 .update = sctp_csum_update,
86593 .combine = sctp_csum_combine,
86594 };
86595diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86596index 487ef34..d457f98 100644
86597--- a/include/net/sctp/sm.h
86598+++ b/include/net/sctp/sm.h
86599@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86600 typedef struct {
86601 sctp_state_fn_t *fn;
86602 const char *name;
86603-} sctp_sm_table_entry_t;
86604+} __do_const sctp_sm_table_entry_t;
86605
86606 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86607 * currently in use.
86608@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86609 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86610
86611 /* Extern declarations for major data structures. */
86612-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86613+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86614
86615
86616 /* Get the size of a DATA chunk payload. */
86617diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86618index 2bb2fcf..d17c291 100644
86619--- a/include/net/sctp/structs.h
86620+++ b/include/net/sctp/structs.h
86621@@ -509,7 +509,7 @@ struct sctp_pf {
86622 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86623 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86624 struct sctp_af *af;
86625-};
86626+} __do_const;
86627
86628
86629 /* Structure to track chunk fragments that have been acked, but peer
86630diff --git a/include/net/sock.h b/include/net/sock.h
86631index 2210fec..2249ad0 100644
86632--- a/include/net/sock.h
86633+++ b/include/net/sock.h
86634@@ -362,7 +362,7 @@ struct sock {
86635 unsigned int sk_napi_id;
86636 unsigned int sk_ll_usec;
86637 #endif
86638- atomic_t sk_drops;
86639+ atomic_unchecked_t sk_drops;
86640 int sk_rcvbuf;
86641
86642 struct sk_filter __rcu *sk_filter;
86643@@ -1061,7 +1061,7 @@ struct proto {
86644 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86645 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86646 #endif
86647-};
86648+} __randomize_layout;
86649
86650 /*
86651 * Bits in struct cg_proto.flags
86652@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86653 page_counter_uncharge(&prot->memory_allocated, amt);
86654 }
86655
86656-static inline long
86657+static inline long __intentional_overflow(-1)
86658 sk_memory_allocated(const struct sock *sk)
86659 {
86660 struct proto *prot = sk->sk_prot;
86661@@ -1385,7 +1385,7 @@ struct sock_iocb {
86662 struct scm_cookie *scm;
86663 struct msghdr *msg, async_msg;
86664 struct kiocb *kiocb;
86665-};
86666+} __randomize_layout;
86667
86668 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86669 {
86670@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86671 }
86672
86673 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86674- char __user *from, char *to,
86675+ char __user *from, unsigned char *to,
86676 int copy, int offset)
86677 {
86678 if (skb->ip_summed == CHECKSUM_NONE) {
86679@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86680 }
86681 }
86682
86683-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86684+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86685
86686 /**
86687 * sk_page_frag - return an appropriate page_frag
86688diff --git a/include/net/tcp.h b/include/net/tcp.h
86689index 9d9111e..349c847 100644
86690--- a/include/net/tcp.h
86691+++ b/include/net/tcp.h
86692@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
86693 void tcp_xmit_retransmit_queue(struct sock *);
86694 void tcp_simple_retransmit(struct sock *);
86695 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86696-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86697+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86698
86699 void tcp_send_probe0(struct sock *);
86700 void tcp_send_partial(struct sock *);
86701@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
86702 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
86703 */
86704 struct tcp_skb_cb {
86705- __u32 seq; /* Starting sequence number */
86706- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86707+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86708+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86709 union {
86710 /* Note : tcp_tw_isn is used in input path only
86711 * (isn chosen by tcp_timewait_state_process())
86712@@ -715,7 +715,7 @@ struct tcp_skb_cb {
86713
86714 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86715 /* 1 byte hole */
86716- __u32 ack_seq; /* Sequence number ACK'd */
86717+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86718 union {
86719 struct inet_skb_parm h4;
86720 #if IS_ENABLED(CONFIG_IPV6)
86721diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86722index dc4865e..152ee4c 100644
86723--- a/include/net/xfrm.h
86724+++ b/include/net/xfrm.h
86725@@ -285,7 +285,6 @@ struct xfrm_dst;
86726 struct xfrm_policy_afinfo {
86727 unsigned short family;
86728 struct dst_ops *dst_ops;
86729- void (*garbage_collect)(struct net *net);
86730 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86731 const xfrm_address_t *saddr,
86732 const xfrm_address_t *daddr);
86733@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86734 struct net_device *dev,
86735 const struct flowi *fl);
86736 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86737-};
86738+} __do_const;
86739
86740 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86741 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86742@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86743 int (*transport_finish)(struct sk_buff *skb,
86744 int async);
86745 void (*local_error)(struct sk_buff *skb, u32 mtu);
86746-};
86747+} __do_const;
86748
86749 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86750 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86751@@ -437,7 +436,7 @@ struct xfrm_mode {
86752 struct module *owner;
86753 unsigned int encap;
86754 int flags;
86755-};
86756+} __do_const;
86757
86758 /* Flags for xfrm_mode. */
86759 enum {
86760@@ -534,7 +533,7 @@ struct xfrm_policy {
86761 struct timer_list timer;
86762
86763 struct flow_cache_object flo;
86764- atomic_t genid;
86765+ atomic_unchecked_t genid;
86766 u32 priority;
86767 u32 index;
86768 struct xfrm_mark mark;
86769@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86770 }
86771
86772 void xfrm_garbage_collect(struct net *net);
86773+void xfrm_garbage_collect_deferred(struct net *net);
86774
86775 #else
86776
86777@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86778 static inline void xfrm_garbage_collect(struct net *net)
86779 {
86780 }
86781+static inline void xfrm_garbage_collect_deferred(struct net *net)
86782+{
86783+}
86784 #endif
86785
86786 static __inline__
86787diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86788index 1017e0b..227aa4d 100644
86789--- a/include/rdma/iw_cm.h
86790+++ b/include/rdma/iw_cm.h
86791@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86792 int backlog);
86793
86794 int (*destroy_listen)(struct iw_cm_id *cm_id);
86795-};
86796+} __no_const;
86797
86798 /**
86799 * iw_create_cm_id - Create an IW CM identifier.
86800diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86801index 93d14da..734b3d8 100644
86802--- a/include/scsi/libfc.h
86803+++ b/include/scsi/libfc.h
86804@@ -771,6 +771,7 @@ struct libfc_function_template {
86805 */
86806 void (*disc_stop_final) (struct fc_lport *);
86807 };
86808+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86809
86810 /**
86811 * struct fc_disc - Discovery context
86812@@ -875,7 +876,7 @@ struct fc_lport {
86813 struct fc_vport *vport;
86814
86815 /* Operational Information */
86816- struct libfc_function_template tt;
86817+ libfc_function_template_no_const tt;
86818 u8 link_up;
86819 u8 qfull;
86820 enum fc_lport_state state;
86821diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86822index 3a4edd1..feb2e3e 100644
86823--- a/include/scsi/scsi_device.h
86824+++ b/include/scsi/scsi_device.h
86825@@ -185,9 +185,9 @@ struct scsi_device {
86826 unsigned int max_device_blocked; /* what device_blocked counts down from */
86827 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
86828
86829- atomic_t iorequest_cnt;
86830- atomic_t iodone_cnt;
86831- atomic_t ioerr_cnt;
86832+ atomic_unchecked_t iorequest_cnt;
86833+ atomic_unchecked_t iodone_cnt;
86834+ atomic_unchecked_t ioerr_cnt;
86835
86836 struct device sdev_gendev,
86837 sdev_dev;
86838diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
86839index 007a0bc..7188db8 100644
86840--- a/include/scsi/scsi_transport_fc.h
86841+++ b/include/scsi/scsi_transport_fc.h
86842@@ -756,7 +756,8 @@ struct fc_function_template {
86843 unsigned long show_host_system_hostname:1;
86844
86845 unsigned long disable_target_scan:1;
86846-};
86847+} __do_const;
86848+typedef struct fc_function_template __no_const fc_function_template_no_const;
86849
86850
86851 /**
86852diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
86853index 396e8f7..b037e89 100644
86854--- a/include/sound/compress_driver.h
86855+++ b/include/sound/compress_driver.h
86856@@ -129,7 +129,7 @@ struct snd_compr_ops {
86857 struct snd_compr_caps *caps);
86858 int (*get_codec_caps) (struct snd_compr_stream *stream,
86859 struct snd_compr_codec_caps *codec);
86860-};
86861+} __no_const;
86862
86863 /**
86864 * struct snd_compr: Compressed device
86865diff --git a/include/sound/soc.h b/include/sound/soc.h
86866index ac8b333..59c3692 100644
86867--- a/include/sound/soc.h
86868+++ b/include/sound/soc.h
86869@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
86870 enum snd_soc_dapm_type, int);
86871
86872 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
86873-};
86874+} __do_const;
86875
86876 /* SoC platform interface */
86877 struct snd_soc_platform_driver {
86878@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
86879 const struct snd_compr_ops *compr_ops;
86880
86881 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
86882-};
86883+} __do_const;
86884
86885 struct snd_soc_dai_link_component {
86886 const char *name;
86887diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
86888index 672150b..9d4bec4 100644
86889--- a/include/target/target_core_base.h
86890+++ b/include/target/target_core_base.h
86891@@ -767,7 +767,7 @@ struct se_device {
86892 atomic_long_t write_bytes;
86893 /* Active commands on this virtual SE device */
86894 atomic_t simple_cmds;
86895- atomic_t dev_ordered_id;
86896+ atomic_unchecked_t dev_ordered_id;
86897 atomic_t dev_ordered_sync;
86898 atomic_t dev_qf_count;
86899 int export_count;
86900diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
86901new file mode 100644
86902index 0000000..fb634b7
86903--- /dev/null
86904+++ b/include/trace/events/fs.h
86905@@ -0,0 +1,53 @@
86906+#undef TRACE_SYSTEM
86907+#define TRACE_SYSTEM fs
86908+
86909+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
86910+#define _TRACE_FS_H
86911+
86912+#include <linux/fs.h>
86913+#include <linux/tracepoint.h>
86914+
86915+TRACE_EVENT(do_sys_open,
86916+
86917+ TP_PROTO(const char *filename, int flags, int mode),
86918+
86919+ TP_ARGS(filename, flags, mode),
86920+
86921+ TP_STRUCT__entry(
86922+ __string( filename, filename )
86923+ __field( int, flags )
86924+ __field( int, mode )
86925+ ),
86926+
86927+ TP_fast_assign(
86928+ __assign_str(filename, filename);
86929+ __entry->flags = flags;
86930+ __entry->mode = mode;
86931+ ),
86932+
86933+ TP_printk("\"%s\" %x %o",
86934+ __get_str(filename), __entry->flags, __entry->mode)
86935+);
86936+
86937+TRACE_EVENT(open_exec,
86938+
86939+ TP_PROTO(const char *filename),
86940+
86941+ TP_ARGS(filename),
86942+
86943+ TP_STRUCT__entry(
86944+ __string( filename, filename )
86945+ ),
86946+
86947+ TP_fast_assign(
86948+ __assign_str(filename, filename);
86949+ ),
86950+
86951+ TP_printk("\"%s\"",
86952+ __get_str(filename))
86953+);
86954+
86955+#endif /* _TRACE_FS_H */
86956+
86957+/* This part must be outside protection */
86958+#include <trace/define_trace.h>
86959diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
86960index 3608beb..df39d8a 100644
86961--- a/include/trace/events/irq.h
86962+++ b/include/trace/events/irq.h
86963@@ -36,7 +36,7 @@ struct softirq_action;
86964 */
86965 TRACE_EVENT(irq_handler_entry,
86966
86967- TP_PROTO(int irq, struct irqaction *action),
86968+ TP_PROTO(int irq, const struct irqaction *action),
86969
86970 TP_ARGS(irq, action),
86971
86972@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
86973 */
86974 TRACE_EVENT(irq_handler_exit,
86975
86976- TP_PROTO(int irq, struct irqaction *action, int ret),
86977+ TP_PROTO(int irq, const struct irqaction *action, int ret),
86978
86979 TP_ARGS(irq, action, ret),
86980
86981diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
86982index 7caf44c..23c6f27 100644
86983--- a/include/uapi/linux/a.out.h
86984+++ b/include/uapi/linux/a.out.h
86985@@ -39,6 +39,14 @@ enum machine_type {
86986 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
86987 };
86988
86989+/* Constants for the N_FLAGS field */
86990+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
86991+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
86992+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
86993+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
86994+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
86995+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
86996+
86997 #if !defined (N_MAGIC)
86998 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
86999 #endif
87000diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87001index 22b6ad3..aeba37e 100644
87002--- a/include/uapi/linux/bcache.h
87003+++ b/include/uapi/linux/bcache.h
87004@@ -5,6 +5,7 @@
87005 * Bcache on disk data structures
87006 */
87007
87008+#include <linux/compiler.h>
87009 #include <asm/types.h>
87010
87011 #define BITMASK(name, type, field, offset, size) \
87012@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87013 /* Btree keys - all units are in sectors */
87014
87015 struct bkey {
87016- __u64 high;
87017- __u64 low;
87018+ __u64 high __intentional_overflow(-1);
87019+ __u64 low __intentional_overflow(-1);
87020 __u64 ptr[];
87021 };
87022
87023diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87024index d876736..ccce5c0 100644
87025--- a/include/uapi/linux/byteorder/little_endian.h
87026+++ b/include/uapi/linux/byteorder/little_endian.h
87027@@ -42,51 +42,51 @@
87028
87029 static inline __le64 __cpu_to_le64p(const __u64 *p)
87030 {
87031- return (__force __le64)*p;
87032+ return (__force const __le64)*p;
87033 }
87034-static inline __u64 __le64_to_cpup(const __le64 *p)
87035+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87036 {
87037- return (__force __u64)*p;
87038+ return (__force const __u64)*p;
87039 }
87040 static inline __le32 __cpu_to_le32p(const __u32 *p)
87041 {
87042- return (__force __le32)*p;
87043+ return (__force const __le32)*p;
87044 }
87045 static inline __u32 __le32_to_cpup(const __le32 *p)
87046 {
87047- return (__force __u32)*p;
87048+ return (__force const __u32)*p;
87049 }
87050 static inline __le16 __cpu_to_le16p(const __u16 *p)
87051 {
87052- return (__force __le16)*p;
87053+ return (__force const __le16)*p;
87054 }
87055 static inline __u16 __le16_to_cpup(const __le16 *p)
87056 {
87057- return (__force __u16)*p;
87058+ return (__force const __u16)*p;
87059 }
87060 static inline __be64 __cpu_to_be64p(const __u64 *p)
87061 {
87062- return (__force __be64)__swab64p(p);
87063+ return (__force const __be64)__swab64p(p);
87064 }
87065 static inline __u64 __be64_to_cpup(const __be64 *p)
87066 {
87067- return __swab64p((__u64 *)p);
87068+ return __swab64p((const __u64 *)p);
87069 }
87070 static inline __be32 __cpu_to_be32p(const __u32 *p)
87071 {
87072- return (__force __be32)__swab32p(p);
87073+ return (__force const __be32)__swab32p(p);
87074 }
87075-static inline __u32 __be32_to_cpup(const __be32 *p)
87076+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87077 {
87078- return __swab32p((__u32 *)p);
87079+ return __swab32p((const __u32 *)p);
87080 }
87081 static inline __be16 __cpu_to_be16p(const __u16 *p)
87082 {
87083- return (__force __be16)__swab16p(p);
87084+ return (__force const __be16)__swab16p(p);
87085 }
87086 static inline __u16 __be16_to_cpup(const __be16 *p)
87087 {
87088- return __swab16p((__u16 *)p);
87089+ return __swab16p((const __u16 *)p);
87090 }
87091 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87092 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87093diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87094index 71e1d0e..6cc9caf 100644
87095--- a/include/uapi/linux/elf.h
87096+++ b/include/uapi/linux/elf.h
87097@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87098 #define PT_GNU_EH_FRAME 0x6474e550
87099
87100 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87101+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87102+
87103+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87104+
87105+/* Constants for the e_flags field */
87106+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87107+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87108+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87109+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87110+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87111+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87112
87113 /*
87114 * Extended Numbering
87115@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87116 #define DT_DEBUG 21
87117 #define DT_TEXTREL 22
87118 #define DT_JMPREL 23
87119+#define DT_FLAGS 30
87120+ #define DF_TEXTREL 0x00000004
87121 #define DT_ENCODING 32
87122 #define OLD_DT_LOOS 0x60000000
87123 #define DT_LOOS 0x6000000d
87124@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87125 #define PF_W 0x2
87126 #define PF_X 0x1
87127
87128+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87129+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87130+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87131+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87132+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87133+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87134+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87135+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87136+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87137+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87138+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87139+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87140+
87141 typedef struct elf32_phdr{
87142 Elf32_Word p_type;
87143 Elf32_Off p_offset;
87144@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87145 #define EI_OSABI 7
87146 #define EI_PAD 8
87147
87148+#define EI_PAX 14
87149+
87150 #define ELFMAG0 0x7f /* EI_MAG */
87151 #define ELFMAG1 'E'
87152 #define ELFMAG2 'L'
87153diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87154index aa169c4..6a2771d 100644
87155--- a/include/uapi/linux/personality.h
87156+++ b/include/uapi/linux/personality.h
87157@@ -30,6 +30,7 @@ enum {
87158 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87159 ADDR_NO_RANDOMIZE | \
87160 ADDR_COMPAT_LAYOUT | \
87161+ ADDR_LIMIT_3GB | \
87162 MMAP_PAGE_ZERO)
87163
87164 /*
87165diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87166index 7530e74..e714828 100644
87167--- a/include/uapi/linux/screen_info.h
87168+++ b/include/uapi/linux/screen_info.h
87169@@ -43,7 +43,8 @@ struct screen_info {
87170 __u16 pages; /* 0x32 */
87171 __u16 vesa_attributes; /* 0x34 */
87172 __u32 capabilities; /* 0x36 */
87173- __u8 _reserved[6]; /* 0x3a */
87174+ __u16 vesapm_size; /* 0x3a */
87175+ __u8 _reserved[4]; /* 0x3c */
87176 } __attribute__((packed));
87177
87178 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87179diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87180index 0e011eb..82681b1 100644
87181--- a/include/uapi/linux/swab.h
87182+++ b/include/uapi/linux/swab.h
87183@@ -43,7 +43,7 @@
87184 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87185 */
87186
87187-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87188+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87189 {
87190 #ifdef __HAVE_BUILTIN_BSWAP16__
87191 return __builtin_bswap16(val);
87192@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87193 #endif
87194 }
87195
87196-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87197+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87198 {
87199 #ifdef __HAVE_BUILTIN_BSWAP32__
87200 return __builtin_bswap32(val);
87201@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87202 #endif
87203 }
87204
87205-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87206+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87207 {
87208 #ifdef __HAVE_BUILTIN_BSWAP64__
87209 return __builtin_bswap64(val);
87210diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87211index 1590c49..5eab462 100644
87212--- a/include/uapi/linux/xattr.h
87213+++ b/include/uapi/linux/xattr.h
87214@@ -73,5 +73,9 @@
87215 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87216 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87217
87218+/* User namespace */
87219+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87220+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87221+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87222
87223 #endif /* _UAPI_LINUX_XATTR_H */
87224diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87225index f9466fa..f4e2b81 100644
87226--- a/include/video/udlfb.h
87227+++ b/include/video/udlfb.h
87228@@ -53,10 +53,10 @@ struct dlfb_data {
87229 u32 pseudo_palette[256];
87230 int blank_mode; /*one of FB_BLANK_ */
87231 /* blit-only rendering path metrics, exposed through sysfs */
87232- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87233- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87234- atomic_t bytes_sent; /* to usb, after compression including overhead */
87235- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87236+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87237+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87238+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87239+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87240 };
87241
87242 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87243diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87244index 30f5362..8ed8ac9 100644
87245--- a/include/video/uvesafb.h
87246+++ b/include/video/uvesafb.h
87247@@ -122,6 +122,7 @@ struct uvesafb_par {
87248 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87249 u8 pmi_setpal; /* PMI for palette changes */
87250 u16 *pmi_base; /* protected mode interface location */
87251+ u8 *pmi_code; /* protected mode code location */
87252 void *pmi_start;
87253 void *pmi_pal;
87254 u8 *vbe_state_orig; /*
87255diff --git a/init/Kconfig b/init/Kconfig
87256index 9afb971..27d6fca 100644
87257--- a/init/Kconfig
87258+++ b/init/Kconfig
87259@@ -1129,6 +1129,7 @@ endif # CGROUPS
87260
87261 config CHECKPOINT_RESTORE
87262 bool "Checkpoint/restore support" if EXPERT
87263+ depends on !GRKERNSEC
87264 default n
87265 help
87266 Enables additional kernel features in a sake of checkpoint/restore.
87267@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87268
87269 config COMPAT_BRK
87270 bool "Disable heap randomization"
87271- default y
87272+ default n
87273 help
87274 Randomizing heap placement makes heap exploits harder, but it
87275 also breaks ancient binaries (including anything libc5 based).
87276@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87277 config STOP_MACHINE
87278 bool
87279 default y
87280- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87281+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87282 help
87283 Need stop_machine() primitive.
87284
87285diff --git a/init/Makefile b/init/Makefile
87286index 7bc47ee..6da2dc7 100644
87287--- a/init/Makefile
87288+++ b/init/Makefile
87289@@ -2,6 +2,9 @@
87290 # Makefile for the linux kernel.
87291 #
87292
87293+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87294+asflags-y := $(GCC_PLUGINS_AFLAGS)
87295+
87296 obj-y := main.o version.o mounts.o
87297 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87298 obj-y += noinitramfs.o
87299diff --git a/init/do_mounts.c b/init/do_mounts.c
87300index eb41008..f5dbbf9 100644
87301--- a/init/do_mounts.c
87302+++ b/init/do_mounts.c
87303@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87304 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87305 {
87306 struct super_block *s;
87307- int err = sys_mount(name, "/root", fs, flags, data);
87308+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87309 if (err)
87310 return err;
87311
87312- sys_chdir("/root");
87313+ sys_chdir((const char __force_user *)"/root");
87314 s = current->fs->pwd.dentry->d_sb;
87315 ROOT_DEV = s->s_dev;
87316 printk(KERN_INFO
87317@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87318 va_start(args, fmt);
87319 vsprintf(buf, fmt, args);
87320 va_end(args);
87321- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87322+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87323 if (fd >= 0) {
87324 sys_ioctl(fd, FDEJECT, 0);
87325 sys_close(fd);
87326 }
87327 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87328- fd = sys_open("/dev/console", O_RDWR, 0);
87329+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87330 if (fd >= 0) {
87331 sys_ioctl(fd, TCGETS, (long)&termios);
87332 termios.c_lflag &= ~ICANON;
87333 sys_ioctl(fd, TCSETSF, (long)&termios);
87334- sys_read(fd, &c, 1);
87335+ sys_read(fd, (char __user *)&c, 1);
87336 termios.c_lflag |= ICANON;
87337 sys_ioctl(fd, TCSETSF, (long)&termios);
87338 sys_close(fd);
87339@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87340 mount_root();
87341 out:
87342 devtmpfs_mount("dev");
87343- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87344- sys_chroot(".");
87345+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87346+ sys_chroot((const char __force_user *)".");
87347 }
87348
87349 static bool is_tmpfs;
87350diff --git a/init/do_mounts.h b/init/do_mounts.h
87351index f5b978a..69dbfe8 100644
87352--- a/init/do_mounts.h
87353+++ b/init/do_mounts.h
87354@@ -15,15 +15,15 @@ extern int root_mountflags;
87355
87356 static inline int create_dev(char *name, dev_t dev)
87357 {
87358- sys_unlink(name);
87359- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87360+ sys_unlink((char __force_user *)name);
87361+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87362 }
87363
87364 #if BITS_PER_LONG == 32
87365 static inline u32 bstat(char *name)
87366 {
87367 struct stat64 stat;
87368- if (sys_stat64(name, &stat) != 0)
87369+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87370 return 0;
87371 if (!S_ISBLK(stat.st_mode))
87372 return 0;
87373@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87374 static inline u32 bstat(char *name)
87375 {
87376 struct stat stat;
87377- if (sys_newstat(name, &stat) != 0)
87378+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87379 return 0;
87380 if (!S_ISBLK(stat.st_mode))
87381 return 0;
87382diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87383index 3e0878e..8a9d7a0 100644
87384--- a/init/do_mounts_initrd.c
87385+++ b/init/do_mounts_initrd.c
87386@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87387 {
87388 sys_unshare(CLONE_FS | CLONE_FILES);
87389 /* stdin/stdout/stderr for /linuxrc */
87390- sys_open("/dev/console", O_RDWR, 0);
87391+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87392 sys_dup(0);
87393 sys_dup(0);
87394 /* move initrd over / and chdir/chroot in initrd root */
87395- sys_chdir("/root");
87396- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87397- sys_chroot(".");
87398+ sys_chdir((const char __force_user *)"/root");
87399+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87400+ sys_chroot((const char __force_user *)".");
87401 sys_setsid();
87402 return 0;
87403 }
87404@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87405 create_dev("/dev/root.old", Root_RAM0);
87406 /* mount initrd on rootfs' /root */
87407 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87408- sys_mkdir("/old", 0700);
87409- sys_chdir("/old");
87410+ sys_mkdir((const char __force_user *)"/old", 0700);
87411+ sys_chdir((const char __force_user *)"/old");
87412
87413 /* try loading default modules from initrd */
87414 load_default_modules();
87415@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87416 current->flags &= ~PF_FREEZER_SKIP;
87417
87418 /* move initrd to rootfs' /old */
87419- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87420+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87421 /* switch root and cwd back to / of rootfs */
87422- sys_chroot("..");
87423+ sys_chroot((const char __force_user *)"..");
87424
87425 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87426- sys_chdir("/old");
87427+ sys_chdir((const char __force_user *)"/old");
87428 return;
87429 }
87430
87431- sys_chdir("/");
87432+ sys_chdir((const char __force_user *)"/");
87433 ROOT_DEV = new_decode_dev(real_root_dev);
87434 mount_root();
87435
87436 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87437- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87438+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87439 if (!error)
87440 printk("okay\n");
87441 else {
87442- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87443+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87444 if (error == -ENOENT)
87445 printk("/initrd does not exist. Ignored.\n");
87446 else
87447 printk("failed\n");
87448 printk(KERN_NOTICE "Unmounting old root\n");
87449- sys_umount("/old", MNT_DETACH);
87450+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87451 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87452 if (fd < 0) {
87453 error = fd;
87454@@ -127,11 +127,11 @@ int __init initrd_load(void)
87455 * mounted in the normal path.
87456 */
87457 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87458- sys_unlink("/initrd.image");
87459+ sys_unlink((const char __force_user *)"/initrd.image");
87460 handle_initrd();
87461 return 1;
87462 }
87463 }
87464- sys_unlink("/initrd.image");
87465+ sys_unlink((const char __force_user *)"/initrd.image");
87466 return 0;
87467 }
87468diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87469index 8cb6db5..d729f50 100644
87470--- a/init/do_mounts_md.c
87471+++ b/init/do_mounts_md.c
87472@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87473 partitioned ? "_d" : "", minor,
87474 md_setup_args[ent].device_names);
87475
87476- fd = sys_open(name, 0, 0);
87477+ fd = sys_open((char __force_user *)name, 0, 0);
87478 if (fd < 0) {
87479 printk(KERN_ERR "md: open failed - cannot start "
87480 "array %s\n", name);
87481@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87482 * array without it
87483 */
87484 sys_close(fd);
87485- fd = sys_open(name, 0, 0);
87486+ fd = sys_open((char __force_user *)name, 0, 0);
87487 sys_ioctl(fd, BLKRRPART, 0);
87488 }
87489 sys_close(fd);
87490@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87491
87492 wait_for_device_probe();
87493
87494- fd = sys_open("/dev/md0", 0, 0);
87495+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87496 if (fd >= 0) {
87497 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87498 sys_close(fd);
87499diff --git a/init/init_task.c b/init/init_task.c
87500index ba0a7f36..2bcf1d5 100644
87501--- a/init/init_task.c
87502+++ b/init/init_task.c
87503@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87504 * Initial thread structure. Alignment of this is handled by a special
87505 * linker map entry.
87506 */
87507+#ifdef CONFIG_X86
87508+union thread_union init_thread_union __init_task_data;
87509+#else
87510 union thread_union init_thread_union __init_task_data =
87511 { INIT_THREAD_INFO(init_task) };
87512+#endif
87513diff --git a/init/initramfs.c b/init/initramfs.c
87514index ad1bd77..dca2c1b 100644
87515--- a/init/initramfs.c
87516+++ b/init/initramfs.c
87517@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87518
87519 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87520 while (count) {
87521- ssize_t rv = sys_write(fd, p, count);
87522+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87523
87524 if (rv < 0) {
87525 if (rv == -EINTR || rv == -EAGAIN)
87526@@ -107,7 +107,7 @@ static void __init free_hash(void)
87527 }
87528 }
87529
87530-static long __init do_utime(char *filename, time_t mtime)
87531+static long __init do_utime(char __force_user *filename, time_t mtime)
87532 {
87533 struct timespec t[2];
87534
87535@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87536 struct dir_entry *de, *tmp;
87537 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87538 list_del(&de->list);
87539- do_utime(de->name, de->mtime);
87540+ do_utime((char __force_user *)de->name, de->mtime);
87541 kfree(de->name);
87542 kfree(de);
87543 }
87544@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87545 if (nlink >= 2) {
87546 char *old = find_link(major, minor, ino, mode, collected);
87547 if (old)
87548- return (sys_link(old, collected) < 0) ? -1 : 1;
87549+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87550 }
87551 return 0;
87552 }
87553@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87554 {
87555 struct stat st;
87556
87557- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87558+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87559 if (S_ISDIR(st.st_mode))
87560- sys_rmdir(path);
87561+ sys_rmdir((char __force_user *)path);
87562 else
87563- sys_unlink(path);
87564+ sys_unlink((char __force_user *)path);
87565 }
87566 }
87567
87568@@ -338,7 +338,7 @@ static int __init do_name(void)
87569 int openflags = O_WRONLY|O_CREAT;
87570 if (ml != 1)
87571 openflags |= O_TRUNC;
87572- wfd = sys_open(collected, openflags, mode);
87573+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87574
87575 if (wfd >= 0) {
87576 sys_fchown(wfd, uid, gid);
87577@@ -350,17 +350,17 @@ static int __init do_name(void)
87578 }
87579 }
87580 } else if (S_ISDIR(mode)) {
87581- sys_mkdir(collected, mode);
87582- sys_chown(collected, uid, gid);
87583- sys_chmod(collected, mode);
87584+ sys_mkdir((char __force_user *)collected, mode);
87585+ sys_chown((char __force_user *)collected, uid, gid);
87586+ sys_chmod((char __force_user *)collected, mode);
87587 dir_add(collected, mtime);
87588 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87589 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87590 if (maybe_link() == 0) {
87591- sys_mknod(collected, mode, rdev);
87592- sys_chown(collected, uid, gid);
87593- sys_chmod(collected, mode);
87594- do_utime(collected, mtime);
87595+ sys_mknod((char __force_user *)collected, mode, rdev);
87596+ sys_chown((char __force_user *)collected, uid, gid);
87597+ sys_chmod((char __force_user *)collected, mode);
87598+ do_utime((char __force_user *)collected, mtime);
87599 }
87600 }
87601 return 0;
87602@@ -372,7 +372,7 @@ static int __init do_copy(void)
87603 if (xwrite(wfd, victim, body_len) != body_len)
87604 error("write error");
87605 sys_close(wfd);
87606- do_utime(vcollected, mtime);
87607+ do_utime((char __force_user *)vcollected, mtime);
87608 kfree(vcollected);
87609 eat(body_len);
87610 state = SkipIt;
87611@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87612 {
87613 collected[N_ALIGN(name_len) + body_len] = '\0';
87614 clean_path(collected, 0);
87615- sys_symlink(collected + N_ALIGN(name_len), collected);
87616- sys_lchown(collected, uid, gid);
87617- do_utime(collected, mtime);
87618+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87619+ sys_lchown((char __force_user *)collected, uid, gid);
87620+ do_utime((char __force_user *)collected, mtime);
87621 state = SkipIt;
87622 next_state = Reset;
87623 return 0;
87624diff --git a/init/main.c b/init/main.c
87625index 61b99376..1e346cb 100644
87626--- a/init/main.c
87627+++ b/init/main.c
87628@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87629 static inline void mark_rodata_ro(void) { }
87630 #endif
87631
87632+extern void grsecurity_init(void);
87633+
87634 /*
87635 * Debug helper: via this flag we know that we are in 'early bootup code'
87636 * where only the boot processor is running with IRQ disabled. This means
87637@@ -161,6 +163,85 @@ static int __init set_reset_devices(char *str)
87638
87639 __setup("reset_devices", set_reset_devices);
87640
87641+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87642+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87643+static int __init setup_grsec_proc_gid(char *str)
87644+{
87645+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87646+ return 1;
87647+}
87648+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87649+#endif
87650+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
87651+int grsec_enable_sysfs_restrict = 1;
87652+static int __init setup_grsec_sysfs_restrict(char *str)
87653+{
87654+ if (!simple_strtol(str, NULL, 0))
87655+ grsec_enable_sysfs_restrict = 0;
87656+ return 1;
87657+}
87658+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
87659+#endif
87660+
87661+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87662+unsigned long pax_user_shadow_base __read_only;
87663+EXPORT_SYMBOL(pax_user_shadow_base);
87664+extern char pax_enter_kernel_user[];
87665+extern char pax_exit_kernel_user[];
87666+#endif
87667+
87668+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87669+static int __init setup_pax_nouderef(char *str)
87670+{
87671+#ifdef CONFIG_X86_32
87672+ unsigned int cpu;
87673+ struct desc_struct *gdt;
87674+
87675+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87676+ gdt = get_cpu_gdt_table(cpu);
87677+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87678+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87679+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87680+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87681+ }
87682+ loadsegment(ds, __KERNEL_DS);
87683+ loadsegment(es, __KERNEL_DS);
87684+ loadsegment(ss, __KERNEL_DS);
87685+#else
87686+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87687+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87688+ clone_pgd_mask = ~(pgdval_t)0UL;
87689+ pax_user_shadow_base = 0UL;
87690+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87691+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87692+#endif
87693+
87694+ return 0;
87695+}
87696+early_param("pax_nouderef", setup_pax_nouderef);
87697+
87698+#ifdef CONFIG_X86_64
87699+static int __init setup_pax_weakuderef(char *str)
87700+{
87701+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87702+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87703+ return 1;
87704+}
87705+__setup("pax_weakuderef", setup_pax_weakuderef);
87706+#endif
87707+#endif
87708+
87709+#ifdef CONFIG_PAX_SOFTMODE
87710+int pax_softmode;
87711+
87712+static int __init setup_pax_softmode(char *str)
87713+{
87714+ get_option(&str, &pax_softmode);
87715+ return 1;
87716+}
87717+__setup("pax_softmode=", setup_pax_softmode);
87718+#endif
87719+
87720 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87721 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87722 static const char *panic_later, *panic_param;
87723@@ -735,7 +816,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87724 struct blacklist_entry *entry;
87725 char *fn_name;
87726
87727- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87728+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87729 if (!fn_name)
87730 return false;
87731
87732@@ -787,7 +868,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87733 {
87734 int count = preempt_count();
87735 int ret;
87736- char msgbuf[64];
87737+ const char *msg1 = "", *msg2 = "";
87738
87739 if (initcall_blacklisted(fn))
87740 return -EPERM;
87741@@ -797,18 +878,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87742 else
87743 ret = fn();
87744
87745- msgbuf[0] = 0;
87746-
87747 if (preempt_count() != count) {
87748- sprintf(msgbuf, "preemption imbalance ");
87749+ msg1 = " preemption imbalance";
87750 preempt_count_set(count);
87751 }
87752 if (irqs_disabled()) {
87753- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87754+ msg2 = " disabled interrupts";
87755 local_irq_enable();
87756 }
87757- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87758+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87759
87760+ add_latent_entropy();
87761 return ret;
87762 }
87763
87764@@ -914,8 +994,8 @@ static int run_init_process(const char *init_filename)
87765 {
87766 argv_init[0] = init_filename;
87767 return do_execve(getname_kernel(init_filename),
87768- (const char __user *const __user *)argv_init,
87769- (const char __user *const __user *)envp_init);
87770+ (const char __user *const __force_user *)argv_init,
87771+ (const char __user *const __force_user *)envp_init);
87772 }
87773
87774 static int try_to_run_init_process(const char *init_filename)
87775@@ -932,6 +1012,10 @@ static int try_to_run_init_process(const char *init_filename)
87776 return ret;
87777 }
87778
87779+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87780+extern int gr_init_ran;
87781+#endif
87782+
87783 static noinline void __init kernel_init_freeable(void);
87784
87785 static int __ref kernel_init(void *unused)
87786@@ -956,6 +1040,11 @@ static int __ref kernel_init(void *unused)
87787 ramdisk_execute_command, ret);
87788 }
87789
87790+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87791+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87792+ gr_init_ran = 1;
87793+#endif
87794+
87795 /*
87796 * We try each of these until one succeeds.
87797 *
87798@@ -1016,7 +1105,7 @@ static noinline void __init kernel_init_freeable(void)
87799 do_basic_setup();
87800
87801 /* Open the /dev/console on the rootfs, this should never fail */
87802- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87803+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87804 pr_err("Warning: unable to open an initial console.\n");
87805
87806 (void) sys_dup(0);
87807@@ -1029,11 +1118,13 @@ static noinline void __init kernel_init_freeable(void)
87808 if (!ramdisk_execute_command)
87809 ramdisk_execute_command = "/init";
87810
87811- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
87812+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
87813 ramdisk_execute_command = NULL;
87814 prepare_namespace();
87815 }
87816
87817+ grsecurity_init();
87818+
87819 /*
87820 * Ok, we have completed the initial bootup, and
87821 * we're essentially up and running. Get rid of the
87822diff --git a/ipc/compat.c b/ipc/compat.c
87823index 9b3c85f..1c4d897 100644
87824--- a/ipc/compat.c
87825+++ b/ipc/compat.c
87826@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
87827 COMPAT_SHMLBA);
87828 if (err < 0)
87829 return err;
87830- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
87831+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
87832 }
87833 case SHMDT:
87834 return sys_shmdt(compat_ptr(ptr));
87835diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
87836index 8ad93c2..efd80f8 100644
87837--- a/ipc/ipc_sysctl.c
87838+++ b/ipc/ipc_sysctl.c
87839@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
87840 static int proc_ipc_dointvec(struct ctl_table *table, int write,
87841 void __user *buffer, size_t *lenp, loff_t *ppos)
87842 {
87843- struct ctl_table ipc_table;
87844+ ctl_table_no_const ipc_table;
87845
87846 memcpy(&ipc_table, table, sizeof(ipc_table));
87847 ipc_table.data = get_ipc(table);
87848@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
87849 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
87850 void __user *buffer, size_t *lenp, loff_t *ppos)
87851 {
87852- struct ctl_table ipc_table;
87853+ ctl_table_no_const ipc_table;
87854
87855 memcpy(&ipc_table, table, sizeof(ipc_table));
87856 ipc_table.data = get_ipc(table);
87857@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
87858 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87859 void __user *buffer, size_t *lenp, loff_t *ppos)
87860 {
87861- struct ctl_table ipc_table;
87862+ ctl_table_no_const ipc_table;
87863 memcpy(&ipc_table, table, sizeof(ipc_table));
87864 ipc_table.data = get_ipc(table);
87865
87866@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
87867 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
87868 void __user *buffer, size_t *lenp, loff_t *ppos)
87869 {
87870- struct ctl_table ipc_table;
87871+ ctl_table_no_const ipc_table;
87872 int dummy = 0;
87873
87874 memcpy(&ipc_table, table, sizeof(ipc_table));
87875diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
87876index 68d4e95..1477ded 100644
87877--- a/ipc/mq_sysctl.c
87878+++ b/ipc/mq_sysctl.c
87879@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
87880 static int proc_mq_dointvec(struct ctl_table *table, int write,
87881 void __user *buffer, size_t *lenp, loff_t *ppos)
87882 {
87883- struct ctl_table mq_table;
87884+ ctl_table_no_const mq_table;
87885 memcpy(&mq_table, table, sizeof(mq_table));
87886 mq_table.data = get_mq(table);
87887
87888@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
87889 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
87890 void __user *buffer, size_t *lenp, loff_t *ppos)
87891 {
87892- struct ctl_table mq_table;
87893+ ctl_table_no_const mq_table;
87894 memcpy(&mq_table, table, sizeof(mq_table));
87895 mq_table.data = get_mq(table);
87896
87897diff --git a/ipc/mqueue.c b/ipc/mqueue.c
87898index 7635a1c..7432cb6 100644
87899--- a/ipc/mqueue.c
87900+++ b/ipc/mqueue.c
87901@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
87902 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
87903 info->attr.mq_msgsize);
87904
87905+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
87906 spin_lock(&mq_lock);
87907 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
87908 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
87909diff --git a/ipc/shm.c b/ipc/shm.c
87910index 19633b4..d454904 100644
87911--- a/ipc/shm.c
87912+++ b/ipc/shm.c
87913@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
87914 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
87915 #endif
87916
87917+#ifdef CONFIG_GRKERNSEC
87918+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87919+ const u64 shm_createtime, const kuid_t cuid,
87920+ const int shmid);
87921+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
87922+ const u64 shm_createtime);
87923+#endif
87924+
87925 void shm_init_ns(struct ipc_namespace *ns)
87926 {
87927 ns->shm_ctlmax = SHMMAX;
87928@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
87929 shp->shm_lprid = 0;
87930 shp->shm_atim = shp->shm_dtim = 0;
87931 shp->shm_ctim = get_seconds();
87932+#ifdef CONFIG_GRKERNSEC
87933+ shp->shm_createtime = ktime_get_ns();
87934+#endif
87935 shp->shm_segsz = size;
87936 shp->shm_nattch = 0;
87937 shp->shm_file = file;
87938@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87939 f_mode = FMODE_READ | FMODE_WRITE;
87940 }
87941 if (shmflg & SHM_EXEC) {
87942+
87943+#ifdef CONFIG_PAX_MPROTECT
87944+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
87945+ goto out;
87946+#endif
87947+
87948 prot |= PROT_EXEC;
87949 acc_mode |= S_IXUGO;
87950 }
87951@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87952 if (err)
87953 goto out_unlock;
87954
87955+#ifdef CONFIG_GRKERNSEC
87956+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
87957+ shp->shm_perm.cuid, shmid) ||
87958+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
87959+ err = -EACCES;
87960+ goto out_unlock;
87961+ }
87962+#endif
87963+
87964 ipc_lock_object(&shp->shm_perm);
87965
87966 /* check if shm_destroy() is tearing down shp */
87967@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
87968 path = shp->shm_file->f_path;
87969 path_get(&path);
87970 shp->shm_nattch++;
87971+#ifdef CONFIG_GRKERNSEC
87972+ shp->shm_lapid = current->pid;
87973+#endif
87974 size = i_size_read(path.dentry->d_inode);
87975 ipc_unlock_object(&shp->shm_perm);
87976 rcu_read_unlock();
87977diff --git a/ipc/util.c b/ipc/util.c
87978index 106bed0..f851429 100644
87979--- a/ipc/util.c
87980+++ b/ipc/util.c
87981@@ -71,6 +71,8 @@ struct ipc_proc_iface {
87982 int (*show)(struct seq_file *, void *);
87983 };
87984
87985+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
87986+
87987 /**
87988 * ipc_init - initialise ipc subsystem
87989 *
87990@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
87991 granted_mode >>= 6;
87992 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
87993 granted_mode >>= 3;
87994+
87995+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
87996+ return -1;
87997+
87998 /* is there some bit set in requested_mode but not in granted_mode? */
87999 if ((requested_mode & ~granted_mode & 0007) &&
88000 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88001diff --git a/kernel/audit.c b/kernel/audit.c
88002index 72ab759..757deba 100644
88003--- a/kernel/audit.c
88004+++ b/kernel/audit.c
88005@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88006 3) suppressed due to audit_rate_limit
88007 4) suppressed due to audit_backlog_limit
88008 */
88009-static atomic_t audit_lost = ATOMIC_INIT(0);
88010+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88011
88012 /* The netlink socket. */
88013 static struct sock *audit_sock;
88014@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88015 unsigned long now;
88016 int print;
88017
88018- atomic_inc(&audit_lost);
88019+ atomic_inc_unchecked(&audit_lost);
88020
88021 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88022
88023@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88024 if (print) {
88025 if (printk_ratelimit())
88026 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88027- atomic_read(&audit_lost),
88028+ atomic_read_unchecked(&audit_lost),
88029 audit_rate_limit,
88030 audit_backlog_limit);
88031 audit_panic(message);
88032@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88033 s.pid = audit_pid;
88034 s.rate_limit = audit_rate_limit;
88035 s.backlog_limit = audit_backlog_limit;
88036- s.lost = atomic_read(&audit_lost);
88037+ s.lost = atomic_read_unchecked(&audit_lost);
88038 s.backlog = skb_queue_len(&audit_skb_queue);
88039 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88040 s.backlog_wait_time = audit_backlog_wait_time;
88041diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88042index 072566d..1190489 100644
88043--- a/kernel/auditsc.c
88044+++ b/kernel/auditsc.c
88045@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88046 }
88047
88048 /* global counter which is incremented every time something logs in */
88049-static atomic_t session_id = ATOMIC_INIT(0);
88050+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88051
88052 static int audit_set_loginuid_perm(kuid_t loginuid)
88053 {
88054@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88055
88056 /* are we setting or clearing? */
88057 if (uid_valid(loginuid))
88058- sessionid = (unsigned int)atomic_inc_return(&session_id);
88059+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88060
88061 task->sessionid = sessionid;
88062 task->loginuid = loginuid;
88063diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88064index a64e7a2..2e69448 100644
88065--- a/kernel/bpf/core.c
88066+++ b/kernel/bpf/core.c
88067@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88068 * random section of illegal instructions.
88069 */
88070 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88071- hdr = module_alloc(size);
88072+ hdr = module_alloc_exec(size);
88073 if (hdr == NULL)
88074 return NULL;
88075
88076 /* Fill space with illegal/arch-dep instructions. */
88077 bpf_fill_ill_insns(hdr, size);
88078
88079+ pax_open_kernel();
88080 hdr->pages = size / PAGE_SIZE;
88081+ pax_close_kernel();
88082+
88083 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88084 PAGE_SIZE - sizeof(*hdr));
88085 start = (prandom_u32() % hole) & ~(alignment - 1);
88086@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88087
88088 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88089 {
88090- module_memfree(hdr);
88091+ module_memfree_exec(hdr);
88092 }
88093 #endif /* CONFIG_BPF_JIT */
88094
88095diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88096index 536edc2..d28c85d 100644
88097--- a/kernel/bpf/syscall.c
88098+++ b/kernel/bpf/syscall.c
88099@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88100 int err;
88101
88102 /* the syscall is limited to root temporarily. This restriction will be
88103- * lifted when security audit is clean. Note that eBPF+tracing must have
88104- * this restriction, since it may pass kernel data to user space
88105+ * lifted by upstream when a half-assed security audit is clean. Note
88106+ * that eBPF+tracing must have this restriction, since it may pass
88107+ * kernel data to user space
88108 */
88109 if (!capable(CAP_SYS_ADMIN))
88110 return -EPERM;
88111+#ifdef CONFIG_GRKERNSEC
88112+ return -EPERM;
88113+#endif
88114
88115 if (!access_ok(VERIFY_READ, uattr, 1))
88116 return -EFAULT;
88117diff --git a/kernel/capability.c b/kernel/capability.c
88118index 989f5bf..d317ca0 100644
88119--- a/kernel/capability.c
88120+++ b/kernel/capability.c
88121@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88122 * before modification is attempted and the application
88123 * fails.
88124 */
88125+ if (tocopy > ARRAY_SIZE(kdata))
88126+ return -EFAULT;
88127+
88128 if (copy_to_user(dataptr, kdata, tocopy
88129 * sizeof(struct __user_cap_data_struct))) {
88130 return -EFAULT;
88131@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88132 int ret;
88133
88134 rcu_read_lock();
88135- ret = security_capable(__task_cred(t), ns, cap);
88136+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88137+ gr_task_is_capable(t, __task_cred(t), cap);
88138 rcu_read_unlock();
88139
88140- return (ret == 0);
88141+ return ret;
88142 }
88143
88144 /**
88145@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88146 int ret;
88147
88148 rcu_read_lock();
88149- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88150+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88151 rcu_read_unlock();
88152
88153- return (ret == 0);
88154+ return ret;
88155 }
88156
88157 /**
88158@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88159 BUG();
88160 }
88161
88162- if (security_capable(current_cred(), ns, cap) == 0) {
88163+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88164 current->flags |= PF_SUPERPRIV;
88165 return true;
88166 }
88167@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88168 }
88169 EXPORT_SYMBOL(ns_capable);
88170
88171+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88172+{
88173+ if (unlikely(!cap_valid(cap))) {
88174+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88175+ BUG();
88176+ }
88177+
88178+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88179+ current->flags |= PF_SUPERPRIV;
88180+ return true;
88181+ }
88182+ return false;
88183+}
88184+EXPORT_SYMBOL(ns_capable_nolog);
88185+
88186 /**
88187 * file_ns_capable - Determine if the file's opener had a capability in effect
88188 * @file: The file we want to check
88189@@ -427,6 +446,12 @@ bool capable(int cap)
88190 }
88191 EXPORT_SYMBOL(capable);
88192
88193+bool capable_nolog(int cap)
88194+{
88195+ return ns_capable_nolog(&init_user_ns, cap);
88196+}
88197+EXPORT_SYMBOL(capable_nolog);
88198+
88199 /**
88200 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88201 * @inode: The inode in question
88202@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88203 kgid_has_mapping(ns, inode->i_gid);
88204 }
88205 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88206+
88207+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88208+{
88209+ struct user_namespace *ns = current_user_ns();
88210+
88211+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88212+ kgid_has_mapping(ns, inode->i_gid);
88213+}
88214+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
88215diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88216index 04cfe8a..adadcc0 100644
88217--- a/kernel/cgroup.c
88218+++ b/kernel/cgroup.c
88219@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88220 if (!pathbuf || !agentbuf)
88221 goto out;
88222
88223+ if (agentbuf[0] == '\0')
88224+ goto out;
88225+
88226 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88227 if (!path)
88228 goto out;
88229@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88230 struct task_struct *task;
88231 int count = 0;
88232
88233- seq_printf(seq, "css_set %p\n", cset);
88234+ seq_printf(seq, "css_set %pK\n", cset);
88235
88236 list_for_each_entry(task, &cset->tasks, cg_list) {
88237 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88238diff --git a/kernel/compat.c b/kernel/compat.c
88239index ebb3c36..1df606e 100644
88240--- a/kernel/compat.c
88241+++ b/kernel/compat.c
88242@@ -13,6 +13,7 @@
88243
88244 #include <linux/linkage.h>
88245 #include <linux/compat.h>
88246+#include <linux/module.h>
88247 #include <linux/errno.h>
88248 #include <linux/time.h>
88249 #include <linux/signal.h>
88250@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88251 mm_segment_t oldfs;
88252 long ret;
88253
88254- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88255+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88256 oldfs = get_fs();
88257 set_fs(KERNEL_DS);
88258 ret = hrtimer_nanosleep_restart(restart);
88259@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88260 oldfs = get_fs();
88261 set_fs(KERNEL_DS);
88262 ret = hrtimer_nanosleep(&tu,
88263- rmtp ? (struct timespec __user *)&rmt : NULL,
88264+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88265 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88266 set_fs(oldfs);
88267
88268@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88269 mm_segment_t old_fs = get_fs();
88270
88271 set_fs(KERNEL_DS);
88272- ret = sys_sigpending((old_sigset_t __user *) &s);
88273+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88274 set_fs(old_fs);
88275 if (ret == 0)
88276 ret = put_user(s, set);
88277@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88278 mm_segment_t old_fs = get_fs();
88279
88280 set_fs(KERNEL_DS);
88281- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88282+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88283 set_fs(old_fs);
88284
88285 if (!ret) {
88286@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88287 set_fs (KERNEL_DS);
88288 ret = sys_wait4(pid,
88289 (stat_addr ?
88290- (unsigned int __user *) &status : NULL),
88291- options, (struct rusage __user *) &r);
88292+ (unsigned int __force_user *) &status : NULL),
88293+ options, (struct rusage __force_user *) &r);
88294 set_fs (old_fs);
88295
88296 if (ret > 0) {
88297@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88298 memset(&info, 0, sizeof(info));
88299
88300 set_fs(KERNEL_DS);
88301- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88302- uru ? (struct rusage __user *)&ru : NULL);
88303+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88304+ uru ? (struct rusage __force_user *)&ru : NULL);
88305 set_fs(old_fs);
88306
88307 if ((ret < 0) || (info.si_signo == 0))
88308@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88309 oldfs = get_fs();
88310 set_fs(KERNEL_DS);
88311 err = sys_timer_settime(timer_id, flags,
88312- (struct itimerspec __user *) &newts,
88313- (struct itimerspec __user *) &oldts);
88314+ (struct itimerspec __force_user *) &newts,
88315+ (struct itimerspec __force_user *) &oldts);
88316 set_fs(oldfs);
88317 if (!err && old && put_compat_itimerspec(old, &oldts))
88318 return -EFAULT;
88319@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88320 oldfs = get_fs();
88321 set_fs(KERNEL_DS);
88322 err = sys_timer_gettime(timer_id,
88323- (struct itimerspec __user *) &ts);
88324+ (struct itimerspec __force_user *) &ts);
88325 set_fs(oldfs);
88326 if (!err && put_compat_itimerspec(setting, &ts))
88327 return -EFAULT;
88328@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88329 oldfs = get_fs();
88330 set_fs(KERNEL_DS);
88331 err = sys_clock_settime(which_clock,
88332- (struct timespec __user *) &ts);
88333+ (struct timespec __force_user *) &ts);
88334 set_fs(oldfs);
88335 return err;
88336 }
88337@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88338 oldfs = get_fs();
88339 set_fs(KERNEL_DS);
88340 err = sys_clock_gettime(which_clock,
88341- (struct timespec __user *) &ts);
88342+ (struct timespec __force_user *) &ts);
88343 set_fs(oldfs);
88344 if (!err && compat_put_timespec(&ts, tp))
88345 return -EFAULT;
88346@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88347
88348 oldfs = get_fs();
88349 set_fs(KERNEL_DS);
88350- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88351+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88352 set_fs(oldfs);
88353
88354 err = compat_put_timex(utp, &txc);
88355@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88356 oldfs = get_fs();
88357 set_fs(KERNEL_DS);
88358 err = sys_clock_getres(which_clock,
88359- (struct timespec __user *) &ts);
88360+ (struct timespec __force_user *) &ts);
88361 set_fs(oldfs);
88362 if (!err && tp && compat_put_timespec(&ts, tp))
88363 return -EFAULT;
88364@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88365 struct timespec tu;
88366 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88367
88368- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88369+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88370 oldfs = get_fs();
88371 set_fs(KERNEL_DS);
88372 err = clock_nanosleep_restart(restart);
88373@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88374 oldfs = get_fs();
88375 set_fs(KERNEL_DS);
88376 err = sys_clock_nanosleep(which_clock, flags,
88377- (struct timespec __user *) &in,
88378- (struct timespec __user *) &out);
88379+ (struct timespec __force_user *) &in,
88380+ (struct timespec __force_user *) &out);
88381 set_fs(oldfs);
88382
88383 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88384@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88385 mm_segment_t old_fs = get_fs();
88386
88387 set_fs(KERNEL_DS);
88388- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88389+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88390 set_fs(old_fs);
88391 if (compat_put_timespec(&t, interval))
88392 return -EFAULT;
88393diff --git a/kernel/configs.c b/kernel/configs.c
88394index c18b1f1..b9a0132 100644
88395--- a/kernel/configs.c
88396+++ b/kernel/configs.c
88397@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88398 struct proc_dir_entry *entry;
88399
88400 /* create the current config file */
88401+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88402+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88403+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88404+ &ikconfig_file_ops);
88405+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88406+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88407+ &ikconfig_file_ops);
88408+#endif
88409+#else
88410 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88411 &ikconfig_file_ops);
88412+#endif
88413+
88414 if (!entry)
88415 return -ENOMEM;
88416
88417diff --git a/kernel/cred.c b/kernel/cred.c
88418index e0573a4..26c0fd3 100644
88419--- a/kernel/cred.c
88420+++ b/kernel/cred.c
88421@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88422 validate_creds(cred);
88423 alter_cred_subscribers(cred, -1);
88424 put_cred(cred);
88425+
88426+#ifdef CONFIG_GRKERNSEC_SETXID
88427+ cred = (struct cred *) tsk->delayed_cred;
88428+ if (cred != NULL) {
88429+ tsk->delayed_cred = NULL;
88430+ validate_creds(cred);
88431+ alter_cred_subscribers(cred, -1);
88432+ put_cred(cred);
88433+ }
88434+#endif
88435 }
88436
88437 /**
88438@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88439 * Always returns 0 thus allowing this function to be tail-called at the end
88440 * of, say, sys_setgid().
88441 */
88442-int commit_creds(struct cred *new)
88443+static int __commit_creds(struct cred *new)
88444 {
88445 struct task_struct *task = current;
88446 const struct cred *old = task->real_cred;
88447@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88448
88449 get_cred(new); /* we will require a ref for the subj creds too */
88450
88451+ gr_set_role_label(task, new->uid, new->gid);
88452+
88453 /* dumpability changes */
88454 if (!uid_eq(old->euid, new->euid) ||
88455 !gid_eq(old->egid, new->egid) ||
88456@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88457 put_cred(old);
88458 return 0;
88459 }
88460+#ifdef CONFIG_GRKERNSEC_SETXID
88461+extern int set_user(struct cred *new);
88462+
88463+void gr_delayed_cred_worker(void)
88464+{
88465+ const struct cred *new = current->delayed_cred;
88466+ struct cred *ncred;
88467+
88468+ current->delayed_cred = NULL;
88469+
88470+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88471+ // from doing get_cred on it when queueing this
88472+ put_cred(new);
88473+ return;
88474+ } else if (new == NULL)
88475+ return;
88476+
88477+ ncred = prepare_creds();
88478+ if (!ncred)
88479+ goto die;
88480+ // uids
88481+ ncred->uid = new->uid;
88482+ ncred->euid = new->euid;
88483+ ncred->suid = new->suid;
88484+ ncred->fsuid = new->fsuid;
88485+ // gids
88486+ ncred->gid = new->gid;
88487+ ncred->egid = new->egid;
88488+ ncred->sgid = new->sgid;
88489+ ncred->fsgid = new->fsgid;
88490+ // groups
88491+ set_groups(ncred, new->group_info);
88492+ // caps
88493+ ncred->securebits = new->securebits;
88494+ ncred->cap_inheritable = new->cap_inheritable;
88495+ ncred->cap_permitted = new->cap_permitted;
88496+ ncred->cap_effective = new->cap_effective;
88497+ ncred->cap_bset = new->cap_bset;
88498+
88499+ if (set_user(ncred)) {
88500+ abort_creds(ncred);
88501+ goto die;
88502+ }
88503+
88504+ // from doing get_cred on it when queueing this
88505+ put_cred(new);
88506+
88507+ __commit_creds(ncred);
88508+ return;
88509+die:
88510+ // from doing get_cred on it when queueing this
88511+ put_cred(new);
88512+ do_group_exit(SIGKILL);
88513+}
88514+#endif
88515+
88516+int commit_creds(struct cred *new)
88517+{
88518+#ifdef CONFIG_GRKERNSEC_SETXID
88519+ int ret;
88520+ int schedule_it = 0;
88521+ struct task_struct *t;
88522+ unsigned oldsecurebits = current_cred()->securebits;
88523+
88524+ /* we won't get called with tasklist_lock held for writing
88525+ and interrupts disabled as the cred struct in that case is
88526+ init_cred
88527+ */
88528+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88529+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88530+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88531+ schedule_it = 1;
88532+ }
88533+ ret = __commit_creds(new);
88534+ if (schedule_it) {
88535+ rcu_read_lock();
88536+ read_lock(&tasklist_lock);
88537+ for (t = next_thread(current); t != current;
88538+ t = next_thread(t)) {
88539+ /* we'll check if the thread has uid 0 in
88540+ * the delayed worker routine
88541+ */
88542+ if (task_securebits(t) == oldsecurebits &&
88543+ t->delayed_cred == NULL) {
88544+ t->delayed_cred = get_cred(new);
88545+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88546+ set_tsk_need_resched(t);
88547+ }
88548+ }
88549+ read_unlock(&tasklist_lock);
88550+ rcu_read_unlock();
88551+ }
88552+
88553+ return ret;
88554+#else
88555+ return __commit_creds(new);
88556+#endif
88557+}
88558+
88559 EXPORT_SYMBOL(commit_creds);
88560
88561 /**
88562diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88563index ac5c0f9..4b1c6c2 100644
88564--- a/kernel/debug/debug_core.c
88565+++ b/kernel/debug/debug_core.c
88566@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88567 */
88568 static atomic_t masters_in_kgdb;
88569 static atomic_t slaves_in_kgdb;
88570-static atomic_t kgdb_break_tasklet_var;
88571+static atomic_unchecked_t kgdb_break_tasklet_var;
88572 atomic_t kgdb_setting_breakpoint;
88573
88574 struct task_struct *kgdb_usethread;
88575@@ -137,7 +137,7 @@ int kgdb_single_step;
88576 static pid_t kgdb_sstep_pid;
88577
88578 /* to keep track of the CPU which is doing the single stepping*/
88579-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88580+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88581
88582 /*
88583 * If you are debugging a problem where roundup (the collection of
88584@@ -552,7 +552,7 @@ return_normal:
88585 * kernel will only try for the value of sstep_tries before
88586 * giving up and continuing on.
88587 */
88588- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88589+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88590 (kgdb_info[cpu].task &&
88591 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88592 atomic_set(&kgdb_active, -1);
88593@@ -654,8 +654,8 @@ cpu_master_loop:
88594 }
88595
88596 kgdb_restore:
88597- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88598- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88599+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88600+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88601 if (kgdb_info[sstep_cpu].task)
88602 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88603 else
88604@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88605 static void kgdb_tasklet_bpt(unsigned long ing)
88606 {
88607 kgdb_breakpoint();
88608- atomic_set(&kgdb_break_tasklet_var, 0);
88609+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88610 }
88611
88612 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88613
88614 void kgdb_schedule_breakpoint(void)
88615 {
88616- if (atomic_read(&kgdb_break_tasklet_var) ||
88617+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88618 atomic_read(&kgdb_active) != -1 ||
88619 atomic_read(&kgdb_setting_breakpoint))
88620 return;
88621- atomic_inc(&kgdb_break_tasklet_var);
88622+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88623 tasklet_schedule(&kgdb_tasklet_breakpoint);
88624 }
88625 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
88626diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88627index 60f6bb8..104bb07 100644
88628--- a/kernel/debug/kdb/kdb_main.c
88629+++ b/kernel/debug/kdb/kdb_main.c
88630@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88631 continue;
88632
88633 kdb_printf("%-20s%8u 0x%p ", mod->name,
88634- mod->core_size, (void *)mod);
88635+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88636 #ifdef CONFIG_MODULE_UNLOAD
88637 kdb_printf("%4d ", module_refcount(mod));
88638 #endif
88639@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88640 kdb_printf(" (Loading)");
88641 else
88642 kdb_printf(" (Live)");
88643- kdb_printf(" 0x%p", mod->module_core);
88644+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88645
88646 #ifdef CONFIG_MODULE_UNLOAD
88647 {
88648diff --git a/kernel/events/core.c b/kernel/events/core.c
88649index 19efcf133..7c05c93 100644
88650--- a/kernel/events/core.c
88651+++ b/kernel/events/core.c
88652@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88653 * 0 - disallow raw tracepoint access for unpriv
88654 * 1 - disallow cpu events for unpriv
88655 * 2 - disallow kernel profiling for unpriv
88656+ * 3 - disallow all unpriv perf event use
88657 */
88658-int sysctl_perf_event_paranoid __read_mostly = 1;
88659+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88660+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88661+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88662+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88663+#else
88664+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88665+#endif
88666
88667 /* Minimum for 512 kiB + 1 user control page */
88668 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88669@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88670
88671 tmp *= sysctl_perf_cpu_time_max_percent;
88672 do_div(tmp, 100);
88673- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88674+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88675 }
88676
88677 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88678@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88679 }
88680 }
88681
88682-static atomic64_t perf_event_id;
88683+static atomic64_unchecked_t perf_event_id;
88684
88685 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88686 enum event_type_t event_type);
88687@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
88688
88689 static inline u64 perf_event_count(struct perf_event *event)
88690 {
88691- return local64_read(&event->count) + atomic64_read(&event->child_count);
88692+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88693 }
88694
88695 static u64 perf_event_read(struct perf_event *event)
88696@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88697 mutex_lock(&event->child_mutex);
88698 total += perf_event_read(event);
88699 *enabled += event->total_time_enabled +
88700- atomic64_read(&event->child_total_time_enabled);
88701+ atomic64_read_unchecked(&event->child_total_time_enabled);
88702 *running += event->total_time_running +
88703- atomic64_read(&event->child_total_time_running);
88704+ atomic64_read_unchecked(&event->child_total_time_running);
88705
88706 list_for_each_entry(child, &event->child_list, child_list) {
88707 total += perf_event_read(child);
88708@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
88709 userpg->offset -= local64_read(&event->hw.prev_count);
88710
88711 userpg->time_enabled = enabled +
88712- atomic64_read(&event->child_total_time_enabled);
88713+ atomic64_read_unchecked(&event->child_total_time_enabled);
88714
88715 userpg->time_running = running +
88716- atomic64_read(&event->child_total_time_running);
88717+ atomic64_read_unchecked(&event->child_total_time_running);
88718
88719 arch_perf_update_userpage(userpg, now);
88720
88721@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88722
88723 /* Data. */
88724 sp = perf_user_stack_pointer(regs);
88725- rem = __output_copy_user(handle, (void *) sp, dump_size);
88726+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88727 dyn_size = dump_size - rem;
88728
88729 perf_output_skip(handle, rem);
88730@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88731 values[n++] = perf_event_count(event);
88732 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88733 values[n++] = enabled +
88734- atomic64_read(&event->child_total_time_enabled);
88735+ atomic64_read_unchecked(&event->child_total_time_enabled);
88736 }
88737 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88738 values[n++] = running +
88739- atomic64_read(&event->child_total_time_running);
88740+ atomic64_read_unchecked(&event->child_total_time_running);
88741 }
88742 if (read_format & PERF_FORMAT_ID)
88743 values[n++] = primary_event_id(event);
88744@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
88745 event->parent = parent_event;
88746
88747 event->ns = get_pid_ns(task_active_pid_ns(current));
88748- event->id = atomic64_inc_return(&perf_event_id);
88749+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
88750
88751 event->state = PERF_EVENT_STATE_INACTIVE;
88752
88753@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
88754 if (flags & ~PERF_FLAG_ALL)
88755 return -EINVAL;
88756
88757+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88758+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
88759+ return -EACCES;
88760+#endif
88761+
88762 err = perf_copy_attr(attr_uptr, &attr);
88763 if (err)
88764 return err;
88765@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
88766 /*
88767 * Add back the child's count to the parent's count:
88768 */
88769- atomic64_add(child_val, &parent_event->child_count);
88770- atomic64_add(child_event->total_time_enabled,
88771+ atomic64_add_unchecked(child_val, &parent_event->child_count);
88772+ atomic64_add_unchecked(child_event->total_time_enabled,
88773 &parent_event->child_total_time_enabled);
88774- atomic64_add(child_event->total_time_running,
88775+ atomic64_add_unchecked(child_event->total_time_running,
88776 &parent_event->child_total_time_running);
88777
88778 /*
88779diff --git a/kernel/events/internal.h b/kernel/events/internal.h
88780index 569b2187..19940d9 100644
88781--- a/kernel/events/internal.h
88782+++ b/kernel/events/internal.h
88783@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
88784 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
88785 }
88786
88787-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
88788+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
88789 static inline unsigned long \
88790 func_name(struct perf_output_handle *handle, \
88791- const void *buf, unsigned long len) \
88792+ const void user *buf, unsigned long len) \
88793 { \
88794 unsigned long size, written; \
88795 \
88796@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
88797 return 0;
88798 }
88799
88800-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
88801+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
88802
88803 static inline unsigned long
88804 memcpy_skip(void *dst, const void *src, unsigned long n)
88805@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
88806 return 0;
88807 }
88808
88809-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
88810+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
88811
88812 #ifndef arch_perf_out_copy_user
88813 #define arch_perf_out_copy_user arch_perf_out_copy_user
88814@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
88815 }
88816 #endif
88817
88818-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
88819+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
88820
88821 /* Callchain handling */
88822 extern struct perf_callchain_entry *
88823diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
88824index cb346f2..e4dc317 100644
88825--- a/kernel/events/uprobes.c
88826+++ b/kernel/events/uprobes.c
88827@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
88828 {
88829 struct page *page;
88830 uprobe_opcode_t opcode;
88831- int result;
88832+ long result;
88833
88834 pagefault_disable();
88835 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
88836diff --git a/kernel/exit.c b/kernel/exit.c
88837index 6806c55..a5fb128 100644
88838--- a/kernel/exit.c
88839+++ b/kernel/exit.c
88840@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
88841 struct task_struct *leader;
88842 int zap_leader;
88843 repeat:
88844+#ifdef CONFIG_NET
88845+ gr_del_task_from_ip_table(p);
88846+#endif
88847+
88848 /* don't need to get the RCU readlock here - the process is dead and
88849 * can't be modifying its own credentials. But shut RCU-lockdep up */
88850 rcu_read_lock();
88851@@ -655,6 +659,8 @@ void do_exit(long code)
88852 int group_dead;
88853 TASKS_RCU(int tasks_rcu_i);
88854
88855+ set_fs(USER_DS);
88856+
88857 profile_task_exit(tsk);
88858
88859 WARN_ON(blk_needs_flush_plug(tsk));
88860@@ -671,7 +677,6 @@ void do_exit(long code)
88861 * mm_release()->clear_child_tid() from writing to a user-controlled
88862 * kernel address.
88863 */
88864- set_fs(USER_DS);
88865
88866 ptrace_event(PTRACE_EVENT_EXIT, code);
88867
88868@@ -729,6 +734,9 @@ void do_exit(long code)
88869 tsk->exit_code = code;
88870 taskstats_exit(tsk, group_dead);
88871
88872+ gr_acl_handle_psacct(tsk, code);
88873+ gr_acl_handle_exit();
88874+
88875 exit_mm(tsk);
88876
88877 if (group_dead)
88878@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
88879 * Take down every thread in the group. This is called by fatal signals
88880 * as well as by sys_exit_group (below).
88881 */
88882-void
88883+__noreturn void
88884 do_group_exit(int exit_code)
88885 {
88886 struct signal_struct *sig = current->signal;
88887diff --git a/kernel/fork.c b/kernel/fork.c
88888index 4dc2dda..651add0 100644
88889--- a/kernel/fork.c
88890+++ b/kernel/fork.c
88891@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
88892 void thread_info_cache_init(void)
88893 {
88894 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
88895- THREAD_SIZE, 0, NULL);
88896+ THREAD_SIZE, SLAB_USERCOPY, NULL);
88897 BUG_ON(thread_info_cache == NULL);
88898 }
88899 # endif
88900 #endif
88901
88902+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88903+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88904+ int node, void **lowmem_stack)
88905+{
88906+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
88907+ void *ret = NULL;
88908+ unsigned int i;
88909+
88910+ *lowmem_stack = alloc_thread_info_node(tsk, node);
88911+ if (*lowmem_stack == NULL)
88912+ goto out;
88913+
88914+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
88915+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
88916+
88917+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
88918+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
88919+ if (ret == NULL) {
88920+ free_thread_info(*lowmem_stack);
88921+ *lowmem_stack = NULL;
88922+ }
88923+
88924+out:
88925+ return ret;
88926+}
88927+
88928+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88929+{
88930+ unmap_process_stacks(tsk);
88931+}
88932+#else
88933+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
88934+ int node, void **lowmem_stack)
88935+{
88936+ return alloc_thread_info_node(tsk, node);
88937+}
88938+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
88939+{
88940+ free_thread_info(ti);
88941+}
88942+#endif
88943+
88944 /* SLAB cache for signal_struct structures (tsk->signal) */
88945 static struct kmem_cache *signal_cachep;
88946
88947@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
88948 /* SLAB cache for mm_struct structures (tsk->mm) */
88949 static struct kmem_cache *mm_cachep;
88950
88951-static void account_kernel_stack(struct thread_info *ti, int account)
88952+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
88953 {
88954+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88955+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
88956+#else
88957 struct zone *zone = page_zone(virt_to_page(ti));
88958+#endif
88959
88960 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
88961 }
88962
88963 void free_task(struct task_struct *tsk)
88964 {
88965- account_kernel_stack(tsk->stack, -1);
88966+ account_kernel_stack(tsk, tsk->stack, -1);
88967 arch_release_thread_info(tsk->stack);
88968- free_thread_info(tsk->stack);
88969+ gr_free_thread_info(tsk, tsk->stack);
88970 rt_mutex_debug_task_free(tsk);
88971 ftrace_graph_exit_task(tsk);
88972 put_seccomp_filter(tsk);
88973@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88974 {
88975 struct task_struct *tsk;
88976 struct thread_info *ti;
88977+ void *lowmem_stack;
88978 int node = tsk_fork_get_node(orig);
88979 int err;
88980
88981@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88982 if (!tsk)
88983 return NULL;
88984
88985- ti = alloc_thread_info_node(tsk, node);
88986+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
88987 if (!ti)
88988 goto free_tsk;
88989
88990@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
88991 goto free_ti;
88992
88993 tsk->stack = ti;
88994+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
88995+ tsk->lowmem_stack = lowmem_stack;
88996+#endif
88997 #ifdef CONFIG_SECCOMP
88998 /*
88999 * We must handle setting up seccomp filters once we're under
89000@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89001 set_task_stack_end_magic(tsk);
89002
89003 #ifdef CONFIG_CC_STACKPROTECTOR
89004- tsk->stack_canary = get_random_int();
89005+ tsk->stack_canary = pax_get_random_long();
89006 #endif
89007
89008 /*
89009@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89010 tsk->splice_pipe = NULL;
89011 tsk->task_frag.page = NULL;
89012
89013- account_kernel_stack(ti, 1);
89014+ account_kernel_stack(tsk, ti, 1);
89015
89016 return tsk;
89017
89018 free_ti:
89019- free_thread_info(ti);
89020+ gr_free_thread_info(tsk, ti);
89021 free_tsk:
89022 free_task_struct(tsk);
89023 return NULL;
89024 }
89025
89026 #ifdef CONFIG_MMU
89027-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89028+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89029+{
89030+ struct vm_area_struct *tmp;
89031+ unsigned long charge;
89032+ struct file *file;
89033+ int retval;
89034+
89035+ charge = 0;
89036+ if (mpnt->vm_flags & VM_ACCOUNT) {
89037+ unsigned long len = vma_pages(mpnt);
89038+
89039+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89040+ goto fail_nomem;
89041+ charge = len;
89042+ }
89043+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89044+ if (!tmp)
89045+ goto fail_nomem;
89046+ *tmp = *mpnt;
89047+ tmp->vm_mm = mm;
89048+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89049+ retval = vma_dup_policy(mpnt, tmp);
89050+ if (retval)
89051+ goto fail_nomem_policy;
89052+ if (anon_vma_fork(tmp, mpnt))
89053+ goto fail_nomem_anon_vma_fork;
89054+ tmp->vm_flags &= ~VM_LOCKED;
89055+ tmp->vm_next = tmp->vm_prev = NULL;
89056+ tmp->vm_mirror = NULL;
89057+ file = tmp->vm_file;
89058+ if (file) {
89059+ struct inode *inode = file_inode(file);
89060+ struct address_space *mapping = file->f_mapping;
89061+
89062+ get_file(file);
89063+ if (tmp->vm_flags & VM_DENYWRITE)
89064+ atomic_dec(&inode->i_writecount);
89065+ i_mmap_lock_write(mapping);
89066+ if (tmp->vm_flags & VM_SHARED)
89067+ atomic_inc(&mapping->i_mmap_writable);
89068+ flush_dcache_mmap_lock(mapping);
89069+ /* insert tmp into the share list, just after mpnt */
89070+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89071+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89072+ else
89073+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89074+ flush_dcache_mmap_unlock(mapping);
89075+ i_mmap_unlock_write(mapping);
89076+ }
89077+
89078+ /*
89079+ * Clear hugetlb-related page reserves for children. This only
89080+ * affects MAP_PRIVATE mappings. Faults generated by the child
89081+ * are not guaranteed to succeed, even if read-only
89082+ */
89083+ if (is_vm_hugetlb_page(tmp))
89084+ reset_vma_resv_huge_pages(tmp);
89085+
89086+ return tmp;
89087+
89088+fail_nomem_anon_vma_fork:
89089+ mpol_put(vma_policy(tmp));
89090+fail_nomem_policy:
89091+ kmem_cache_free(vm_area_cachep, tmp);
89092+fail_nomem:
89093+ vm_unacct_memory(charge);
89094+ return NULL;
89095+}
89096+
89097+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89098 {
89099 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89100 struct rb_node **rb_link, *rb_parent;
89101 int retval;
89102- unsigned long charge;
89103
89104 uprobe_start_dup_mmap();
89105 down_write(&oldmm->mmap_sem);
89106@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89107
89108 prev = NULL;
89109 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89110- struct file *file;
89111-
89112 if (mpnt->vm_flags & VM_DONTCOPY) {
89113 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89114 -vma_pages(mpnt));
89115 continue;
89116 }
89117- charge = 0;
89118- if (mpnt->vm_flags & VM_ACCOUNT) {
89119- unsigned long len = vma_pages(mpnt);
89120-
89121- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89122- goto fail_nomem;
89123- charge = len;
89124- }
89125- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89126- if (!tmp)
89127- goto fail_nomem;
89128- *tmp = *mpnt;
89129- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89130- retval = vma_dup_policy(mpnt, tmp);
89131- if (retval)
89132- goto fail_nomem_policy;
89133- tmp->vm_mm = mm;
89134- if (anon_vma_fork(tmp, mpnt))
89135- goto fail_nomem_anon_vma_fork;
89136- tmp->vm_flags &= ~VM_LOCKED;
89137- tmp->vm_next = tmp->vm_prev = NULL;
89138- file = tmp->vm_file;
89139- if (file) {
89140- struct inode *inode = file_inode(file);
89141- struct address_space *mapping = file->f_mapping;
89142-
89143- get_file(file);
89144- if (tmp->vm_flags & VM_DENYWRITE)
89145- atomic_dec(&inode->i_writecount);
89146- i_mmap_lock_write(mapping);
89147- if (tmp->vm_flags & VM_SHARED)
89148- atomic_inc(&mapping->i_mmap_writable);
89149- flush_dcache_mmap_lock(mapping);
89150- /* insert tmp into the share list, just after mpnt */
89151- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89152- vma_nonlinear_insert(tmp,
89153- &mapping->i_mmap_nonlinear);
89154- else
89155- vma_interval_tree_insert_after(tmp, mpnt,
89156- &mapping->i_mmap);
89157- flush_dcache_mmap_unlock(mapping);
89158- i_mmap_unlock_write(mapping);
89159+ tmp = dup_vma(mm, oldmm, mpnt);
89160+ if (!tmp) {
89161+ retval = -ENOMEM;
89162+ goto out;
89163 }
89164
89165 /*
89166@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89167 if (retval)
89168 goto out;
89169 }
89170+
89171+#ifdef CONFIG_PAX_SEGMEXEC
89172+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89173+ struct vm_area_struct *mpnt_m;
89174+
89175+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89176+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89177+
89178+ if (!mpnt->vm_mirror)
89179+ continue;
89180+
89181+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89182+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89183+ mpnt->vm_mirror = mpnt_m;
89184+ } else {
89185+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89186+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89187+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89188+ mpnt->vm_mirror->vm_mirror = mpnt;
89189+ }
89190+ }
89191+ BUG_ON(mpnt_m);
89192+ }
89193+#endif
89194+
89195 /* a new mm has just been created */
89196 arch_dup_mmap(oldmm, mm);
89197 retval = 0;
89198@@ -486,14 +589,6 @@ out:
89199 up_write(&oldmm->mmap_sem);
89200 uprobe_end_dup_mmap();
89201 return retval;
89202-fail_nomem_anon_vma_fork:
89203- mpol_put(vma_policy(tmp));
89204-fail_nomem_policy:
89205- kmem_cache_free(vm_area_cachep, tmp);
89206-fail_nomem:
89207- retval = -ENOMEM;
89208- vm_unacct_memory(charge);
89209- goto out;
89210 }
89211
89212 static inline int mm_alloc_pgd(struct mm_struct *mm)
89213@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89214 return ERR_PTR(err);
89215
89216 mm = get_task_mm(task);
89217- if (mm && mm != current->mm &&
89218- !ptrace_may_access(task, mode)) {
89219+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89220+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89221 mmput(mm);
89222 mm = ERR_PTR(-EACCES);
89223 }
89224@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89225 spin_unlock(&fs->lock);
89226 return -EAGAIN;
89227 }
89228- fs->users++;
89229+ atomic_inc(&fs->users);
89230 spin_unlock(&fs->lock);
89231 return 0;
89232 }
89233 tsk->fs = copy_fs_struct(fs);
89234 if (!tsk->fs)
89235 return -ENOMEM;
89236+ /* Carry through gr_chroot_dentry and is_chrooted instead
89237+ of recomputing it here. Already copied when the task struct
89238+ is duplicated. This allows pivot_root to not be treated as
89239+ a chroot
89240+ */
89241+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89242+
89243 return 0;
89244 }
89245
89246@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89247 * parts of the process environment (as per the clone
89248 * flags). The actual kick-off is left to the caller.
89249 */
89250-static struct task_struct *copy_process(unsigned long clone_flags,
89251+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89252 unsigned long stack_start,
89253 unsigned long stack_size,
89254 int __user *child_tidptr,
89255@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89256 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89257 #endif
89258 retval = -EAGAIN;
89259+
89260+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89261+
89262 if (atomic_read(&p->real_cred->user->processes) >=
89263 task_rlimit(p, RLIMIT_NPROC)) {
89264 if (p->real_cred->user != INIT_USER &&
89265@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89266 goto bad_fork_free_pid;
89267 }
89268
89269+ /* synchronizes with gr_set_acls()
89270+ we need to call this past the point of no return for fork()
89271+ */
89272+ gr_copy_label(p);
89273+
89274 if (likely(p->pid)) {
89275 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89276
89277@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89278 bad_fork_free:
89279 free_task(p);
89280 fork_out:
89281+ gr_log_forkfail(retval);
89282+
89283 return ERR_PTR(retval);
89284 }
89285
89286@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89287
89288 p = copy_process(clone_flags, stack_start, stack_size,
89289 child_tidptr, NULL, trace);
89290+ add_latent_entropy();
89291 /*
89292 * Do this prior waking up the new thread - the thread pointer
89293 * might get invalid after that point, if the thread exits quickly.
89294@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89295 if (clone_flags & CLONE_PARENT_SETTID)
89296 put_user(nr, parent_tidptr);
89297
89298+ gr_handle_brute_check();
89299+
89300 if (clone_flags & CLONE_VFORK) {
89301 p->vfork_done = &vfork;
89302 init_completion(&vfork);
89303@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89304 mm_cachep = kmem_cache_create("mm_struct",
89305 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89306 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89307- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89308+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89309 mmap_init();
89310 nsproxy_cache_init();
89311 }
89312@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89313 return 0;
89314
89315 /* don't need lock here; in the worst case we'll do useless copy */
89316- if (fs->users == 1)
89317+ if (atomic_read(&fs->users) == 1)
89318 return 0;
89319
89320 *new_fsp = copy_fs_struct(fs);
89321@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89322 fs = current->fs;
89323 spin_lock(&fs->lock);
89324 current->fs = new_fs;
89325- if (--fs->users)
89326+ gr_set_chroot_entries(current, &current->fs->root);
89327+ if (atomic_dec_return(&fs->users))
89328 new_fs = NULL;
89329 else
89330 new_fs = fs;
89331diff --git a/kernel/futex.c b/kernel/futex.c
89332index 63678b5..512f9af 100644
89333--- a/kernel/futex.c
89334+++ b/kernel/futex.c
89335@@ -201,7 +201,7 @@ struct futex_pi_state {
89336 atomic_t refcount;
89337
89338 union futex_key key;
89339-};
89340+} __randomize_layout;
89341
89342 /**
89343 * struct futex_q - The hashed futex queue entry, one per waiting task
89344@@ -235,7 +235,7 @@ struct futex_q {
89345 struct rt_mutex_waiter *rt_waiter;
89346 union futex_key *requeue_pi_key;
89347 u32 bitset;
89348-};
89349+} __randomize_layout;
89350
89351 static const struct futex_q futex_q_init = {
89352 /* list gets initialized in queue_me()*/
89353@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89354 struct page *page, *page_head;
89355 int err, ro = 0;
89356
89357+#ifdef CONFIG_PAX_SEGMEXEC
89358+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89359+ return -EFAULT;
89360+#endif
89361+
89362 /*
89363 * The futex address must be "naturally" aligned.
89364 */
89365@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89366
89367 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89368 {
89369- int ret;
89370+ unsigned long ret;
89371
89372 pagefault_disable();
89373 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89374@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89375 {
89376 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89377 u32 curval;
89378+ mm_segment_t oldfs;
89379
89380 /*
89381 * This will fail and we want it. Some arch implementations do
89382@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89383 * implementation, the non-functional ones will return
89384 * -ENOSYS.
89385 */
89386+ oldfs = get_fs();
89387+ set_fs(USER_DS);
89388 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89389 futex_cmpxchg_enabled = 1;
89390+ set_fs(oldfs);
89391 #endif
89392 }
89393
89394diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89395index 55c8c93..9ba7ad6 100644
89396--- a/kernel/futex_compat.c
89397+++ b/kernel/futex_compat.c
89398@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89399 return 0;
89400 }
89401
89402-static void __user *futex_uaddr(struct robust_list __user *entry,
89403+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89404 compat_long_t futex_offset)
89405 {
89406 compat_uptr_t base = ptr_to_compat(entry);
89407diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89408index b358a80..fc25240 100644
89409--- a/kernel/gcov/base.c
89410+++ b/kernel/gcov/base.c
89411@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89412 }
89413
89414 #ifdef CONFIG_MODULES
89415-static inline int within(void *addr, void *start, unsigned long size)
89416-{
89417- return ((addr >= start) && (addr < start + size));
89418-}
89419-
89420 /* Update list and generate events when modules are unloaded. */
89421 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89422 void *data)
89423@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89424
89425 /* Remove entries located in module from linked list. */
89426 while ((info = gcov_info_next(info))) {
89427- if (within(info, mod->module_core, mod->core_size)) {
89428+ if (within_module_core_rw((unsigned long)info, mod)) {
89429 gcov_info_unlink(prev, info);
89430 if (gcov_events_enabled)
89431 gcov_event(GCOV_REMOVE, info);
89432diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89433index 8069237..fe712d0 100644
89434--- a/kernel/irq/manage.c
89435+++ b/kernel/irq/manage.c
89436@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89437
89438 action_ret = handler_fn(desc, action);
89439 if (action_ret == IRQ_HANDLED)
89440- atomic_inc(&desc->threads_handled);
89441+ atomic_inc_unchecked(&desc->threads_handled);
89442
89443 wake_threads_waitq(desc);
89444 }
89445diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89446index e2514b0..de3dfe0 100644
89447--- a/kernel/irq/spurious.c
89448+++ b/kernel/irq/spurious.c
89449@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89450 * count. We just care about the count being
89451 * different than the one we saw before.
89452 */
89453- handled = atomic_read(&desc->threads_handled);
89454+ handled = atomic_read_unchecked(&desc->threads_handled);
89455 handled |= SPURIOUS_DEFERRED;
89456 if (handled != desc->threads_handled_last) {
89457 action_ret = IRQ_HANDLED;
89458diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89459index 9019f15..9a3c42e 100644
89460--- a/kernel/jump_label.c
89461+++ b/kernel/jump_label.c
89462@@ -14,6 +14,7 @@
89463 #include <linux/err.h>
89464 #include <linux/static_key.h>
89465 #include <linux/jump_label_ratelimit.h>
89466+#include <linux/mm.h>
89467
89468 #ifdef HAVE_JUMP_LABEL
89469
89470@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89471
89472 size = (((unsigned long)stop - (unsigned long)start)
89473 / sizeof(struct jump_entry));
89474+ pax_open_kernel();
89475 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89476+ pax_close_kernel();
89477 }
89478
89479 static void jump_label_update(struct static_key *key, int enable);
89480@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89481 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89482 struct jump_entry *iter;
89483
89484+ pax_open_kernel();
89485 for (iter = iter_start; iter < iter_stop; iter++) {
89486 if (within_module_init(iter->code, mod))
89487 iter->code = 0;
89488 }
89489+ pax_close_kernel();
89490 }
89491
89492 static int
89493diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89494index 5c5987f..bc502b0 100644
89495--- a/kernel/kallsyms.c
89496+++ b/kernel/kallsyms.c
89497@@ -11,6 +11,9 @@
89498 * Changed the compression method from stem compression to "table lookup"
89499 * compression (see scripts/kallsyms.c for a more complete description)
89500 */
89501+#ifdef CONFIG_GRKERNSEC_HIDESYM
89502+#define __INCLUDED_BY_HIDESYM 1
89503+#endif
89504 #include <linux/kallsyms.h>
89505 #include <linux/module.h>
89506 #include <linux/init.h>
89507@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89508
89509 static inline int is_kernel_inittext(unsigned long addr)
89510 {
89511+ if (system_state != SYSTEM_BOOTING)
89512+ return 0;
89513+
89514 if (addr >= (unsigned long)_sinittext
89515 && addr <= (unsigned long)_einittext)
89516 return 1;
89517 return 0;
89518 }
89519
89520+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89521+#ifdef CONFIG_MODULES
89522+static inline int is_module_text(unsigned long addr)
89523+{
89524+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89525+ return 1;
89526+
89527+ addr = ktla_ktva(addr);
89528+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89529+}
89530+#else
89531+static inline int is_module_text(unsigned long addr)
89532+{
89533+ return 0;
89534+}
89535+#endif
89536+#endif
89537+
89538 static inline int is_kernel_text(unsigned long addr)
89539 {
89540 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89541@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89542
89543 static inline int is_kernel(unsigned long addr)
89544 {
89545+
89546+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89547+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89548+ return 1;
89549+
89550+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89551+#else
89552 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89553+#endif
89554+
89555 return 1;
89556 return in_gate_area_no_mm(addr);
89557 }
89558
89559 static int is_ksym_addr(unsigned long addr)
89560 {
89561+
89562+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89563+ if (is_module_text(addr))
89564+ return 0;
89565+#endif
89566+
89567 if (all_var)
89568 return is_kernel(addr);
89569
89570@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89571
89572 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89573 {
89574- iter->name[0] = '\0';
89575 iter->nameoff = get_symbol_offset(new_pos);
89576 iter->pos = new_pos;
89577 }
89578@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89579 {
89580 struct kallsym_iter *iter = m->private;
89581
89582+#ifdef CONFIG_GRKERNSEC_HIDESYM
89583+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89584+ return 0;
89585+#endif
89586+
89587 /* Some debugging symbols have no name. Ignore them. */
89588 if (!iter->name[0])
89589 return 0;
89590@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89591 */
89592 type = iter->exported ? toupper(iter->type) :
89593 tolower(iter->type);
89594+
89595 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89596 type, iter->name, iter->module_name);
89597 } else
89598diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89599index 0aa69ea..a7fcafb 100644
89600--- a/kernel/kcmp.c
89601+++ b/kernel/kcmp.c
89602@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89603 struct task_struct *task1, *task2;
89604 int ret;
89605
89606+#ifdef CONFIG_GRKERNSEC
89607+ return -ENOSYS;
89608+#endif
89609+
89610 rcu_read_lock();
89611
89612 /*
89613diff --git a/kernel/kexec.c b/kernel/kexec.c
89614index 9a8a01a..3c35dd6 100644
89615--- a/kernel/kexec.c
89616+++ b/kernel/kexec.c
89617@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89618 compat_ulong_t, flags)
89619 {
89620 struct compat_kexec_segment in;
89621- struct kexec_segment out, __user *ksegments;
89622+ struct kexec_segment out;
89623+ struct kexec_segment __user *ksegments;
89624 unsigned long i, result;
89625
89626 /* Don't allow clients that don't understand the native
89627diff --git a/kernel/kmod.c b/kernel/kmod.c
89628index 2777f40..a26e825 100644
89629--- a/kernel/kmod.c
89630+++ b/kernel/kmod.c
89631@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89632 kfree(info->argv);
89633 }
89634
89635-static int call_modprobe(char *module_name, int wait)
89636+static int call_modprobe(char *module_name, char *module_param, int wait)
89637 {
89638 struct subprocess_info *info;
89639 static char *envp[] = {
89640@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89641 NULL
89642 };
89643
89644- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89645+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89646 if (!argv)
89647 goto out;
89648
89649@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89650 argv[1] = "-q";
89651 argv[2] = "--";
89652 argv[3] = module_name; /* check free_modprobe_argv() */
89653- argv[4] = NULL;
89654+ argv[4] = module_param;
89655+ argv[5] = NULL;
89656
89657 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89658 NULL, free_modprobe_argv, NULL);
89659@@ -122,9 +123,8 @@ out:
89660 * If module auto-loading support is disabled then this function
89661 * becomes a no-operation.
89662 */
89663-int __request_module(bool wait, const char *fmt, ...)
89664+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89665 {
89666- va_list args;
89667 char module_name[MODULE_NAME_LEN];
89668 unsigned int max_modprobes;
89669 int ret;
89670@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89671 if (!modprobe_path[0])
89672 return 0;
89673
89674- va_start(args, fmt);
89675- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89676- va_end(args);
89677+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89678 if (ret >= MODULE_NAME_LEN)
89679 return -ENAMETOOLONG;
89680
89681@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
89682 if (ret)
89683 return ret;
89684
89685+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89686+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89687+ /* hack to workaround consolekit/udisks stupidity */
89688+ read_lock(&tasklist_lock);
89689+ if (!strcmp(current->comm, "mount") &&
89690+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89691+ read_unlock(&tasklist_lock);
89692+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89693+ return -EPERM;
89694+ }
89695+ read_unlock(&tasklist_lock);
89696+ }
89697+#endif
89698+
89699 /* If modprobe needs a service that is in a module, we get a recursive
89700 * loop. Limit the number of running kmod threads to max_threads/2 or
89701 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89702@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
89703
89704 trace_module_request(module_name, wait, _RET_IP_);
89705
89706- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89707+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89708
89709 atomic_dec(&kmod_concurrent);
89710 return ret;
89711 }
89712+
89713+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89714+{
89715+ va_list args;
89716+ int ret;
89717+
89718+ va_start(args, fmt);
89719+ ret = ____request_module(wait, module_param, fmt, args);
89720+ va_end(args);
89721+
89722+ return ret;
89723+}
89724+
89725+int __request_module(bool wait, const char *fmt, ...)
89726+{
89727+ va_list args;
89728+ int ret;
89729+
89730+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89731+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89732+ char module_param[MODULE_NAME_LEN];
89733+
89734+ memset(module_param, 0, sizeof(module_param));
89735+
89736+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89737+
89738+ va_start(args, fmt);
89739+ ret = ____request_module(wait, module_param, fmt, args);
89740+ va_end(args);
89741+
89742+ return ret;
89743+ }
89744+#endif
89745+
89746+ va_start(args, fmt);
89747+ ret = ____request_module(wait, NULL, fmt, args);
89748+ va_end(args);
89749+
89750+ return ret;
89751+}
89752+
89753 EXPORT_SYMBOL(__request_module);
89754 #endif /* CONFIG_MODULES */
89755
89756 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
89757 {
89758+#ifdef CONFIG_GRKERNSEC
89759+ kfree(info->path);
89760+ info->path = info->origpath;
89761+#endif
89762 if (info->cleanup)
89763 (*info->cleanup)(info);
89764 kfree(info);
89765@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
89766 */
89767 set_user_nice(current, 0);
89768
89769+#ifdef CONFIG_GRKERNSEC
89770+ /* this is race-free as far as userland is concerned as we copied
89771+ out the path to be used prior to this point and are now operating
89772+ on that copy
89773+ */
89774+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
89775+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
89776+ strncmp(sub_info->path, "/usr/libexec/", 13) &&
89777+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
89778+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
89779+ retval = -EPERM;
89780+ goto out;
89781+ }
89782+#endif
89783+
89784 retval = -ENOMEM;
89785 new = prepare_kernel_cred(current);
89786 if (!new)
89787@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
89788 commit_creds(new);
89789
89790 retval = do_execve(getname_kernel(sub_info->path),
89791- (const char __user *const __user *)sub_info->argv,
89792- (const char __user *const __user *)sub_info->envp);
89793+ (const char __user *const __force_user *)sub_info->argv,
89794+ (const char __user *const __force_user *)sub_info->envp);
89795 out:
89796 sub_info->retval = retval;
89797 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
89798@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
89799 *
89800 * Thus the __user pointer cast is valid here.
89801 */
89802- sys_wait4(pid, (int __user *)&ret, 0, NULL);
89803+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
89804
89805 /*
89806 * If ret is 0, either ____call_usermodehelper failed and the
89807@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
89808 goto out;
89809
89810 INIT_WORK(&sub_info->work, __call_usermodehelper);
89811+#ifdef CONFIG_GRKERNSEC
89812+ sub_info->origpath = path;
89813+ sub_info->path = kstrdup(path, gfp_mask);
89814+#else
89815 sub_info->path = path;
89816+#endif
89817 sub_info->argv = argv;
89818 sub_info->envp = envp;
89819
89820@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
89821 static int proc_cap_handler(struct ctl_table *table, int write,
89822 void __user *buffer, size_t *lenp, loff_t *ppos)
89823 {
89824- struct ctl_table t;
89825+ ctl_table_no_const t;
89826 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
89827 kernel_cap_t new_cap;
89828 int err, i;
89829diff --git a/kernel/kprobes.c b/kernel/kprobes.c
89830index ee61992..62142b1 100644
89831--- a/kernel/kprobes.c
89832+++ b/kernel/kprobes.c
89833@@ -31,6 +31,9 @@
89834 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
89835 * <prasanna@in.ibm.com> added function-return probes.
89836 */
89837+#ifdef CONFIG_GRKERNSEC_HIDESYM
89838+#define __INCLUDED_BY_HIDESYM 1
89839+#endif
89840 #include <linux/kprobes.h>
89841 #include <linux/hash.h>
89842 #include <linux/init.h>
89843@@ -122,12 +125,12 @@ enum kprobe_slot_state {
89844
89845 static void *alloc_insn_page(void)
89846 {
89847- return module_alloc(PAGE_SIZE);
89848+ return module_alloc_exec(PAGE_SIZE);
89849 }
89850
89851 static void free_insn_page(void *page)
89852 {
89853- module_memfree(page);
89854+ module_memfree_exec(page);
89855 }
89856
89857 struct kprobe_insn_cache kprobe_insn_slots = {
89858@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
89859 kprobe_type = "k";
89860
89861 if (sym)
89862- seq_printf(pi, "%p %s %s+0x%x %s ",
89863+ seq_printf(pi, "%pK %s %s+0x%x %s ",
89864 p->addr, kprobe_type, sym, offset,
89865 (modname ? modname : " "));
89866 else
89867- seq_printf(pi, "%p %s %p ",
89868+ seq_printf(pi, "%pK %s %pK ",
89869 p->addr, kprobe_type, p->addr);
89870
89871 if (!pp)
89872diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
89873index 6683cce..daf8999 100644
89874--- a/kernel/ksysfs.c
89875+++ b/kernel/ksysfs.c
89876@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
89877 {
89878 if (count+1 > UEVENT_HELPER_PATH_LEN)
89879 return -ENOENT;
89880+ if (!capable(CAP_SYS_ADMIN))
89881+ return -EPERM;
89882 memcpy(uevent_helper, buf, count);
89883 uevent_helper[count] = '\0';
89884 if (count && uevent_helper[count-1] == '\n')
89885@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
89886 return count;
89887 }
89888
89889-static struct bin_attribute notes_attr = {
89890+static bin_attribute_no_const notes_attr __read_only = {
89891 .attr = {
89892 .name = "notes",
89893 .mode = S_IRUGO,
89894diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
89895index 88d0d44..e9ce0ee 100644
89896--- a/kernel/locking/lockdep.c
89897+++ b/kernel/locking/lockdep.c
89898@@ -599,6 +599,10 @@ static int static_obj(void *obj)
89899 end = (unsigned long) &_end,
89900 addr = (unsigned long) obj;
89901
89902+#ifdef CONFIG_PAX_KERNEXEC
89903+ start = ktla_ktva(start);
89904+#endif
89905+
89906 /*
89907 * static variable?
89908 */
89909@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
89910 if (!static_obj(lock->key)) {
89911 debug_locks_off();
89912 printk("INFO: trying to register non-static key.\n");
89913+ printk("lock:%pS key:%pS.\n", lock, lock->key);
89914 printk("the code is fine but needs lockdep annotation.\n");
89915 printk("turning off the locking correctness validator.\n");
89916 dump_stack();
89917@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
89918 if (!class)
89919 return 0;
89920 }
89921- atomic_inc((atomic_t *)&class->ops);
89922+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
89923 if (very_verbose(class)) {
89924 printk("\nacquire class [%p] %s", class->key, class->name);
89925 if (class->name_version > 1)
89926diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
89927index ef43ac4..2720dfa 100644
89928--- a/kernel/locking/lockdep_proc.c
89929+++ b/kernel/locking/lockdep_proc.c
89930@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
89931 return 0;
89932 }
89933
89934- seq_printf(m, "%p", class->key);
89935+ seq_printf(m, "%pK", class->key);
89936 #ifdef CONFIG_DEBUG_LOCKDEP
89937 seq_printf(m, " OPS:%8ld", class->ops);
89938 #endif
89939@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
89940
89941 list_for_each_entry(entry, &class->locks_after, entry) {
89942 if (entry->distance == 1) {
89943- seq_printf(m, " -> [%p] ", entry->class->key);
89944+ seq_printf(m, " -> [%pK] ", entry->class->key);
89945 print_name(m, entry->class);
89946 seq_puts(m, "\n");
89947 }
89948@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
89949 if (!class->key)
89950 continue;
89951
89952- seq_printf(m, "[%p] ", class->key);
89953+ seq_printf(m, "[%pK] ", class->key);
89954 print_name(m, class);
89955 seq_puts(m, "\n");
89956 }
89957@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89958 if (!i)
89959 seq_line(m, '-', 40-namelen, namelen);
89960
89961- snprintf(ip, sizeof(ip), "[<%p>]",
89962+ snprintf(ip, sizeof(ip), "[<%pK>]",
89963 (void *)class->contention_point[i]);
89964 seq_printf(m, "%40s %14lu %29s %pS\n",
89965 name, stats->contention_point[i],
89966@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
89967 if (!i)
89968 seq_line(m, '-', 40-namelen, namelen);
89969
89970- snprintf(ip, sizeof(ip), "[<%p>]",
89971+ snprintf(ip, sizeof(ip), "[<%pK>]",
89972 (void *)class->contending_point[i]);
89973 seq_printf(m, "%40s %14lu %29s %pS\n",
89974 name, stats->contending_point[i],
89975diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
89976index 9887a90..0cd2b1d 100644
89977--- a/kernel/locking/mcs_spinlock.c
89978+++ b/kernel/locking/mcs_spinlock.c
89979@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
89980
89981 prev = decode_cpu(old);
89982 node->prev = prev;
89983- ACCESS_ONCE(prev->next) = node;
89984+ ACCESS_ONCE_RW(prev->next) = node;
89985
89986 /*
89987 * Normally @prev is untouchable after the above store; because at that
89988@@ -172,8 +172,8 @@ unqueue:
89989 * it will wait in Step-A.
89990 */
89991
89992- ACCESS_ONCE(next->prev) = prev;
89993- ACCESS_ONCE(prev->next) = next;
89994+ ACCESS_ONCE_RW(next->prev) = prev;
89995+ ACCESS_ONCE_RW(prev->next) = next;
89996
89997 return false;
89998 }
89999@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90000 node = this_cpu_ptr(&osq_node);
90001 next = xchg(&node->next, NULL);
90002 if (next) {
90003- ACCESS_ONCE(next->locked) = 1;
90004+ ACCESS_ONCE_RW(next->locked) = 1;
90005 return;
90006 }
90007
90008 next = osq_wait_next(lock, node, NULL);
90009 if (next)
90010- ACCESS_ONCE(next->locked) = 1;
90011+ ACCESS_ONCE_RW(next->locked) = 1;
90012 }
90013
90014 #endif
90015diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90016index 4d60986..5d351c1 100644
90017--- a/kernel/locking/mcs_spinlock.h
90018+++ b/kernel/locking/mcs_spinlock.h
90019@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90020 */
90021 return;
90022 }
90023- ACCESS_ONCE(prev->next) = node;
90024+ ACCESS_ONCE_RW(prev->next) = node;
90025
90026 /* Wait until the lock holder passes the lock down. */
90027 arch_mcs_spin_lock_contended(&node->locked);
90028diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90029index 3ef3736..9c951fa 100644
90030--- a/kernel/locking/mutex-debug.c
90031+++ b/kernel/locking/mutex-debug.c
90032@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90033 }
90034
90035 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90036- struct thread_info *ti)
90037+ struct task_struct *task)
90038 {
90039 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90040
90041 /* Mark the current thread as blocked on the lock: */
90042- ti->task->blocked_on = waiter;
90043+ task->blocked_on = waiter;
90044 }
90045
90046 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90047- struct thread_info *ti)
90048+ struct task_struct *task)
90049 {
90050 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90051- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90052- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90053- ti->task->blocked_on = NULL;
90054+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90055+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90056+ task->blocked_on = NULL;
90057
90058 list_del_init(&waiter->list);
90059 waiter->task = NULL;
90060diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90061index 0799fd3..d06ae3b 100644
90062--- a/kernel/locking/mutex-debug.h
90063+++ b/kernel/locking/mutex-debug.h
90064@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90065 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90066 extern void debug_mutex_add_waiter(struct mutex *lock,
90067 struct mutex_waiter *waiter,
90068- struct thread_info *ti);
90069+ struct task_struct *task);
90070 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90071- struct thread_info *ti);
90072+ struct task_struct *task);
90073 extern void debug_mutex_unlock(struct mutex *lock);
90074 extern void debug_mutex_init(struct mutex *lock, const char *name,
90075 struct lock_class_key *key);
90076diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90077index 4541951..39fe90a 100644
90078--- a/kernel/locking/mutex.c
90079+++ b/kernel/locking/mutex.c
90080@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90081 goto skip_wait;
90082
90083 debug_mutex_lock_common(lock, &waiter);
90084- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90085+ debug_mutex_add_waiter(lock, &waiter, task);
90086
90087 /* add waiting tasks to the end of the waitqueue (FIFO): */
90088 list_add_tail(&waiter.list, &lock->wait_list);
90089@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90090 schedule_preempt_disabled();
90091 spin_lock_mutex(&lock->wait_lock, flags);
90092 }
90093- mutex_remove_waiter(lock, &waiter, current_thread_info());
90094+ mutex_remove_waiter(lock, &waiter, task);
90095 /* set it to 0 if there are no waiters left: */
90096 if (likely(list_empty(&lock->wait_list)))
90097 atomic_set(&lock->count, 0);
90098@@ -606,7 +606,7 @@ skip_wait:
90099 return 0;
90100
90101 err:
90102- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90103+ mutex_remove_waiter(lock, &waiter, task);
90104 spin_unlock_mutex(&lock->wait_lock, flags);
90105 debug_mutex_free_waiter(&waiter);
90106 mutex_release(&lock->dep_map, 1, ip);
90107diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90108index 1d96dd0..994ff19 100644
90109--- a/kernel/locking/rtmutex-tester.c
90110+++ b/kernel/locking/rtmutex-tester.c
90111@@ -22,7 +22,7 @@
90112 #define MAX_RT_TEST_MUTEXES 8
90113
90114 static spinlock_t rttest_lock;
90115-static atomic_t rttest_event;
90116+static atomic_unchecked_t rttest_event;
90117
90118 struct test_thread_data {
90119 int opcode;
90120@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90121
90122 case RTTEST_LOCKCONT:
90123 td->mutexes[td->opdata] = 1;
90124- td->event = atomic_add_return(1, &rttest_event);
90125+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90126 return 0;
90127
90128 case RTTEST_RESET:
90129@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90130 return 0;
90131
90132 case RTTEST_RESETEVENT:
90133- atomic_set(&rttest_event, 0);
90134+ atomic_set_unchecked(&rttest_event, 0);
90135 return 0;
90136
90137 default:
90138@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90139 return ret;
90140
90141 td->mutexes[id] = 1;
90142- td->event = atomic_add_return(1, &rttest_event);
90143+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90144 rt_mutex_lock(&mutexes[id]);
90145- td->event = atomic_add_return(1, &rttest_event);
90146+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90147 td->mutexes[id] = 4;
90148 return 0;
90149
90150@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90151 return ret;
90152
90153 td->mutexes[id] = 1;
90154- td->event = atomic_add_return(1, &rttest_event);
90155+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90156 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90157- td->event = atomic_add_return(1, &rttest_event);
90158+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90159 td->mutexes[id] = ret ? 0 : 4;
90160 return ret ? -EINTR : 0;
90161
90162@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90163 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90164 return ret;
90165
90166- td->event = atomic_add_return(1, &rttest_event);
90167+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90168 rt_mutex_unlock(&mutexes[id]);
90169- td->event = atomic_add_return(1, &rttest_event);
90170+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90171 td->mutexes[id] = 0;
90172 return 0;
90173
90174@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90175 break;
90176
90177 td->mutexes[dat] = 2;
90178- td->event = atomic_add_return(1, &rttest_event);
90179+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90180 break;
90181
90182 default:
90183@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90184 return;
90185
90186 td->mutexes[dat] = 3;
90187- td->event = atomic_add_return(1, &rttest_event);
90188+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90189 break;
90190
90191 case RTTEST_LOCKNOWAIT:
90192@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90193 return;
90194
90195 td->mutexes[dat] = 1;
90196- td->event = atomic_add_return(1, &rttest_event);
90197+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90198 return;
90199
90200 default:
90201diff --git a/kernel/module.c b/kernel/module.c
90202index d856e96..b82225c 100644
90203--- a/kernel/module.c
90204+++ b/kernel/module.c
90205@@ -59,6 +59,7 @@
90206 #include <linux/jump_label.h>
90207 #include <linux/pfn.h>
90208 #include <linux/bsearch.h>
90209+#include <linux/grsecurity.h>
90210 #include <uapi/linux/module.h>
90211 #include "module-internal.h"
90212
90213@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90214
90215 /* Bounds of module allocation, for speeding __module_address.
90216 * Protected by module_mutex. */
90217-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90218+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90219+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90220
90221 int register_module_notifier(struct notifier_block *nb)
90222 {
90223@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90224 return true;
90225
90226 list_for_each_entry_rcu(mod, &modules, list) {
90227- struct symsearch arr[] = {
90228+ struct symsearch modarr[] = {
90229 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90230 NOT_GPL_ONLY, false },
90231 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90232@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90233 if (mod->state == MODULE_STATE_UNFORMED)
90234 continue;
90235
90236- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90237+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90238 return true;
90239 }
90240 return false;
90241@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90242 if (!pcpusec->sh_size)
90243 return 0;
90244
90245- if (align > PAGE_SIZE) {
90246+ if (align-1 >= PAGE_SIZE) {
90247 pr_warn("%s: per-cpu alignment %li > %li\n",
90248 mod->name, align, PAGE_SIZE);
90249 align = PAGE_SIZE;
90250@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90251 static ssize_t show_coresize(struct module_attribute *mattr,
90252 struct module_kobject *mk, char *buffer)
90253 {
90254- return sprintf(buffer, "%u\n", mk->mod->core_size);
90255+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90256 }
90257
90258 static struct module_attribute modinfo_coresize =
90259@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90260 static ssize_t show_initsize(struct module_attribute *mattr,
90261 struct module_kobject *mk, char *buffer)
90262 {
90263- return sprintf(buffer, "%u\n", mk->mod->init_size);
90264+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90265 }
90266
90267 static struct module_attribute modinfo_initsize =
90268@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90269 goto bad_version;
90270 }
90271
90272+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90273+ /*
90274+ * avoid potentially printing jibberish on attempted load
90275+ * of a module randomized with a different seed
90276+ */
90277+ pr_warn("no symbol version for %s\n", symname);
90278+#else
90279 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90280+#endif
90281 return 0;
90282
90283 bad_version:
90284+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90285+ /*
90286+ * avoid potentially printing jibberish on attempted load
90287+ * of a module randomized with a different seed
90288+ */
90289+ pr_warn("attempted module disagrees about version of symbol %s\n",
90290+ symname);
90291+#else
90292 pr_warn("%s: disagrees about version of symbol %s\n",
90293 mod->name, symname);
90294+#endif
90295 return 0;
90296 }
90297
90298@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90299 */
90300 #ifdef CONFIG_SYSFS
90301
90302-#ifdef CONFIG_KALLSYMS
90303+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90304 static inline bool sect_empty(const Elf_Shdr *sect)
90305 {
90306 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90307@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90308 {
90309 unsigned int notes, loaded, i;
90310 struct module_notes_attrs *notes_attrs;
90311- struct bin_attribute *nattr;
90312+ bin_attribute_no_const *nattr;
90313
90314 /* failed to create section attributes, so can't create notes */
90315 if (!mod->sect_attrs)
90316@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90317 static int module_add_modinfo_attrs(struct module *mod)
90318 {
90319 struct module_attribute *attr;
90320- struct module_attribute *temp_attr;
90321+ module_attribute_no_const *temp_attr;
90322 int error = 0;
90323 int i;
90324
90325@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90326
90327 static void unset_module_core_ro_nx(struct module *mod)
90328 {
90329- set_page_attributes(mod->module_core + mod->core_text_size,
90330- mod->module_core + mod->core_size,
90331+ set_page_attributes(mod->module_core_rw,
90332+ mod->module_core_rw + mod->core_size_rw,
90333 set_memory_x);
90334- set_page_attributes(mod->module_core,
90335- mod->module_core + mod->core_ro_size,
90336+ set_page_attributes(mod->module_core_rx,
90337+ mod->module_core_rx + mod->core_size_rx,
90338 set_memory_rw);
90339 }
90340
90341 static void unset_module_init_ro_nx(struct module *mod)
90342 {
90343- set_page_attributes(mod->module_init + mod->init_text_size,
90344- mod->module_init + mod->init_size,
90345+ set_page_attributes(mod->module_init_rw,
90346+ mod->module_init_rw + mod->init_size_rw,
90347 set_memory_x);
90348- set_page_attributes(mod->module_init,
90349- mod->module_init + mod->init_ro_size,
90350+ set_page_attributes(mod->module_init_rx,
90351+ mod->module_init_rx + mod->init_size_rx,
90352 set_memory_rw);
90353 }
90354
90355@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90356 list_for_each_entry_rcu(mod, &modules, list) {
90357 if (mod->state == MODULE_STATE_UNFORMED)
90358 continue;
90359- if ((mod->module_core) && (mod->core_text_size)) {
90360- set_page_attributes(mod->module_core,
90361- mod->module_core + mod->core_text_size,
90362+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90363+ set_page_attributes(mod->module_core_rx,
90364+ mod->module_core_rx + mod->core_size_rx,
90365 set_memory_rw);
90366 }
90367- if ((mod->module_init) && (mod->init_text_size)) {
90368- set_page_attributes(mod->module_init,
90369- mod->module_init + mod->init_text_size,
90370+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90371+ set_page_attributes(mod->module_init_rx,
90372+ mod->module_init_rx + mod->init_size_rx,
90373 set_memory_rw);
90374 }
90375 }
90376@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90377 list_for_each_entry_rcu(mod, &modules, list) {
90378 if (mod->state == MODULE_STATE_UNFORMED)
90379 continue;
90380- if ((mod->module_core) && (mod->core_text_size)) {
90381- set_page_attributes(mod->module_core,
90382- mod->module_core + mod->core_text_size,
90383+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90384+ set_page_attributes(mod->module_core_rx,
90385+ mod->module_core_rx + mod->core_size_rx,
90386 set_memory_ro);
90387 }
90388- if ((mod->module_init) && (mod->init_text_size)) {
90389- set_page_attributes(mod->module_init,
90390- mod->module_init + mod->init_text_size,
90391+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90392+ set_page_attributes(mod->module_init_rx,
90393+ mod->module_init_rx + mod->init_size_rx,
90394 set_memory_ro);
90395 }
90396 }
90397@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90398 #else
90399 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90400 static void unset_module_core_ro_nx(struct module *mod) { }
90401-static void unset_module_init_ro_nx(struct module *mod) { }
90402+static void unset_module_init_ro_nx(struct module *mod)
90403+{
90404+
90405+#ifdef CONFIG_PAX_KERNEXEC
90406+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90407+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90408+#endif
90409+
90410+}
90411 #endif
90412
90413 void __weak module_memfree(void *module_region)
90414@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90415 /* This may be NULL, but that's OK */
90416 unset_module_init_ro_nx(mod);
90417 module_arch_freeing_init(mod);
90418- module_memfree(mod->module_init);
90419+ module_memfree(mod->module_init_rw);
90420+ module_memfree_exec(mod->module_init_rx);
90421 kfree(mod->args);
90422 percpu_modfree(mod);
90423
90424 /* Free lock-classes: */
90425- lockdep_free_key_range(mod->module_core, mod->core_size);
90426+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90427+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90428
90429 /* Finally, free the core (containing the module structure) */
90430 unset_module_core_ro_nx(mod);
90431- module_memfree(mod->module_core);
90432+ module_memfree_exec(mod->module_core_rx);
90433+ module_memfree(mod->module_core_rw);
90434
90435 #ifdef CONFIG_MPU
90436 update_protections(current->mm);
90437@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90438 int ret = 0;
90439 const struct kernel_symbol *ksym;
90440
90441+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90442+ int is_fs_load = 0;
90443+ int register_filesystem_found = 0;
90444+ char *p;
90445+
90446+ p = strstr(mod->args, "grsec_modharden_fs");
90447+ if (p) {
90448+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90449+ /* copy \0 as well */
90450+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90451+ is_fs_load = 1;
90452+ }
90453+#endif
90454+
90455 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90456 const char *name = info->strtab + sym[i].st_name;
90457
90458+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90459+ /* it's a real shame this will never get ripped and copied
90460+ upstream! ;(
90461+ */
90462+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90463+ register_filesystem_found = 1;
90464+#endif
90465+
90466 switch (sym[i].st_shndx) {
90467 case SHN_COMMON:
90468 /* Ignore common symbols */
90469@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90470 ksym = resolve_symbol_wait(mod, info, name);
90471 /* Ok if resolved. */
90472 if (ksym && !IS_ERR(ksym)) {
90473+ pax_open_kernel();
90474 sym[i].st_value = ksym->value;
90475+ pax_close_kernel();
90476 break;
90477 }
90478
90479@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90480 secbase = (unsigned long)mod_percpu(mod);
90481 else
90482 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90483+ pax_open_kernel();
90484 sym[i].st_value += secbase;
90485+ pax_close_kernel();
90486 break;
90487 }
90488 }
90489
90490+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90491+ if (is_fs_load && !register_filesystem_found) {
90492+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90493+ ret = -EPERM;
90494+ }
90495+#endif
90496+
90497 return ret;
90498 }
90499
90500@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90501 || s->sh_entsize != ~0UL
90502 || strstarts(sname, ".init"))
90503 continue;
90504- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90505+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90506+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90507+ else
90508+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90509 pr_debug("\t%s\n", sname);
90510 }
90511- switch (m) {
90512- case 0: /* executable */
90513- mod->core_size = debug_align(mod->core_size);
90514- mod->core_text_size = mod->core_size;
90515- break;
90516- case 1: /* RO: text and ro-data */
90517- mod->core_size = debug_align(mod->core_size);
90518- mod->core_ro_size = mod->core_size;
90519- break;
90520- case 3: /* whole core */
90521- mod->core_size = debug_align(mod->core_size);
90522- break;
90523- }
90524 }
90525
90526 pr_debug("Init section allocation order:\n");
90527@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90528 || s->sh_entsize != ~0UL
90529 || !strstarts(sname, ".init"))
90530 continue;
90531- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90532- | INIT_OFFSET_MASK);
90533+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90534+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90535+ else
90536+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90537+ s->sh_entsize |= INIT_OFFSET_MASK;
90538 pr_debug("\t%s\n", sname);
90539 }
90540- switch (m) {
90541- case 0: /* executable */
90542- mod->init_size = debug_align(mod->init_size);
90543- mod->init_text_size = mod->init_size;
90544- break;
90545- case 1: /* RO: text and ro-data */
90546- mod->init_size = debug_align(mod->init_size);
90547- mod->init_ro_size = mod->init_size;
90548- break;
90549- case 3: /* whole init */
90550- mod->init_size = debug_align(mod->init_size);
90551- break;
90552- }
90553 }
90554 }
90555
90556@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90557
90558 /* Put symbol section at end of init part of module. */
90559 symsect->sh_flags |= SHF_ALLOC;
90560- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90561+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90562 info->index.sym) | INIT_OFFSET_MASK;
90563 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90564
90565@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90566 }
90567
90568 /* Append room for core symbols at end of core part. */
90569- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90570- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90571- mod->core_size += strtab_size;
90572+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90573+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90574+ mod->core_size_rx += strtab_size;
90575
90576 /* Put string table section at end of init part of module. */
90577 strsect->sh_flags |= SHF_ALLOC;
90578- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90579+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90580 info->index.str) | INIT_OFFSET_MASK;
90581 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90582 }
90583@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90584 /* Make sure we get permanent strtab: don't use info->strtab. */
90585 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90586
90587+ pax_open_kernel();
90588+
90589 /* Set types up while we still have access to sections. */
90590 for (i = 0; i < mod->num_symtab; i++)
90591 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90592
90593- mod->core_symtab = dst = mod->module_core + info->symoffs;
90594- mod->core_strtab = s = mod->module_core + info->stroffs;
90595+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90596+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90597 src = mod->symtab;
90598 for (ndst = i = 0; i < mod->num_symtab; i++) {
90599 if (i == 0 ||
90600@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90601 }
90602 }
90603 mod->core_num_syms = ndst;
90604+
90605+ pax_close_kernel();
90606 }
90607 #else
90608 static inline void layout_symtab(struct module *mod, struct load_info *info)
90609@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90610 return vmalloc_exec(size);
90611 }
90612
90613-static void *module_alloc_update_bounds(unsigned long size)
90614+static void *module_alloc_update_bounds_rw(unsigned long size)
90615 {
90616 void *ret = module_alloc(size);
90617
90618 if (ret) {
90619 mutex_lock(&module_mutex);
90620 /* Update module bounds. */
90621- if ((unsigned long)ret < module_addr_min)
90622- module_addr_min = (unsigned long)ret;
90623- if ((unsigned long)ret + size > module_addr_max)
90624- module_addr_max = (unsigned long)ret + size;
90625+ if ((unsigned long)ret < module_addr_min_rw)
90626+ module_addr_min_rw = (unsigned long)ret;
90627+ if ((unsigned long)ret + size > module_addr_max_rw)
90628+ module_addr_max_rw = (unsigned long)ret + size;
90629+ mutex_unlock(&module_mutex);
90630+ }
90631+ return ret;
90632+}
90633+
90634+static void *module_alloc_update_bounds_rx(unsigned long size)
90635+{
90636+ void *ret = module_alloc_exec(size);
90637+
90638+ if (ret) {
90639+ mutex_lock(&module_mutex);
90640+ /* Update module bounds. */
90641+ if ((unsigned long)ret < module_addr_min_rx)
90642+ module_addr_min_rx = (unsigned long)ret;
90643+ if ((unsigned long)ret + size > module_addr_max_rx)
90644+ module_addr_max_rx = (unsigned long)ret + size;
90645 mutex_unlock(&module_mutex);
90646 }
90647 return ret;
90648@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90649 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90650
90651 if (info->index.sym == 0) {
90652+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90653+ /*
90654+ * avoid potentially printing jibberish on attempted load
90655+ * of a module randomized with a different seed
90656+ */
90657+ pr_warn("module has no symbols (stripped?)\n");
90658+#else
90659 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90660+#endif
90661 return ERR_PTR(-ENOEXEC);
90662 }
90663
90664@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90665 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90666 {
90667 const char *modmagic = get_modinfo(info, "vermagic");
90668+ const char *license = get_modinfo(info, "license");
90669 int err;
90670
90671+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90672+ if (!license || !license_is_gpl_compatible(license))
90673+ return -ENOEXEC;
90674+#endif
90675+
90676 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90677 modmagic = NULL;
90678
90679@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90680 }
90681
90682 /* Set up license info based on the info section */
90683- set_license(mod, get_modinfo(info, "license"));
90684+ set_license(mod, license);
90685
90686 return 0;
90687 }
90688@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
90689 void *ptr;
90690
90691 /* Do the allocs. */
90692- ptr = module_alloc_update_bounds(mod->core_size);
90693+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90694 /*
90695 * The pointer to this block is stored in the module structure
90696 * which is inside the block. Just mark it as not being a
90697@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
90698 if (!ptr)
90699 return -ENOMEM;
90700
90701- memset(ptr, 0, mod->core_size);
90702- mod->module_core = ptr;
90703+ memset(ptr, 0, mod->core_size_rw);
90704+ mod->module_core_rw = ptr;
90705
90706- if (mod->init_size) {
90707- ptr = module_alloc_update_bounds(mod->init_size);
90708+ if (mod->init_size_rw) {
90709+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90710 /*
90711 * The pointer to this block is stored in the module structure
90712 * which is inside the block. This block doesn't need to be
90713@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
90714 */
90715 kmemleak_ignore(ptr);
90716 if (!ptr) {
90717- module_memfree(mod->module_core);
90718+ module_memfree(mod->module_core_rw);
90719 return -ENOMEM;
90720 }
90721- memset(ptr, 0, mod->init_size);
90722- mod->module_init = ptr;
90723+ memset(ptr, 0, mod->init_size_rw);
90724+ mod->module_init_rw = ptr;
90725 } else
90726- mod->module_init = NULL;
90727+ mod->module_init_rw = NULL;
90728+
90729+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90730+ kmemleak_not_leak(ptr);
90731+ if (!ptr) {
90732+ if (mod->module_init_rw)
90733+ module_memfree(mod->module_init_rw);
90734+ module_memfree(mod->module_core_rw);
90735+ return -ENOMEM;
90736+ }
90737+
90738+ pax_open_kernel();
90739+ memset(ptr, 0, mod->core_size_rx);
90740+ pax_close_kernel();
90741+ mod->module_core_rx = ptr;
90742+
90743+ if (mod->init_size_rx) {
90744+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90745+ kmemleak_ignore(ptr);
90746+ if (!ptr && mod->init_size_rx) {
90747+ module_memfree_exec(mod->module_core_rx);
90748+ if (mod->module_init_rw)
90749+ module_memfree(mod->module_init_rw);
90750+ module_memfree(mod->module_core_rw);
90751+ return -ENOMEM;
90752+ }
90753+
90754+ pax_open_kernel();
90755+ memset(ptr, 0, mod->init_size_rx);
90756+ pax_close_kernel();
90757+ mod->module_init_rx = ptr;
90758+ } else
90759+ mod->module_init_rx = NULL;
90760
90761 /* Transfer each section which specifies SHF_ALLOC */
90762 pr_debug("final section addresses:\n");
90763@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
90764 if (!(shdr->sh_flags & SHF_ALLOC))
90765 continue;
90766
90767- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90768- dest = mod->module_init
90769- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90770- else
90771- dest = mod->module_core + shdr->sh_entsize;
90772+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90773+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90774+ dest = mod->module_init_rw
90775+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90776+ else
90777+ dest = mod->module_init_rx
90778+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90779+ } else {
90780+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90781+ dest = mod->module_core_rw + shdr->sh_entsize;
90782+ else
90783+ dest = mod->module_core_rx + shdr->sh_entsize;
90784+ }
90785+
90786+ if (shdr->sh_type != SHT_NOBITS) {
90787+
90788+#ifdef CONFIG_PAX_KERNEXEC
90789+#ifdef CONFIG_X86_64
90790+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
90791+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
90792+#endif
90793+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
90794+ pax_open_kernel();
90795+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90796+ pax_close_kernel();
90797+ } else
90798+#endif
90799
90800- if (shdr->sh_type != SHT_NOBITS)
90801 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90802+ }
90803 /* Update sh_addr to point to copy in image. */
90804- shdr->sh_addr = (unsigned long)dest;
90805+
90806+#ifdef CONFIG_PAX_KERNEXEC
90807+ if (shdr->sh_flags & SHF_EXECINSTR)
90808+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
90809+ else
90810+#endif
90811+
90812+ shdr->sh_addr = (unsigned long)dest;
90813 pr_debug("\t0x%lx %s\n",
90814 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
90815 }
90816@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
90817 * Do it before processing of module parameters, so the module
90818 * can provide parameter accessor functions of its own.
90819 */
90820- if (mod->module_init)
90821- flush_icache_range((unsigned long)mod->module_init,
90822- (unsigned long)mod->module_init
90823- + mod->init_size);
90824- flush_icache_range((unsigned long)mod->module_core,
90825- (unsigned long)mod->module_core + mod->core_size);
90826+ if (mod->module_init_rx)
90827+ flush_icache_range((unsigned long)mod->module_init_rx,
90828+ (unsigned long)mod->module_init_rx
90829+ + mod->init_size_rx);
90830+ flush_icache_range((unsigned long)mod->module_core_rx,
90831+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
90832
90833 set_fs(old_fs);
90834 }
90835@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
90836 {
90837 percpu_modfree(mod);
90838 module_arch_freeing_init(mod);
90839- module_memfree(mod->module_init);
90840- module_memfree(mod->module_core);
90841+ module_memfree_exec(mod->module_init_rx);
90842+ module_memfree_exec(mod->module_core_rx);
90843+ module_memfree(mod->module_init_rw);
90844+ module_memfree(mod->module_core_rw);
90845 }
90846
90847 int __weak module_finalize(const Elf_Ehdr *hdr,
90848@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
90849 static int post_relocation(struct module *mod, const struct load_info *info)
90850 {
90851 /* Sort exception table now relocations are done. */
90852+ pax_open_kernel();
90853 sort_extable(mod->extable, mod->extable + mod->num_exentries);
90854+ pax_close_kernel();
90855
90856 /* Copy relocated percpu area over. */
90857 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
90858@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
90859 /* For freeing module_init on success, in case kallsyms traversing */
90860 struct mod_initfree {
90861 struct rcu_head rcu;
90862- void *module_init;
90863+ void *module_init_rw;
90864+ void *module_init_rx;
90865 };
90866
90867 static void do_free_init(struct rcu_head *head)
90868 {
90869 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
90870- module_memfree(m->module_init);
90871+ module_memfree(m->module_init_rw);
90872+ module_memfree_exec(m->module_init_rx);
90873 kfree(m);
90874 }
90875
90876@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
90877 ret = -ENOMEM;
90878 goto fail;
90879 }
90880- freeinit->module_init = mod->module_init;
90881+ freeinit->module_init_rw = mod->module_init_rw;
90882+ freeinit->module_init_rx = mod->module_init_rx;
90883
90884 /*
90885 * We want to find out whether @mod uses async during init. Clear
90886@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
90887 #endif
90888 unset_module_init_ro_nx(mod);
90889 module_arch_freeing_init(mod);
90890- mod->module_init = NULL;
90891- mod->init_size = 0;
90892- mod->init_ro_size = 0;
90893- mod->init_text_size = 0;
90894+ mod->module_init_rw = NULL;
90895+ mod->module_init_rx = NULL;
90896+ mod->init_size_rw = 0;
90897+ mod->init_size_rx = 0;
90898 /*
90899 * We want to free module_init, but be aware that kallsyms may be
90900 * walking this with preempt disabled. In all the failure paths,
90901@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
90902 module_bug_finalize(info->hdr, info->sechdrs, mod);
90903
90904 /* Set RO and NX regions for core */
90905- set_section_ro_nx(mod->module_core,
90906- mod->core_text_size,
90907- mod->core_ro_size,
90908- mod->core_size);
90909+ set_section_ro_nx(mod->module_core_rx,
90910+ mod->core_size_rx,
90911+ mod->core_size_rx,
90912+ mod->core_size_rx);
90913
90914 /* Set RO and NX regions for init */
90915- set_section_ro_nx(mod->module_init,
90916- mod->init_text_size,
90917- mod->init_ro_size,
90918- mod->init_size);
90919+ set_section_ro_nx(mod->module_init_rx,
90920+ mod->init_size_rx,
90921+ mod->init_size_rx,
90922+ mod->init_size_rx);
90923
90924 /* Mark state as coming so strong_try_module_get() ignores us,
90925 * but kallsyms etc. can see us. */
90926@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
90927 if (err)
90928 goto free_unload;
90929
90930+ /* Now copy in args */
90931+ mod->args = strndup_user(uargs, ~0UL >> 1);
90932+ if (IS_ERR(mod->args)) {
90933+ err = PTR_ERR(mod->args);
90934+ goto free_unload;
90935+ }
90936+
90937 /* Set up MODINFO_ATTR fields */
90938 setup_modinfo(mod, info);
90939
90940+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90941+ {
90942+ char *p, *p2;
90943+
90944+ if (strstr(mod->args, "grsec_modharden_netdev")) {
90945+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
90946+ err = -EPERM;
90947+ goto free_modinfo;
90948+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
90949+ p += sizeof("grsec_modharden_normal") - 1;
90950+ p2 = strstr(p, "_");
90951+ if (p2) {
90952+ *p2 = '\0';
90953+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
90954+ *p2 = '_';
90955+ }
90956+ err = -EPERM;
90957+ goto free_modinfo;
90958+ }
90959+ }
90960+#endif
90961+
90962 /* Fix up syms, so that st_value is a pointer to location. */
90963 err = simplify_symbols(mod, info);
90964 if (err < 0)
90965@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
90966
90967 flush_module_icache(mod);
90968
90969- /* Now copy in args */
90970- mod->args = strndup_user(uargs, ~0UL >> 1);
90971- if (IS_ERR(mod->args)) {
90972- err = PTR_ERR(mod->args);
90973- goto free_arch_cleanup;
90974- }
90975-
90976 dynamic_debug_setup(info->debug, info->num_debug);
90977
90978 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
90979@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
90980 ddebug_cleanup:
90981 dynamic_debug_remove(info->debug);
90982 synchronize_sched();
90983- kfree(mod->args);
90984- free_arch_cleanup:
90985 module_arch_cleanup(mod);
90986 free_modinfo:
90987 free_modinfo(mod);
90988+ kfree(mod->args);
90989 free_unload:
90990 module_unload_free(mod);
90991 unlink_mod:
90992@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
90993 unsigned long nextval;
90994
90995 /* At worse, next value is at end of module */
90996- if (within_module_init(addr, mod))
90997- nextval = (unsigned long)mod->module_init+mod->init_text_size;
90998+ if (within_module_init_rx(addr, mod))
90999+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91000+ else if (within_module_init_rw(addr, mod))
91001+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91002+ else if (within_module_core_rx(addr, mod))
91003+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91004+ else if (within_module_core_rw(addr, mod))
91005+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91006 else
91007- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91008+ return NULL;
91009
91010 /* Scan for closest preceding symbol, and next symbol. (ELF
91011 starts real symbols at 1). */
91012@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
91013 return 0;
91014
91015 seq_printf(m, "%s %u",
91016- mod->name, mod->init_size + mod->core_size);
91017+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91018 print_unload_info(m, mod);
91019
91020 /* Informative for users. */
91021@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
91022 mod->state == MODULE_STATE_COMING ? "Loading" :
91023 "Live");
91024 /* Used by oprofile and other similar tools. */
91025- seq_printf(m, " 0x%pK", mod->module_core);
91026+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91027
91028 /* Taints info */
91029 if (mod->taints)
91030@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
91031
91032 static int __init proc_modules_init(void)
91033 {
91034+#ifndef CONFIG_GRKERNSEC_HIDESYM
91035+#ifdef CONFIG_GRKERNSEC_PROC_USER
91036+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91037+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91038+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91039+#else
91040 proc_create("modules", 0, NULL, &proc_modules_operations);
91041+#endif
91042+#else
91043+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91044+#endif
91045 return 0;
91046 }
91047 module_init(proc_modules_init);
91048@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91049 {
91050 struct module *mod;
91051
91052- if (addr < module_addr_min || addr > module_addr_max)
91053+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91054+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91055 return NULL;
91056
91057 list_for_each_entry_rcu(mod, &modules, list) {
91058@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91059 */
91060 struct module *__module_text_address(unsigned long addr)
91061 {
91062- struct module *mod = __module_address(addr);
91063+ struct module *mod;
91064+
91065+#ifdef CONFIG_X86_32
91066+ addr = ktla_ktva(addr);
91067+#endif
91068+
91069+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91070+ return NULL;
91071+
91072+ mod = __module_address(addr);
91073+
91074 if (mod) {
91075 /* Make sure it's within the text section. */
91076- if (!within(addr, mod->module_init, mod->init_text_size)
91077- && !within(addr, mod->module_core, mod->core_text_size))
91078+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91079 mod = NULL;
91080 }
91081 return mod;
91082diff --git a/kernel/notifier.c b/kernel/notifier.c
91083index 4803da6..1c5eea6 100644
91084--- a/kernel/notifier.c
91085+++ b/kernel/notifier.c
91086@@ -5,6 +5,7 @@
91087 #include <linux/rcupdate.h>
91088 #include <linux/vmalloc.h>
91089 #include <linux/reboot.h>
91090+#include <linux/mm.h>
91091
91092 /*
91093 * Notifier list for kernel code which wants to be called
91094@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91095 while ((*nl) != NULL) {
91096 if (n->priority > (*nl)->priority)
91097 break;
91098- nl = &((*nl)->next);
91099+ nl = (struct notifier_block **)&((*nl)->next);
91100 }
91101- n->next = *nl;
91102+ pax_open_kernel();
91103+ *(const void **)&n->next = *nl;
91104 rcu_assign_pointer(*nl, n);
91105+ pax_close_kernel();
91106 return 0;
91107 }
91108
91109@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91110 return 0;
91111 if (n->priority > (*nl)->priority)
91112 break;
91113- nl = &((*nl)->next);
91114+ nl = (struct notifier_block **)&((*nl)->next);
91115 }
91116- n->next = *nl;
91117+ pax_open_kernel();
91118+ *(const void **)&n->next = *nl;
91119 rcu_assign_pointer(*nl, n);
91120+ pax_close_kernel();
91121 return 0;
91122 }
91123
91124@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91125 {
91126 while ((*nl) != NULL) {
91127 if ((*nl) == n) {
91128+ pax_open_kernel();
91129 rcu_assign_pointer(*nl, n->next);
91130+ pax_close_kernel();
91131 return 0;
91132 }
91133- nl = &((*nl)->next);
91134+ nl = (struct notifier_block **)&((*nl)->next);
91135 }
91136 return -ENOENT;
91137 }
91138diff --git a/kernel/padata.c b/kernel/padata.c
91139index 161402f..598814c 100644
91140--- a/kernel/padata.c
91141+++ b/kernel/padata.c
91142@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91143 * seq_nr mod. number of cpus in use.
91144 */
91145
91146- seq_nr = atomic_inc_return(&pd->seq_nr);
91147+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91148 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91149
91150 return padata_index_to_cpu(pd, cpu_index);
91151@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91152 padata_init_pqueues(pd);
91153 padata_init_squeues(pd);
91154 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91155- atomic_set(&pd->seq_nr, -1);
91156+ atomic_set_unchecked(&pd->seq_nr, -1);
91157 atomic_set(&pd->reorder_objects, 0);
91158 atomic_set(&pd->refcnt, 0);
91159 pd->pinst = pinst;
91160diff --git a/kernel/panic.c b/kernel/panic.c
91161index 4d8d6f9..97b9b9c 100644
91162--- a/kernel/panic.c
91163+++ b/kernel/panic.c
91164@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91165 /*
91166 * Stop ourself in panic -- architecture code may override this
91167 */
91168-void __weak panic_smp_self_stop(void)
91169+void __weak __noreturn panic_smp_self_stop(void)
91170 {
91171 while (1)
91172 cpu_relax();
91173@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91174 disable_trace_on_warning();
91175
91176 pr_warn("------------[ cut here ]------------\n");
91177- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91178+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91179 raw_smp_processor_id(), current->pid, file, line, caller);
91180
91181 if (args)
91182@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91183 */
91184 __visible void __stack_chk_fail(void)
91185 {
91186- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91187+ dump_stack();
91188+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91189 __builtin_return_address(0));
91190 }
91191 EXPORT_SYMBOL(__stack_chk_fail);
91192diff --git a/kernel/pid.c b/kernel/pid.c
91193index cd36a5e..11f185d 100644
91194--- a/kernel/pid.c
91195+++ b/kernel/pid.c
91196@@ -33,6 +33,7 @@
91197 #include <linux/rculist.h>
91198 #include <linux/bootmem.h>
91199 #include <linux/hash.h>
91200+#include <linux/security.h>
91201 #include <linux/pid_namespace.h>
91202 #include <linux/init_task.h>
91203 #include <linux/syscalls.h>
91204@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91205
91206 int pid_max = PID_MAX_DEFAULT;
91207
91208-#define RESERVED_PIDS 300
91209+#define RESERVED_PIDS 500
91210
91211 int pid_max_min = RESERVED_PIDS + 1;
91212 int pid_max_max = PID_MAX_LIMIT;
91213@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91214 */
91215 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91216 {
91217+ struct task_struct *task;
91218+
91219 rcu_lockdep_assert(rcu_read_lock_held(),
91220 "find_task_by_pid_ns() needs rcu_read_lock()"
91221 " protection");
91222- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91223+
91224+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91225+
91226+ if (gr_pid_is_chrooted(task))
91227+ return NULL;
91228+
91229+ return task;
91230 }
91231
91232 struct task_struct *find_task_by_vpid(pid_t vnr)
91233@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91234 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91235 }
91236
91237+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91238+{
91239+ rcu_lockdep_assert(rcu_read_lock_held(),
91240+ "find_task_by_pid_ns() needs rcu_read_lock()"
91241+ " protection");
91242+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91243+}
91244+
91245 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91246 {
91247 struct pid *pid;
91248diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91249index a65ba13..f600dbb 100644
91250--- a/kernel/pid_namespace.c
91251+++ b/kernel/pid_namespace.c
91252@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91253 void __user *buffer, size_t *lenp, loff_t *ppos)
91254 {
91255 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91256- struct ctl_table tmp = *table;
91257+ ctl_table_no_const tmp = *table;
91258
91259 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91260 return -EPERM;
91261diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91262index 48b28d3..c63ccaf 100644
91263--- a/kernel/power/Kconfig
91264+++ b/kernel/power/Kconfig
91265@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91266 config HIBERNATION
91267 bool "Hibernation (aka 'suspend to disk')"
91268 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91269+ depends on !GRKERNSEC_KMEM
91270+ depends on !PAX_MEMORY_SANITIZE
91271 select HIBERNATE_CALLBACKS
91272 select LZO_COMPRESS
91273 select LZO_DECOMPRESS
91274diff --git a/kernel/power/process.c b/kernel/power/process.c
91275index 5a6ec86..3a8c884 100644
91276--- a/kernel/power/process.c
91277+++ b/kernel/power/process.c
91278@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91279 unsigned int elapsed_msecs;
91280 bool wakeup = false;
91281 int sleep_usecs = USEC_PER_MSEC;
91282+ bool timedout = false;
91283
91284 do_gettimeofday(&start);
91285
91286@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91287
91288 while (true) {
91289 todo = 0;
91290+ if (time_after(jiffies, end_time))
91291+ timedout = true;
91292 read_lock(&tasklist_lock);
91293 for_each_process_thread(g, p) {
91294 if (p == current || !freeze_task(p))
91295 continue;
91296
91297- if (!freezer_should_skip(p))
91298+ if (!freezer_should_skip(p)) {
91299 todo++;
91300+ if (timedout) {
91301+ printk(KERN_ERR "Task refusing to freeze:\n");
91302+ sched_show_task(p);
91303+ }
91304+ }
91305 }
91306 read_unlock(&tasklist_lock);
91307
91308@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91309 todo += wq_busy;
91310 }
91311
91312- if (!todo || time_after(jiffies, end_time))
91313+ if (!todo || timedout)
91314 break;
91315
91316 if (pm_wakeup_pending()) {
91317diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91318index 2cdd353..7df1786 100644
91319--- a/kernel/printk/printk.c
91320+++ b/kernel/printk/printk.c
91321@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91322 if (from_file && type != SYSLOG_ACTION_OPEN)
91323 return 0;
91324
91325+#ifdef CONFIG_GRKERNSEC_DMESG
91326+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91327+ return -EPERM;
91328+#endif
91329+
91330 if (syslog_action_restricted(type)) {
91331 if (capable(CAP_SYSLOG))
91332 return 0;
91333diff --git a/kernel/profile.c b/kernel/profile.c
91334index 54bf5ba..df6e0a2 100644
91335--- a/kernel/profile.c
91336+++ b/kernel/profile.c
91337@@ -37,7 +37,7 @@ struct profile_hit {
91338 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91339 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91340
91341-static atomic_t *prof_buffer;
91342+static atomic_unchecked_t *prof_buffer;
91343 static unsigned long prof_len, prof_shift;
91344
91345 int prof_on __read_mostly;
91346@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91347 hits[i].pc = 0;
91348 continue;
91349 }
91350- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91351+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91352 hits[i].hits = hits[i].pc = 0;
91353 }
91354 }
91355@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91356 * Add the current hit(s) and flush the write-queue out
91357 * to the global buffer:
91358 */
91359- atomic_add(nr_hits, &prof_buffer[pc]);
91360+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91361 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91362- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91363+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91364 hits[i].pc = hits[i].hits = 0;
91365 }
91366 out:
91367@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91368 {
91369 unsigned long pc;
91370 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91371- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91372+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91373 }
91374 #endif /* !CONFIG_SMP */
91375
91376@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91377 return -EFAULT;
91378 buf++; p++; count--; read++;
91379 }
91380- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91381+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91382 if (copy_to_user(buf, (void *)pnt, count))
91383 return -EFAULT;
91384 read += count;
91385@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91386 }
91387 #endif
91388 profile_discard_flip_buffers();
91389- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91390+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91391 return count;
91392 }
91393
91394diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91395index 1eb9d90..d40d21e 100644
91396--- a/kernel/ptrace.c
91397+++ b/kernel/ptrace.c
91398@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91399 if (seize)
91400 flags |= PT_SEIZED;
91401 rcu_read_lock();
91402- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91403+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91404 flags |= PT_PTRACE_CAP;
91405 rcu_read_unlock();
91406 task->ptrace = flags;
91407@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91408 break;
91409 return -EIO;
91410 }
91411- if (copy_to_user(dst, buf, retval))
91412+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91413 return -EFAULT;
91414 copied += retval;
91415 src += retval;
91416@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91417 bool seized = child->ptrace & PT_SEIZED;
91418 int ret = -EIO;
91419 siginfo_t siginfo, *si;
91420- void __user *datavp = (void __user *) data;
91421+ void __user *datavp = (__force void __user *) data;
91422 unsigned long __user *datalp = datavp;
91423 unsigned long flags;
91424
91425@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91426 goto out;
91427 }
91428
91429+ if (gr_handle_ptrace(child, request)) {
91430+ ret = -EPERM;
91431+ goto out_put_task_struct;
91432+ }
91433+
91434 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91435 ret = ptrace_attach(child, request, addr, data);
91436 /*
91437 * Some architectures need to do book-keeping after
91438 * a ptrace attach.
91439 */
91440- if (!ret)
91441+ if (!ret) {
91442 arch_ptrace_attach(child);
91443+ gr_audit_ptrace(child);
91444+ }
91445 goto out_put_task_struct;
91446 }
91447
91448@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91449 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91450 if (copied != sizeof(tmp))
91451 return -EIO;
91452- return put_user(tmp, (unsigned long __user *)data);
91453+ return put_user(tmp, (__force unsigned long __user *)data);
91454 }
91455
91456 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91457@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91458 }
91459
91460 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91461- compat_long_t, addr, compat_long_t, data)
91462+ compat_ulong_t, addr, compat_ulong_t, data)
91463 {
91464 struct task_struct *child;
91465 long ret;
91466@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91467 goto out;
91468 }
91469
91470+ if (gr_handle_ptrace(child, request)) {
91471+ ret = -EPERM;
91472+ goto out_put_task_struct;
91473+ }
91474+
91475 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91476 ret = ptrace_attach(child, request, addr, data);
91477 /*
91478 * Some architectures need to do book-keeping after
91479 * a ptrace attach.
91480 */
91481- if (!ret)
91482+ if (!ret) {
91483 arch_ptrace_attach(child);
91484+ gr_audit_ptrace(child);
91485+ }
91486 goto out_put_task_struct;
91487 }
91488
91489diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91490index 4d559ba..053da37 100644
91491--- a/kernel/rcu/rcutorture.c
91492+++ b/kernel/rcu/rcutorture.c
91493@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91494 rcu_torture_count) = { 0 };
91495 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91496 rcu_torture_batch) = { 0 };
91497-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91498-static atomic_t n_rcu_torture_alloc;
91499-static atomic_t n_rcu_torture_alloc_fail;
91500-static atomic_t n_rcu_torture_free;
91501-static atomic_t n_rcu_torture_mberror;
91502-static atomic_t n_rcu_torture_error;
91503+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91504+static atomic_unchecked_t n_rcu_torture_alloc;
91505+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91506+static atomic_unchecked_t n_rcu_torture_free;
91507+static atomic_unchecked_t n_rcu_torture_mberror;
91508+static atomic_unchecked_t n_rcu_torture_error;
91509 static long n_rcu_torture_barrier_error;
91510 static long n_rcu_torture_boost_ktrerror;
91511 static long n_rcu_torture_boost_rterror;
91512@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91513 static long n_rcu_torture_timers;
91514 static long n_barrier_attempts;
91515 static long n_barrier_successes;
91516-static atomic_long_t n_cbfloods;
91517+static atomic_long_unchecked_t n_cbfloods;
91518 static struct list_head rcu_torture_removed;
91519
91520 static int rcu_torture_writer_state;
91521@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91522
91523 spin_lock_bh(&rcu_torture_lock);
91524 if (list_empty(&rcu_torture_freelist)) {
91525- atomic_inc(&n_rcu_torture_alloc_fail);
91526+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91527 spin_unlock_bh(&rcu_torture_lock);
91528 return NULL;
91529 }
91530- atomic_inc(&n_rcu_torture_alloc);
91531+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91532 p = rcu_torture_freelist.next;
91533 list_del_init(p);
91534 spin_unlock_bh(&rcu_torture_lock);
91535@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91536 static void
91537 rcu_torture_free(struct rcu_torture *p)
91538 {
91539- atomic_inc(&n_rcu_torture_free);
91540+ atomic_inc_unchecked(&n_rcu_torture_free);
91541 spin_lock_bh(&rcu_torture_lock);
91542 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91543 spin_unlock_bh(&rcu_torture_lock);
91544@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91545 i = rp->rtort_pipe_count;
91546 if (i > RCU_TORTURE_PIPE_LEN)
91547 i = RCU_TORTURE_PIPE_LEN;
91548- atomic_inc(&rcu_torture_wcount[i]);
91549+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91550 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91551 rp->rtort_mbtest = 0;
91552 return true;
91553@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91554 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91555 do {
91556 schedule_timeout_interruptible(cbflood_inter_holdoff);
91557- atomic_long_inc(&n_cbfloods);
91558+ atomic_long_inc_unchecked(&n_cbfloods);
91559 WARN_ON(signal_pending(current));
91560 for (i = 0; i < cbflood_n_burst; i++) {
91561 for (j = 0; j < cbflood_n_per_burst; j++) {
91562@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91563 i = old_rp->rtort_pipe_count;
91564 if (i > RCU_TORTURE_PIPE_LEN)
91565 i = RCU_TORTURE_PIPE_LEN;
91566- atomic_inc(&rcu_torture_wcount[i]);
91567+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91568 old_rp->rtort_pipe_count++;
91569 switch (synctype[torture_random(&rand) % nsynctypes]) {
91570 case RTWS_DEF_FREE:
91571@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91572 return;
91573 }
91574 if (p->rtort_mbtest == 0)
91575- atomic_inc(&n_rcu_torture_mberror);
91576+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91577 spin_lock(&rand_lock);
91578 cur_ops->read_delay(&rand);
91579 n_rcu_torture_timers++;
91580@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91581 continue;
91582 }
91583 if (p->rtort_mbtest == 0)
91584- atomic_inc(&n_rcu_torture_mberror);
91585+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91586 cur_ops->read_delay(&rand);
91587 preempt_disable();
91588 pipe_count = p->rtort_pipe_count;
91589@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91590 rcu_torture_current,
91591 rcu_torture_current_version,
91592 list_empty(&rcu_torture_freelist),
91593- atomic_read(&n_rcu_torture_alloc),
91594- atomic_read(&n_rcu_torture_alloc_fail),
91595- atomic_read(&n_rcu_torture_free));
91596+ atomic_read_unchecked(&n_rcu_torture_alloc),
91597+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91598+ atomic_read_unchecked(&n_rcu_torture_free));
91599 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91600- atomic_read(&n_rcu_torture_mberror),
91601+ atomic_read_unchecked(&n_rcu_torture_mberror),
91602 n_rcu_torture_boost_ktrerror,
91603 n_rcu_torture_boost_rterror);
91604 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91605@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91606 n_barrier_successes,
91607 n_barrier_attempts,
91608 n_rcu_torture_barrier_error);
91609- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91610+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91611
91612 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91613- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91614+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91615 n_rcu_torture_barrier_error != 0 ||
91616 n_rcu_torture_boost_ktrerror != 0 ||
91617 n_rcu_torture_boost_rterror != 0 ||
91618 n_rcu_torture_boost_failure != 0 ||
91619 i > 1) {
91620 pr_cont("%s", "!!! ");
91621- atomic_inc(&n_rcu_torture_error);
91622+ atomic_inc_unchecked(&n_rcu_torture_error);
91623 WARN_ON_ONCE(1);
91624 }
91625 pr_cont("Reader Pipe: ");
91626@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91627 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91628 pr_cont("Free-Block Circulation: ");
91629 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91630- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91631+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91632 }
91633 pr_cont("\n");
91634
91635@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91636
91637 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91638
91639- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91640+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91641 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91642 else if (torture_onoff_failures())
91643 rcu_torture_print_module_parms(cur_ops,
91644@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91645
91646 rcu_torture_current = NULL;
91647 rcu_torture_current_version = 0;
91648- atomic_set(&n_rcu_torture_alloc, 0);
91649- atomic_set(&n_rcu_torture_alloc_fail, 0);
91650- atomic_set(&n_rcu_torture_free, 0);
91651- atomic_set(&n_rcu_torture_mberror, 0);
91652- atomic_set(&n_rcu_torture_error, 0);
91653+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91654+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91655+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91656+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91657+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91658 n_rcu_torture_barrier_error = 0;
91659 n_rcu_torture_boost_ktrerror = 0;
91660 n_rcu_torture_boost_rterror = 0;
91661 n_rcu_torture_boost_failure = 0;
91662 n_rcu_torture_boosts = 0;
91663 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91664- atomic_set(&rcu_torture_wcount[i], 0);
91665+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91666 for_each_possible_cpu(cpu) {
91667 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91668 per_cpu(rcu_torture_count, cpu)[i] = 0;
91669diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91670index 0db5649..e6ec167 100644
91671--- a/kernel/rcu/tiny.c
91672+++ b/kernel/rcu/tiny.c
91673@@ -42,7 +42,7 @@
91674 /* Forward declarations for tiny_plugin.h. */
91675 struct rcu_ctrlblk;
91676 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91677-static void rcu_process_callbacks(struct softirq_action *unused);
91678+static void rcu_process_callbacks(void);
91679 static void __call_rcu(struct rcu_head *head,
91680 void (*func)(struct rcu_head *rcu),
91681 struct rcu_ctrlblk *rcp);
91682@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91683 false));
91684 }
91685
91686-static void rcu_process_callbacks(struct softirq_action *unused)
91687+static __latent_entropy void rcu_process_callbacks(void)
91688 {
91689 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91690 __rcu_process_callbacks(&rcu_bh_ctrlblk);
91691diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91692index 858c565..7efd915 100644
91693--- a/kernel/rcu/tiny_plugin.h
91694+++ b/kernel/rcu/tiny_plugin.h
91695@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91696 dump_stack();
91697 }
91698 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91699- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91700+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91701 3 * rcu_jiffies_till_stall_check() + 3;
91702 else if (ULONG_CMP_GE(j, js))
91703- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91704+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91705 }
91706
91707 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91708 {
91709 rcp->ticks_this_gp = 0;
91710 rcp->gp_start = jiffies;
91711- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91712+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91713 }
91714
91715 static void check_cpu_stalls(void)
91716diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91717index 7680fc2..b8e9161 100644
91718--- a/kernel/rcu/tree.c
91719+++ b/kernel/rcu/tree.c
91720@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
91721 */
91722 rdtp = this_cpu_ptr(&rcu_dynticks);
91723 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91724- atomic_add(2, &rdtp->dynticks); /* QS. */
91725+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91726 smp_mb__after_atomic(); /* Later stuff after QS. */
91727 break;
91728 }
91729@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
91730 rcu_prepare_for_idle();
91731 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91732 smp_mb__before_atomic(); /* See above. */
91733- atomic_inc(&rdtp->dynticks);
91734+ atomic_inc_unchecked(&rdtp->dynticks);
91735 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91736- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91737+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91738 rcu_dynticks_task_enter();
91739
91740 /*
91741@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
91742
91743 rcu_dynticks_task_exit();
91744 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91745- atomic_inc(&rdtp->dynticks);
91746+ atomic_inc_unchecked(&rdtp->dynticks);
91747 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91748 smp_mb__after_atomic(); /* See above. */
91749- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91750+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91751 rcu_cleanup_after_idle();
91752 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91753 if (!user && !is_idle_task(current)) {
91754@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
91755 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91756
91757 if (rdtp->dynticks_nmi_nesting == 0 &&
91758- (atomic_read(&rdtp->dynticks) & 0x1))
91759+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91760 return;
91761 rdtp->dynticks_nmi_nesting++;
91762 smp_mb__before_atomic(); /* Force delay from prior write. */
91763- atomic_inc(&rdtp->dynticks);
91764+ atomic_inc_unchecked(&rdtp->dynticks);
91765 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91766 smp_mb__after_atomic(); /* See above. */
91767- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91768+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91769 }
91770
91771 /**
91772@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
91773 return;
91774 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91775 smp_mb__before_atomic(); /* See above. */
91776- atomic_inc(&rdtp->dynticks);
91777+ atomic_inc_unchecked(&rdtp->dynticks);
91778 smp_mb__after_atomic(); /* Force delay to next write. */
91779- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91780+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91781 }
91782
91783 /**
91784@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
91785 */
91786 bool notrace __rcu_is_watching(void)
91787 {
91788- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91789+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91790 }
91791
91792 /**
91793@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91794 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91795 bool *isidle, unsigned long *maxj)
91796 {
91797- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91798+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91799 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91800 if ((rdp->dynticks_snap & 0x1) == 0) {
91801 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91802@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91803 int *rcrmp;
91804 unsigned int snap;
91805
91806- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91807+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91808 snap = (unsigned int)rdp->dynticks_snap;
91809
91810 /*
91811@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91812 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91813 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91814 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91815- ACCESS_ONCE(rdp->cond_resched_completed) =
91816+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91817 ACCESS_ONCE(rdp->mynode->completed);
91818 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91819- ACCESS_ONCE(*rcrmp) =
91820+ ACCESS_ONCE_RW(*rcrmp) =
91821 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91822 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91823 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
91824@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
91825 rsp->gp_start = j;
91826 smp_wmb(); /* Record start time before stall time. */
91827 j1 = rcu_jiffies_till_stall_check();
91828- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
91829+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
91830 rsp->jiffies_resched = j + j1 / 2;
91831 }
91832
91833@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
91834 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91835 return;
91836 }
91837- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91838+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
91839 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91840
91841 /*
91842@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
91843
91844 raw_spin_lock_irqsave(&rnp->lock, flags);
91845 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
91846- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
91847+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
91848 3 * rcu_jiffies_till_stall_check() + 3;
91849 raw_spin_unlock_irqrestore(&rnp->lock, flags);
91850
91851@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
91852 struct rcu_state *rsp;
91853
91854 for_each_rcu_flavor(rsp)
91855- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91856+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
91857 }
91858
91859 /*
91860@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
91861 raw_spin_unlock_irq(&rnp->lock);
91862 return 0;
91863 }
91864- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91865+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
91866
91867 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
91868 /*
91869@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
91870 rdp = this_cpu_ptr(rsp->rda);
91871 rcu_preempt_check_blocked_tasks(rnp);
91872 rnp->qsmask = rnp->qsmaskinit;
91873- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
91874+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
91875 WARN_ON_ONCE(rnp->completed != rsp->completed);
91876- ACCESS_ONCE(rnp->completed) = rsp->completed;
91877+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
91878 if (rnp == rdp->mynode)
91879 (void)__note_gp_changes(rsp, rnp, rdp);
91880 rcu_preempt_boost_start_gp(rnp);
91881@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
91882 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
91883 raw_spin_lock_irq(&rnp->lock);
91884 smp_mb__after_unlock_lock();
91885- ACCESS_ONCE(rsp->gp_flags) =
91886+ ACCESS_ONCE_RW(rsp->gp_flags) =
91887 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
91888 raw_spin_unlock_irq(&rnp->lock);
91889 }
91890@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91891 rcu_for_each_node_breadth_first(rsp, rnp) {
91892 raw_spin_lock_irq(&rnp->lock);
91893 smp_mb__after_unlock_lock();
91894- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
91895+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
91896 rdp = this_cpu_ptr(rsp->rda);
91897 if (rnp == rdp->mynode)
91898 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
91899@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
91900 rcu_nocb_gp_set(rnp, nocb);
91901
91902 /* Declare grace period done. */
91903- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
91904+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
91905 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
91906 rsp->fqs_state = RCU_GP_IDLE;
91907 rdp = this_cpu_ptr(rsp->rda);
91908 /* Advance CBs to reduce false positives below. */
91909 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
91910 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
91911- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91912+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91913 trace_rcu_grace_period(rsp->name,
91914 ACCESS_ONCE(rsp->gpnum),
91915 TPS("newreq"));
91916@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
91917 */
91918 return false;
91919 }
91920- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91921+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
91922 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
91923 TPS("newreq"));
91924
91925@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
91926 rsp->qlen += rdp->qlen;
91927 rdp->n_cbs_orphaned += rdp->qlen;
91928 rdp->qlen_lazy = 0;
91929- ACCESS_ONCE(rdp->qlen) = 0;
91930+ ACCESS_ONCE_RW(rdp->qlen) = 0;
91931 }
91932
91933 /*
91934@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
91935 }
91936 smp_mb(); /* List handling before counting for rcu_barrier(). */
91937 rdp->qlen_lazy -= count_lazy;
91938- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
91939+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
91940 rdp->n_cbs_invoked += count;
91941
91942 /* Reinstate batch limit if we have worked down the excess. */
91943@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
91944 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91945 return; /* Someone beat us to it. */
91946 }
91947- ACCESS_ONCE(rsp->gp_flags) =
91948+ ACCESS_ONCE_RW(rsp->gp_flags) =
91949 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
91950 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
91951 rcu_gp_kthread_wake(rsp);
91952@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
91953 /*
91954 * Do RCU core processing for the current CPU.
91955 */
91956-static void rcu_process_callbacks(struct softirq_action *unused)
91957+static void rcu_process_callbacks(void)
91958 {
91959 struct rcu_state *rsp;
91960
91961@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91962 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
91963 if (debug_rcu_head_queue(head)) {
91964 /* Probable double call_rcu(), so leak the callback. */
91965- ACCESS_ONCE(head->func) = rcu_leak_callback;
91966+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
91967 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
91968 return;
91969 }
91970@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
91971 local_irq_restore(flags);
91972 return;
91973 }
91974- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
91975+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
91976 if (lazy)
91977 rdp->qlen_lazy++;
91978 else
91979@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
91980 * counter wrap on a 32-bit system. Quite a few more CPUs would of
91981 * course be required on a 64-bit system.
91982 */
91983- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
91984+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
91985 (ulong)atomic_long_read(&rsp->expedited_done) +
91986 ULONG_MAX / 8)) {
91987 synchronize_sched();
91988- atomic_long_inc(&rsp->expedited_wrap);
91989+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
91990 return;
91991 }
91992
91993@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
91994 * Take a ticket. Note that atomic_inc_return() implies a
91995 * full memory barrier.
91996 */
91997- snap = atomic_long_inc_return(&rsp->expedited_start);
91998+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
91999 firstsnap = snap;
92000 if (!try_get_online_cpus()) {
92001 /* CPU hotplug operation in flight, fall back to normal GP. */
92002 wait_rcu_gp(call_rcu_sched);
92003- atomic_long_inc(&rsp->expedited_normal);
92004+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92005 return;
92006 }
92007 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92008@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
92009 for_each_cpu(cpu, cm) {
92010 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92011
92012- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92013+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92014 cpumask_clear_cpu(cpu, cm);
92015 }
92016 if (cpumask_weight(cm) == 0)
92017@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
92018 synchronize_sched_expedited_cpu_stop,
92019 NULL) == -EAGAIN) {
92020 put_online_cpus();
92021- atomic_long_inc(&rsp->expedited_tryfail);
92022+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92023
92024 /* Check to see if someone else did our work for us. */
92025 s = atomic_long_read(&rsp->expedited_done);
92026 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92027 /* ensure test happens before caller kfree */
92028 smp_mb__before_atomic(); /* ^^^ */
92029- atomic_long_inc(&rsp->expedited_workdone1);
92030+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92031 free_cpumask_var(cm);
92032 return;
92033 }
92034@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
92035 udelay(trycount * num_online_cpus());
92036 } else {
92037 wait_rcu_gp(call_rcu_sched);
92038- atomic_long_inc(&rsp->expedited_normal);
92039+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92040 free_cpumask_var(cm);
92041 return;
92042 }
92043@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92044 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92045 /* ensure test happens before caller kfree */
92046 smp_mb__before_atomic(); /* ^^^ */
92047- atomic_long_inc(&rsp->expedited_workdone2);
92048+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92049 free_cpumask_var(cm);
92050 return;
92051 }
92052@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92053 if (!try_get_online_cpus()) {
92054 /* CPU hotplug operation in flight, use normal GP. */
92055 wait_rcu_gp(call_rcu_sched);
92056- atomic_long_inc(&rsp->expedited_normal);
92057+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92058 free_cpumask_var(cm);
92059 return;
92060 }
92061- snap = atomic_long_read(&rsp->expedited_start);
92062+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92063 smp_mb(); /* ensure read is before try_stop_cpus(). */
92064 }
92065- atomic_long_inc(&rsp->expedited_stoppedcpus);
92066+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92067
92068 all_cpus_idle:
92069 free_cpumask_var(cm);
92070@@ -3072,16 +3072,16 @@ all_cpus_idle:
92071 * than we did already did their update.
92072 */
92073 do {
92074- atomic_long_inc(&rsp->expedited_done_tries);
92075+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92076 s = atomic_long_read(&rsp->expedited_done);
92077 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92078 /* ensure test happens before caller kfree */
92079 smp_mb__before_atomic(); /* ^^^ */
92080- atomic_long_inc(&rsp->expedited_done_lost);
92081+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92082 break;
92083 }
92084 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92085- atomic_long_inc(&rsp->expedited_done_exit);
92086+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92087
92088 put_online_cpus();
92089 }
92090@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92091 * ACCESS_ONCE() to prevent the compiler from speculating
92092 * the increment to precede the early-exit check.
92093 */
92094- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92095+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92096 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92097 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92098 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92099@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92100
92101 /* Increment ->n_barrier_done to prevent duplicate work. */
92102 smp_mb(); /* Keep increment after above mechanism. */
92103- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92104+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92105 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92106 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92107 smp_mb(); /* Keep increment before caller's subsequent code. */
92108@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92109 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92110 init_callback_list(rdp);
92111 rdp->qlen_lazy = 0;
92112- ACCESS_ONCE(rdp->qlen) = 0;
92113+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92114 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92115 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92116- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92117+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92118 rdp->cpu = cpu;
92119 rdp->rsp = rsp;
92120 rcu_boot_init_nocb_percpu_data(rdp);
92121@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92122 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92123 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92124 rcu_sysidle_init_percpu_data(rdp->dynticks);
92125- atomic_set(&rdp->dynticks->dynticks,
92126- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92127+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92128+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92129 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92130
92131 /* Add CPU to rcu_node bitmasks. */
92132diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92133index 8e7b184..9c55768 100644
92134--- a/kernel/rcu/tree.h
92135+++ b/kernel/rcu/tree.h
92136@@ -87,11 +87,11 @@ struct rcu_dynticks {
92137 long long dynticks_nesting; /* Track irq/process nesting level. */
92138 /* Process level is worth LLONG_MAX/2. */
92139 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92140- atomic_t dynticks; /* Even value for idle, else odd. */
92141+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92142 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92143 long long dynticks_idle_nesting;
92144 /* irq/process nesting level from idle. */
92145- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92146+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92147 /* "Idle" excludes userspace execution. */
92148 unsigned long dynticks_idle_jiffies;
92149 /* End of last non-NMI non-idle period. */
92150@@ -466,17 +466,17 @@ struct rcu_state {
92151 /* _rcu_barrier(). */
92152 /* End of fields guarded by barrier_mutex. */
92153
92154- atomic_long_t expedited_start; /* Starting ticket. */
92155- atomic_long_t expedited_done; /* Done ticket. */
92156- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92157- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92158- atomic_long_t expedited_workdone1; /* # done by others #1. */
92159- atomic_long_t expedited_workdone2; /* # done by others #2. */
92160- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92161- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92162- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92163- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92164- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92165+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92166+ atomic_long_t expedited_done; /* Done ticket. */
92167+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92168+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92169+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92170+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92171+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92172+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92173+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92174+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92175+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92176
92177 unsigned long jiffies_force_qs; /* Time at which to invoke */
92178 /* force_quiescent_state(). */
92179diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92180index 3ec85cb..3687925 100644
92181--- a/kernel/rcu/tree_plugin.h
92182+++ b/kernel/rcu/tree_plugin.h
92183@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92184 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92185 {
92186 return !rcu_preempted_readers_exp(rnp) &&
92187- ACCESS_ONCE(rnp->expmask) == 0;
92188+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92189 }
92190
92191 /*
92192@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92193
92194 /* Clean up and exit. */
92195 smp_mb(); /* ensure expedited GP seen before counter increment. */
92196- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92197+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92198 sync_rcu_preempt_exp_count + 1;
92199 unlock_mb_ret:
92200 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92201@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92202 free_cpumask_var(cm);
92203 }
92204
92205-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92206+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92207 .store = &rcu_cpu_kthread_task,
92208 .thread_should_run = rcu_cpu_kthread_should_run,
92209 .thread_fn = rcu_cpu_kthread,
92210@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92211 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92212 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92213 cpu, ticks_value, ticks_title,
92214- atomic_read(&rdtp->dynticks) & 0xfff,
92215+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92216 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92217 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92218 fast_no_hz);
92219@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92220 return;
92221 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92222 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92223- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92224+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92225 wake_up(&rdp_leader->nocb_wq);
92226 }
92227 }
92228@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92229
92230 /* Enqueue the callback on the nocb list and update counts. */
92231 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92232- ACCESS_ONCE(*old_rhpp) = rhp;
92233+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92234 atomic_long_add(rhcount, &rdp->nocb_q_count);
92235 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92236 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92237@@ -2286,7 +2286,7 @@ wait_again:
92238 continue; /* No CBs here, try next follower. */
92239
92240 /* Move callbacks to wait-for-GP list, which is empty. */
92241- ACCESS_ONCE(rdp->nocb_head) = NULL;
92242+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92243 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92244 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92245 rdp->nocb_gp_count_lazy =
92246@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92247 list = ACCESS_ONCE(rdp->nocb_follower_head);
92248 BUG_ON(!list);
92249 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92250- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92251+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92252 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92253 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92254 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92255@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92256 list = next;
92257 }
92258 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92259- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92260- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92261+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92262+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92263 rdp->nocb_p_count_lazy - cl;
92264 rdp->n_nocbs_invoked += c;
92265 }
92266@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92267 if (!rcu_nocb_need_deferred_wakeup(rdp))
92268 return;
92269 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92270- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92271+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92272 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92273 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92274 }
92275@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92276 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92277 "rcuo%c/%d", rsp->abbr, cpu);
92278 BUG_ON(IS_ERR(t));
92279- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92280+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92281 }
92282
92283 /*
92284@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92285
92286 /* Record start of fully idle period. */
92287 j = jiffies;
92288- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92289+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92290 smp_mb__before_atomic();
92291- atomic_inc(&rdtp->dynticks_idle);
92292+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92293 smp_mb__after_atomic();
92294- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92295+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92296 }
92297
92298 /*
92299@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92300
92301 /* Record end of idle period. */
92302 smp_mb__before_atomic();
92303- atomic_inc(&rdtp->dynticks_idle);
92304+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92305 smp_mb__after_atomic();
92306- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92307+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92308
92309 /*
92310 * If we are the timekeeping CPU, we are permitted to be non-idle
92311@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92312 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92313
92314 /* Pick up current idle and NMI-nesting counter and check. */
92315- cur = atomic_read(&rdtp->dynticks_idle);
92316+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92317 if (cur & 0x1) {
92318 *isidle = false; /* We are not idle! */
92319 return;
92320@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92321 case RCU_SYSIDLE_NOT:
92322
92323 /* First time all are idle, so note a short idle period. */
92324- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92325+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92326 break;
92327
92328 case RCU_SYSIDLE_SHORT:
92329@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92330 {
92331 smp_mb();
92332 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92333- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92334+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92335 }
92336
92337 /*
92338@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92339 smp_mb(); /* grace period precedes setting inuse. */
92340
92341 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92342- ACCESS_ONCE(rshp->inuse) = 0;
92343+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92344 }
92345
92346 /*
92347@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92348 static void rcu_dynticks_task_enter(void)
92349 {
92350 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92351- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92352+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92353 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92354 }
92355
92356@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92357 static void rcu_dynticks_task_exit(void)
92358 {
92359 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92360- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92361+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92362 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92363 }
92364diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92365index 5cdc62e..cc52e88 100644
92366--- a/kernel/rcu/tree_trace.c
92367+++ b/kernel/rcu/tree_trace.c
92368@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92369 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92370 rdp->passed_quiesce, rdp->qs_pending);
92371 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92372- atomic_read(&rdp->dynticks->dynticks),
92373+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92374 rdp->dynticks->dynticks_nesting,
92375 rdp->dynticks->dynticks_nmi_nesting,
92376 rdp->dynticks_fqs);
92377@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92378 struct rcu_state *rsp = (struct rcu_state *)m->private;
92379
92380 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92381- atomic_long_read(&rsp->expedited_start),
92382+ atomic_long_read_unchecked(&rsp->expedited_start),
92383 atomic_long_read(&rsp->expedited_done),
92384- atomic_long_read(&rsp->expedited_wrap),
92385- atomic_long_read(&rsp->expedited_tryfail),
92386- atomic_long_read(&rsp->expedited_workdone1),
92387- atomic_long_read(&rsp->expedited_workdone2),
92388- atomic_long_read(&rsp->expedited_normal),
92389- atomic_long_read(&rsp->expedited_stoppedcpus),
92390- atomic_long_read(&rsp->expedited_done_tries),
92391- atomic_long_read(&rsp->expedited_done_lost),
92392- atomic_long_read(&rsp->expedited_done_exit));
92393+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92394+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92395+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92396+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92397+ atomic_long_read_unchecked(&rsp->expedited_normal),
92398+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92399+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92400+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92401+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92402 return 0;
92403 }
92404
92405diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92406index e0d31a3..f4dafe3 100644
92407--- a/kernel/rcu/update.c
92408+++ b/kernel/rcu/update.c
92409@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92410 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92411 */
92412 if (till_stall_check < 3) {
92413- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92414+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92415 till_stall_check = 3;
92416 } else if (till_stall_check > 300) {
92417- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92418+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92419 till_stall_check = 300;
92420 }
92421 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92422@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92423 !ACCESS_ONCE(t->on_rq) ||
92424 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92425 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92426- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92427+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92428 list_del_init(&t->rcu_tasks_holdout_list);
92429 put_task_struct(t);
92430 return;
92431@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92432 !is_idle_task(t)) {
92433 get_task_struct(t);
92434 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92435- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92436+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92437 list_add(&t->rcu_tasks_holdout_list,
92438 &rcu_tasks_holdouts);
92439 }
92440@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92441 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92442 BUG_ON(IS_ERR(t));
92443 smp_mb(); /* Ensure others see full kthread. */
92444- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92445+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92446 mutex_unlock(&rcu_tasks_kthread_mutex);
92447 }
92448
92449diff --git a/kernel/resource.c b/kernel/resource.c
92450index 0bcebff..e7cd5b2 100644
92451--- a/kernel/resource.c
92452+++ b/kernel/resource.c
92453@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92454
92455 static int __init ioresources_init(void)
92456 {
92457+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92458+#ifdef CONFIG_GRKERNSEC_PROC_USER
92459+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92460+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92461+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92462+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92463+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92464+#endif
92465+#else
92466 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92467 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92468+#endif
92469 return 0;
92470 }
92471 __initcall(ioresources_init);
92472diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92473index eae160d..c9aa22e 100644
92474--- a/kernel/sched/auto_group.c
92475+++ b/kernel/sched/auto_group.c
92476@@ -11,7 +11,7 @@
92477
92478 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92479 static struct autogroup autogroup_default;
92480-static atomic_t autogroup_seq_nr;
92481+static atomic_unchecked_t autogroup_seq_nr;
92482
92483 void __init autogroup_init(struct task_struct *init_task)
92484 {
92485@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92486
92487 kref_init(&ag->kref);
92488 init_rwsem(&ag->lock);
92489- ag->id = atomic_inc_return(&autogroup_seq_nr);
92490+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92491 ag->tg = tg;
92492 #ifdef CONFIG_RT_GROUP_SCHED
92493 /*
92494diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92495index 607f852..486bc87 100644
92496--- a/kernel/sched/completion.c
92497+++ b/kernel/sched/completion.c
92498@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92499 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92500 * or number of jiffies left till timeout) if completed.
92501 */
92502-long __sched
92503+long __sched __intentional_overflow(-1)
92504 wait_for_completion_interruptible_timeout(struct completion *x,
92505 unsigned long timeout)
92506 {
92507@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92508 *
92509 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92510 */
92511-int __sched wait_for_completion_killable(struct completion *x)
92512+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92513 {
92514 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92515 if (t == -ERESTARTSYS)
92516@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92517 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92518 * or number of jiffies left till timeout) if completed.
92519 */
92520-long __sched
92521+long __sched __intentional_overflow(-1)
92522 wait_for_completion_killable_timeout(struct completion *x,
92523 unsigned long timeout)
92524 {
92525diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92526index 44dfc8b..56d160d 100644
92527--- a/kernel/sched/core.c
92528+++ b/kernel/sched/core.c
92529@@ -1902,7 +1902,7 @@ void set_numabalancing_state(bool enabled)
92530 int sysctl_numa_balancing(struct ctl_table *table, int write,
92531 void __user *buffer, size_t *lenp, loff_t *ppos)
92532 {
92533- struct ctl_table t;
92534+ ctl_table_no_const t;
92535 int err;
92536 int state = numabalancing_enabled;
92537
92538@@ -2352,8 +2352,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92539 next->active_mm = oldmm;
92540 atomic_inc(&oldmm->mm_count);
92541 enter_lazy_tlb(oldmm, next);
92542- } else
92543+ } else {
92544 switch_mm(oldmm, mm, next);
92545+ populate_stack();
92546+ }
92547
92548 if (!prev->mm) {
92549 prev->active_mm = NULL;
92550@@ -3152,6 +3154,8 @@ int can_nice(const struct task_struct *p, const int nice)
92551 /* convert nice value [19,-20] to rlimit style value [1,40] */
92552 int nice_rlim = nice_to_rlimit(nice);
92553
92554+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92555+
92556 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92557 capable(CAP_SYS_NICE));
92558 }
92559@@ -3178,7 +3182,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92560 nice = task_nice(current) + increment;
92561
92562 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92563- if (increment < 0 && !can_nice(current, nice))
92564+ if (increment < 0 && (!can_nice(current, nice) ||
92565+ gr_handle_chroot_nice()))
92566 return -EPERM;
92567
92568 retval = security_task_setnice(current, nice);
92569@@ -3473,6 +3478,7 @@ recheck:
92570 if (policy != p->policy && !rlim_rtprio)
92571 return -EPERM;
92572
92573+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92574 /* can't increase priority */
92575 if (attr->sched_priority > p->rt_priority &&
92576 attr->sched_priority > rlim_rtprio)
92577@@ -4973,6 +4979,7 @@ void idle_task_exit(void)
92578
92579 if (mm != &init_mm) {
92580 switch_mm(mm, &init_mm, current);
92581+ populate_stack();
92582 finish_arch_post_lock_switch();
92583 }
92584 mmdrop(mm);
92585@@ -5068,7 +5075,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92586
92587 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92588
92589-static struct ctl_table sd_ctl_dir[] = {
92590+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92591 {
92592 .procname = "sched_domain",
92593 .mode = 0555,
92594@@ -5085,17 +5092,17 @@ static struct ctl_table sd_ctl_root[] = {
92595 {}
92596 };
92597
92598-static struct ctl_table *sd_alloc_ctl_entry(int n)
92599+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92600 {
92601- struct ctl_table *entry =
92602+ ctl_table_no_const *entry =
92603 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92604
92605 return entry;
92606 }
92607
92608-static void sd_free_ctl_entry(struct ctl_table **tablep)
92609+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92610 {
92611- struct ctl_table *entry;
92612+ ctl_table_no_const *entry;
92613
92614 /*
92615 * In the intermediate directories, both the child directory and
92616@@ -5103,22 +5110,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92617 * will always be set. In the lowest directory the names are
92618 * static strings and all have proc handlers.
92619 */
92620- for (entry = *tablep; entry->mode; entry++) {
92621- if (entry->child)
92622- sd_free_ctl_entry(&entry->child);
92623+ for (entry = tablep; entry->mode; entry++) {
92624+ if (entry->child) {
92625+ sd_free_ctl_entry(entry->child);
92626+ pax_open_kernel();
92627+ entry->child = NULL;
92628+ pax_close_kernel();
92629+ }
92630 if (entry->proc_handler == NULL)
92631 kfree(entry->procname);
92632 }
92633
92634- kfree(*tablep);
92635- *tablep = NULL;
92636+ kfree(tablep);
92637 }
92638
92639 static int min_load_idx = 0;
92640 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92641
92642 static void
92643-set_table_entry(struct ctl_table *entry,
92644+set_table_entry(ctl_table_no_const *entry,
92645 const char *procname, void *data, int maxlen,
92646 umode_t mode, proc_handler *proc_handler,
92647 bool load_idx)
92648@@ -5138,7 +5148,7 @@ set_table_entry(struct ctl_table *entry,
92649 static struct ctl_table *
92650 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92651 {
92652- struct ctl_table *table = sd_alloc_ctl_entry(14);
92653+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92654
92655 if (table == NULL)
92656 return NULL;
92657@@ -5176,9 +5186,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92658 return table;
92659 }
92660
92661-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92662+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92663 {
92664- struct ctl_table *entry, *table;
92665+ ctl_table_no_const *entry, *table;
92666 struct sched_domain *sd;
92667 int domain_num = 0, i;
92668 char buf[32];
92669@@ -5205,11 +5215,13 @@ static struct ctl_table_header *sd_sysctl_header;
92670 static void register_sched_domain_sysctl(void)
92671 {
92672 int i, cpu_num = num_possible_cpus();
92673- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92674+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92675 char buf[32];
92676
92677 WARN_ON(sd_ctl_dir[0].child);
92678+ pax_open_kernel();
92679 sd_ctl_dir[0].child = entry;
92680+ pax_close_kernel();
92681
92682 if (entry == NULL)
92683 return;
92684@@ -5232,8 +5244,12 @@ static void unregister_sched_domain_sysctl(void)
92685 if (sd_sysctl_header)
92686 unregister_sysctl_table(sd_sysctl_header);
92687 sd_sysctl_header = NULL;
92688- if (sd_ctl_dir[0].child)
92689- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92690+ if (sd_ctl_dir[0].child) {
92691+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92692+ pax_open_kernel();
92693+ sd_ctl_dir[0].child = NULL;
92694+ pax_close_kernel();
92695+ }
92696 }
92697 #else
92698 static void register_sched_domain_sysctl(void)
92699diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92700index fe331fc..29d620e 100644
92701--- a/kernel/sched/fair.c
92702+++ b/kernel/sched/fair.c
92703@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92704
92705 static void reset_ptenuma_scan(struct task_struct *p)
92706 {
92707- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92708+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92709 p->mm->numa_scan_offset = 0;
92710 }
92711
92712@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92713 * run_rebalance_domains is triggered when needed from the scheduler tick.
92714 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92715 */
92716-static void run_rebalance_domains(struct softirq_action *h)
92717+static __latent_entropy void run_rebalance_domains(void)
92718 {
92719 struct rq *this_rq = this_rq();
92720 enum cpu_idle_type idle = this_rq->idle_balance ?
92721diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92722index 9a2a45c..bb91ace 100644
92723--- a/kernel/sched/sched.h
92724+++ b/kernel/sched/sched.h
92725@@ -1182,7 +1182,7 @@ struct sched_class {
92726 #ifdef CONFIG_FAIR_GROUP_SCHED
92727 void (*task_move_group) (struct task_struct *p, int on_rq);
92728 #endif
92729-};
92730+} __do_const;
92731
92732 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92733 {
92734diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92735index 4ef9687..4f44028 100644
92736--- a/kernel/seccomp.c
92737+++ b/kernel/seccomp.c
92738@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
92739
92740 switch (action) {
92741 case SECCOMP_RET_ERRNO:
92742- /* Set the low-order 16-bits as a errno. */
92743+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
92744+ if (data > MAX_ERRNO)
92745+ data = MAX_ERRNO;
92746 syscall_set_return_value(current, task_pt_regs(current),
92747 -data, 0);
92748 goto skip;
92749diff --git a/kernel/signal.c b/kernel/signal.c
92750index 16a30529..25ad033 100644
92751--- a/kernel/signal.c
92752+++ b/kernel/signal.c
92753@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92754
92755 int print_fatal_signals __read_mostly;
92756
92757-static void __user *sig_handler(struct task_struct *t, int sig)
92758+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92759 {
92760 return t->sighand->action[sig - 1].sa.sa_handler;
92761 }
92762
92763-static int sig_handler_ignored(void __user *handler, int sig)
92764+static int sig_handler_ignored(__sighandler_t handler, int sig)
92765 {
92766 /* Is it explicitly or implicitly ignored? */
92767 return handler == SIG_IGN ||
92768@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92769
92770 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92771 {
92772- void __user *handler;
92773+ __sighandler_t handler;
92774
92775 handler = sig_handler(t, sig);
92776
92777@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92778 atomic_inc(&user->sigpending);
92779 rcu_read_unlock();
92780
92781+ if (!override_rlimit)
92782+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92783+
92784 if (override_rlimit ||
92785 atomic_read(&user->sigpending) <=
92786 task_rlimit(t, RLIMIT_SIGPENDING)) {
92787@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92788
92789 int unhandled_signal(struct task_struct *tsk, int sig)
92790 {
92791- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92792+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92793 if (is_global_init(tsk))
92794 return 1;
92795 if (handler != SIG_IGN && handler != SIG_DFL)
92796@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92797 }
92798 }
92799
92800+ /* allow glibc communication via tgkill to other threads in our
92801+ thread group */
92802+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92803+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92804+ && gr_handle_signal(t, sig))
92805+ return -EPERM;
92806+
92807 return security_task_kill(t, info, sig, 0);
92808 }
92809
92810@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92811 return send_signal(sig, info, p, 1);
92812 }
92813
92814-static int
92815+int
92816 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92817 {
92818 return send_signal(sig, info, t, 0);
92819@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92820 unsigned long int flags;
92821 int ret, blocked, ignored;
92822 struct k_sigaction *action;
92823+ int is_unhandled = 0;
92824
92825 spin_lock_irqsave(&t->sighand->siglock, flags);
92826 action = &t->sighand->action[sig-1];
92827@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92828 }
92829 if (action->sa.sa_handler == SIG_DFL)
92830 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92831+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92832+ is_unhandled = 1;
92833 ret = specific_send_sig_info(sig, info, t);
92834 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92835
92836+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
92837+ normal operation */
92838+ if (is_unhandled) {
92839+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92840+ gr_handle_crash(t, sig);
92841+ }
92842+
92843 return ret;
92844 }
92845
92846@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92847 ret = check_kill_permission(sig, info, p);
92848 rcu_read_unlock();
92849
92850- if (!ret && sig)
92851+ if (!ret && sig) {
92852 ret = do_send_sig_info(sig, info, p, true);
92853+ if (!ret)
92854+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92855+ }
92856
92857 return ret;
92858 }
92859@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92860 int error = -ESRCH;
92861
92862 rcu_read_lock();
92863- p = find_task_by_vpid(pid);
92864+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
92865+ /* allow glibc communication via tgkill to other threads in our
92866+ thread group */
92867+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
92868+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
92869+ p = find_task_by_vpid_unrestricted(pid);
92870+ else
92871+#endif
92872+ p = find_task_by_vpid(pid);
92873 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
92874 error = check_kill_permission(sig, info, p);
92875 /*
92876@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
92877 }
92878 seg = get_fs();
92879 set_fs(KERNEL_DS);
92880- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
92881- (stack_t __force __user *) &uoss,
92882+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
92883+ (stack_t __force_user *) &uoss,
92884 compat_user_stack_pointer());
92885 set_fs(seg);
92886 if (ret >= 0 && uoss_ptr) {
92887diff --git a/kernel/smpboot.c b/kernel/smpboot.c
92888index 40190f2..8861d40 100644
92889--- a/kernel/smpboot.c
92890+++ b/kernel/smpboot.c
92891@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
92892 }
92893 smpboot_unpark_thread(plug_thread, cpu);
92894 }
92895- list_add(&plug_thread->list, &hotplug_threads);
92896+ pax_list_add(&plug_thread->list, &hotplug_threads);
92897 out:
92898 mutex_unlock(&smpboot_threads_lock);
92899 put_online_cpus();
92900@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
92901 {
92902 get_online_cpus();
92903 mutex_lock(&smpboot_threads_lock);
92904- list_del(&plug_thread->list);
92905+ pax_list_del(&plug_thread->list);
92906 smpboot_destroy_threads(plug_thread);
92907 mutex_unlock(&smpboot_threads_lock);
92908 put_online_cpus();
92909diff --git a/kernel/softirq.c b/kernel/softirq.c
92910index c497fcd..e8f90a9 100644
92911--- a/kernel/softirq.c
92912+++ b/kernel/softirq.c
92913@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
92914 EXPORT_SYMBOL(irq_stat);
92915 #endif
92916
92917-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
92918+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
92919
92920 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
92921
92922@@ -266,7 +266,7 @@ restart:
92923 kstat_incr_softirqs_this_cpu(vec_nr);
92924
92925 trace_softirq_entry(vec_nr);
92926- h->action(h);
92927+ h->action();
92928 trace_softirq_exit(vec_nr);
92929 if (unlikely(prev_count != preempt_count())) {
92930 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
92931@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
92932 or_softirq_pending(1UL << nr);
92933 }
92934
92935-void open_softirq(int nr, void (*action)(struct softirq_action *))
92936+void __init open_softirq(int nr, void (*action)(void))
92937 {
92938 softirq_vec[nr].action = action;
92939 }
92940@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
92941 }
92942 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
92943
92944-static void tasklet_action(struct softirq_action *a)
92945+static void tasklet_action(void)
92946 {
92947 struct tasklet_struct *list;
92948
92949@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
92950 }
92951 }
92952
92953-static void tasklet_hi_action(struct softirq_action *a)
92954+static __latent_entropy void tasklet_hi_action(void)
92955 {
92956 struct tasklet_struct *list;
92957
92958@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
92959 .notifier_call = cpu_callback
92960 };
92961
92962-static struct smp_hotplug_thread softirq_threads = {
92963+static struct smp_hotplug_thread softirq_threads __read_only = {
92964 .store = &ksoftirqd,
92965 .thread_should_run = ksoftirqd_should_run,
92966 .thread_fn = run_ksoftirqd,
92967diff --git a/kernel/sys.c b/kernel/sys.c
92968index ea9c881..2194af5 100644
92969--- a/kernel/sys.c
92970+++ b/kernel/sys.c
92971@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
92972 error = -EACCES;
92973 goto out;
92974 }
92975+
92976+ if (gr_handle_chroot_setpriority(p, niceval)) {
92977+ error = -EACCES;
92978+ goto out;
92979+ }
92980+
92981 no_nice = security_task_setnice(p, niceval);
92982 if (no_nice) {
92983 error = no_nice;
92984@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
92985 goto error;
92986 }
92987
92988+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
92989+ goto error;
92990+
92991+ if (!gid_eq(new->gid, old->gid)) {
92992+ /* make sure we generate a learn log for what will
92993+ end up being a role transition after a full-learning
92994+ policy is generated
92995+ CAP_SETGID is required to perform a transition
92996+ we may not log a CAP_SETGID check above, e.g.
92997+ in the case where new rgid = old egid
92998+ */
92999+ gr_learn_cap(current, new, CAP_SETGID);
93000+ }
93001+
93002 if (rgid != (gid_t) -1 ||
93003 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93004 new->sgid = new->egid;
93005@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93006 old = current_cred();
93007
93008 retval = -EPERM;
93009+
93010+ if (gr_check_group_change(kgid, kgid, kgid))
93011+ goto error;
93012+
93013 if (ns_capable(old->user_ns, CAP_SETGID))
93014 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93015 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93016@@ -411,7 +435,7 @@ error:
93017 /*
93018 * change the user struct in a credentials set to match the new UID
93019 */
93020-static int set_user(struct cred *new)
93021+int set_user(struct cred *new)
93022 {
93023 struct user_struct *new_user;
93024
93025@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93026 goto error;
93027 }
93028
93029+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93030+ goto error;
93031+
93032 if (!uid_eq(new->uid, old->uid)) {
93033+ /* make sure we generate a learn log for what will
93034+ end up being a role transition after a full-learning
93035+ policy is generated
93036+ CAP_SETUID is required to perform a transition
93037+ we may not log a CAP_SETUID check above, e.g.
93038+ in the case where new ruid = old euid
93039+ */
93040+ gr_learn_cap(current, new, CAP_SETUID);
93041 retval = set_user(new);
93042 if (retval < 0)
93043 goto error;
93044@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93045 old = current_cred();
93046
93047 retval = -EPERM;
93048+
93049+ if (gr_check_crash_uid(kuid))
93050+ goto error;
93051+ if (gr_check_user_change(kuid, kuid, kuid))
93052+ goto error;
93053+
93054 if (ns_capable(old->user_ns, CAP_SETUID)) {
93055 new->suid = new->uid = kuid;
93056 if (!uid_eq(kuid, old->uid)) {
93057@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93058 goto error;
93059 }
93060
93061+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93062+ goto error;
93063+
93064 if (ruid != (uid_t) -1) {
93065 new->uid = kruid;
93066 if (!uid_eq(kruid, old->uid)) {
93067@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93068 goto error;
93069 }
93070
93071+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93072+ goto error;
93073+
93074 if (rgid != (gid_t) -1)
93075 new->gid = krgid;
93076 if (egid != (gid_t) -1)
93077@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93078 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93079 ns_capable(old->user_ns, CAP_SETUID)) {
93080 if (!uid_eq(kuid, old->fsuid)) {
93081+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93082+ goto error;
93083+
93084 new->fsuid = kuid;
93085 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93086 goto change_okay;
93087 }
93088 }
93089
93090+error:
93091 abort_creds(new);
93092 return old_fsuid;
93093
93094@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93095 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93096 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93097 ns_capable(old->user_ns, CAP_SETGID)) {
93098+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93099+ goto error;
93100+
93101 if (!gid_eq(kgid, old->fsgid)) {
93102 new->fsgid = kgid;
93103 goto change_okay;
93104 }
93105 }
93106
93107+error:
93108 abort_creds(new);
93109 return old_fsgid;
93110
93111@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93112 return -EFAULT;
93113
93114 down_read(&uts_sem);
93115- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93116+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93117 __OLD_UTS_LEN);
93118 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93119- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93120+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93121 __OLD_UTS_LEN);
93122 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93123- error |= __copy_to_user(&name->release, &utsname()->release,
93124+ error |= __copy_to_user(name->release, &utsname()->release,
93125 __OLD_UTS_LEN);
93126 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93127- error |= __copy_to_user(&name->version, &utsname()->version,
93128+ error |= __copy_to_user(name->version, &utsname()->version,
93129 __OLD_UTS_LEN);
93130 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93131- error |= __copy_to_user(&name->machine, &utsname()->machine,
93132+ error |= __copy_to_user(name->machine, &utsname()->machine,
93133 __OLD_UTS_LEN);
93134 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93135 up_read(&uts_sem);
93136@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93137 */
93138 new_rlim->rlim_cur = 1;
93139 }
93140+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93141+ is changed to a lower value. Since tasks can be created by the same
93142+ user in between this limit change and an execve by this task, force
93143+ a recheck only for this task by setting PF_NPROC_EXCEEDED
93144+ */
93145+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93146+ tsk->flags |= PF_NPROC_EXCEEDED;
93147 }
93148 if (!retval) {
93149 if (old_rlim)
93150diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93151index 88ea2d6..88acc77 100644
93152--- a/kernel/sysctl.c
93153+++ b/kernel/sysctl.c
93154@@ -94,7 +94,6 @@
93155
93156
93157 #if defined(CONFIG_SYSCTL)
93158-
93159 /* External variables not in a header file. */
93160 extern int max_threads;
93161 extern int suid_dumpable;
93162@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93163
93164 /* Constants used for minimum and maximum */
93165 #ifdef CONFIG_LOCKUP_DETECTOR
93166-static int sixty = 60;
93167+static int sixty __read_only = 60;
93168 #endif
93169
93170-static int __maybe_unused neg_one = -1;
93171+static int __maybe_unused neg_one __read_only = -1;
93172
93173-static int zero;
93174-static int __maybe_unused one = 1;
93175-static int __maybe_unused two = 2;
93176-static int __maybe_unused four = 4;
93177-static unsigned long one_ul = 1;
93178-static int one_hundred = 100;
93179+static int zero __read_only = 0;
93180+static int __maybe_unused one __read_only = 1;
93181+static int __maybe_unused two __read_only = 2;
93182+static int __maybe_unused three __read_only = 3;
93183+static int __maybe_unused four __read_only = 4;
93184+static unsigned long one_ul __read_only = 1;
93185+static int one_hundred __read_only = 100;
93186 #ifdef CONFIG_PRINTK
93187-static int ten_thousand = 10000;
93188+static int ten_thousand __read_only = 10000;
93189 #endif
93190
93191 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93192@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93193 void __user *buffer, size_t *lenp, loff_t *ppos);
93194 #endif
93195
93196-#ifdef CONFIG_PRINTK
93197 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93198 void __user *buffer, size_t *lenp, loff_t *ppos);
93199-#endif
93200
93201 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93202 void __user *buffer, size_t *lenp, loff_t *ppos);
93203@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93204
93205 #endif
93206
93207+extern struct ctl_table grsecurity_table[];
93208+
93209 static struct ctl_table kern_table[];
93210 static struct ctl_table vm_table[];
93211 static struct ctl_table fs_table[];
93212@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93213 int sysctl_legacy_va_layout;
93214 #endif
93215
93216+#ifdef CONFIG_PAX_SOFTMODE
93217+static struct ctl_table pax_table[] = {
93218+ {
93219+ .procname = "softmode",
93220+ .data = &pax_softmode,
93221+ .maxlen = sizeof(unsigned int),
93222+ .mode = 0600,
93223+ .proc_handler = &proc_dointvec,
93224+ },
93225+
93226+ { }
93227+};
93228+#endif
93229+
93230 /* The default sysctl tables: */
93231
93232 static struct ctl_table sysctl_base_table[] = {
93233@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93234 #endif
93235
93236 static struct ctl_table kern_table[] = {
93237+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93238+ {
93239+ .procname = "grsecurity",
93240+ .mode = 0500,
93241+ .child = grsecurity_table,
93242+ },
93243+#endif
93244+
93245+#ifdef CONFIG_PAX_SOFTMODE
93246+ {
93247+ .procname = "pax",
93248+ .mode = 0500,
93249+ .child = pax_table,
93250+ },
93251+#endif
93252+
93253 {
93254 .procname = "sched_child_runs_first",
93255 .data = &sysctl_sched_child_runs_first,
93256@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93257 .data = &modprobe_path,
93258 .maxlen = KMOD_PATH_LEN,
93259 .mode = 0644,
93260- .proc_handler = proc_dostring,
93261+ .proc_handler = proc_dostring_modpriv,
93262 },
93263 {
93264 .procname = "modules_disabled",
93265@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93266 .extra1 = &zero,
93267 .extra2 = &one,
93268 },
93269+#endif
93270 {
93271 .procname = "kptr_restrict",
93272 .data = &kptr_restrict,
93273 .maxlen = sizeof(int),
93274 .mode = 0644,
93275 .proc_handler = proc_dointvec_minmax_sysadmin,
93276+#ifdef CONFIG_GRKERNSEC_HIDESYM
93277+ .extra1 = &two,
93278+#else
93279 .extra1 = &zero,
93280+#endif
93281 .extra2 = &two,
93282 },
93283-#endif
93284 {
93285 .procname = "ngroups_max",
93286 .data = &ngroups_max,
93287@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93288 */
93289 {
93290 .procname = "perf_event_paranoid",
93291- .data = &sysctl_perf_event_paranoid,
93292- .maxlen = sizeof(sysctl_perf_event_paranoid),
93293+ .data = &sysctl_perf_event_legitimately_concerned,
93294+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93295 .mode = 0644,
93296- .proc_handler = proc_dointvec,
93297+ /* go ahead, be a hero */
93298+ .proc_handler = proc_dointvec_minmax_sysadmin,
93299+ .extra1 = &neg_one,
93300+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93301+ .extra2 = &three,
93302+#else
93303+ .extra2 = &two,
93304+#endif
93305 },
93306 {
93307 .procname = "perf_event_mlock_kb",
93308@@ -1340,6 +1381,13 @@ static struct ctl_table vm_table[] = {
93309 .proc_handler = proc_dointvec_minmax,
93310 .extra1 = &zero,
93311 },
93312+ {
93313+ .procname = "heap_stack_gap",
93314+ .data = &sysctl_heap_stack_gap,
93315+ .maxlen = sizeof(sysctl_heap_stack_gap),
93316+ .mode = 0644,
93317+ .proc_handler = proc_doulongvec_minmax,
93318+ },
93319 #else
93320 {
93321 .procname = "nr_trim_pages",
93322@@ -1822,6 +1870,16 @@ int proc_dostring(struct ctl_table *table, int write,
93323 (char __user *)buffer, lenp, ppos);
93324 }
93325
93326+int proc_dostring_modpriv(struct ctl_table *table, int write,
93327+ void __user *buffer, size_t *lenp, loff_t *ppos)
93328+{
93329+ if (write && !capable(CAP_SYS_MODULE))
93330+ return -EPERM;
93331+
93332+ return _proc_do_string(table->data, table->maxlen, write,
93333+ buffer, lenp, ppos);
93334+}
93335+
93336 static size_t proc_skip_spaces(char **buf)
93337 {
93338 size_t ret;
93339@@ -1927,6 +1985,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93340 len = strlen(tmp);
93341 if (len > *size)
93342 len = *size;
93343+ if (len > sizeof(tmp))
93344+ len = sizeof(tmp);
93345 if (copy_to_user(*buf, tmp, len))
93346 return -EFAULT;
93347 *size -= len;
93348@@ -2104,7 +2164,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93349 static int proc_taint(struct ctl_table *table, int write,
93350 void __user *buffer, size_t *lenp, loff_t *ppos)
93351 {
93352- struct ctl_table t;
93353+ ctl_table_no_const t;
93354 unsigned long tmptaint = get_taint();
93355 int err;
93356
93357@@ -2132,7 +2192,6 @@ static int proc_taint(struct ctl_table *table, int write,
93358 return err;
93359 }
93360
93361-#ifdef CONFIG_PRINTK
93362 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93363 void __user *buffer, size_t *lenp, loff_t *ppos)
93364 {
93365@@ -2141,7 +2200,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93366
93367 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93368 }
93369-#endif
93370
93371 struct do_proc_dointvec_minmax_conv_param {
93372 int *min;
93373@@ -2701,6 +2759,12 @@ int proc_dostring(struct ctl_table *table, int write,
93374 return -ENOSYS;
93375 }
93376
93377+int proc_dostring_modpriv(struct ctl_table *table, int write,
93378+ void __user *buffer, size_t *lenp, loff_t *ppos)
93379+{
93380+ return -ENOSYS;
93381+}
93382+
93383 int proc_dointvec(struct ctl_table *table, int write,
93384 void __user *buffer, size_t *lenp, loff_t *ppos)
93385 {
93386@@ -2757,5 +2821,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93387 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93388 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93389 EXPORT_SYMBOL(proc_dostring);
93390+EXPORT_SYMBOL(proc_dostring_modpriv);
93391 EXPORT_SYMBOL(proc_doulongvec_minmax);
93392 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
93393diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93394index 670fff8..a247812 100644
93395--- a/kernel/taskstats.c
93396+++ b/kernel/taskstats.c
93397@@ -28,9 +28,12 @@
93398 #include <linux/fs.h>
93399 #include <linux/file.h>
93400 #include <linux/pid_namespace.h>
93401+#include <linux/grsecurity.h>
93402 #include <net/genetlink.h>
93403 #include <linux/atomic.h>
93404
93405+extern int gr_is_taskstats_denied(int pid);
93406+
93407 /*
93408 * Maximum length of a cpumask that can be specified in
93409 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93410@@ -576,6 +579,9 @@ err:
93411
93412 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93413 {
93414+ if (gr_is_taskstats_denied(current->pid))
93415+ return -EACCES;
93416+
93417 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93418 return cmd_attr_register_cpumask(info);
93419 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93420diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93421index a7077d3..dd48a49 100644
93422--- a/kernel/time/alarmtimer.c
93423+++ b/kernel/time/alarmtimer.c
93424@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93425 struct platform_device *pdev;
93426 int error = 0;
93427 int i;
93428- struct k_clock alarm_clock = {
93429+ static struct k_clock alarm_clock = {
93430 .clock_getres = alarm_clock_getres,
93431 .clock_get = alarm_clock_get,
93432 .timer_create = alarm_timer_create,
93433diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93434index d8c724c..6b331a4 100644
93435--- a/kernel/time/hrtimer.c
93436+++ b/kernel/time/hrtimer.c
93437@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93438 local_irq_restore(flags);
93439 }
93440
93441-static void run_hrtimer_softirq(struct softirq_action *h)
93442+static __latent_entropy void run_hrtimer_softirq(void)
93443 {
93444 hrtimer_peek_ahead_timers();
93445 }
93446diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93447index a16b678..8c5bd9d 100644
93448--- a/kernel/time/posix-cpu-timers.c
93449+++ b/kernel/time/posix-cpu-timers.c
93450@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93451
93452 static __init int init_posix_cpu_timers(void)
93453 {
93454- struct k_clock process = {
93455+ static struct k_clock process = {
93456 .clock_getres = process_cpu_clock_getres,
93457 .clock_get = process_cpu_clock_get,
93458 .timer_create = process_cpu_timer_create,
93459 .nsleep = process_cpu_nsleep,
93460 .nsleep_restart = process_cpu_nsleep_restart,
93461 };
93462- struct k_clock thread = {
93463+ static struct k_clock thread = {
93464 .clock_getres = thread_cpu_clock_getres,
93465 .clock_get = thread_cpu_clock_get,
93466 .timer_create = thread_cpu_timer_create,
93467diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93468index 31ea01f..7fc61ef 100644
93469--- a/kernel/time/posix-timers.c
93470+++ b/kernel/time/posix-timers.c
93471@@ -43,6 +43,7 @@
93472 #include <linux/hash.h>
93473 #include <linux/posix-clock.h>
93474 #include <linux/posix-timers.h>
93475+#include <linux/grsecurity.h>
93476 #include <linux/syscalls.h>
93477 #include <linux/wait.h>
93478 #include <linux/workqueue.h>
93479@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93480 * which we beg off on and pass to do_sys_settimeofday().
93481 */
93482
93483-static struct k_clock posix_clocks[MAX_CLOCKS];
93484+static struct k_clock *posix_clocks[MAX_CLOCKS];
93485
93486 /*
93487 * These ones are defined below.
93488@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93489 */
93490 static __init int init_posix_timers(void)
93491 {
93492- struct k_clock clock_realtime = {
93493+ static struct k_clock clock_realtime = {
93494 .clock_getres = hrtimer_get_res,
93495 .clock_get = posix_clock_realtime_get,
93496 .clock_set = posix_clock_realtime_set,
93497@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93498 .timer_get = common_timer_get,
93499 .timer_del = common_timer_del,
93500 };
93501- struct k_clock clock_monotonic = {
93502+ static struct k_clock clock_monotonic = {
93503 .clock_getres = hrtimer_get_res,
93504 .clock_get = posix_ktime_get_ts,
93505 .nsleep = common_nsleep,
93506@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93507 .timer_get = common_timer_get,
93508 .timer_del = common_timer_del,
93509 };
93510- struct k_clock clock_monotonic_raw = {
93511+ static struct k_clock clock_monotonic_raw = {
93512 .clock_getres = hrtimer_get_res,
93513 .clock_get = posix_get_monotonic_raw,
93514 };
93515- struct k_clock clock_realtime_coarse = {
93516+ static struct k_clock clock_realtime_coarse = {
93517 .clock_getres = posix_get_coarse_res,
93518 .clock_get = posix_get_realtime_coarse,
93519 };
93520- struct k_clock clock_monotonic_coarse = {
93521+ static struct k_clock clock_monotonic_coarse = {
93522 .clock_getres = posix_get_coarse_res,
93523 .clock_get = posix_get_monotonic_coarse,
93524 };
93525- struct k_clock clock_tai = {
93526+ static struct k_clock clock_tai = {
93527 .clock_getres = hrtimer_get_res,
93528 .clock_get = posix_get_tai,
93529 .nsleep = common_nsleep,
93530@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93531 .timer_get = common_timer_get,
93532 .timer_del = common_timer_del,
93533 };
93534- struct k_clock clock_boottime = {
93535+ static struct k_clock clock_boottime = {
93536 .clock_getres = hrtimer_get_res,
93537 .clock_get = posix_get_boottime,
93538 .nsleep = common_nsleep,
93539@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93540 return;
93541 }
93542
93543- posix_clocks[clock_id] = *new_clock;
93544+ posix_clocks[clock_id] = new_clock;
93545 }
93546 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93547
93548@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93549 return (id & CLOCKFD_MASK) == CLOCKFD ?
93550 &clock_posix_dynamic : &clock_posix_cpu;
93551
93552- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93553+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93554 return NULL;
93555- return &posix_clocks[id];
93556+ return posix_clocks[id];
93557 }
93558
93559 static int common_timer_create(struct k_itimer *new_timer)
93560@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93561 struct k_clock *kc = clockid_to_kclock(which_clock);
93562 struct k_itimer *new_timer;
93563 int error, new_timer_id;
93564- sigevent_t event;
93565+ sigevent_t event = { };
93566 int it_id_set = IT_ID_NOT_SET;
93567
93568 if (!kc)
93569@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93570 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93571 return -EFAULT;
93572
93573+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93574+ have their clock_set fptr set to a nosettime dummy function
93575+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93576+ call common_clock_set, which calls do_sys_settimeofday, which
93577+ we hook
93578+ */
93579+
93580 return kc->clock_set(which_clock, &new_tp);
93581 }
93582
93583diff --git a/kernel/time/time.c b/kernel/time/time.c
93584index 2c85b77..6530536 100644
93585--- a/kernel/time/time.c
93586+++ b/kernel/time/time.c
93587@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93588 return error;
93589
93590 if (tz) {
93591+ /* we log in do_settimeofday called below, so don't log twice
93592+ */
93593+ if (!tv)
93594+ gr_log_timechange();
93595+
93596 sys_tz = *tz;
93597 update_vsyscall_tz();
93598 if (firsttime) {
93599diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93600index 6a93185..288c331 100644
93601--- a/kernel/time/timekeeping.c
93602+++ b/kernel/time/timekeeping.c
93603@@ -15,6 +15,7 @@
93604 #include <linux/init.h>
93605 #include <linux/mm.h>
93606 #include <linux/sched.h>
93607+#include <linux/grsecurity.h>
93608 #include <linux/syscore_ops.h>
93609 #include <linux/clocksource.h>
93610 #include <linux/jiffies.h>
93611@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93612 if (!timespec64_valid_strict(ts))
93613 return -EINVAL;
93614
93615+ gr_log_timechange();
93616+
93617 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93618 write_seqcount_begin(&tk_core.seq);
93619
93620diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93621index 2d3f5c5..7ed7dc5 100644
93622--- a/kernel/time/timer.c
93623+++ b/kernel/time/timer.c
93624@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93625 /*
93626 * This function runs timers and the timer-tq in bottom half context.
93627 */
93628-static void run_timer_softirq(struct softirq_action *h)
93629+static __latent_entropy void run_timer_softirq(void)
93630 {
93631 struct tvec_base *base = __this_cpu_read(tvec_bases);
93632
93633@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93634 *
93635 * In all cases the return value is guaranteed to be non-negative.
93636 */
93637-signed long __sched schedule_timeout(signed long timeout)
93638+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93639 {
93640 struct timer_list timer;
93641 unsigned long expire;
93642diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93643index 61ed862..3b52c65 100644
93644--- a/kernel/time/timer_list.c
93645+++ b/kernel/time/timer_list.c
93646@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93647
93648 static void print_name_offset(struct seq_file *m, void *sym)
93649 {
93650+#ifdef CONFIG_GRKERNSEC_HIDESYM
93651+ SEQ_printf(m, "<%p>", NULL);
93652+#else
93653 char symname[KSYM_NAME_LEN];
93654
93655 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93656 SEQ_printf(m, "<%pK>", sym);
93657 else
93658 SEQ_printf(m, "%s", symname);
93659+#endif
93660 }
93661
93662 static void
93663@@ -119,7 +123,11 @@ next_one:
93664 static void
93665 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93666 {
93667+#ifdef CONFIG_GRKERNSEC_HIDESYM
93668+ SEQ_printf(m, " .base: %p\n", NULL);
93669+#else
93670 SEQ_printf(m, " .base: %pK\n", base);
93671+#endif
93672 SEQ_printf(m, " .index: %d\n",
93673 base->index);
93674 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93675@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93676 {
93677 struct proc_dir_entry *pe;
93678
93679+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93680+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93681+#else
93682 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93683+#endif
93684 if (!pe)
93685 return -ENOMEM;
93686 return 0;
93687diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93688index 1fb08f2..ca4bb1e 100644
93689--- a/kernel/time/timer_stats.c
93690+++ b/kernel/time/timer_stats.c
93691@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93692 static unsigned long nr_entries;
93693 static struct entry entries[MAX_ENTRIES];
93694
93695-static atomic_t overflow_count;
93696+static atomic_unchecked_t overflow_count;
93697
93698 /*
93699 * The entries are in a hash-table, for fast lookup:
93700@@ -140,7 +140,7 @@ static void reset_entries(void)
93701 nr_entries = 0;
93702 memset(entries, 0, sizeof(entries));
93703 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93704- atomic_set(&overflow_count, 0);
93705+ atomic_set_unchecked(&overflow_count, 0);
93706 }
93707
93708 static struct entry *alloc_entry(void)
93709@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93710 if (likely(entry))
93711 entry->count++;
93712 else
93713- atomic_inc(&overflow_count);
93714+ atomic_inc_unchecked(&overflow_count);
93715
93716 out_unlock:
93717 raw_spin_unlock_irqrestore(lock, flags);
93718@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93719
93720 static void print_name_offset(struct seq_file *m, unsigned long addr)
93721 {
93722+#ifdef CONFIG_GRKERNSEC_HIDESYM
93723+ seq_printf(m, "<%p>", NULL);
93724+#else
93725 char symname[KSYM_NAME_LEN];
93726
93727 if (lookup_symbol_name(addr, symname) < 0)
93728- seq_printf(m, "<%p>", (void *)addr);
93729+ seq_printf(m, "<%pK>", (void *)addr);
93730 else
93731 seq_printf(m, "%s", symname);
93732+#endif
93733 }
93734
93735 static int tstats_show(struct seq_file *m, void *v)
93736@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93737
93738 seq_puts(m, "Timer Stats Version: v0.3\n");
93739 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93740- if (atomic_read(&overflow_count))
93741- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93742+ if (atomic_read_unchecked(&overflow_count))
93743+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93744 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93745
93746 for (i = 0; i < nr_entries; i++) {
93747@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93748 {
93749 struct proc_dir_entry *pe;
93750
93751+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93752+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93753+#else
93754 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93755+#endif
93756 if (!pe)
93757 return -ENOMEM;
93758 return 0;
93759diff --git a/kernel/torture.c b/kernel/torture.c
93760index dd70993..0bf694b 100644
93761--- a/kernel/torture.c
93762+++ b/kernel/torture.c
93763@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93764 mutex_lock(&fullstop_mutex);
93765 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93766 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93767- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93768+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93769 } else {
93770 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93771 }
93772@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
93773 if (!torture_must_stop()) {
93774 if (stutter > 1) {
93775 schedule_timeout_interruptible(stutter - 1);
93776- ACCESS_ONCE(stutter_pause_test) = 2;
93777+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93778 }
93779 schedule_timeout_interruptible(1);
93780- ACCESS_ONCE(stutter_pause_test) = 1;
93781+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93782 }
93783 if (!torture_must_stop())
93784 schedule_timeout_interruptible(stutter);
93785- ACCESS_ONCE(stutter_pause_test) = 0;
93786+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93787 torture_shutdown_absorb("torture_stutter");
93788 } while (!torture_must_stop());
93789 torture_kthread_stopping("torture_stutter");
93790@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
93791 schedule_timeout_uninterruptible(10);
93792 return true;
93793 }
93794- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93795+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93796 mutex_unlock(&fullstop_mutex);
93797 torture_shutdown_cleanup();
93798 torture_shuffle_cleanup();
93799diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93800index 483cecf..ac46091 100644
93801--- a/kernel/trace/blktrace.c
93802+++ b/kernel/trace/blktrace.c
93803@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93804 struct blk_trace *bt = filp->private_data;
93805 char buf[16];
93806
93807- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93808+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93809
93810 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93811 }
93812@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93813 return 1;
93814
93815 bt = buf->chan->private_data;
93816- atomic_inc(&bt->dropped);
93817+ atomic_inc_unchecked(&bt->dropped);
93818 return 0;
93819 }
93820
93821@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93822
93823 bt->dir = dir;
93824 bt->dev = dev;
93825- atomic_set(&bt->dropped, 0);
93826+ atomic_set_unchecked(&bt->dropped, 0);
93827 INIT_LIST_HEAD(&bt->running_list);
93828
93829 ret = -EIO;
93830diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93831index af5bffd..57664b8 100644
93832--- a/kernel/trace/ftrace.c
93833+++ b/kernel/trace/ftrace.c
93834@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93835 if (unlikely(ftrace_disabled))
93836 return 0;
93837
93838+ ret = ftrace_arch_code_modify_prepare();
93839+ FTRACE_WARN_ON(ret);
93840+ if (ret)
93841+ return 0;
93842+
93843 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93844+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93845 if (ret) {
93846 ftrace_bug(ret, rec);
93847- return 0;
93848 }
93849- return 1;
93850+ return ret ? 0 : 1;
93851 }
93852
93853 /*
93854@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
93855 if (!count)
93856 return 0;
93857
93858+ pax_open_kernel();
93859 sort(start, count, sizeof(*start),
93860 ftrace_cmp_ips, ftrace_swap_ips);
93861+ pax_close_kernel();
93862
93863 start_pg = ftrace_allocate_pages(count);
93864 if (!start_pg)
93865@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
93866
93867 if (t->ret_stack == NULL) {
93868 atomic_set(&t->tracing_graph_pause, 0);
93869- atomic_set(&t->trace_overrun, 0);
93870+ atomic_set_unchecked(&t->trace_overrun, 0);
93871 t->curr_ret_stack = -1;
93872 /* Make sure the tasks see the -1 first: */
93873 smp_wmb();
93874@@ -5876,7 +5883,7 @@ static void
93875 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
93876 {
93877 atomic_set(&t->tracing_graph_pause, 0);
93878- atomic_set(&t->trace_overrun, 0);
93879+ atomic_set_unchecked(&t->trace_overrun, 0);
93880 t->ftrace_timestamp = 0;
93881 /* make curr_ret_stack visible before we add the ret_stack */
93882 smp_wmb();
93883diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
93884index d2e151c..b68c835 100644
93885--- a/kernel/trace/ring_buffer.c
93886+++ b/kernel/trace/ring_buffer.c
93887@@ -350,9 +350,9 @@ struct buffer_data_page {
93888 */
93889 struct buffer_page {
93890 struct list_head list; /* list of buffer pages */
93891- local_t write; /* index for next write */
93892+ local_unchecked_t write; /* index for next write */
93893 unsigned read; /* index for next read */
93894- local_t entries; /* entries on this page */
93895+ local_unchecked_t entries; /* entries on this page */
93896 unsigned long real_end; /* real end of data */
93897 struct buffer_data_page *page; /* Actual data page */
93898 };
93899@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
93900 unsigned long last_overrun;
93901 local_t entries_bytes;
93902 local_t entries;
93903- local_t overrun;
93904- local_t commit_overrun;
93905+ local_unchecked_t overrun;
93906+ local_unchecked_t commit_overrun;
93907 local_t dropped_events;
93908 local_t committing;
93909 local_t commits;
93910@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93911 *
93912 * We add a counter to the write field to denote this.
93913 */
93914- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
93915- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
93916+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
93917+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
93918
93919 /*
93920 * Just make sure we have seen our old_write and synchronize
93921@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
93922 * cmpxchg to only update if an interrupt did not already
93923 * do it for us. If the cmpxchg fails, we don't care.
93924 */
93925- (void)local_cmpxchg(&next_page->write, old_write, val);
93926- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
93927+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
93928+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
93929
93930 /*
93931 * No need to worry about races with clearing out the commit.
93932@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
93933
93934 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
93935 {
93936- return local_read(&bpage->entries) & RB_WRITE_MASK;
93937+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
93938 }
93939
93940 static inline unsigned long rb_page_write(struct buffer_page *bpage)
93941 {
93942- return local_read(&bpage->write) & RB_WRITE_MASK;
93943+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
93944 }
93945
93946 static int
93947@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
93948 * bytes consumed in ring buffer from here.
93949 * Increment overrun to account for the lost events.
93950 */
93951- local_add(page_entries, &cpu_buffer->overrun);
93952+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
93953 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93954 }
93955
93956@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
93957 * it is our responsibility to update
93958 * the counters.
93959 */
93960- local_add(entries, &cpu_buffer->overrun);
93961+ local_add_unchecked(entries, &cpu_buffer->overrun);
93962 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
93963
93964 /*
93965@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93966 if (tail == BUF_PAGE_SIZE)
93967 tail_page->real_end = 0;
93968
93969- local_sub(length, &tail_page->write);
93970+ local_sub_unchecked(length, &tail_page->write);
93971 return;
93972 }
93973
93974@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93975 rb_event_set_padding(event);
93976
93977 /* Set the write back to the previous setting */
93978- local_sub(length, &tail_page->write);
93979+ local_sub_unchecked(length, &tail_page->write);
93980 return;
93981 }
93982
93983@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
93984
93985 /* Set write to end of buffer */
93986 length = (tail + length) - BUF_PAGE_SIZE;
93987- local_sub(length, &tail_page->write);
93988+ local_sub_unchecked(length, &tail_page->write);
93989 }
93990
93991 /*
93992@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
93993 * about it.
93994 */
93995 if (unlikely(next_page == commit_page)) {
93996- local_inc(&cpu_buffer->commit_overrun);
93997+ local_inc_unchecked(&cpu_buffer->commit_overrun);
93998 goto out_reset;
93999 }
94000
94001@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94002 cpu_buffer->tail_page) &&
94003 (cpu_buffer->commit_page ==
94004 cpu_buffer->reader_page))) {
94005- local_inc(&cpu_buffer->commit_overrun);
94006+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94007 goto out_reset;
94008 }
94009 }
94010@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94011 length += RB_LEN_TIME_EXTEND;
94012
94013 tail_page = cpu_buffer->tail_page;
94014- write = local_add_return(length, &tail_page->write);
94015+ write = local_add_return_unchecked(length, &tail_page->write);
94016
94017 /* set write to only the index of the write */
94018 write &= RB_WRITE_MASK;
94019@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94020 kmemcheck_annotate_bitfield(event, bitfield);
94021 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94022
94023- local_inc(&tail_page->entries);
94024+ local_inc_unchecked(&tail_page->entries);
94025
94026 /*
94027 * If this is the first commit on the page, then update
94028@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94029
94030 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94031 unsigned long write_mask =
94032- local_read(&bpage->write) & ~RB_WRITE_MASK;
94033+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94034 unsigned long event_length = rb_event_length(event);
94035 /*
94036 * This is on the tail page. It is possible that
94037@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94038 */
94039 old_index += write_mask;
94040 new_index += write_mask;
94041- index = local_cmpxchg(&bpage->write, old_index, new_index);
94042+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94043 if (index == old_index) {
94044 /* update counters */
94045 local_sub(event_length, &cpu_buffer->entries_bytes);
94046@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94047
94048 /* Do the likely case first */
94049 if (likely(bpage->page == (void *)addr)) {
94050- local_dec(&bpage->entries);
94051+ local_dec_unchecked(&bpage->entries);
94052 return;
94053 }
94054
94055@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94056 start = bpage;
94057 do {
94058 if (bpage->page == (void *)addr) {
94059- local_dec(&bpage->entries);
94060+ local_dec_unchecked(&bpage->entries);
94061 return;
94062 }
94063 rb_inc_page(cpu_buffer, &bpage);
94064@@ -3200,7 +3200,7 @@ static inline unsigned long
94065 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94066 {
94067 return local_read(&cpu_buffer->entries) -
94068- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94069+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94070 }
94071
94072 /**
94073@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94074 return 0;
94075
94076 cpu_buffer = buffer->buffers[cpu];
94077- ret = local_read(&cpu_buffer->overrun);
94078+ ret = local_read_unchecked(&cpu_buffer->overrun);
94079
94080 return ret;
94081 }
94082@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94083 return 0;
94084
94085 cpu_buffer = buffer->buffers[cpu];
94086- ret = local_read(&cpu_buffer->commit_overrun);
94087+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94088
94089 return ret;
94090 }
94091@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94092 /* if you care about this being correct, lock the buffer */
94093 for_each_buffer_cpu(buffer, cpu) {
94094 cpu_buffer = buffer->buffers[cpu];
94095- overruns += local_read(&cpu_buffer->overrun);
94096+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94097 }
94098
94099 return overruns;
94100@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94101 /*
94102 * Reset the reader page to size zero.
94103 */
94104- local_set(&cpu_buffer->reader_page->write, 0);
94105- local_set(&cpu_buffer->reader_page->entries, 0);
94106+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94107+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94108 local_set(&cpu_buffer->reader_page->page->commit, 0);
94109 cpu_buffer->reader_page->real_end = 0;
94110
94111@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94112 * want to compare with the last_overrun.
94113 */
94114 smp_mb();
94115- overwrite = local_read(&(cpu_buffer->overrun));
94116+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94117
94118 /*
94119 * Here's the tricky part.
94120@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94121
94122 cpu_buffer->head_page
94123 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94124- local_set(&cpu_buffer->head_page->write, 0);
94125- local_set(&cpu_buffer->head_page->entries, 0);
94126+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94127+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94128 local_set(&cpu_buffer->head_page->page->commit, 0);
94129
94130 cpu_buffer->head_page->read = 0;
94131@@ -4186,14 +4186,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94132
94133 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94134 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94135- local_set(&cpu_buffer->reader_page->write, 0);
94136- local_set(&cpu_buffer->reader_page->entries, 0);
94137+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94138+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94139 local_set(&cpu_buffer->reader_page->page->commit, 0);
94140 cpu_buffer->reader_page->read = 0;
94141
94142 local_set(&cpu_buffer->entries_bytes, 0);
94143- local_set(&cpu_buffer->overrun, 0);
94144- local_set(&cpu_buffer->commit_overrun, 0);
94145+ local_set_unchecked(&cpu_buffer->overrun, 0);
94146+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94147 local_set(&cpu_buffer->dropped_events, 0);
94148 local_set(&cpu_buffer->entries, 0);
94149 local_set(&cpu_buffer->committing, 0);
94150@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94151 rb_init_page(bpage);
94152 bpage = reader->page;
94153 reader->page = *data_page;
94154- local_set(&reader->write, 0);
94155- local_set(&reader->entries, 0);
94156+ local_set_unchecked(&reader->write, 0);
94157+ local_set_unchecked(&reader->entries, 0);
94158 reader->read = 0;
94159 *data_page = bpage;
94160
94161diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94162index 361a827..6a319a3 100644
94163--- a/kernel/trace/trace.c
94164+++ b/kernel/trace/trace.c
94165@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94166 return 0;
94167 }
94168
94169-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94170+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94171 {
94172 /* do nothing if flag is already set */
94173 if (!!(trace_flags & mask) == !!enabled)
94174diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94175index 8de48ba..3e5b4fa 100644
94176--- a/kernel/trace/trace.h
94177+++ b/kernel/trace/trace.h
94178@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94179 void trace_printk_init_buffers(void);
94180 void trace_printk_start_comm(void);
94181 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94182-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94183+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94184
94185 /*
94186 * Normal trace_printk() and friends allocates special buffers
94187diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94188index 57b67b1..66082a9 100644
94189--- a/kernel/trace/trace_clock.c
94190+++ b/kernel/trace/trace_clock.c
94191@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94192 return now;
94193 }
94194
94195-static atomic64_t trace_counter;
94196+static atomic64_unchecked_t trace_counter;
94197
94198 /*
94199 * trace_clock_counter(): simply an atomic counter.
94200@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94201 */
94202 u64 notrace trace_clock_counter(void)
94203 {
94204- return atomic64_add_return(1, &trace_counter);
94205+ return atomic64_inc_return_unchecked(&trace_counter);
94206 }
94207diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94208index b03a0ea..2df3168 100644
94209--- a/kernel/trace/trace_events.c
94210+++ b/kernel/trace/trace_events.c
94211@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94212 return 0;
94213 }
94214
94215-struct ftrace_module_file_ops;
94216 static void __add_event_to_tracers(struct ftrace_event_call *call);
94217
94218 /* Add an additional event_call dynamically */
94219diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94220index ba47600..d0e47fa 100644
94221--- a/kernel/trace/trace_functions_graph.c
94222+++ b/kernel/trace/trace_functions_graph.c
94223@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94224
94225 /* The return trace stack is full */
94226 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94227- atomic_inc(&current->trace_overrun);
94228+ atomic_inc_unchecked(&current->trace_overrun);
94229 return -EBUSY;
94230 }
94231
94232@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94233 *ret = current->ret_stack[index].ret;
94234 trace->func = current->ret_stack[index].func;
94235 trace->calltime = current->ret_stack[index].calltime;
94236- trace->overrun = atomic_read(&current->trace_overrun);
94237+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94238 trace->depth = index;
94239 }
94240
94241diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94242index 7a9ba62..2e0e4a1 100644
94243--- a/kernel/trace/trace_mmiotrace.c
94244+++ b/kernel/trace/trace_mmiotrace.c
94245@@ -24,7 +24,7 @@ struct header_iter {
94246 static struct trace_array *mmio_trace_array;
94247 static bool overrun_detected;
94248 static unsigned long prev_overruns;
94249-static atomic_t dropped_count;
94250+static atomic_unchecked_t dropped_count;
94251
94252 static void mmio_reset_data(struct trace_array *tr)
94253 {
94254@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94255
94256 static unsigned long count_overruns(struct trace_iterator *iter)
94257 {
94258- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94259+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94260 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94261
94262 if (over > prev_overruns)
94263@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94264 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94265 sizeof(*entry), 0, pc);
94266 if (!event) {
94267- atomic_inc(&dropped_count);
94268+ atomic_inc_unchecked(&dropped_count);
94269 return;
94270 }
94271 entry = ring_buffer_event_data(event);
94272@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94273 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94274 sizeof(*entry), 0, pc);
94275 if (!event) {
94276- atomic_inc(&dropped_count);
94277+ atomic_inc_unchecked(&dropped_count);
94278 return;
94279 }
94280 entry = ring_buffer_event_data(event);
94281diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94282index b77b9a6..82f19bd 100644
94283--- a/kernel/trace/trace_output.c
94284+++ b/kernel/trace/trace_output.c
94285@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94286 goto out;
94287 }
94288
94289+ pax_open_kernel();
94290 if (event->funcs->trace == NULL)
94291- event->funcs->trace = trace_nop_print;
94292+ *(void **)&event->funcs->trace = trace_nop_print;
94293 if (event->funcs->raw == NULL)
94294- event->funcs->raw = trace_nop_print;
94295+ *(void **)&event->funcs->raw = trace_nop_print;
94296 if (event->funcs->hex == NULL)
94297- event->funcs->hex = trace_nop_print;
94298+ *(void **)&event->funcs->hex = trace_nop_print;
94299 if (event->funcs->binary == NULL)
94300- event->funcs->binary = trace_nop_print;
94301+ *(void **)&event->funcs->binary = trace_nop_print;
94302+ pax_close_kernel();
94303
94304 key = event->type & (EVENT_HASHSIZE - 1);
94305
94306diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94307index f8b45d8..70ff6c8 100644
94308--- a/kernel/trace/trace_seq.c
94309+++ b/kernel/trace/trace_seq.c
94310@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94311 return 0;
94312 }
94313
94314- seq_buf_path(&s->seq, path, "\n");
94315+ seq_buf_path(&s->seq, path, "\n\\");
94316
94317 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94318 s->seq.len = save_len;
94319diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94320index 16eddb3..758b308 100644
94321--- a/kernel/trace/trace_stack.c
94322+++ b/kernel/trace/trace_stack.c
94323@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94324 return;
94325
94326 /* we do not handle interrupt stacks yet */
94327- if (!object_is_on_stack(stack))
94328+ if (!object_starts_on_stack(stack))
94329 return;
94330
94331 local_irq_save(flags);
94332diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94333index c6ee36f..78513f3 100644
94334--- a/kernel/trace/trace_syscalls.c
94335+++ b/kernel/trace/trace_syscalls.c
94336@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94337 int num;
94338
94339 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94340+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94341+ return -EINVAL;
94342
94343 mutex_lock(&syscall_trace_lock);
94344 if (!sys_perf_refcount_enter)
94345@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94346 int num;
94347
94348 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94349+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94350+ return;
94351
94352 mutex_lock(&syscall_trace_lock);
94353 sys_perf_refcount_enter--;
94354@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94355 int num;
94356
94357 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94358+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94359+ return -EINVAL;
94360
94361 mutex_lock(&syscall_trace_lock);
94362 if (!sys_perf_refcount_exit)
94363@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94364 int num;
94365
94366 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94367+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94368+ return;
94369
94370 mutex_lock(&syscall_trace_lock);
94371 sys_perf_refcount_exit--;
94372diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94373index 4109f83..fe1f830 100644
94374--- a/kernel/user_namespace.c
94375+++ b/kernel/user_namespace.c
94376@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94377 !kgid_has_mapping(parent_ns, group))
94378 return -EPERM;
94379
94380+#ifdef CONFIG_GRKERNSEC
94381+ /*
94382+ * This doesn't really inspire confidence:
94383+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94384+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94385+ * Increases kernel attack surface in areas developers
94386+ * previously cared little about ("low importance due
94387+ * to requiring "root" capability")
94388+ * To be removed when this code receives *proper* review
94389+ */
94390+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94391+ !capable(CAP_SETGID))
94392+ return -EPERM;
94393+#endif
94394+
94395 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94396 if (!ns)
94397 return -ENOMEM;
94398@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94399 if (atomic_read(&current->mm->mm_users) > 1)
94400 return -EINVAL;
94401
94402- if (current->fs->users != 1)
94403+ if (atomic_read(&current->fs->users) != 1)
94404 return -EINVAL;
94405
94406 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94407diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94408index c8eac43..4b5f08f 100644
94409--- a/kernel/utsname_sysctl.c
94410+++ b/kernel/utsname_sysctl.c
94411@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94412 static int proc_do_uts_string(struct ctl_table *table, int write,
94413 void __user *buffer, size_t *lenp, loff_t *ppos)
94414 {
94415- struct ctl_table uts_table;
94416+ ctl_table_no_const uts_table;
94417 int r;
94418 memcpy(&uts_table, table, sizeof(uts_table));
94419 uts_table.data = get_uts(table, write);
94420diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94421index 70bf118..4be3c37 100644
94422--- a/kernel/watchdog.c
94423+++ b/kernel/watchdog.c
94424@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94425 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94426 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94427
94428-static struct smp_hotplug_thread watchdog_threads = {
94429+static struct smp_hotplug_thread watchdog_threads __read_only = {
94430 .store = &softlockup_watchdog,
94431 .thread_should_run = watchdog_should_run,
94432 .thread_fn = watchdog,
94433diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94434index 82d0c8d..37f4222 100644
94435--- a/kernel/workqueue.c
94436+++ b/kernel/workqueue.c
94437@@ -4565,7 +4565,7 @@ static void rebind_workers(struct worker_pool *pool)
94438 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94439 worker_flags |= WORKER_REBOUND;
94440 worker_flags &= ~WORKER_UNBOUND;
94441- ACCESS_ONCE(worker->flags) = worker_flags;
94442+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94443 }
94444
94445 spin_unlock_irq(&pool->lock);
94446diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94447index 5f2ce61..85a0b1b 100644
94448--- a/lib/Kconfig.debug
94449+++ b/lib/Kconfig.debug
94450@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94451
94452 config DEBUG_WW_MUTEX_SLOWPATH
94453 bool "Wait/wound mutex debugging: Slowpath testing"
94454- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94455+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94456 select DEBUG_LOCK_ALLOC
94457 select DEBUG_SPINLOCK
94458 select DEBUG_MUTEXES
94459@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94460
94461 config DEBUG_LOCK_ALLOC
94462 bool "Lock debugging: detect incorrect freeing of live locks"
94463- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94464+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94465 select DEBUG_SPINLOCK
94466 select DEBUG_MUTEXES
94467 select LOCKDEP
94468@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94469
94470 config PROVE_LOCKING
94471 bool "Lock debugging: prove locking correctness"
94472- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94473+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94474 select LOCKDEP
94475 select DEBUG_SPINLOCK
94476 select DEBUG_MUTEXES
94477@@ -992,7 +992,7 @@ config LOCKDEP
94478
94479 config LOCK_STAT
94480 bool "Lock usage statistics"
94481- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94482+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94483 select LOCKDEP
94484 select DEBUG_SPINLOCK
94485 select DEBUG_MUTEXES
94486@@ -1453,6 +1453,7 @@ config LATENCYTOP
94487 depends on DEBUG_KERNEL
94488 depends on STACKTRACE_SUPPORT
94489 depends on PROC_FS
94490+ depends on !GRKERNSEC_HIDESYM
94491 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94492 select KALLSYMS
94493 select KALLSYMS_ALL
94494@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94495 config DEBUG_STRICT_USER_COPY_CHECKS
94496 bool "Strict user copy size checks"
94497 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94498- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94499+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94500 help
94501 Enabling this option turns a certain set of sanity checks for user
94502 copy operations into compile time failures.
94503@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94504
94505 config PROVIDE_OHCI1394_DMA_INIT
94506 bool "Remote debugging over FireWire early on boot"
94507- depends on PCI && X86
94508+ depends on PCI && X86 && !GRKERNSEC
94509 help
94510 If you want to debug problems which hang or crash the kernel early
94511 on boot and the crashing machine has a FireWire port, you can use
94512diff --git a/lib/Makefile b/lib/Makefile
94513index 3c3b30b..ca29102 100644
94514--- a/lib/Makefile
94515+++ b/lib/Makefile
94516@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94517 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94518 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94519 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94520-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94521+obj-y += list_debug.o
94522 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94523
94524 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94525diff --git a/lib/average.c b/lib/average.c
94526index 114d1be..ab0350c 100644
94527--- a/lib/average.c
94528+++ b/lib/average.c
94529@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94530 {
94531 unsigned long internal = ACCESS_ONCE(avg->internal);
94532
94533- ACCESS_ONCE(avg->internal) = internal ?
94534+ ACCESS_ONCE_RW(avg->internal) = internal ?
94535 (((internal << avg->weight) - internal) +
94536 (val << avg->factor)) >> avg->weight :
94537 (val << avg->factor);
94538diff --git a/lib/bitmap.c b/lib/bitmap.c
94539index 324ea9e..46b1ae2 100644
94540--- a/lib/bitmap.c
94541+++ b/lib/bitmap.c
94542@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94543 }
94544 EXPORT_SYMBOL(__bitmap_subset);
94545
94546-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94547+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94548 {
94549 unsigned int k, lim = bits/BITS_PER_LONG;
94550 int w = 0;
94551@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94552 {
94553 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94554 u32 chunk;
94555- const char __user __force *ubuf = (const char __user __force *)buf;
94556+ const char __user *ubuf = (const char __force_user *)buf;
94557
94558 bitmap_zero(maskp, nmaskbits);
94559
94560@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94561 {
94562 if (!access_ok(VERIFY_READ, ubuf, ulen))
94563 return -EFAULT;
94564- return __bitmap_parse((const char __force *)ubuf,
94565+ return __bitmap_parse((const char __force_kernel *)ubuf,
94566 ulen, 1, maskp, nmaskbits);
94567
94568 }
94569@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94570 {
94571 unsigned a, b;
94572 int c, old_c, totaldigits;
94573- const char __user __force *ubuf = (const char __user __force *)buf;
94574+ const char __user *ubuf = (const char __force_user *)buf;
94575 int exp_digit, in_range;
94576
94577 totaldigits = c = 0;
94578@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94579 {
94580 if (!access_ok(VERIFY_READ, ubuf, ulen))
94581 return -EFAULT;
94582- return __bitmap_parselist((const char __force *)ubuf,
94583+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94584 ulen, 1, maskp, nmaskbits);
94585 }
94586 EXPORT_SYMBOL(bitmap_parselist_user);
94587diff --git a/lib/bug.c b/lib/bug.c
94588index 0c3bd95..5a615a1 100644
94589--- a/lib/bug.c
94590+++ b/lib/bug.c
94591@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94592 return BUG_TRAP_TYPE_NONE;
94593
94594 bug = find_bug(bugaddr);
94595+ if (!bug)
94596+ return BUG_TRAP_TYPE_NONE;
94597
94598 file = NULL;
94599 line = 0;
94600diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94601index 547f7f9..a6d4ba0 100644
94602--- a/lib/debugobjects.c
94603+++ b/lib/debugobjects.c
94604@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94605 if (limit > 4)
94606 return;
94607
94608- is_on_stack = object_is_on_stack(addr);
94609+ is_on_stack = object_starts_on_stack(addr);
94610 if (is_on_stack == onstack)
94611 return;
94612
94613diff --git a/lib/div64.c b/lib/div64.c
94614index 4382ad7..08aa558 100644
94615--- a/lib/div64.c
94616+++ b/lib/div64.c
94617@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94618 EXPORT_SYMBOL(__div64_32);
94619
94620 #ifndef div_s64_rem
94621-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94622+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94623 {
94624 u64 quotient;
94625
94626@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94627 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94628 */
94629 #ifndef div64_u64
94630-u64 div64_u64(u64 dividend, u64 divisor)
94631+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94632 {
94633 u32 high = divisor >> 32;
94634 u64 quot;
94635diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94636index 9722bd2..0d826f4 100644
94637--- a/lib/dma-debug.c
94638+++ b/lib/dma-debug.c
94639@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94640
94641 void dma_debug_add_bus(struct bus_type *bus)
94642 {
94643- struct notifier_block *nb;
94644+ notifier_block_no_const *nb;
94645
94646 if (dma_debug_disabled())
94647 return;
94648@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94649
94650 static void check_for_stack(struct device *dev, void *addr)
94651 {
94652- if (object_is_on_stack(addr))
94653+ if (object_starts_on_stack(addr))
94654 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
94655 "stack [addr=%p]\n", addr);
94656 }
94657diff --git a/lib/inflate.c b/lib/inflate.c
94658index 013a761..c28f3fc 100644
94659--- a/lib/inflate.c
94660+++ b/lib/inflate.c
94661@@ -269,7 +269,7 @@ static void free(void *where)
94662 malloc_ptr = free_mem_ptr;
94663 }
94664 #else
94665-#define malloc(a) kmalloc(a, GFP_KERNEL)
94666+#define malloc(a) kmalloc((a), GFP_KERNEL)
94667 #define free(a) kfree(a)
94668 #endif
94669
94670diff --git a/lib/ioremap.c b/lib/ioremap.c
94671index 0c9216c..863bd89 100644
94672--- a/lib/ioremap.c
94673+++ b/lib/ioremap.c
94674@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94675 unsigned long next;
94676
94677 phys_addr -= addr;
94678- pmd = pmd_alloc(&init_mm, pud, addr);
94679+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94680 if (!pmd)
94681 return -ENOMEM;
94682 do {
94683@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94684 unsigned long next;
94685
94686 phys_addr -= addr;
94687- pud = pud_alloc(&init_mm, pgd, addr);
94688+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94689 if (!pud)
94690 return -ENOMEM;
94691 do {
94692diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94693index bd2bea9..6b3c95e 100644
94694--- a/lib/is_single_threaded.c
94695+++ b/lib/is_single_threaded.c
94696@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94697 struct task_struct *p, *t;
94698 bool ret;
94699
94700+ if (!mm)
94701+ return true;
94702+
94703 if (atomic_read(&task->signal->live) != 1)
94704 return false;
94705
94706diff --git a/lib/kobject.c b/lib/kobject.c
94707index 03d4ab3..46f6374 100644
94708--- a/lib/kobject.c
94709+++ b/lib/kobject.c
94710@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94711
94712
94713 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94714-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94715+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94716
94717-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94718+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94719 {
94720 enum kobj_ns_type type = ops->type;
94721 int error;
94722diff --git a/lib/list_debug.c b/lib/list_debug.c
94723index c24c2f7..f0296f4 100644
94724--- a/lib/list_debug.c
94725+++ b/lib/list_debug.c
94726@@ -11,7 +11,9 @@
94727 #include <linux/bug.h>
94728 #include <linux/kernel.h>
94729 #include <linux/rculist.h>
94730+#include <linux/mm.h>
94731
94732+#ifdef CONFIG_DEBUG_LIST
94733 /*
94734 * Insert a new entry between two known consecutive entries.
94735 *
94736@@ -19,21 +21,40 @@
94737 * the prev/next entries already!
94738 */
94739
94740+static bool __list_add_debug(struct list_head *new,
94741+ struct list_head *prev,
94742+ struct list_head *next)
94743+{
94744+ if (unlikely(next->prev != prev)) {
94745+ printk(KERN_ERR "list_add corruption. next->prev should be "
94746+ "prev (%p), but was %p. (next=%p).\n",
94747+ prev, next->prev, next);
94748+ BUG();
94749+ return false;
94750+ }
94751+ if (unlikely(prev->next != next)) {
94752+ printk(KERN_ERR "list_add corruption. prev->next should be "
94753+ "next (%p), but was %p. (prev=%p).\n",
94754+ next, prev->next, prev);
94755+ BUG();
94756+ return false;
94757+ }
94758+ if (unlikely(new == prev || new == next)) {
94759+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94760+ new, prev, next);
94761+ BUG();
94762+ return false;
94763+ }
94764+ return true;
94765+}
94766+
94767 void __list_add(struct list_head *new,
94768- struct list_head *prev,
94769- struct list_head *next)
94770+ struct list_head *prev,
94771+ struct list_head *next)
94772 {
94773- WARN(next->prev != prev,
94774- "list_add corruption. next->prev should be "
94775- "prev (%p), but was %p. (next=%p).\n",
94776- prev, next->prev, next);
94777- WARN(prev->next != next,
94778- "list_add corruption. prev->next should be "
94779- "next (%p), but was %p. (prev=%p).\n",
94780- next, prev->next, prev);
94781- WARN(new == prev || new == next,
94782- "list_add double add: new=%p, prev=%p, next=%p.\n",
94783- new, prev, next);
94784+ if (!__list_add_debug(new, prev, next))
94785+ return;
94786+
94787 next->prev = new;
94788 new->next = next;
94789 new->prev = prev;
94790@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94791 }
94792 EXPORT_SYMBOL(__list_add);
94793
94794-void __list_del_entry(struct list_head *entry)
94795+static bool __list_del_entry_debug(struct list_head *entry)
94796 {
94797 struct list_head *prev, *next;
94798
94799 prev = entry->prev;
94800 next = entry->next;
94801
94802- if (WARN(next == LIST_POISON1,
94803- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94804- entry, LIST_POISON1) ||
94805- WARN(prev == LIST_POISON2,
94806- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94807- entry, LIST_POISON2) ||
94808- WARN(prev->next != entry,
94809- "list_del corruption. prev->next should be %p, "
94810- "but was %p\n", entry, prev->next) ||
94811- WARN(next->prev != entry,
94812- "list_del corruption. next->prev should be %p, "
94813- "but was %p\n", entry, next->prev))
94814+ if (unlikely(next == LIST_POISON1)) {
94815+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94816+ entry, LIST_POISON1);
94817+ BUG();
94818+ return false;
94819+ }
94820+ if (unlikely(prev == LIST_POISON2)) {
94821+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94822+ entry, LIST_POISON2);
94823+ BUG();
94824+ return false;
94825+ }
94826+ if (unlikely(entry->prev->next != entry)) {
94827+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94828+ "but was %p\n", entry, prev->next);
94829+ BUG();
94830+ return false;
94831+ }
94832+ if (unlikely(entry->next->prev != entry)) {
94833+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94834+ "but was %p\n", entry, next->prev);
94835+ BUG();
94836+ return false;
94837+ }
94838+ return true;
94839+}
94840+
94841+void __list_del_entry(struct list_head *entry)
94842+{
94843+ if (!__list_del_entry_debug(entry))
94844 return;
94845
94846- __list_del(prev, next);
94847+ __list_del(entry->prev, entry->next);
94848 }
94849 EXPORT_SYMBOL(__list_del_entry);
94850
94851@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94852 void __list_add_rcu(struct list_head *new,
94853 struct list_head *prev, struct list_head *next)
94854 {
94855- WARN(next->prev != prev,
94856- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
94857- prev, next->prev, next);
94858- WARN(prev->next != next,
94859- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
94860- next, prev->next, prev);
94861+ if (!__list_add_debug(new, prev, next))
94862+ return;
94863+
94864 new->next = next;
94865 new->prev = prev;
94866 rcu_assign_pointer(list_next_rcu(prev), new);
94867 next->prev = new;
94868 }
94869 EXPORT_SYMBOL(__list_add_rcu);
94870+#endif
94871+
94872+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
94873+{
94874+#ifdef CONFIG_DEBUG_LIST
94875+ if (!__list_add_debug(new, prev, next))
94876+ return;
94877+#endif
94878+
94879+ pax_open_kernel();
94880+ next->prev = new;
94881+ new->next = next;
94882+ new->prev = prev;
94883+ prev->next = new;
94884+ pax_close_kernel();
94885+}
94886+EXPORT_SYMBOL(__pax_list_add);
94887+
94888+void pax_list_del(struct list_head *entry)
94889+{
94890+#ifdef CONFIG_DEBUG_LIST
94891+ if (!__list_del_entry_debug(entry))
94892+ return;
94893+#endif
94894+
94895+ pax_open_kernel();
94896+ __list_del(entry->prev, entry->next);
94897+ entry->next = LIST_POISON1;
94898+ entry->prev = LIST_POISON2;
94899+ pax_close_kernel();
94900+}
94901+EXPORT_SYMBOL(pax_list_del);
94902+
94903+void pax_list_del_init(struct list_head *entry)
94904+{
94905+ pax_open_kernel();
94906+ __list_del(entry->prev, entry->next);
94907+ INIT_LIST_HEAD(entry);
94908+ pax_close_kernel();
94909+}
94910+EXPORT_SYMBOL(pax_list_del_init);
94911+
94912+void __pax_list_add_rcu(struct list_head *new,
94913+ struct list_head *prev, struct list_head *next)
94914+{
94915+#ifdef CONFIG_DEBUG_LIST
94916+ if (!__list_add_debug(new, prev, next))
94917+ return;
94918+#endif
94919+
94920+ pax_open_kernel();
94921+ new->next = next;
94922+ new->prev = prev;
94923+ rcu_assign_pointer(list_next_rcu(prev), new);
94924+ next->prev = new;
94925+ pax_close_kernel();
94926+}
94927+EXPORT_SYMBOL(__pax_list_add_rcu);
94928+
94929+void pax_list_del_rcu(struct list_head *entry)
94930+{
94931+#ifdef CONFIG_DEBUG_LIST
94932+ if (!__list_del_entry_debug(entry))
94933+ return;
94934+#endif
94935+
94936+ pax_open_kernel();
94937+ __list_del(entry->prev, entry->next);
94938+ entry->next = LIST_POISON1;
94939+ entry->prev = LIST_POISON2;
94940+ pax_close_kernel();
94941+}
94942+EXPORT_SYMBOL(pax_list_del_rcu);
94943diff --git a/lib/lockref.c b/lib/lockref.c
94944index d2233de..fa1a2f6 100644
94945--- a/lib/lockref.c
94946+++ b/lib/lockref.c
94947@@ -48,13 +48,13 @@
94948 void lockref_get(struct lockref *lockref)
94949 {
94950 CMPXCHG_LOOP(
94951- new.count++;
94952+ __lockref_inc(&new);
94953 ,
94954 return;
94955 );
94956
94957 spin_lock(&lockref->lock);
94958- lockref->count++;
94959+ __lockref_inc(lockref);
94960 spin_unlock(&lockref->lock);
94961 }
94962 EXPORT_SYMBOL(lockref_get);
94963@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94964 int retval;
94965
94966 CMPXCHG_LOOP(
94967- new.count++;
94968+ __lockref_inc(&new);
94969 if (!old.count)
94970 return 0;
94971 ,
94972@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
94973 spin_lock(&lockref->lock);
94974 retval = 0;
94975 if (lockref->count) {
94976- lockref->count++;
94977+ __lockref_inc(lockref);
94978 retval = 1;
94979 }
94980 spin_unlock(&lockref->lock);
94981@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
94982 int lockref_get_or_lock(struct lockref *lockref)
94983 {
94984 CMPXCHG_LOOP(
94985- new.count++;
94986+ __lockref_inc(&new);
94987 if (!old.count)
94988 break;
94989 ,
94990@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
94991 spin_lock(&lockref->lock);
94992 if (!lockref->count)
94993 return 0;
94994- lockref->count++;
94995+ __lockref_inc(lockref);
94996 spin_unlock(&lockref->lock);
94997 return 1;
94998 }
94999@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95000 int lockref_put_or_lock(struct lockref *lockref)
95001 {
95002 CMPXCHG_LOOP(
95003- new.count--;
95004+ __lockref_dec(&new);
95005 if (old.count <= 1)
95006 break;
95007 ,
95008@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95009 spin_lock(&lockref->lock);
95010 if (lockref->count <= 1)
95011 return 0;
95012- lockref->count--;
95013+ __lockref_dec(lockref);
95014 spin_unlock(&lockref->lock);
95015 return 1;
95016 }
95017@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95018 int retval;
95019
95020 CMPXCHG_LOOP(
95021- new.count++;
95022+ __lockref_inc(&new);
95023 if ((int)old.count < 0)
95024 return 0;
95025 ,
95026@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95027 spin_lock(&lockref->lock);
95028 retval = 0;
95029 if ((int) lockref->count >= 0) {
95030- lockref->count++;
95031+ __lockref_inc(lockref);
95032 retval = 1;
95033 }
95034 spin_unlock(&lockref->lock);
95035diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95036index 6111bcb..02e816b 100644
95037--- a/lib/percpu-refcount.c
95038+++ b/lib/percpu-refcount.c
95039@@ -31,7 +31,7 @@
95040 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95041 */
95042
95043-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95044+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95045
95046 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95047
95048diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95049index 3291a8e..346a91e 100644
95050--- a/lib/radix-tree.c
95051+++ b/lib/radix-tree.c
95052@@ -67,7 +67,7 @@ struct radix_tree_preload {
95053 int nr;
95054 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95055 };
95056-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95057+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95058
95059 static inline void *ptr_to_indirect(void *ptr)
95060 {
95061diff --git a/lib/random32.c b/lib/random32.c
95062index 0bee183..526f12f 100644
95063--- a/lib/random32.c
95064+++ b/lib/random32.c
95065@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95066 }
95067 #endif
95068
95069-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95070+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95071
95072 /**
95073 * prandom_u32_state - seeded pseudo-random number generator.
95074diff --git a/lib/rbtree.c b/lib/rbtree.c
95075index c16c81a..4dcbda1 100644
95076--- a/lib/rbtree.c
95077+++ b/lib/rbtree.c
95078@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95079 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95080
95081 static const struct rb_augment_callbacks dummy_callbacks = {
95082- dummy_propagate, dummy_copy, dummy_rotate
95083+ .propagate = dummy_propagate,
95084+ .copy = dummy_copy,
95085+ .rotate = dummy_rotate
95086 };
95087
95088 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95089diff --git a/lib/show_mem.c b/lib/show_mem.c
95090index 7de89f4..00d70b7 100644
95091--- a/lib/show_mem.c
95092+++ b/lib/show_mem.c
95093@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95094 quicklist_total_size());
95095 #endif
95096 #ifdef CONFIG_MEMORY_FAILURE
95097- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95098+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95099 #endif
95100 }
95101diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95102index bb2b201..46abaf9 100644
95103--- a/lib/strncpy_from_user.c
95104+++ b/lib/strncpy_from_user.c
95105@@ -21,7 +21,7 @@
95106 */
95107 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95108 {
95109- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95110+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95111 long res = 0;
95112
95113 /*
95114diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95115index a28df52..3d55877 100644
95116--- a/lib/strnlen_user.c
95117+++ b/lib/strnlen_user.c
95118@@ -26,7 +26,7 @@
95119 */
95120 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95121 {
95122- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95123+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95124 long align, res = 0;
95125 unsigned long c;
95126
95127diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95128index 4abda07..b9d3765 100644
95129--- a/lib/swiotlb.c
95130+++ b/lib/swiotlb.c
95131@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95132
95133 void
95134 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95135- dma_addr_t dev_addr)
95136+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95137 {
95138 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95139
95140diff --git a/lib/usercopy.c b/lib/usercopy.c
95141index 4f5b1dd..7cab418 100644
95142--- a/lib/usercopy.c
95143+++ b/lib/usercopy.c
95144@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95145 WARN(1, "Buffer overflow detected!\n");
95146 }
95147 EXPORT_SYMBOL(copy_from_user_overflow);
95148+
95149+void copy_to_user_overflow(void)
95150+{
95151+ WARN(1, "Buffer overflow detected!\n");
95152+}
95153+EXPORT_SYMBOL(copy_to_user_overflow);
95154diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95155index ec337f6..8484eb2 100644
95156--- a/lib/vsprintf.c
95157+++ b/lib/vsprintf.c
95158@@ -16,6 +16,9 @@
95159 * - scnprintf and vscnprintf
95160 */
95161
95162+#ifdef CONFIG_GRKERNSEC_HIDESYM
95163+#define __INCLUDED_BY_HIDESYM 1
95164+#endif
95165 #include <stdarg.h>
95166 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95167 #include <linux/types.h>
95168@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95169 #ifdef CONFIG_KALLSYMS
95170 if (*fmt == 'B')
95171 sprint_backtrace(sym, value);
95172- else if (*fmt != 'f' && *fmt != 's')
95173+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95174 sprint_symbol(sym, value);
95175 else
95176 sprint_symbol_no_offset(sym, value);
95177@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95178 return number(buf, end, num, spec);
95179 }
95180
95181+#ifdef CONFIG_GRKERNSEC_HIDESYM
95182+int kptr_restrict __read_mostly = 2;
95183+#else
95184 int kptr_restrict __read_mostly;
95185+#endif
95186
95187 /*
95188 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95189@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95190 *
95191 * - 'F' For symbolic function descriptor pointers with offset
95192 * - 'f' For simple symbolic function names without offset
95193+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95194 * - 'S' For symbolic direct pointers with offset
95195 * - 's' For symbolic direct pointers without offset
95196+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95197 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95198 * - 'B' For backtraced symbolic direct pointers with offset
95199 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95200@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95201
95202 if (!ptr && *fmt != 'K') {
95203 /*
95204- * Print (null) with the same width as a pointer so it makes
95205+ * Print (nil) with the same width as a pointer so it makes
95206 * tabular output look nice.
95207 */
95208 if (spec.field_width == -1)
95209 spec.field_width = default_width;
95210- return string(buf, end, "(null)", spec);
95211+ return string(buf, end, "(nil)", spec);
95212 }
95213
95214 switch (*fmt) {
95215@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95216 /* Fallthrough */
95217 case 'S':
95218 case 's':
95219+#ifdef CONFIG_GRKERNSEC_HIDESYM
95220+ break;
95221+#else
95222+ return symbol_string(buf, end, ptr, spec, fmt);
95223+#endif
95224+ case 'X':
95225+ ptr = dereference_function_descriptor(ptr);
95226+ case 'A':
95227 case 'B':
95228 return symbol_string(buf, end, ptr, spec, fmt);
95229 case 'R':
95230@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95231 va_end(va);
95232 return buf;
95233 }
95234+ case 'P':
95235+ break;
95236 case 'K':
95237 /*
95238 * %pK cannot be used in IRQ context because its test
95239@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95240 ((const struct file *)ptr)->f_path.dentry,
95241 spec, fmt);
95242 }
95243+
95244+#ifdef CONFIG_GRKERNSEC_HIDESYM
95245+ /* 'P' = approved pointers to copy to userland,
95246+ as in the /proc/kallsyms case, as we make it display nothing
95247+ for non-root users, and the real contents for root users
95248+ 'X' = approved simple symbols
95249+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95250+ above
95251+ */
95252+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95253+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95254+ dump_stack();
95255+ ptr = NULL;
95256+ }
95257+#endif
95258+
95259 spec.flags |= SMALL;
95260 if (spec.field_width == -1) {
95261 spec.field_width = default_width;
95262@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95263 typeof(type) value; \
95264 if (sizeof(type) == 8) { \
95265 args = PTR_ALIGN(args, sizeof(u32)); \
95266- *(u32 *)&value = *(u32 *)args; \
95267- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95268+ *(u32 *)&value = *(const u32 *)args; \
95269+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95270 } else { \
95271 args = PTR_ALIGN(args, sizeof(type)); \
95272- value = *(typeof(type) *)args; \
95273+ value = *(const typeof(type) *)args; \
95274 } \
95275 args += sizeof(type); \
95276 value; \
95277@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95278 case FORMAT_TYPE_STR: {
95279 const char *str_arg = args;
95280 args += strlen(str_arg) + 1;
95281- str = string(str, end, (char *)str_arg, spec);
95282+ str = string(str, end, str_arg, spec);
95283 break;
95284 }
95285
95286diff --git a/localversion-grsec b/localversion-grsec
95287new file mode 100644
95288index 0000000..7cd6065
95289--- /dev/null
95290+++ b/localversion-grsec
95291@@ -0,0 +1 @@
95292+-grsec
95293diff --git a/mm/Kconfig b/mm/Kconfig
95294index 1d1ae6b..0f05885 100644
95295--- a/mm/Kconfig
95296+++ b/mm/Kconfig
95297@@ -341,10 +341,11 @@ config KSM
95298 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95299
95300 config DEFAULT_MMAP_MIN_ADDR
95301- int "Low address space to protect from user allocation"
95302+ int "Low address space to protect from user allocation"
95303 depends on MMU
95304- default 4096
95305- help
95306+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95307+ default 65536
95308+ help
95309 This is the portion of low virtual memory which should be protected
95310 from userspace allocation. Keeping a user from writing to low pages
95311 can help reduce the impact of kernel NULL pointer bugs.
95312@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95313
95314 config HWPOISON_INJECT
95315 tristate "HWPoison pages injector"
95316- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95317+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95318 select PROC_PAGE_MONITOR
95319
95320 config NOMMU_INITIAL_TRIM_EXCESS
95321diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
95322index 957d3da..1d34e20 100644
95323--- a/mm/Kconfig.debug
95324+++ b/mm/Kconfig.debug
95325@@ -10,6 +10,7 @@ config PAGE_EXTENSION
95326 config DEBUG_PAGEALLOC
95327 bool "Debug page memory allocations"
95328 depends on DEBUG_KERNEL
95329+ depends on !PAX_MEMORY_SANITIZE
95330 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
95331 depends on !KMEMCHECK
95332 select PAGE_EXTENSION
95333diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95334index 0ae0df5..82ac56b 100644
95335--- a/mm/backing-dev.c
95336+++ b/mm/backing-dev.c
95337@@ -12,7 +12,7 @@
95338 #include <linux/device.h>
95339 #include <trace/events/writeback.h>
95340
95341-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95342+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95343
95344 struct backing_dev_info default_backing_dev_info = {
95345 .name = "default",
95346@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95347 return err;
95348
95349 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95350- atomic_long_inc_return(&bdi_seq));
95351+ atomic_long_inc_return_unchecked(&bdi_seq));
95352 if (err) {
95353 bdi_destroy(bdi);
95354 return err;
95355diff --git a/mm/filemap.c b/mm/filemap.c
95356index 673e458..7192013 100644
95357--- a/mm/filemap.c
95358+++ b/mm/filemap.c
95359@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95360 struct address_space *mapping = file->f_mapping;
95361
95362 if (!mapping->a_ops->readpage)
95363- return -ENOEXEC;
95364+ return -ENODEV;
95365 file_accessed(file);
95366 vma->vm_ops = &generic_file_vm_ops;
95367 return 0;
95368@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95369 *pos = i_size_read(inode);
95370
95371 if (limit != RLIM_INFINITY) {
95372+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95373 if (*pos >= limit) {
95374 send_sig(SIGXFSZ, current, 0);
95375 return -EFBIG;
95376diff --git a/mm/fremap.c b/mm/fremap.c
95377index 2805d71..8b56e7d 100644
95378--- a/mm/fremap.c
95379+++ b/mm/fremap.c
95380@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95381 retry:
95382 vma = find_vma(mm, start);
95383
95384+#ifdef CONFIG_PAX_SEGMEXEC
95385+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95386+ goto out;
95387+#endif
95388+
95389 /*
95390 * Make sure the vma is shared, that it supports prefaulting,
95391 * and that the remapped range is valid and fully within
95392diff --git a/mm/gup.c b/mm/gup.c
95393index 9b2afbf..647297c 100644
95394--- a/mm/gup.c
95395+++ b/mm/gup.c
95396@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95397 unsigned int fault_flags = 0;
95398 int ret;
95399
95400- /* For mlock, just skip the stack guard page. */
95401- if ((*flags & FOLL_MLOCK) &&
95402- (stack_guard_page_start(vma, address) ||
95403- stack_guard_page_end(vma, address + PAGE_SIZE)))
95404- return -ENOENT;
95405 if (*flags & FOLL_WRITE)
95406 fault_flags |= FAULT_FLAG_WRITE;
95407 if (nonblocking)
95408@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95409 if (!(gup_flags & FOLL_FORCE))
95410 gup_flags |= FOLL_NUMA;
95411
95412- do {
95413+ while (nr_pages) {
95414 struct page *page;
95415 unsigned int foll_flags = gup_flags;
95416 unsigned int page_increm;
95417
95418 /* first iteration or cross vma bound */
95419 if (!vma || start >= vma->vm_end) {
95420- vma = find_extend_vma(mm, start);
95421+ vma = find_vma(mm, start);
95422 if (!vma && in_gate_area(mm, start)) {
95423 int ret;
95424 ret = get_gate_page(mm, start & PAGE_MASK,
95425@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95426 goto next_page;
95427 }
95428
95429- if (!vma || check_vma_flags(vma, gup_flags))
95430+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95431 return i ? : -EFAULT;
95432 if (is_vm_hugetlb_page(vma)) {
95433 i = follow_hugetlb_page(mm, vma, pages, vmas,
95434@@ -518,7 +513,7 @@ next_page:
95435 i += page_increm;
95436 start += page_increm * PAGE_SIZE;
95437 nr_pages -= page_increm;
95438- } while (nr_pages);
95439+ }
95440 return i;
95441 }
95442 EXPORT_SYMBOL(__get_user_pages);
95443diff --git a/mm/highmem.c b/mm/highmem.c
95444index 123bcd3..0de52ba 100644
95445--- a/mm/highmem.c
95446+++ b/mm/highmem.c
95447@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95448 * So no dangers, even with speculative execution.
95449 */
95450 page = pte_page(pkmap_page_table[i]);
95451+ pax_open_kernel();
95452 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95453-
95454+ pax_close_kernel();
95455 set_page_address(page, NULL);
95456 need_flush = 1;
95457 }
95458@@ -259,9 +260,11 @@ start:
95459 }
95460 }
95461 vaddr = PKMAP_ADDR(last_pkmap_nr);
95462+
95463+ pax_open_kernel();
95464 set_pte_at(&init_mm, vaddr,
95465 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95466-
95467+ pax_close_kernel();
95468 pkmap_count[last_pkmap_nr] = 1;
95469 set_page_address(page, (void *)vaddr);
95470
95471diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95472index 267e419..394bed9 100644
95473--- a/mm/hugetlb.c
95474+++ b/mm/hugetlb.c
95475@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95476 struct ctl_table *table, int write,
95477 void __user *buffer, size_t *length, loff_t *ppos)
95478 {
95479+ ctl_table_no_const t;
95480 struct hstate *h = &default_hstate;
95481 unsigned long tmp = h->max_huge_pages;
95482 int ret;
95483@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95484 if (!hugepages_supported())
95485 return -ENOTSUPP;
95486
95487- table->data = &tmp;
95488- table->maxlen = sizeof(unsigned long);
95489- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95490+ t = *table;
95491+ t.data = &tmp;
95492+ t.maxlen = sizeof(unsigned long);
95493+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95494 if (ret)
95495 goto out;
95496
95497@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95498 struct hstate *h = &default_hstate;
95499 unsigned long tmp;
95500 int ret;
95501+ ctl_table_no_const hugetlb_table;
95502
95503 if (!hugepages_supported())
95504 return -ENOTSUPP;
95505@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95506 if (write && hstate_is_gigantic(h))
95507 return -EINVAL;
95508
95509- table->data = &tmp;
95510- table->maxlen = sizeof(unsigned long);
95511- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95512+ hugetlb_table = *table;
95513+ hugetlb_table.data = &tmp;
95514+ hugetlb_table.maxlen = sizeof(unsigned long);
95515+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95516 if (ret)
95517 goto out;
95518
95519@@ -2798,6 +2802,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95520 i_mmap_unlock_write(mapping);
95521 }
95522
95523+#ifdef CONFIG_PAX_SEGMEXEC
95524+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95525+{
95526+ struct mm_struct *mm = vma->vm_mm;
95527+ struct vm_area_struct *vma_m;
95528+ unsigned long address_m;
95529+ pte_t *ptep_m;
95530+
95531+ vma_m = pax_find_mirror_vma(vma);
95532+ if (!vma_m)
95533+ return;
95534+
95535+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95536+ address_m = address + SEGMEXEC_TASK_SIZE;
95537+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95538+ get_page(page_m);
95539+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95540+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95541+}
95542+#endif
95543+
95544 /*
95545 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95546 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95547@@ -2910,6 +2935,11 @@ retry_avoidcopy:
95548 make_huge_pte(vma, new_page, 1));
95549 page_remove_rmap(old_page);
95550 hugepage_add_new_anon_rmap(new_page, vma, address);
95551+
95552+#ifdef CONFIG_PAX_SEGMEXEC
95553+ pax_mirror_huge_pte(vma, address, new_page);
95554+#endif
95555+
95556 /* Make the old page be freed below */
95557 new_page = old_page;
95558 }
95559@@ -3070,6 +3100,10 @@ retry:
95560 && (vma->vm_flags & VM_SHARED)));
95561 set_huge_pte_at(mm, address, ptep, new_pte);
95562
95563+#ifdef CONFIG_PAX_SEGMEXEC
95564+ pax_mirror_huge_pte(vma, address, page);
95565+#endif
95566+
95567 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95568 /* Optimization, do the COW without a second fault */
95569 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95570@@ -3137,6 +3171,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95571 struct address_space *mapping;
95572 int need_wait_lock = 0;
95573
95574+#ifdef CONFIG_PAX_SEGMEXEC
95575+ struct vm_area_struct *vma_m;
95576+#endif
95577+
95578 address &= huge_page_mask(h);
95579
95580 ptep = huge_pte_offset(mm, address);
95581@@ -3150,6 +3188,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95582 VM_FAULT_SET_HINDEX(hstate_index(h));
95583 }
95584
95585+#ifdef CONFIG_PAX_SEGMEXEC
95586+ vma_m = pax_find_mirror_vma(vma);
95587+ if (vma_m) {
95588+ unsigned long address_m;
95589+
95590+ if (vma->vm_start > vma_m->vm_start) {
95591+ address_m = address;
95592+ address -= SEGMEXEC_TASK_SIZE;
95593+ vma = vma_m;
95594+ h = hstate_vma(vma);
95595+ } else
95596+ address_m = address + SEGMEXEC_TASK_SIZE;
95597+
95598+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95599+ return VM_FAULT_OOM;
95600+ address_m &= HPAGE_MASK;
95601+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95602+ }
95603+#endif
95604+
95605 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95606 if (!ptep)
95607 return VM_FAULT_OOM;
95608diff --git a/mm/internal.h b/mm/internal.h
95609index efad241..57ae4ca 100644
95610--- a/mm/internal.h
95611+++ b/mm/internal.h
95612@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95613
95614 extern int __isolate_free_page(struct page *page, unsigned int order);
95615 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95616+extern void free_compound_page(struct page *page);
95617 extern void prep_compound_page(struct page *page, unsigned long order);
95618 #ifdef CONFIG_MEMORY_FAILURE
95619 extern bool is_free_buddy_page(struct page *page);
95620@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
95621
95622 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95623 unsigned long, unsigned long,
95624- unsigned long, unsigned long);
95625+ unsigned long, unsigned long) __intentional_overflow(-1);
95626
95627 extern void set_pageblock_order(void);
95628 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95629diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95630index 3cda50c..032ba634 100644
95631--- a/mm/kmemleak.c
95632+++ b/mm/kmemleak.c
95633@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95634
95635 for (i = 0; i < object->trace_len; i++) {
95636 void *ptr = (void *)object->trace[i];
95637- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95638+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95639 }
95640 }
95641
95642@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95643 return -ENOMEM;
95644 }
95645
95646- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95647+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95648 &kmemleak_fops);
95649 if (!dentry)
95650 pr_warning("Failed to create the debugfs kmemleak file\n");
95651diff --git a/mm/maccess.c b/mm/maccess.c
95652index d53adf9..03a24bf 100644
95653--- a/mm/maccess.c
95654+++ b/mm/maccess.c
95655@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95656 set_fs(KERNEL_DS);
95657 pagefault_disable();
95658 ret = __copy_from_user_inatomic(dst,
95659- (__force const void __user *)src, size);
95660+ (const void __force_user *)src, size);
95661 pagefault_enable();
95662 set_fs(old_fs);
95663
95664@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95665
95666 set_fs(KERNEL_DS);
95667 pagefault_disable();
95668- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95669+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95670 pagefault_enable();
95671 set_fs(old_fs);
95672
95673diff --git a/mm/madvise.c b/mm/madvise.c
95674index a271adc..831d82f 100644
95675--- a/mm/madvise.c
95676+++ b/mm/madvise.c
95677@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95678 pgoff_t pgoff;
95679 unsigned long new_flags = vma->vm_flags;
95680
95681+#ifdef CONFIG_PAX_SEGMEXEC
95682+ struct vm_area_struct *vma_m;
95683+#endif
95684+
95685 switch (behavior) {
95686 case MADV_NORMAL:
95687 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95688@@ -126,6 +130,13 @@ success:
95689 /*
95690 * vm_flags is protected by the mmap_sem held in write mode.
95691 */
95692+
95693+#ifdef CONFIG_PAX_SEGMEXEC
95694+ vma_m = pax_find_mirror_vma(vma);
95695+ if (vma_m)
95696+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95697+#endif
95698+
95699 vma->vm_flags = new_flags;
95700
95701 out:
95702@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95703 struct vm_area_struct **prev,
95704 unsigned long start, unsigned long end)
95705 {
95706+
95707+#ifdef CONFIG_PAX_SEGMEXEC
95708+ struct vm_area_struct *vma_m;
95709+#endif
95710+
95711 *prev = vma;
95712 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95713 return -EINVAL;
95714@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95715 zap_page_range(vma, start, end - start, &details);
95716 } else
95717 zap_page_range(vma, start, end - start, NULL);
95718+
95719+#ifdef CONFIG_PAX_SEGMEXEC
95720+ vma_m = pax_find_mirror_vma(vma);
95721+ if (vma_m) {
95722+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95723+ struct zap_details details = {
95724+ .nonlinear_vma = vma_m,
95725+ .last_index = ULONG_MAX,
95726+ };
95727+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95728+ } else
95729+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95730+ }
95731+#endif
95732+
95733 return 0;
95734 }
95735
95736@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95737 if (end < start)
95738 return error;
95739
95740+#ifdef CONFIG_PAX_SEGMEXEC
95741+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95742+ if (end > SEGMEXEC_TASK_SIZE)
95743+ return error;
95744+ } else
95745+#endif
95746+
95747+ if (end > TASK_SIZE)
95748+ return error;
95749+
95750 error = 0;
95751 if (end == start)
95752 return error;
95753diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95754index 20c29dd..22bd8e2 100644
95755--- a/mm/memory-failure.c
95756+++ b/mm/memory-failure.c
95757@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95758
95759 int sysctl_memory_failure_recovery __read_mostly = 1;
95760
95761-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95762+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95763
95764 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95765
95766@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95767 pfn, t->comm, t->pid);
95768 si.si_signo = SIGBUS;
95769 si.si_errno = 0;
95770- si.si_addr = (void *)addr;
95771+ si.si_addr = (void __user *)addr;
95772 #ifdef __ARCH_SI_TRAPNO
95773 si.si_trapno = trapno;
95774 #endif
95775@@ -786,7 +786,7 @@ static struct page_state {
95776 unsigned long res;
95777 char *msg;
95778 int (*action)(struct page *p, unsigned long pfn);
95779-} error_states[] = {
95780+} __do_const error_states[] = {
95781 { reserved, reserved, "reserved kernel", me_kernel },
95782 /*
95783 * free pages are specially detected outside this table:
95784@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95785 nr_pages = 1 << compound_order(hpage);
95786 else /* normal page or thp */
95787 nr_pages = 1;
95788- atomic_long_add(nr_pages, &num_poisoned_pages);
95789+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95790
95791 /*
95792 * We need/can do nothing about count=0 pages.
95793@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95794 if (PageHWPoison(hpage)) {
95795 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95796 || (p != hpage && TestSetPageHWPoison(hpage))) {
95797- atomic_long_sub(nr_pages, &num_poisoned_pages);
95798+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95799 unlock_page(hpage);
95800 return 0;
95801 }
95802@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95803 */
95804 if (!PageHWPoison(p)) {
95805 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95806- atomic_long_sub(nr_pages, &num_poisoned_pages);
95807+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95808 put_page(hpage);
95809 res = 0;
95810 goto out;
95811 }
95812 if (hwpoison_filter(p)) {
95813 if (TestClearPageHWPoison(p))
95814- atomic_long_sub(nr_pages, &num_poisoned_pages);
95815+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95816 unlock_page(hpage);
95817 put_page(hpage);
95818 return 0;
95819@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
95820 return 0;
95821 }
95822 if (TestClearPageHWPoison(p))
95823- atomic_long_dec(&num_poisoned_pages);
95824+ atomic_long_dec_unchecked(&num_poisoned_pages);
95825 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95826 return 0;
95827 }
95828@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
95829 */
95830 if (TestClearPageHWPoison(page)) {
95831 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95832- atomic_long_sub(nr_pages, &num_poisoned_pages);
95833+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95834 freeit = 1;
95835 if (PageHuge(page))
95836 clear_page_hwpoison_huge_page(page);
95837@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
95838 if (PageHuge(page)) {
95839 set_page_hwpoison_huge_page(hpage);
95840 dequeue_hwpoisoned_huge_page(hpage);
95841- atomic_long_add(1 << compound_order(hpage),
95842+ atomic_long_add_unchecked(1 << compound_order(hpage),
95843 &num_poisoned_pages);
95844 } else {
95845 SetPageHWPoison(page);
95846- atomic_long_inc(&num_poisoned_pages);
95847+ atomic_long_inc_unchecked(&num_poisoned_pages);
95848 }
95849 }
95850 return ret;
95851@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
95852 put_page(page);
95853 pr_info("soft_offline: %#lx: invalidated\n", pfn);
95854 SetPageHWPoison(page);
95855- atomic_long_inc(&num_poisoned_pages);
95856+ atomic_long_inc_unchecked(&num_poisoned_pages);
95857 return 0;
95858 }
95859
95860@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
95861 if (!is_free_buddy_page(page))
95862 pr_info("soft offline: %#lx: page leaked\n",
95863 pfn);
95864- atomic_long_inc(&num_poisoned_pages);
95865+ atomic_long_inc_unchecked(&num_poisoned_pages);
95866 }
95867 } else {
95868 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
95869@@ -1729,11 +1729,11 @@ int soft_offline_page(struct page *page, int flags)
95870 if (PageHuge(page)) {
95871 set_page_hwpoison_huge_page(hpage);
95872 dequeue_hwpoisoned_huge_page(hpage);
95873- atomic_long_add(1 << compound_order(hpage),
95874+ atomic_long_add_unchecked(1 << compound_order(hpage),
95875 &num_poisoned_pages);
95876 } else {
95877 SetPageHWPoison(page);
95878- atomic_long_inc(&num_poisoned_pages);
95879+ atomic_long_inc_unchecked(&num_poisoned_pages);
95880 }
95881 }
95882 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
95883diff --git a/mm/memory.c b/mm/memory.c
95884index 6aa7822..3c76005 100644
95885--- a/mm/memory.c
95886+++ b/mm/memory.c
95887@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95888 free_pte_range(tlb, pmd, addr);
95889 } while (pmd++, addr = next, addr != end);
95890
95891+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
95892 start &= PUD_MASK;
95893 if (start < floor)
95894 return;
95895@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95896 pmd = pmd_offset(pud, start);
95897 pud_clear(pud);
95898 pmd_free_tlb(tlb, pmd, start);
95899+#endif
95900+
95901 }
95902
95903 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95904@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95905 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
95906 } while (pud++, addr = next, addr != end);
95907
95908+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
95909 start &= PGDIR_MASK;
95910 if (start < floor)
95911 return;
95912@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95913 pud = pud_offset(pgd, start);
95914 pgd_clear(pgd);
95915 pud_free_tlb(tlb, pud, start);
95916+#endif
95917+
95918 }
95919
95920 /*
95921@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
95922 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
95923 */
95924 if (vma->vm_ops)
95925- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
95926+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
95927 vma->vm_ops->fault);
95928 if (vma->vm_file)
95929- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
95930+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
95931 vma->vm_file->f_op->mmap);
95932 dump_stack();
95933 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
95934@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
95935 page_add_file_rmap(page);
95936 set_pte_at(mm, addr, pte, mk_pte(page, prot));
95937
95938+#ifdef CONFIG_PAX_SEGMEXEC
95939+ pax_mirror_file_pte(vma, addr, page, ptl);
95940+#endif
95941+
95942 retval = 0;
95943 pte_unmap_unlock(pte, ptl);
95944 return retval;
95945@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
95946 if (!page_count(page))
95947 return -EINVAL;
95948 if (!(vma->vm_flags & VM_MIXEDMAP)) {
95949+
95950+#ifdef CONFIG_PAX_SEGMEXEC
95951+ struct vm_area_struct *vma_m;
95952+#endif
95953+
95954 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
95955 BUG_ON(vma->vm_flags & VM_PFNMAP);
95956 vma->vm_flags |= VM_MIXEDMAP;
95957+
95958+#ifdef CONFIG_PAX_SEGMEXEC
95959+ vma_m = pax_find_mirror_vma(vma);
95960+ if (vma_m)
95961+ vma_m->vm_flags |= VM_MIXEDMAP;
95962+#endif
95963+
95964 }
95965 return insert_page(vma, addr, page, vma->vm_page_prot);
95966 }
95967@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
95968 unsigned long pfn)
95969 {
95970 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
95971+ BUG_ON(vma->vm_mirror);
95972
95973 if (addr < vma->vm_start || addr >= vma->vm_end)
95974 return -EFAULT;
95975@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
95976
95977 BUG_ON(pud_huge(*pud));
95978
95979- pmd = pmd_alloc(mm, pud, addr);
95980+ pmd = (mm == &init_mm) ?
95981+ pmd_alloc_kernel(mm, pud, addr) :
95982+ pmd_alloc(mm, pud, addr);
95983 if (!pmd)
95984 return -ENOMEM;
95985 do {
95986@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
95987 unsigned long next;
95988 int err;
95989
95990- pud = pud_alloc(mm, pgd, addr);
95991+ pud = (mm == &init_mm) ?
95992+ pud_alloc_kernel(mm, pgd, addr) :
95993+ pud_alloc(mm, pgd, addr);
95994 if (!pud)
95995 return -ENOMEM;
95996 do {
95997@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
95998 return ret;
95999 }
96000
96001+#ifdef CONFIG_PAX_SEGMEXEC
96002+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96003+{
96004+ struct mm_struct *mm = vma->vm_mm;
96005+ spinlock_t *ptl;
96006+ pte_t *pte, entry;
96007+
96008+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96009+ entry = *pte;
96010+ if (!pte_present(entry)) {
96011+ if (!pte_none(entry)) {
96012+ BUG_ON(pte_file(entry));
96013+ free_swap_and_cache(pte_to_swp_entry(entry));
96014+ pte_clear_not_present_full(mm, address, pte, 0);
96015+ }
96016+ } else {
96017+ struct page *page;
96018+
96019+ flush_cache_page(vma, address, pte_pfn(entry));
96020+ entry = ptep_clear_flush(vma, address, pte);
96021+ BUG_ON(pte_dirty(entry));
96022+ page = vm_normal_page(vma, address, entry);
96023+ if (page) {
96024+ update_hiwater_rss(mm);
96025+ if (PageAnon(page))
96026+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96027+ else
96028+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96029+ page_remove_rmap(page);
96030+ page_cache_release(page);
96031+ }
96032+ }
96033+ pte_unmap_unlock(pte, ptl);
96034+}
96035+
96036+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96037+ *
96038+ * the ptl of the lower mapped page is held on entry and is not released on exit
96039+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96040+ */
96041+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96042+{
96043+ struct mm_struct *mm = vma->vm_mm;
96044+ unsigned long address_m;
96045+ spinlock_t *ptl_m;
96046+ struct vm_area_struct *vma_m;
96047+ pmd_t *pmd_m;
96048+ pte_t *pte_m, entry_m;
96049+
96050+ BUG_ON(!page_m || !PageAnon(page_m));
96051+
96052+ vma_m = pax_find_mirror_vma(vma);
96053+ if (!vma_m)
96054+ return;
96055+
96056+ BUG_ON(!PageLocked(page_m));
96057+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96058+ address_m = address + SEGMEXEC_TASK_SIZE;
96059+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96060+ pte_m = pte_offset_map(pmd_m, address_m);
96061+ ptl_m = pte_lockptr(mm, pmd_m);
96062+ if (ptl != ptl_m) {
96063+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96064+ if (!pte_none(*pte_m))
96065+ goto out;
96066+ }
96067+
96068+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96069+ page_cache_get(page_m);
96070+ page_add_anon_rmap(page_m, vma_m, address_m);
96071+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96072+ set_pte_at(mm, address_m, pte_m, entry_m);
96073+ update_mmu_cache(vma_m, address_m, pte_m);
96074+out:
96075+ if (ptl != ptl_m)
96076+ spin_unlock(ptl_m);
96077+ pte_unmap(pte_m);
96078+ unlock_page(page_m);
96079+}
96080+
96081+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96082+{
96083+ struct mm_struct *mm = vma->vm_mm;
96084+ unsigned long address_m;
96085+ spinlock_t *ptl_m;
96086+ struct vm_area_struct *vma_m;
96087+ pmd_t *pmd_m;
96088+ pte_t *pte_m, entry_m;
96089+
96090+ BUG_ON(!page_m || PageAnon(page_m));
96091+
96092+ vma_m = pax_find_mirror_vma(vma);
96093+ if (!vma_m)
96094+ return;
96095+
96096+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96097+ address_m = address + SEGMEXEC_TASK_SIZE;
96098+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96099+ pte_m = pte_offset_map(pmd_m, address_m);
96100+ ptl_m = pte_lockptr(mm, pmd_m);
96101+ if (ptl != ptl_m) {
96102+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96103+ if (!pte_none(*pte_m))
96104+ goto out;
96105+ }
96106+
96107+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96108+ page_cache_get(page_m);
96109+ page_add_file_rmap(page_m);
96110+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96111+ set_pte_at(mm, address_m, pte_m, entry_m);
96112+ update_mmu_cache(vma_m, address_m, pte_m);
96113+out:
96114+ if (ptl != ptl_m)
96115+ spin_unlock(ptl_m);
96116+ pte_unmap(pte_m);
96117+}
96118+
96119+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96120+{
96121+ struct mm_struct *mm = vma->vm_mm;
96122+ unsigned long address_m;
96123+ spinlock_t *ptl_m;
96124+ struct vm_area_struct *vma_m;
96125+ pmd_t *pmd_m;
96126+ pte_t *pte_m, entry_m;
96127+
96128+ vma_m = pax_find_mirror_vma(vma);
96129+ if (!vma_m)
96130+ return;
96131+
96132+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96133+ address_m = address + SEGMEXEC_TASK_SIZE;
96134+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96135+ pte_m = pte_offset_map(pmd_m, address_m);
96136+ ptl_m = pte_lockptr(mm, pmd_m);
96137+ if (ptl != ptl_m) {
96138+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96139+ if (!pte_none(*pte_m))
96140+ goto out;
96141+ }
96142+
96143+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96144+ set_pte_at(mm, address_m, pte_m, entry_m);
96145+out:
96146+ if (ptl != ptl_m)
96147+ spin_unlock(ptl_m);
96148+ pte_unmap(pte_m);
96149+}
96150+
96151+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96152+{
96153+ struct page *page_m;
96154+ pte_t entry;
96155+
96156+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96157+ goto out;
96158+
96159+ entry = *pte;
96160+ page_m = vm_normal_page(vma, address, entry);
96161+ if (!page_m)
96162+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96163+ else if (PageAnon(page_m)) {
96164+ if (pax_find_mirror_vma(vma)) {
96165+ pte_unmap_unlock(pte, ptl);
96166+ lock_page(page_m);
96167+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96168+ if (pte_same(entry, *pte))
96169+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96170+ else
96171+ unlock_page(page_m);
96172+ }
96173+ } else
96174+ pax_mirror_file_pte(vma, address, page_m, ptl);
96175+
96176+out:
96177+ pte_unmap_unlock(pte, ptl);
96178+}
96179+#endif
96180+
96181 /*
96182 * This routine handles present pages, when users try to write
96183 * to a shared page. It is done by copying the page to a new address
96184@@ -2212,6 +2419,12 @@ gotten:
96185 */
96186 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96187 if (likely(pte_same(*page_table, orig_pte))) {
96188+
96189+#ifdef CONFIG_PAX_SEGMEXEC
96190+ if (pax_find_mirror_vma(vma))
96191+ BUG_ON(!trylock_page(new_page));
96192+#endif
96193+
96194 if (old_page) {
96195 if (!PageAnon(old_page)) {
96196 dec_mm_counter_fast(mm, MM_FILEPAGES);
96197@@ -2265,6 +2478,10 @@ gotten:
96198 page_remove_rmap(old_page);
96199 }
96200
96201+#ifdef CONFIG_PAX_SEGMEXEC
96202+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96203+#endif
96204+
96205 /* Free the old page.. */
96206 new_page = old_page;
96207 ret |= VM_FAULT_WRITE;
96208@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96209 swap_free(entry);
96210 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96211 try_to_free_swap(page);
96212+
96213+#ifdef CONFIG_PAX_SEGMEXEC
96214+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96215+#endif
96216+
96217 unlock_page(page);
96218 if (page != swapcache) {
96219 /*
96220@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96221
96222 /* No need to invalidate - it was non-present before */
96223 update_mmu_cache(vma, address, page_table);
96224+
96225+#ifdef CONFIG_PAX_SEGMEXEC
96226+ pax_mirror_anon_pte(vma, address, page, ptl);
96227+#endif
96228+
96229 unlock:
96230 pte_unmap_unlock(page_table, ptl);
96231 out:
96232@@ -2581,40 +2808,6 @@ out_release:
96233 }
96234
96235 /*
96236- * This is like a special single-page "expand_{down|up}wards()",
96237- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96238- * doesn't hit another vma.
96239- */
96240-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96241-{
96242- address &= PAGE_MASK;
96243- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96244- struct vm_area_struct *prev = vma->vm_prev;
96245-
96246- /*
96247- * Is there a mapping abutting this one below?
96248- *
96249- * That's only ok if it's the same stack mapping
96250- * that has gotten split..
96251- */
96252- if (prev && prev->vm_end == address)
96253- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96254-
96255- return expand_downwards(vma, address - PAGE_SIZE);
96256- }
96257- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96258- struct vm_area_struct *next = vma->vm_next;
96259-
96260- /* As VM_GROWSDOWN but s/below/above/ */
96261- if (next && next->vm_start == address + PAGE_SIZE)
96262- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96263-
96264- return expand_upwards(vma, address + PAGE_SIZE);
96265- }
96266- return 0;
96267-}
96268-
96269-/*
96270 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96271 * but allow concurrent faults), and pte mapped but not yet locked.
96272 * We return with mmap_sem still held, but pte unmapped and unlocked.
96273@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96274 unsigned int flags)
96275 {
96276 struct mem_cgroup *memcg;
96277- struct page *page;
96278+ struct page *page = NULL;
96279 spinlock_t *ptl;
96280 pte_t entry;
96281
96282- pte_unmap(page_table);
96283-
96284- /* Check if we need to add a guard page to the stack */
96285- if (check_stack_guard_page(vma, address) < 0)
96286- return VM_FAULT_SIGSEGV;
96287-
96288- /* Use the zero-page for reads */
96289 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96290 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96291 vma->vm_page_prot));
96292- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96293+ ptl = pte_lockptr(mm, pmd);
96294+ spin_lock(ptl);
96295 if (!pte_none(*page_table))
96296 goto unlock;
96297 goto setpte;
96298 }
96299
96300 /* Allocate our own private page. */
96301+ pte_unmap(page_table);
96302+
96303 if (unlikely(anon_vma_prepare(vma)))
96304 goto oom;
96305 page = alloc_zeroed_user_highpage_movable(vma, address);
96306@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96307 if (!pte_none(*page_table))
96308 goto release;
96309
96310+#ifdef CONFIG_PAX_SEGMEXEC
96311+ if (pax_find_mirror_vma(vma))
96312+ BUG_ON(!trylock_page(page));
96313+#endif
96314+
96315 inc_mm_counter_fast(mm, MM_ANONPAGES);
96316 page_add_new_anon_rmap(page, vma, address);
96317 mem_cgroup_commit_charge(page, memcg, false);
96318@@ -2677,6 +2871,12 @@ setpte:
96319
96320 /* No need to invalidate - it was non-present before */
96321 update_mmu_cache(vma, address, page_table);
96322+
96323+#ifdef CONFIG_PAX_SEGMEXEC
96324+ if (page)
96325+ pax_mirror_anon_pte(vma, address, page, ptl);
96326+#endif
96327+
96328 unlock:
96329 pte_unmap_unlock(page_table, ptl);
96330 return 0;
96331@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96332 return ret;
96333 }
96334 do_set_pte(vma, address, fault_page, pte, false, false);
96335+
96336+#ifdef CONFIG_PAX_SEGMEXEC
96337+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96338+#endif
96339+
96340 unlock_page(fault_page);
96341 unlock_out:
96342 pte_unmap_unlock(pte, ptl);
96343@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96344 page_cache_release(fault_page);
96345 goto uncharge_out;
96346 }
96347+
96348+#ifdef CONFIG_PAX_SEGMEXEC
96349+ if (pax_find_mirror_vma(vma))
96350+ BUG_ON(!trylock_page(new_page));
96351+#endif
96352+
96353 do_set_pte(vma, address, new_page, pte, true, true);
96354+
96355+#ifdef CONFIG_PAX_SEGMEXEC
96356+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96357+#endif
96358+
96359 mem_cgroup_commit_charge(new_page, memcg, false);
96360 lru_cache_add_active_or_unevictable(new_page, vma);
96361 pte_unmap_unlock(pte, ptl);
96362@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96363 return ret;
96364 }
96365 do_set_pte(vma, address, fault_page, pte, true, false);
96366+
96367+#ifdef CONFIG_PAX_SEGMEXEC
96368+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96369+#endif
96370+
96371 pte_unmap_unlock(pte, ptl);
96372
96373 if (set_page_dirty(fault_page))
96374@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96375 if (flags & FAULT_FLAG_WRITE)
96376 flush_tlb_fix_spurious_fault(vma, address);
96377 }
96378+
96379+#ifdef CONFIG_PAX_SEGMEXEC
96380+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96381+ return 0;
96382+#endif
96383+
96384 unlock:
96385 pte_unmap_unlock(pte, ptl);
96386 return 0;
96387@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96388 pmd_t *pmd;
96389 pte_t *pte;
96390
96391+#ifdef CONFIG_PAX_SEGMEXEC
96392+ struct vm_area_struct *vma_m;
96393+#endif
96394+
96395 if (unlikely(is_vm_hugetlb_page(vma)))
96396 return hugetlb_fault(mm, vma, address, flags);
96397
96398+#ifdef CONFIG_PAX_SEGMEXEC
96399+ vma_m = pax_find_mirror_vma(vma);
96400+ if (vma_m) {
96401+ unsigned long address_m;
96402+ pgd_t *pgd_m;
96403+ pud_t *pud_m;
96404+ pmd_t *pmd_m;
96405+
96406+ if (vma->vm_start > vma_m->vm_start) {
96407+ address_m = address;
96408+ address -= SEGMEXEC_TASK_SIZE;
96409+ vma = vma_m;
96410+ } else
96411+ address_m = address + SEGMEXEC_TASK_SIZE;
96412+
96413+ pgd_m = pgd_offset(mm, address_m);
96414+ pud_m = pud_alloc(mm, pgd_m, address_m);
96415+ if (!pud_m)
96416+ return VM_FAULT_OOM;
96417+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96418+ if (!pmd_m)
96419+ return VM_FAULT_OOM;
96420+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96421+ return VM_FAULT_OOM;
96422+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96423+ }
96424+#endif
96425+
96426 pgd = pgd_offset(mm, address);
96427 pud = pud_alloc(mm, pgd, address);
96428 if (!pud)
96429@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96430 spin_unlock(&mm->page_table_lock);
96431 return 0;
96432 }
96433+
96434+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96435+{
96436+ pud_t *new = pud_alloc_one(mm, address);
96437+ if (!new)
96438+ return -ENOMEM;
96439+
96440+ smp_wmb(); /* See comment in __pte_alloc */
96441+
96442+ spin_lock(&mm->page_table_lock);
96443+ if (pgd_present(*pgd)) /* Another has populated it */
96444+ pud_free(mm, new);
96445+ else
96446+ pgd_populate_kernel(mm, pgd, new);
96447+ spin_unlock(&mm->page_table_lock);
96448+ return 0;
96449+}
96450 #endif /* __PAGETABLE_PUD_FOLDED */
96451
96452 #ifndef __PAGETABLE_PMD_FOLDED
96453@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96454 spin_unlock(&mm->page_table_lock);
96455 return 0;
96456 }
96457+
96458+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96459+{
96460+ pmd_t *new = pmd_alloc_one(mm, address);
96461+ if (!new)
96462+ return -ENOMEM;
96463+
96464+ smp_wmb(); /* See comment in __pte_alloc */
96465+
96466+ spin_lock(&mm->page_table_lock);
96467+#ifndef __ARCH_HAS_4LEVEL_HACK
96468+ if (pud_present(*pud)) /* Another has populated it */
96469+ pmd_free(mm, new);
96470+ else
96471+ pud_populate_kernel(mm, pud, new);
96472+#else
96473+ if (pgd_present(*pud)) /* Another has populated it */
96474+ pmd_free(mm, new);
96475+ else
96476+ pgd_populate_kernel(mm, pud, new);
96477+#endif /* __ARCH_HAS_4LEVEL_HACK */
96478+ spin_unlock(&mm->page_table_lock);
96479+ return 0;
96480+}
96481 #endif /* __PAGETABLE_PMD_FOLDED */
96482
96483 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96484@@ -3550,8 +3850,8 @@ out:
96485 return ret;
96486 }
96487
96488-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96489- void *buf, int len, int write)
96490+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96491+ void *buf, size_t len, int write)
96492 {
96493 resource_size_t phys_addr;
96494 unsigned long prot = 0;
96495@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96496 * Access another process' address space as given in mm. If non-NULL, use the
96497 * given task for page fault accounting.
96498 */
96499-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96500- unsigned long addr, void *buf, int len, int write)
96501+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96502+ unsigned long addr, void *buf, size_t len, int write)
96503 {
96504 struct vm_area_struct *vma;
96505 void *old_buf = buf;
96506@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96507 down_read(&mm->mmap_sem);
96508 /* ignore errors, just check how much was successfully transferred */
96509 while (len) {
96510- int bytes, ret, offset;
96511+ ssize_t bytes, ret, offset;
96512 void *maddr;
96513 struct page *page = NULL;
96514
96515@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96516 *
96517 * The caller must hold a reference on @mm.
96518 */
96519-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96520- void *buf, int len, int write)
96521+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96522+ void *buf, size_t len, int write)
96523 {
96524 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96525 }
96526@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96527 * Source/target buffer must be kernel space,
96528 * Do not walk the page table directly, use get_user_pages
96529 */
96530-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96531- void *buf, int len, int write)
96532+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96533+ void *buf, size_t len, int write)
96534 {
96535 struct mm_struct *mm;
96536- int ret;
96537+ ssize_t ret;
96538
96539 mm = get_task_mm(tsk);
96540 if (!mm)
96541diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96542index 0e0961b..c9143b9 100644
96543--- a/mm/mempolicy.c
96544+++ b/mm/mempolicy.c
96545@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96546 unsigned long vmstart;
96547 unsigned long vmend;
96548
96549+#ifdef CONFIG_PAX_SEGMEXEC
96550+ struct vm_area_struct *vma_m;
96551+#endif
96552+
96553 vma = find_vma(mm, start);
96554 if (!vma || vma->vm_start > start)
96555 return -EFAULT;
96556@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96557 err = vma_replace_policy(vma, new_pol);
96558 if (err)
96559 goto out;
96560+
96561+#ifdef CONFIG_PAX_SEGMEXEC
96562+ vma_m = pax_find_mirror_vma(vma);
96563+ if (vma_m) {
96564+ err = vma_replace_policy(vma_m, new_pol);
96565+ if (err)
96566+ goto out;
96567+ }
96568+#endif
96569+
96570 }
96571
96572 out:
96573@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96574
96575 if (end < start)
96576 return -EINVAL;
96577+
96578+#ifdef CONFIG_PAX_SEGMEXEC
96579+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96580+ if (end > SEGMEXEC_TASK_SIZE)
96581+ return -EINVAL;
96582+ } else
96583+#endif
96584+
96585+ if (end > TASK_SIZE)
96586+ return -EINVAL;
96587+
96588 if (end == start)
96589 return 0;
96590
96591@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96592 */
96593 tcred = __task_cred(task);
96594 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96595- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96596- !capable(CAP_SYS_NICE)) {
96597+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96598 rcu_read_unlock();
96599 err = -EPERM;
96600 goto out_put;
96601@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96602 goto out;
96603 }
96604
96605+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96606+ if (mm != current->mm &&
96607+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96608+ mmput(mm);
96609+ err = -EPERM;
96610+ goto out;
96611+ }
96612+#endif
96613+
96614 err = do_migrate_pages(mm, old, new,
96615 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96616
96617diff --git a/mm/migrate.c b/mm/migrate.c
96618index 344cdf6..07399500 100644
96619--- a/mm/migrate.c
96620+++ b/mm/migrate.c
96621@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96622 */
96623 tcred = __task_cred(task);
96624 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96625- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96626- !capable(CAP_SYS_NICE)) {
96627+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96628 rcu_read_unlock();
96629 err = -EPERM;
96630 goto out;
96631diff --git a/mm/mlock.c b/mm/mlock.c
96632index 73cf098..ab547c7 100644
96633--- a/mm/mlock.c
96634+++ b/mm/mlock.c
96635@@ -14,6 +14,7 @@
96636 #include <linux/pagevec.h>
96637 #include <linux/mempolicy.h>
96638 #include <linux/syscalls.h>
96639+#include <linux/security.h>
96640 #include <linux/sched.h>
96641 #include <linux/export.h>
96642 #include <linux/rmap.h>
96643@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96644 {
96645 unsigned long nstart, end, tmp;
96646 struct vm_area_struct * vma, * prev;
96647- int error;
96648+ int error = 0;
96649
96650 VM_BUG_ON(start & ~PAGE_MASK);
96651 VM_BUG_ON(len != PAGE_ALIGN(len));
96652@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96653 return -EINVAL;
96654 if (end == start)
96655 return 0;
96656+ if (end > TASK_SIZE)
96657+ return -EINVAL;
96658+
96659 vma = find_vma(current->mm, start);
96660 if (!vma || vma->vm_start > start)
96661 return -ENOMEM;
96662@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96663 for (nstart = start ; ; ) {
96664 vm_flags_t newflags;
96665
96666+#ifdef CONFIG_PAX_SEGMEXEC
96667+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96668+ break;
96669+#endif
96670+
96671 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96672
96673 newflags = vma->vm_flags & ~VM_LOCKED;
96674@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96675 locked += current->mm->locked_vm;
96676
96677 /* check against resource limits */
96678+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96679 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96680 error = do_mlock(start, len, 1);
96681
96682@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96683 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96684 vm_flags_t newflags;
96685
96686+#ifdef CONFIG_PAX_SEGMEXEC
96687+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96688+ break;
96689+#endif
96690+
96691 newflags = vma->vm_flags & ~VM_LOCKED;
96692 if (flags & MCL_CURRENT)
96693 newflags |= VM_LOCKED;
96694@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96695 lock_limit >>= PAGE_SHIFT;
96696
96697 ret = -ENOMEM;
96698+
96699+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96700+
96701 down_write(&current->mm->mmap_sem);
96702-
96703 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96704 capable(CAP_IPC_LOCK))
96705 ret = do_mlockall(flags);
96706diff --git a/mm/mmap.c b/mm/mmap.c
96707index e5cc3ca..bb9333f 100644
96708--- a/mm/mmap.c
96709+++ b/mm/mmap.c
96710@@ -41,6 +41,7 @@
96711 #include <linux/notifier.h>
96712 #include <linux/memory.h>
96713 #include <linux/printk.h>
96714+#include <linux/random.h>
96715
96716 #include <asm/uaccess.h>
96717 #include <asm/cacheflush.h>
96718@@ -57,6 +58,16 @@
96719 #define arch_rebalance_pgtables(addr, len) (addr)
96720 #endif
96721
96722+static inline void verify_mm_writelocked(struct mm_struct *mm)
96723+{
96724+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96725+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96726+ up_read(&mm->mmap_sem);
96727+ BUG();
96728+ }
96729+#endif
96730+}
96731+
96732 static void unmap_region(struct mm_struct *mm,
96733 struct vm_area_struct *vma, struct vm_area_struct *prev,
96734 unsigned long start, unsigned long end);
96735@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96736 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96737 *
96738 */
96739-pgprot_t protection_map[16] = {
96740+pgprot_t protection_map[16] __read_only = {
96741 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96742 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96743 };
96744
96745-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96746+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96747 {
96748- return __pgprot(pgprot_val(protection_map[vm_flags &
96749+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96750 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96751 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96752+
96753+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96754+ if (!(__supported_pte_mask & _PAGE_NX) &&
96755+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96756+ (vm_flags & (VM_READ | VM_WRITE)))
96757+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96758+#endif
96759+
96760+ return prot;
96761 }
96762 EXPORT_SYMBOL(vm_get_page_prot);
96763
96764@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96765 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96766 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96767 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96768+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96769 /*
96770 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96771 * other variables. It can be updated by several CPUs frequently.
96772@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96773 struct vm_area_struct *next = vma->vm_next;
96774
96775 might_sleep();
96776+ BUG_ON(vma->vm_mirror);
96777 if (vma->vm_ops && vma->vm_ops->close)
96778 vma->vm_ops->close(vma);
96779 if (vma->vm_file)
96780@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
96781
96782 SYSCALL_DEFINE1(brk, unsigned long, brk)
96783 {
96784+ unsigned long rlim;
96785 unsigned long retval;
96786 unsigned long newbrk, oldbrk;
96787 struct mm_struct *mm = current->mm;
96788@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96789 * segment grow beyond its set limit the in case where the limit is
96790 * not page aligned -Ram Gupta
96791 */
96792- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
96793+ rlim = rlimit(RLIMIT_DATA);
96794+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96795+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96796+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96797+ rlim = 4096 * PAGE_SIZE;
96798+#endif
96799+ if (check_data_rlimit(rlim, brk, mm->start_brk,
96800 mm->end_data, mm->start_data))
96801 goto out;
96802
96803@@ -978,6 +1007,12 @@ static int
96804 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96805 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96806 {
96807+
96808+#ifdef CONFIG_PAX_SEGMEXEC
96809+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96810+ return 0;
96811+#endif
96812+
96813 if (is_mergeable_vma(vma, file, vm_flags) &&
96814 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96815 if (vma->vm_pgoff == vm_pgoff)
96816@@ -997,6 +1032,12 @@ static int
96817 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96818 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96819 {
96820+
96821+#ifdef CONFIG_PAX_SEGMEXEC
96822+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96823+ return 0;
96824+#endif
96825+
96826 if (is_mergeable_vma(vma, file, vm_flags) &&
96827 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96828 pgoff_t vm_pglen;
96829@@ -1046,6 +1087,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96830 struct vm_area_struct *area, *next;
96831 int err;
96832
96833+#ifdef CONFIG_PAX_SEGMEXEC
96834+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96835+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96836+
96837+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96838+#endif
96839+
96840 /*
96841 * We later require that vma->vm_flags == vm_flags,
96842 * so this tests vma->vm_flags & VM_SPECIAL, too.
96843@@ -1061,6 +1109,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96844 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96845 next = next->vm_next;
96846
96847+#ifdef CONFIG_PAX_SEGMEXEC
96848+ if (prev)
96849+ prev_m = pax_find_mirror_vma(prev);
96850+ if (area)
96851+ area_m = pax_find_mirror_vma(area);
96852+ if (next)
96853+ next_m = pax_find_mirror_vma(next);
96854+#endif
96855+
96856 /*
96857 * Can it merge with the predecessor?
96858 */
96859@@ -1080,9 +1137,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96860 /* cases 1, 6 */
96861 err = vma_adjust(prev, prev->vm_start,
96862 next->vm_end, prev->vm_pgoff, NULL);
96863- } else /* cases 2, 5, 7 */
96864+
96865+#ifdef CONFIG_PAX_SEGMEXEC
96866+ if (!err && prev_m)
96867+ err = vma_adjust(prev_m, prev_m->vm_start,
96868+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96869+#endif
96870+
96871+ } else { /* cases 2, 5, 7 */
96872 err = vma_adjust(prev, prev->vm_start,
96873 end, prev->vm_pgoff, NULL);
96874+
96875+#ifdef CONFIG_PAX_SEGMEXEC
96876+ if (!err && prev_m)
96877+ err = vma_adjust(prev_m, prev_m->vm_start,
96878+ end_m, prev_m->vm_pgoff, NULL);
96879+#endif
96880+
96881+ }
96882 if (err)
96883 return NULL;
96884 khugepaged_enter_vma_merge(prev, vm_flags);
96885@@ -1096,12 +1168,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96886 mpol_equal(policy, vma_policy(next)) &&
96887 can_vma_merge_before(next, vm_flags,
96888 anon_vma, file, pgoff+pglen)) {
96889- if (prev && addr < prev->vm_end) /* case 4 */
96890+ if (prev && addr < prev->vm_end) { /* case 4 */
96891 err = vma_adjust(prev, prev->vm_start,
96892 addr, prev->vm_pgoff, NULL);
96893- else /* cases 3, 8 */
96894+
96895+#ifdef CONFIG_PAX_SEGMEXEC
96896+ if (!err && prev_m)
96897+ err = vma_adjust(prev_m, prev_m->vm_start,
96898+ addr_m, prev_m->vm_pgoff, NULL);
96899+#endif
96900+
96901+ } else { /* cases 3, 8 */
96902 err = vma_adjust(area, addr, next->vm_end,
96903 next->vm_pgoff - pglen, NULL);
96904+
96905+#ifdef CONFIG_PAX_SEGMEXEC
96906+ if (!err && area_m)
96907+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
96908+ next_m->vm_pgoff - pglen, NULL);
96909+#endif
96910+
96911+ }
96912 if (err)
96913 return NULL;
96914 khugepaged_enter_vma_merge(area, vm_flags);
96915@@ -1210,8 +1297,10 @@ none:
96916 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96917 struct file *file, long pages)
96918 {
96919- const unsigned long stack_flags
96920- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
96921+
96922+#ifdef CONFIG_PAX_RANDMMAP
96923+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
96924+#endif
96925
96926 mm->total_vm += pages;
96927
96928@@ -1219,7 +1308,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96929 mm->shared_vm += pages;
96930 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
96931 mm->exec_vm += pages;
96932- } else if (flags & stack_flags)
96933+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
96934 mm->stack_vm += pages;
96935 }
96936 #endif /* CONFIG_PROC_FS */
96937@@ -1249,6 +1338,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
96938 locked += mm->locked_vm;
96939 lock_limit = rlimit(RLIMIT_MEMLOCK);
96940 lock_limit >>= PAGE_SHIFT;
96941+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
96942 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
96943 return -EAGAIN;
96944 }
96945@@ -1275,7 +1365,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96946 * (the exception is when the underlying filesystem is noexec
96947 * mounted, in which case we dont add PROT_EXEC.)
96948 */
96949- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
96950+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
96951 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
96952 prot |= PROT_EXEC;
96953
96954@@ -1301,7 +1391,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96955 /* Obtain the address to map to. we verify (or select) it and ensure
96956 * that it represents a valid section of the address space.
96957 */
96958- addr = get_unmapped_area(file, addr, len, pgoff, flags);
96959+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
96960 if (addr & ~PAGE_MASK)
96961 return addr;
96962
96963@@ -1312,6 +1402,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96964 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
96965 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
96966
96967+#ifdef CONFIG_PAX_MPROTECT
96968+ if (mm->pax_flags & MF_PAX_MPROTECT) {
96969+
96970+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
96971+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
96972+ mm->binfmt->handle_mmap)
96973+ mm->binfmt->handle_mmap(file);
96974+#endif
96975+
96976+#ifndef CONFIG_PAX_MPROTECT_COMPAT
96977+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
96978+ gr_log_rwxmmap(file);
96979+
96980+#ifdef CONFIG_PAX_EMUPLT
96981+ vm_flags &= ~VM_EXEC;
96982+#else
96983+ return -EPERM;
96984+#endif
96985+
96986+ }
96987+
96988+ if (!(vm_flags & VM_EXEC))
96989+ vm_flags &= ~VM_MAYEXEC;
96990+#else
96991+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
96992+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
96993+#endif
96994+ else
96995+ vm_flags &= ~VM_MAYWRITE;
96996+ }
96997+#endif
96998+
96999+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97000+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97001+ vm_flags &= ~VM_PAGEEXEC;
97002+#endif
97003+
97004 if (flags & MAP_LOCKED)
97005 if (!can_do_mlock())
97006 return -EPERM;
97007@@ -1399,6 +1526,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97008 vm_flags |= VM_NORESERVE;
97009 }
97010
97011+ if (!gr_acl_handle_mmap(file, prot))
97012+ return -EACCES;
97013+
97014 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97015 if (!IS_ERR_VALUE(addr) &&
97016 ((vm_flags & VM_LOCKED) ||
97017@@ -1492,7 +1622,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97018 vm_flags_t vm_flags = vma->vm_flags;
97019
97020 /* If it was private or non-writable, the write bit is already clear */
97021- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97022+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97023 return 0;
97024
97025 /* The backer wishes to know when pages are first written to? */
97026@@ -1543,7 +1673,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97027 struct rb_node **rb_link, *rb_parent;
97028 unsigned long charged = 0;
97029
97030+#ifdef CONFIG_PAX_SEGMEXEC
97031+ struct vm_area_struct *vma_m = NULL;
97032+#endif
97033+
97034+ /*
97035+ * mm->mmap_sem is required to protect against another thread
97036+ * changing the mappings in case we sleep.
97037+ */
97038+ verify_mm_writelocked(mm);
97039+
97040 /* Check against address space limit. */
97041+
97042+#ifdef CONFIG_PAX_RANDMMAP
97043+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97044+#endif
97045+
97046 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97047 unsigned long nr_pages;
97048
97049@@ -1562,11 +1707,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97050
97051 /* Clear old maps */
97052 error = -ENOMEM;
97053-munmap_back:
97054 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97055 if (do_munmap(mm, addr, len))
97056 return -ENOMEM;
97057- goto munmap_back;
97058+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97059 }
97060
97061 /*
97062@@ -1597,6 +1741,16 @@ munmap_back:
97063 goto unacct_error;
97064 }
97065
97066+#ifdef CONFIG_PAX_SEGMEXEC
97067+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97068+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97069+ if (!vma_m) {
97070+ error = -ENOMEM;
97071+ goto free_vma;
97072+ }
97073+ }
97074+#endif
97075+
97076 vma->vm_mm = mm;
97077 vma->vm_start = addr;
97078 vma->vm_end = addr + len;
97079@@ -1627,6 +1781,13 @@ munmap_back:
97080 if (error)
97081 goto unmap_and_free_vma;
97082
97083+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97084+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97085+ vma->vm_flags |= VM_PAGEEXEC;
97086+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97087+ }
97088+#endif
97089+
97090 /* Can addr have changed??
97091 *
97092 * Answer: Yes, several device drivers can do it in their
97093@@ -1645,6 +1806,12 @@ munmap_back:
97094 }
97095
97096 vma_link(mm, vma, prev, rb_link, rb_parent);
97097+
97098+#ifdef CONFIG_PAX_SEGMEXEC
97099+ if (vma_m)
97100+ BUG_ON(pax_mirror_vma(vma_m, vma));
97101+#endif
97102+
97103 /* Once vma denies write, undo our temporary denial count */
97104 if (file) {
97105 if (vm_flags & VM_SHARED)
97106@@ -1657,6 +1824,7 @@ out:
97107 perf_event_mmap(vma);
97108
97109 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97110+ track_exec_limit(mm, addr, addr + len, vm_flags);
97111 if (vm_flags & VM_LOCKED) {
97112 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97113 vma == get_gate_vma(current->mm)))
97114@@ -1694,6 +1862,12 @@ allow_write_and_free_vma:
97115 if (vm_flags & VM_DENYWRITE)
97116 allow_write_access(file);
97117 free_vma:
97118+
97119+#ifdef CONFIG_PAX_SEGMEXEC
97120+ if (vma_m)
97121+ kmem_cache_free(vm_area_cachep, vma_m);
97122+#endif
97123+
97124 kmem_cache_free(vm_area_cachep, vma);
97125 unacct_error:
97126 if (charged)
97127@@ -1701,7 +1875,63 @@ unacct_error:
97128 return error;
97129 }
97130
97131-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97132+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97133+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97134+{
97135+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97136+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97137+
97138+ return 0;
97139+}
97140+#endif
97141+
97142+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97143+{
97144+ if (!vma) {
97145+#ifdef CONFIG_STACK_GROWSUP
97146+ if (addr > sysctl_heap_stack_gap)
97147+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97148+ else
97149+ vma = find_vma(current->mm, 0);
97150+ if (vma && (vma->vm_flags & VM_GROWSUP))
97151+ return false;
97152+#endif
97153+ return true;
97154+ }
97155+
97156+ if (addr + len > vma->vm_start)
97157+ return false;
97158+
97159+ if (vma->vm_flags & VM_GROWSDOWN)
97160+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97161+#ifdef CONFIG_STACK_GROWSUP
97162+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97163+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97164+#endif
97165+ else if (offset)
97166+ return offset <= vma->vm_start - addr - len;
97167+
97168+ return true;
97169+}
97170+
97171+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97172+{
97173+ if (vma->vm_start < len)
97174+ return -ENOMEM;
97175+
97176+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97177+ if (offset <= vma->vm_start - len)
97178+ return vma->vm_start - len - offset;
97179+ else
97180+ return -ENOMEM;
97181+ }
97182+
97183+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97184+ return vma->vm_start - len - sysctl_heap_stack_gap;
97185+ return -ENOMEM;
97186+}
97187+
97188+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97189 {
97190 /*
97191 * We implement the search by looking for an rbtree node that
97192@@ -1749,11 +1979,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97193 }
97194 }
97195
97196- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97197+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97198 check_current:
97199 /* Check if current node has a suitable gap */
97200 if (gap_start > high_limit)
97201 return -ENOMEM;
97202+
97203+ if (gap_end - gap_start > info->threadstack_offset)
97204+ gap_start += info->threadstack_offset;
97205+ else
97206+ gap_start = gap_end;
97207+
97208+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97209+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97210+ gap_start += sysctl_heap_stack_gap;
97211+ else
97212+ gap_start = gap_end;
97213+ }
97214+ if (vma->vm_flags & VM_GROWSDOWN) {
97215+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97216+ gap_end -= sysctl_heap_stack_gap;
97217+ else
97218+ gap_end = gap_start;
97219+ }
97220 if (gap_end >= low_limit && gap_end - gap_start >= length)
97221 goto found;
97222
97223@@ -1803,7 +2051,7 @@ found:
97224 return gap_start;
97225 }
97226
97227-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97228+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97229 {
97230 struct mm_struct *mm = current->mm;
97231 struct vm_area_struct *vma;
97232@@ -1857,6 +2105,24 @@ check_current:
97233 gap_end = vma->vm_start;
97234 if (gap_end < low_limit)
97235 return -ENOMEM;
97236+
97237+ if (gap_end - gap_start > info->threadstack_offset)
97238+ gap_end -= info->threadstack_offset;
97239+ else
97240+ gap_end = gap_start;
97241+
97242+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97243+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97244+ gap_start += sysctl_heap_stack_gap;
97245+ else
97246+ gap_start = gap_end;
97247+ }
97248+ if (vma->vm_flags & VM_GROWSDOWN) {
97249+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97250+ gap_end -= sysctl_heap_stack_gap;
97251+ else
97252+ gap_end = gap_start;
97253+ }
97254 if (gap_start <= high_limit && gap_end - gap_start >= length)
97255 goto found;
97256
97257@@ -1920,6 +2186,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97258 struct mm_struct *mm = current->mm;
97259 struct vm_area_struct *vma;
97260 struct vm_unmapped_area_info info;
97261+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97262
97263 if (len > TASK_SIZE - mmap_min_addr)
97264 return -ENOMEM;
97265@@ -1927,11 +2194,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97266 if (flags & MAP_FIXED)
97267 return addr;
97268
97269+#ifdef CONFIG_PAX_RANDMMAP
97270+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97271+#endif
97272+
97273 if (addr) {
97274 addr = PAGE_ALIGN(addr);
97275 vma = find_vma(mm, addr);
97276 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97277- (!vma || addr + len <= vma->vm_start))
97278+ check_heap_stack_gap(vma, addr, len, offset))
97279 return addr;
97280 }
97281
97282@@ -1940,6 +2211,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97283 info.low_limit = mm->mmap_base;
97284 info.high_limit = TASK_SIZE;
97285 info.align_mask = 0;
97286+ info.threadstack_offset = offset;
97287 return vm_unmapped_area(&info);
97288 }
97289 #endif
97290@@ -1958,6 +2230,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97291 struct mm_struct *mm = current->mm;
97292 unsigned long addr = addr0;
97293 struct vm_unmapped_area_info info;
97294+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97295
97296 /* requested length too big for entire address space */
97297 if (len > TASK_SIZE - mmap_min_addr)
97298@@ -1966,12 +2239,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97299 if (flags & MAP_FIXED)
97300 return addr;
97301
97302+#ifdef CONFIG_PAX_RANDMMAP
97303+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97304+#endif
97305+
97306 /* requesting a specific address */
97307 if (addr) {
97308 addr = PAGE_ALIGN(addr);
97309 vma = find_vma(mm, addr);
97310 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97311- (!vma || addr + len <= vma->vm_start))
97312+ check_heap_stack_gap(vma, addr, len, offset))
97313 return addr;
97314 }
97315
97316@@ -1980,6 +2257,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97317 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97318 info.high_limit = mm->mmap_base;
97319 info.align_mask = 0;
97320+ info.threadstack_offset = offset;
97321 addr = vm_unmapped_area(&info);
97322
97323 /*
97324@@ -1992,6 +2270,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97325 VM_BUG_ON(addr != -ENOMEM);
97326 info.flags = 0;
97327 info.low_limit = TASK_UNMAPPED_BASE;
97328+
97329+#ifdef CONFIG_PAX_RANDMMAP
97330+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97331+ info.low_limit += mm->delta_mmap;
97332+#endif
97333+
97334 info.high_limit = TASK_SIZE;
97335 addr = vm_unmapped_area(&info);
97336 }
97337@@ -2092,6 +2376,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97338 return vma;
97339 }
97340
97341+#ifdef CONFIG_PAX_SEGMEXEC
97342+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97343+{
97344+ struct vm_area_struct *vma_m;
97345+
97346+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97347+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97348+ BUG_ON(vma->vm_mirror);
97349+ return NULL;
97350+ }
97351+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97352+ vma_m = vma->vm_mirror;
97353+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97354+ BUG_ON(vma->vm_file != vma_m->vm_file);
97355+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97356+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97357+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97358+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97359+ return vma_m;
97360+}
97361+#endif
97362+
97363 /*
97364 * Verify that the stack growth is acceptable and
97365 * update accounting. This is shared with both the
97366@@ -2109,8 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97367
97368 /* Stack limit test */
97369 actual_size = size;
97370- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97371- actual_size -= PAGE_SIZE;
97372+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97373 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97374 return -ENOMEM;
97375
97376@@ -2121,6 +2426,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97377 locked = mm->locked_vm + grow;
97378 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97379 limit >>= PAGE_SHIFT;
97380+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97381 if (locked > limit && !capable(CAP_IPC_LOCK))
97382 return -ENOMEM;
97383 }
97384@@ -2150,37 +2456,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97385 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97386 * vma is the last one with address > vma->vm_end. Have to extend vma.
97387 */
97388+#ifndef CONFIG_IA64
97389+static
97390+#endif
97391 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97392 {
97393 int error;
97394+ bool locknext;
97395
97396 if (!(vma->vm_flags & VM_GROWSUP))
97397 return -EFAULT;
97398
97399+ /* Also guard against wrapping around to address 0. */
97400+ if (address < PAGE_ALIGN(address+1))
97401+ address = PAGE_ALIGN(address+1);
97402+ else
97403+ return -ENOMEM;
97404+
97405 /*
97406 * We must make sure the anon_vma is allocated
97407 * so that the anon_vma locking is not a noop.
97408 */
97409 if (unlikely(anon_vma_prepare(vma)))
97410 return -ENOMEM;
97411+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97412+ if (locknext && anon_vma_prepare(vma->vm_next))
97413+ return -ENOMEM;
97414 vma_lock_anon_vma(vma);
97415+ if (locknext)
97416+ vma_lock_anon_vma(vma->vm_next);
97417
97418 /*
97419 * vma->vm_start/vm_end cannot change under us because the caller
97420 * is required to hold the mmap_sem in read mode. We need the
97421- * anon_vma lock to serialize against concurrent expand_stacks.
97422- * Also guard against wrapping around to address 0.
97423+ * anon_vma locks to serialize against concurrent expand_stacks
97424+ * and expand_upwards.
97425 */
97426- if (address < PAGE_ALIGN(address+4))
97427- address = PAGE_ALIGN(address+4);
97428- else {
97429- vma_unlock_anon_vma(vma);
97430- return -ENOMEM;
97431- }
97432 error = 0;
97433
97434 /* Somebody else might have raced and expanded it already */
97435- if (address > vma->vm_end) {
97436+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97437+ error = -ENOMEM;
97438+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97439 unsigned long size, grow;
97440
97441 size = address - vma->vm_start;
97442@@ -2215,6 +2532,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97443 }
97444 }
97445 }
97446+ if (locknext)
97447+ vma_unlock_anon_vma(vma->vm_next);
97448 vma_unlock_anon_vma(vma);
97449 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97450 validate_mm(vma->vm_mm);
97451@@ -2229,6 +2548,8 @@ int expand_downwards(struct vm_area_struct *vma,
97452 unsigned long address)
97453 {
97454 int error;
97455+ bool lockprev = false;
97456+ struct vm_area_struct *prev;
97457
97458 /*
97459 * We must make sure the anon_vma is allocated
97460@@ -2242,6 +2563,15 @@ int expand_downwards(struct vm_area_struct *vma,
97461 if (error)
97462 return error;
97463
97464+ prev = vma->vm_prev;
97465+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97466+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97467+#endif
97468+ if (lockprev && anon_vma_prepare(prev))
97469+ return -ENOMEM;
97470+ if (lockprev)
97471+ vma_lock_anon_vma(prev);
97472+
97473 vma_lock_anon_vma(vma);
97474
97475 /*
97476@@ -2251,9 +2581,17 @@ int expand_downwards(struct vm_area_struct *vma,
97477 */
97478
97479 /* Somebody else might have raced and expanded it already */
97480- if (address < vma->vm_start) {
97481+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97482+ error = -ENOMEM;
97483+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97484 unsigned long size, grow;
97485
97486+#ifdef CONFIG_PAX_SEGMEXEC
97487+ struct vm_area_struct *vma_m;
97488+
97489+ vma_m = pax_find_mirror_vma(vma);
97490+#endif
97491+
97492 size = vma->vm_end - address;
97493 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97494
97495@@ -2278,13 +2616,27 @@ int expand_downwards(struct vm_area_struct *vma,
97496 vma->vm_pgoff -= grow;
97497 anon_vma_interval_tree_post_update_vma(vma);
97498 vma_gap_update(vma);
97499+
97500+#ifdef CONFIG_PAX_SEGMEXEC
97501+ if (vma_m) {
97502+ anon_vma_interval_tree_pre_update_vma(vma_m);
97503+ vma_m->vm_start -= grow << PAGE_SHIFT;
97504+ vma_m->vm_pgoff -= grow;
97505+ anon_vma_interval_tree_post_update_vma(vma_m);
97506+ vma_gap_update(vma_m);
97507+ }
97508+#endif
97509+
97510 spin_unlock(&vma->vm_mm->page_table_lock);
97511
97512+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97513 perf_event_mmap(vma);
97514 }
97515 }
97516 }
97517 vma_unlock_anon_vma(vma);
97518+ if (lockprev)
97519+ vma_unlock_anon_vma(prev);
97520 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97521 validate_mm(vma->vm_mm);
97522 return error;
97523@@ -2384,6 +2736,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97524 do {
97525 long nrpages = vma_pages(vma);
97526
97527+#ifdef CONFIG_PAX_SEGMEXEC
97528+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97529+ vma = remove_vma(vma);
97530+ continue;
97531+ }
97532+#endif
97533+
97534 if (vma->vm_flags & VM_ACCOUNT)
97535 nr_accounted += nrpages;
97536 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97537@@ -2428,6 +2787,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97538 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97539 vma->vm_prev = NULL;
97540 do {
97541+
97542+#ifdef CONFIG_PAX_SEGMEXEC
97543+ if (vma->vm_mirror) {
97544+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97545+ vma->vm_mirror->vm_mirror = NULL;
97546+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97547+ vma->vm_mirror = NULL;
97548+ }
97549+#endif
97550+
97551 vma_rb_erase(vma, &mm->mm_rb);
97552 mm->map_count--;
97553 tail_vma = vma;
97554@@ -2455,14 +2824,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97555 struct vm_area_struct *new;
97556 int err = -ENOMEM;
97557
97558+#ifdef CONFIG_PAX_SEGMEXEC
97559+ struct vm_area_struct *vma_m, *new_m = NULL;
97560+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97561+#endif
97562+
97563 if (is_vm_hugetlb_page(vma) && (addr &
97564 ~(huge_page_mask(hstate_vma(vma)))))
97565 return -EINVAL;
97566
97567+#ifdef CONFIG_PAX_SEGMEXEC
97568+ vma_m = pax_find_mirror_vma(vma);
97569+#endif
97570+
97571 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97572 if (!new)
97573 goto out_err;
97574
97575+#ifdef CONFIG_PAX_SEGMEXEC
97576+ if (vma_m) {
97577+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97578+ if (!new_m) {
97579+ kmem_cache_free(vm_area_cachep, new);
97580+ goto out_err;
97581+ }
97582+ }
97583+#endif
97584+
97585 /* most fields are the same, copy all, and then fixup */
97586 *new = *vma;
97587
97588@@ -2475,6 +2863,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97589 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97590 }
97591
97592+#ifdef CONFIG_PAX_SEGMEXEC
97593+ if (vma_m) {
97594+ *new_m = *vma_m;
97595+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97596+ new_m->vm_mirror = new;
97597+ new->vm_mirror = new_m;
97598+
97599+ if (new_below)
97600+ new_m->vm_end = addr_m;
97601+ else {
97602+ new_m->vm_start = addr_m;
97603+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97604+ }
97605+ }
97606+#endif
97607+
97608 err = vma_dup_policy(vma, new);
97609 if (err)
97610 goto out_free_vma;
97611@@ -2495,6 +2899,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97612 else
97613 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97614
97615+#ifdef CONFIG_PAX_SEGMEXEC
97616+ if (!err && vma_m) {
97617+ struct mempolicy *pol = vma_policy(new);
97618+
97619+ if (anon_vma_clone(new_m, vma_m))
97620+ goto out_free_mpol;
97621+
97622+ mpol_get(pol);
97623+ set_vma_policy(new_m, pol);
97624+
97625+ if (new_m->vm_file)
97626+ get_file(new_m->vm_file);
97627+
97628+ if (new_m->vm_ops && new_m->vm_ops->open)
97629+ new_m->vm_ops->open(new_m);
97630+
97631+ if (new_below)
97632+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97633+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97634+ else
97635+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97636+
97637+ if (err) {
97638+ if (new_m->vm_ops && new_m->vm_ops->close)
97639+ new_m->vm_ops->close(new_m);
97640+ if (new_m->vm_file)
97641+ fput(new_m->vm_file);
97642+ mpol_put(pol);
97643+ }
97644+ }
97645+#endif
97646+
97647 /* Success. */
97648 if (!err)
97649 return 0;
97650@@ -2504,10 +2940,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97651 new->vm_ops->close(new);
97652 if (new->vm_file)
97653 fput(new->vm_file);
97654- unlink_anon_vmas(new);
97655 out_free_mpol:
97656 mpol_put(vma_policy(new));
97657 out_free_vma:
97658+
97659+#ifdef CONFIG_PAX_SEGMEXEC
97660+ if (new_m) {
97661+ unlink_anon_vmas(new_m);
97662+ kmem_cache_free(vm_area_cachep, new_m);
97663+ }
97664+#endif
97665+
97666+ unlink_anon_vmas(new);
97667 kmem_cache_free(vm_area_cachep, new);
97668 out_err:
97669 return err;
97670@@ -2520,6 +2964,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97671 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97672 unsigned long addr, int new_below)
97673 {
97674+
97675+#ifdef CONFIG_PAX_SEGMEXEC
97676+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97677+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97678+ if (mm->map_count >= sysctl_max_map_count-1)
97679+ return -ENOMEM;
97680+ } else
97681+#endif
97682+
97683 if (mm->map_count >= sysctl_max_map_count)
97684 return -ENOMEM;
97685
97686@@ -2531,11 +2984,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97687 * work. This now handles partial unmappings.
97688 * Jeremy Fitzhardinge <jeremy@goop.org>
97689 */
97690+#ifdef CONFIG_PAX_SEGMEXEC
97691 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97692 {
97693+ int ret = __do_munmap(mm, start, len);
97694+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97695+ return ret;
97696+
97697+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97698+}
97699+
97700+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97701+#else
97702+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97703+#endif
97704+{
97705 unsigned long end;
97706 struct vm_area_struct *vma, *prev, *last;
97707
97708+ /*
97709+ * mm->mmap_sem is required to protect against another thread
97710+ * changing the mappings in case we sleep.
97711+ */
97712+ verify_mm_writelocked(mm);
97713+
97714 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97715 return -EINVAL;
97716
97717@@ -2613,6 +3085,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97718 /* Fix up all other VM information */
97719 remove_vma_list(mm, vma);
97720
97721+ track_exec_limit(mm, start, end, 0UL);
97722+
97723 return 0;
97724 }
97725
97726@@ -2621,6 +3095,13 @@ int vm_munmap(unsigned long start, size_t len)
97727 int ret;
97728 struct mm_struct *mm = current->mm;
97729
97730+
97731+#ifdef CONFIG_PAX_SEGMEXEC
97732+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97733+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97734+ return -EINVAL;
97735+#endif
97736+
97737 down_write(&mm->mmap_sem);
97738 ret = do_munmap(mm, start, len);
97739 up_write(&mm->mmap_sem);
97740@@ -2634,16 +3115,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97741 return vm_munmap(addr, len);
97742 }
97743
97744-static inline void verify_mm_writelocked(struct mm_struct *mm)
97745-{
97746-#ifdef CONFIG_DEBUG_VM
97747- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97748- WARN_ON(1);
97749- up_read(&mm->mmap_sem);
97750- }
97751-#endif
97752-}
97753-
97754 /*
97755 * this is really a simplified "do_mmap". it only handles
97756 * anonymous maps. eventually we may be able to do some
97757@@ -2657,6 +3128,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97758 struct rb_node **rb_link, *rb_parent;
97759 pgoff_t pgoff = addr >> PAGE_SHIFT;
97760 int error;
97761+ unsigned long charged;
97762
97763 len = PAGE_ALIGN(len);
97764 if (!len)
97765@@ -2664,10 +3136,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97766
97767 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97768
97769+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97770+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97771+ flags &= ~VM_EXEC;
97772+
97773+#ifdef CONFIG_PAX_MPROTECT
97774+ if (mm->pax_flags & MF_PAX_MPROTECT)
97775+ flags &= ~VM_MAYEXEC;
97776+#endif
97777+
97778+ }
97779+#endif
97780+
97781 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97782 if (error & ~PAGE_MASK)
97783 return error;
97784
97785+ charged = len >> PAGE_SHIFT;
97786+
97787 error = mlock_future_check(mm, mm->def_flags, len);
97788 if (error)
97789 return error;
97790@@ -2681,21 +3167,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97791 /*
97792 * Clear old maps. this also does some error checking for us
97793 */
97794- munmap_back:
97795 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97796 if (do_munmap(mm, addr, len))
97797 return -ENOMEM;
97798- goto munmap_back;
97799+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97800 }
97801
97802 /* Check against address space limits *after* clearing old maps... */
97803- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97804+ if (!may_expand_vm(mm, charged))
97805 return -ENOMEM;
97806
97807 if (mm->map_count > sysctl_max_map_count)
97808 return -ENOMEM;
97809
97810- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97811+ if (security_vm_enough_memory_mm(mm, charged))
97812 return -ENOMEM;
97813
97814 /* Can we just expand an old private anonymous mapping? */
97815@@ -2709,7 +3194,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97816 */
97817 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97818 if (!vma) {
97819- vm_unacct_memory(len >> PAGE_SHIFT);
97820+ vm_unacct_memory(charged);
97821 return -ENOMEM;
97822 }
97823
97824@@ -2723,10 +3208,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97825 vma_link(mm, vma, prev, rb_link, rb_parent);
97826 out:
97827 perf_event_mmap(vma);
97828- mm->total_vm += len >> PAGE_SHIFT;
97829+ mm->total_vm += charged;
97830 if (flags & VM_LOCKED)
97831- mm->locked_vm += (len >> PAGE_SHIFT);
97832+ mm->locked_vm += charged;
97833 vma->vm_flags |= VM_SOFTDIRTY;
97834+ track_exec_limit(mm, addr, addr + len, flags);
97835 return addr;
97836 }
97837
97838@@ -2788,6 +3274,7 @@ void exit_mmap(struct mm_struct *mm)
97839 while (vma) {
97840 if (vma->vm_flags & VM_ACCOUNT)
97841 nr_accounted += vma_pages(vma);
97842+ vma->vm_mirror = NULL;
97843 vma = remove_vma(vma);
97844 }
97845 vm_unacct_memory(nr_accounted);
97846@@ -2805,6 +3292,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97847 struct vm_area_struct *prev;
97848 struct rb_node **rb_link, *rb_parent;
97849
97850+#ifdef CONFIG_PAX_SEGMEXEC
97851+ struct vm_area_struct *vma_m = NULL;
97852+#endif
97853+
97854+ if (security_mmap_addr(vma->vm_start))
97855+ return -EPERM;
97856+
97857 /*
97858 * The vm_pgoff of a purely anonymous vma should be irrelevant
97859 * until its first write fault, when page's anon_vma and index
97860@@ -2828,7 +3322,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
97861 security_vm_enough_memory_mm(mm, vma_pages(vma)))
97862 return -ENOMEM;
97863
97864+#ifdef CONFIG_PAX_SEGMEXEC
97865+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
97866+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97867+ if (!vma_m)
97868+ return -ENOMEM;
97869+ }
97870+#endif
97871+
97872 vma_link(mm, vma, prev, rb_link, rb_parent);
97873+
97874+#ifdef CONFIG_PAX_SEGMEXEC
97875+ if (vma_m)
97876+ BUG_ON(pax_mirror_vma(vma_m, vma));
97877+#endif
97878+
97879 return 0;
97880 }
97881
97882@@ -2847,6 +3355,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97883 struct rb_node **rb_link, *rb_parent;
97884 bool faulted_in_anon_vma = true;
97885
97886+ BUG_ON(vma->vm_mirror);
97887+
97888 /*
97889 * If anonymous vma has not yet been faulted, update new pgoff
97890 * to match new location, to increase its chance of merging.
97891@@ -2911,6 +3421,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97892 return NULL;
97893 }
97894
97895+#ifdef CONFIG_PAX_SEGMEXEC
97896+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
97897+{
97898+ struct vm_area_struct *prev_m;
97899+ struct rb_node **rb_link_m, *rb_parent_m;
97900+ struct mempolicy *pol_m;
97901+
97902+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
97903+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
97904+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
97905+ *vma_m = *vma;
97906+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
97907+ if (anon_vma_clone(vma_m, vma))
97908+ return -ENOMEM;
97909+ pol_m = vma_policy(vma_m);
97910+ mpol_get(pol_m);
97911+ set_vma_policy(vma_m, pol_m);
97912+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
97913+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
97914+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
97915+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
97916+ if (vma_m->vm_file)
97917+ get_file(vma_m->vm_file);
97918+ if (vma_m->vm_ops && vma_m->vm_ops->open)
97919+ vma_m->vm_ops->open(vma_m);
97920+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
97921+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
97922+ vma_m->vm_mirror = vma;
97923+ vma->vm_mirror = vma_m;
97924+ return 0;
97925+}
97926+#endif
97927+
97928 /*
97929 * Return true if the calling process may expand its vm space by the passed
97930 * number of pages
97931@@ -2922,6 +3465,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
97932
97933 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
97934
97935+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
97936 if (cur + npages > lim)
97937 return 0;
97938 return 1;
97939@@ -3004,6 +3548,22 @@ static struct vm_area_struct *__install_special_mapping(
97940 vma->vm_start = addr;
97941 vma->vm_end = addr + len;
97942
97943+#ifdef CONFIG_PAX_MPROTECT
97944+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97945+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97946+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
97947+ return ERR_PTR(-EPERM);
97948+ if (!(vm_flags & VM_EXEC))
97949+ vm_flags &= ~VM_MAYEXEC;
97950+#else
97951+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97952+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97953+#endif
97954+ else
97955+ vm_flags &= ~VM_MAYWRITE;
97956+ }
97957+#endif
97958+
97959 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
97960 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97961
97962diff --git a/mm/mprotect.c b/mm/mprotect.c
97963index ace9345..63320dc 100644
97964--- a/mm/mprotect.c
97965+++ b/mm/mprotect.c
97966@@ -24,10 +24,18 @@
97967 #include <linux/migrate.h>
97968 #include <linux/perf_event.h>
97969 #include <linux/ksm.h>
97970+#include <linux/sched/sysctl.h>
97971+
97972+#ifdef CONFIG_PAX_MPROTECT
97973+#include <linux/elf.h>
97974+#include <linux/binfmts.h>
97975+#endif
97976+
97977 #include <asm/uaccess.h>
97978 #include <asm/pgtable.h>
97979 #include <asm/cacheflush.h>
97980 #include <asm/tlbflush.h>
97981+#include <asm/mmu_context.h>
97982
97983 /*
97984 * For a prot_numa update we only hold mmap_sem for read so there is a
97985@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
97986 return pages;
97987 }
97988
97989+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
97990+/* called while holding the mmap semaphor for writing except stack expansion */
97991+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
97992+{
97993+ unsigned long oldlimit, newlimit = 0UL;
97994+
97995+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
97996+ return;
97997+
97998+ spin_lock(&mm->page_table_lock);
97999+ oldlimit = mm->context.user_cs_limit;
98000+ if ((prot & VM_EXEC) && oldlimit < end)
98001+ /* USER_CS limit moved up */
98002+ newlimit = end;
98003+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98004+ /* USER_CS limit moved down */
98005+ newlimit = start;
98006+
98007+ if (newlimit) {
98008+ mm->context.user_cs_limit = newlimit;
98009+
98010+#ifdef CONFIG_SMP
98011+ wmb();
98012+ cpus_clear(mm->context.cpu_user_cs_mask);
98013+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98014+#endif
98015+
98016+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98017+ }
98018+ spin_unlock(&mm->page_table_lock);
98019+ if (newlimit == end) {
98020+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98021+
98022+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98023+ if (is_vm_hugetlb_page(vma))
98024+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98025+ else
98026+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98027+ }
98028+}
98029+#endif
98030+
98031 int
98032 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98033 unsigned long start, unsigned long end, unsigned long newflags)
98034@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98035 int error;
98036 int dirty_accountable = 0;
98037
98038+#ifdef CONFIG_PAX_SEGMEXEC
98039+ struct vm_area_struct *vma_m = NULL;
98040+ unsigned long start_m, end_m;
98041+
98042+ start_m = start + SEGMEXEC_TASK_SIZE;
98043+ end_m = end + SEGMEXEC_TASK_SIZE;
98044+#endif
98045+
98046 if (newflags == oldflags) {
98047 *pprev = vma;
98048 return 0;
98049 }
98050
98051+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98052+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98053+
98054+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98055+ return -ENOMEM;
98056+
98057+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98058+ return -ENOMEM;
98059+ }
98060+
98061 /*
98062 * If we make a private mapping writable we increase our commit;
98063 * but (without finer accounting) cannot reduce our commit if we
98064@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98065 }
98066 }
98067
98068+#ifdef CONFIG_PAX_SEGMEXEC
98069+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98070+ if (start != vma->vm_start) {
98071+ error = split_vma(mm, vma, start, 1);
98072+ if (error)
98073+ goto fail;
98074+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98075+ *pprev = (*pprev)->vm_next;
98076+ }
98077+
98078+ if (end != vma->vm_end) {
98079+ error = split_vma(mm, vma, end, 0);
98080+ if (error)
98081+ goto fail;
98082+ }
98083+
98084+ if (pax_find_mirror_vma(vma)) {
98085+ error = __do_munmap(mm, start_m, end_m - start_m);
98086+ if (error)
98087+ goto fail;
98088+ } else {
98089+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98090+ if (!vma_m) {
98091+ error = -ENOMEM;
98092+ goto fail;
98093+ }
98094+ vma->vm_flags = newflags;
98095+ error = pax_mirror_vma(vma_m, vma);
98096+ if (error) {
98097+ vma->vm_flags = oldflags;
98098+ goto fail;
98099+ }
98100+ }
98101+ }
98102+#endif
98103+
98104 /*
98105 * First try to merge with previous and/or next vma.
98106 */
98107@@ -314,7 +418,19 @@ success:
98108 * vm_flags and vm_page_prot are protected by the mmap_sem
98109 * held in write mode.
98110 */
98111+
98112+#ifdef CONFIG_PAX_SEGMEXEC
98113+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98114+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98115+#endif
98116+
98117 vma->vm_flags = newflags;
98118+
98119+#ifdef CONFIG_PAX_MPROTECT
98120+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98121+ mm->binfmt->handle_mprotect(vma, newflags);
98122+#endif
98123+
98124 dirty_accountable = vma_wants_writenotify(vma);
98125 vma_set_page_prot(vma);
98126
98127@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98128 end = start + len;
98129 if (end <= start)
98130 return -ENOMEM;
98131+
98132+#ifdef CONFIG_PAX_SEGMEXEC
98133+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98134+ if (end > SEGMEXEC_TASK_SIZE)
98135+ return -EINVAL;
98136+ } else
98137+#endif
98138+
98139+ if (end > TASK_SIZE)
98140+ return -EINVAL;
98141+
98142 if (!arch_validate_prot(prot))
98143 return -EINVAL;
98144
98145@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98146 /*
98147 * Does the application expect PROT_READ to imply PROT_EXEC:
98148 */
98149- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98150+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98151 prot |= PROT_EXEC;
98152
98153 vm_flags = calc_vm_prot_bits(prot);
98154@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98155 if (start > vma->vm_start)
98156 prev = vma;
98157
98158+#ifdef CONFIG_PAX_MPROTECT
98159+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98160+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98161+#endif
98162+
98163 for (nstart = start ; ; ) {
98164 unsigned long newflags;
98165
98166@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98167
98168 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98169 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98170+ if (prot & (PROT_WRITE | PROT_EXEC))
98171+ gr_log_rwxmprotect(vma);
98172+
98173+ error = -EACCES;
98174+ goto out;
98175+ }
98176+
98177+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98178 error = -EACCES;
98179 goto out;
98180 }
98181@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98182 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98183 if (error)
98184 goto out;
98185+
98186+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98187+
98188 nstart = tmp;
98189
98190 if (nstart < prev->vm_end)
98191diff --git a/mm/mremap.c b/mm/mremap.c
98192index 17fa018..6f7892b 100644
98193--- a/mm/mremap.c
98194+++ b/mm/mremap.c
98195@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98196 continue;
98197 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98198 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98199+
98200+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98201+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98202+ pte = pte_exprotect(pte);
98203+#endif
98204+
98205 pte = move_soft_dirty_pte(pte);
98206 set_pte_at(mm, new_addr, new_pte, pte);
98207 }
98208@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98209 if (is_vm_hugetlb_page(vma))
98210 goto Einval;
98211
98212+#ifdef CONFIG_PAX_SEGMEXEC
98213+ if (pax_find_mirror_vma(vma))
98214+ goto Einval;
98215+#endif
98216+
98217 /* We can't remap across vm area boundaries */
98218 if (old_len > vma->vm_end - addr)
98219 goto Efault;
98220@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98221 unsigned long ret = -EINVAL;
98222 unsigned long charged = 0;
98223 unsigned long map_flags;
98224+ unsigned long pax_task_size = TASK_SIZE;
98225
98226 if (new_addr & ~PAGE_MASK)
98227 goto out;
98228
98229- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98230+#ifdef CONFIG_PAX_SEGMEXEC
98231+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98232+ pax_task_size = SEGMEXEC_TASK_SIZE;
98233+#endif
98234+
98235+ pax_task_size -= PAGE_SIZE;
98236+
98237+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98238 goto out;
98239
98240 /* Check if the location we're moving into overlaps the
98241 * old location at all, and fail if it does.
98242 */
98243- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98244- goto out;
98245-
98246- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98247+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98248 goto out;
98249
98250 ret = do_munmap(mm, new_addr, new_len);
98251@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98252 unsigned long ret = -EINVAL;
98253 unsigned long charged = 0;
98254 bool locked = false;
98255+ unsigned long pax_task_size = TASK_SIZE;
98256
98257 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98258 return ret;
98259@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98260 if (!new_len)
98261 return ret;
98262
98263+#ifdef CONFIG_PAX_SEGMEXEC
98264+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98265+ pax_task_size = SEGMEXEC_TASK_SIZE;
98266+#endif
98267+
98268+ pax_task_size -= PAGE_SIZE;
98269+
98270+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98271+ old_len > pax_task_size || addr > pax_task_size-old_len)
98272+ return ret;
98273+
98274 down_write(&current->mm->mmap_sem);
98275
98276 if (flags & MREMAP_FIXED) {
98277@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98278 new_addr = addr;
98279 }
98280 ret = addr;
98281+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98282 goto out;
98283 }
98284 }
98285@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98286 goto out;
98287 }
98288
98289+ map_flags = vma->vm_flags;
98290 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98291+ if (!(ret & ~PAGE_MASK)) {
98292+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98293+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98294+ }
98295 }
98296 out:
98297 if (ret & ~PAGE_MASK)
98298diff --git a/mm/nommu.c b/mm/nommu.c
98299index ae5baae..cbb2ed5 100644
98300--- a/mm/nommu.c
98301+++ b/mm/nommu.c
98302@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98303 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98304 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98305 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98306-int heap_stack_gap = 0;
98307
98308 atomic_long_t mmap_pages_allocated;
98309
98310@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98311 EXPORT_SYMBOL(find_vma);
98312
98313 /*
98314- * find a VMA
98315- * - we don't extend stack VMAs under NOMMU conditions
98316- */
98317-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98318-{
98319- return find_vma(mm, addr);
98320-}
98321-
98322-/*
98323 * expand a stack to a given address
98324 * - not supported under NOMMU conditions
98325 */
98326@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98327
98328 /* most fields are the same, copy all, and then fixup */
98329 *new = *vma;
98330+ INIT_LIST_HEAD(&new->anon_vma_chain);
98331 *region = *vma->vm_region;
98332 new->vm_region = region;
98333
98334@@ -1990,8 +1981,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98335 }
98336 EXPORT_SYMBOL(generic_file_remap_pages);
98337
98338-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98339- unsigned long addr, void *buf, int len, int write)
98340+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98341+ unsigned long addr, void *buf, size_t len, int write)
98342 {
98343 struct vm_area_struct *vma;
98344
98345@@ -2032,8 +2023,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98346 *
98347 * The caller must hold a reference on @mm.
98348 */
98349-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98350- void *buf, int len, int write)
98351+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98352+ void *buf, size_t len, int write)
98353 {
98354 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98355 }
98356@@ -2042,7 +2033,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98357 * Access another process' address space.
98358 * - source/target buffer must be kernel space
98359 */
98360-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98361+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98362 {
98363 struct mm_struct *mm;
98364
98365diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98366index 6f43352..e44bf41 100644
98367--- a/mm/page-writeback.c
98368+++ b/mm/page-writeback.c
98369@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98370 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98371 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98372 */
98373-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98374+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98375 unsigned long thresh,
98376 unsigned long bg_thresh,
98377 unsigned long dirty,
98378diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98379index 8bbef06..a8d1989 100644
98380--- a/mm/page_alloc.c
98381+++ b/mm/page_alloc.c
98382@@ -60,6 +60,7 @@
98383 #include <linux/hugetlb.h>
98384 #include <linux/sched/rt.h>
98385 #include <linux/page_owner.h>
98386+#include <linux/random.h>
98387
98388 #include <asm/sections.h>
98389 #include <asm/tlbflush.h>
98390@@ -358,7 +359,7 @@ out:
98391 * This usage means that zero-order pages may not be compound.
98392 */
98393
98394-static void free_compound_page(struct page *page)
98395+void free_compound_page(struct page *page)
98396 {
98397 __free_pages_ok(page, compound_order(page));
98398 }
98399@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98400 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98401 }
98402 #else
98403-struct page_ext_operations debug_guardpage_ops = { NULL, };
98404+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98405 static inline void set_page_guard(struct zone *zone, struct page *page,
98406 unsigned int order, int migratetype) {}
98407 static inline void clear_page_guard(struct zone *zone, struct page *page,
98408@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98409 int i;
98410 int bad = 0;
98411
98412+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98413+ unsigned long index = 1UL << order;
98414+#endif
98415+
98416 VM_BUG_ON_PAGE(PageTail(page), page);
98417 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98418
98419@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98420 debug_check_no_obj_freed(page_address(page),
98421 PAGE_SIZE << order);
98422 }
98423+
98424+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98425+ for (; index; --index)
98426+ sanitize_highpage(page + index - 1);
98427+#endif
98428+
98429 arch_free_page(page, order);
98430 kernel_map_pages(page, 1 << order, 0);
98431
98432@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98433 local_irq_restore(flags);
98434 }
98435
98436+#ifdef CONFIG_PAX_LATENT_ENTROPY
98437+bool __meminitdata extra_latent_entropy;
98438+
98439+static int __init setup_pax_extra_latent_entropy(char *str)
98440+{
98441+ extra_latent_entropy = true;
98442+ return 0;
98443+}
98444+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98445+
98446+volatile u64 latent_entropy __latent_entropy;
98447+EXPORT_SYMBOL(latent_entropy);
98448+#endif
98449+
98450 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98451 {
98452 unsigned int nr_pages = 1 << order;
98453@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98454 __ClearPageReserved(p);
98455 set_page_count(p, 0);
98456
98457+#ifdef CONFIG_PAX_LATENT_ENTROPY
98458+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98459+ u64 hash = 0;
98460+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98461+ const u64 *data = lowmem_page_address(page);
98462+
98463+ for (index = 0; index < end; index++)
98464+ hash ^= hash + data[index];
98465+ latent_entropy ^= hash;
98466+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98467+ }
98468+#endif
98469+
98470 page_zone(page)->managed_pages += nr_pages;
98471 set_page_refcounted(page);
98472 __free_pages(page, order);
98473@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98474 arch_alloc_page(page, order);
98475 kernel_map_pages(page, 1 << order, 1);
98476
98477+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98478 if (gfp_flags & __GFP_ZERO)
98479 prep_zero_page(page, order, gfp_flags);
98480+#endif
98481
98482 if (order && (gfp_flags & __GFP_COMP))
98483 prep_compound_page(page, order);
98484@@ -1700,7 +1740,7 @@ again:
98485 }
98486
98487 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98488- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98489+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98490 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98491 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98492
98493@@ -2021,7 +2061,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98494 do {
98495 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98496 high_wmark_pages(zone) - low_wmark_pages(zone) -
98497- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98498+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98499 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98500 } while (zone++ != preferred_zone);
98501 }
98502@@ -5781,7 +5821,7 @@ static void __setup_per_zone_wmarks(void)
98503
98504 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98505 high_wmark_pages(zone) - low_wmark_pages(zone) -
98506- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98507+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98508
98509 setup_zone_migrate_reserve(zone);
98510 spin_unlock_irqrestore(&zone->lock, flags);
98511diff --git a/mm/percpu.c b/mm/percpu.c
98512index d39e2f4..de5f4b4 100644
98513--- a/mm/percpu.c
98514+++ b/mm/percpu.c
98515@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98516 static unsigned int pcpu_high_unit_cpu __read_mostly;
98517
98518 /* the address of the first chunk which starts with the kernel static area */
98519-void *pcpu_base_addr __read_mostly;
98520+void *pcpu_base_addr __read_only;
98521 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98522
98523 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98524diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98525index 5077afc..846c9ef 100644
98526--- a/mm/process_vm_access.c
98527+++ b/mm/process_vm_access.c
98528@@ -13,6 +13,7 @@
98529 #include <linux/uio.h>
98530 #include <linux/sched.h>
98531 #include <linux/highmem.h>
98532+#include <linux/security.h>
98533 #include <linux/ptrace.h>
98534 #include <linux/slab.h>
98535 #include <linux/syscalls.h>
98536@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98537 ssize_t iov_len;
98538 size_t total_len = iov_iter_count(iter);
98539
98540+ return -ENOSYS; // PaX: until properly audited
98541+
98542 /*
98543 * Work out how many pages of struct pages we're going to need
98544 * when eventually calling get_user_pages
98545 */
98546 for (i = 0; i < riovcnt; i++) {
98547 iov_len = rvec[i].iov_len;
98548- if (iov_len > 0) {
98549- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98550- + iov_len)
98551- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98552- / PAGE_SIZE + 1;
98553- nr_pages = max(nr_pages, nr_pages_iov);
98554- }
98555+ if (iov_len <= 0)
98556+ continue;
98557+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98558+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98559+ nr_pages = max(nr_pages, nr_pages_iov);
98560 }
98561
98562 if (nr_pages == 0)
98563@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98564 goto free_proc_pages;
98565 }
98566
98567+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98568+ rc = -EPERM;
98569+ goto put_task_struct;
98570+ }
98571+
98572 mm = mm_access(task, PTRACE_MODE_ATTACH);
98573 if (!mm || IS_ERR(mm)) {
98574 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
98575diff --git a/mm/rmap.c b/mm/rmap.c
98576index 71cd5bd..e259089 100644
98577--- a/mm/rmap.c
98578+++ b/mm/rmap.c
98579@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98580 struct anon_vma *anon_vma = vma->anon_vma;
98581 struct anon_vma_chain *avc;
98582
98583+#ifdef CONFIG_PAX_SEGMEXEC
98584+ struct anon_vma_chain *avc_m = NULL;
98585+#endif
98586+
98587 might_sleep();
98588 if (unlikely(!anon_vma)) {
98589 struct mm_struct *mm = vma->vm_mm;
98590@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98591 if (!avc)
98592 goto out_enomem;
98593
98594+#ifdef CONFIG_PAX_SEGMEXEC
98595+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98596+ if (!avc_m)
98597+ goto out_enomem_free_avc;
98598+#endif
98599+
98600 anon_vma = find_mergeable_anon_vma(vma);
98601 allocated = NULL;
98602 if (!anon_vma) {
98603@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98604 /* page_table_lock to protect against threads */
98605 spin_lock(&mm->page_table_lock);
98606 if (likely(!vma->anon_vma)) {
98607+
98608+#ifdef CONFIG_PAX_SEGMEXEC
98609+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98610+
98611+ if (vma_m) {
98612+ BUG_ON(vma_m->anon_vma);
98613+ vma_m->anon_vma = anon_vma;
98614+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98615+ anon_vma->degree++;
98616+ avc_m = NULL;
98617+ }
98618+#endif
98619+
98620 vma->anon_vma = anon_vma;
98621 anon_vma_chain_link(vma, avc, anon_vma);
98622 /* vma reference or self-parent link for new root */
98623@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98624
98625 if (unlikely(allocated))
98626 put_anon_vma(allocated);
98627+
98628+#ifdef CONFIG_PAX_SEGMEXEC
98629+ if (unlikely(avc_m))
98630+ anon_vma_chain_free(avc_m);
98631+#endif
98632+
98633 if (unlikely(avc))
98634 anon_vma_chain_free(avc);
98635 }
98636 return 0;
98637
98638 out_enomem_free_avc:
98639+
98640+#ifdef CONFIG_PAX_SEGMEXEC
98641+ if (avc_m)
98642+ anon_vma_chain_free(avc_m);
98643+#endif
98644+
98645 anon_vma_chain_free(avc);
98646 out_enomem:
98647 return -ENOMEM;
98648@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98649 * good chance of avoiding scanning the whole hierarchy when it searches where
98650 * page is mapped.
98651 */
98652-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98653+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98654 {
98655 struct anon_vma_chain *avc, *pavc;
98656 struct anon_vma *root = NULL;
98657@@ -296,7 +331,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98658 * the corresponding VMA in the parent process is attached to.
98659 * Returns 0 on success, non-zero on failure.
98660 */
98661-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98662+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98663 {
98664 struct anon_vma_chain *avc;
98665 struct anon_vma *anon_vma;
98666@@ -416,8 +451,10 @@ static void anon_vma_ctor(void *data)
98667 void __init anon_vma_init(void)
98668 {
98669 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98670- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98671- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98672+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98673+ anon_vma_ctor);
98674+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98675+ SLAB_PANIC|SLAB_NO_SANITIZE);
98676 }
98677
98678 /*
98679diff --git a/mm/shmem.c b/mm/shmem.c
98680index 993e6ba..a962ba3 100644
98681--- a/mm/shmem.c
98682+++ b/mm/shmem.c
98683@@ -33,7 +33,7 @@
98684 #include <linux/swap.h>
98685 #include <linux/aio.h>
98686
98687-static struct vfsmount *shm_mnt;
98688+struct vfsmount *shm_mnt;
98689
98690 #ifdef CONFIG_SHMEM
98691 /*
98692@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98693 #define BOGO_DIRENT_SIZE 20
98694
98695 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98696-#define SHORT_SYMLINK_LEN 128
98697+#define SHORT_SYMLINK_LEN 64
98698
98699 /*
98700 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98701@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98702 static int shmem_xattr_validate(const char *name)
98703 {
98704 struct { const char *prefix; size_t len; } arr[] = {
98705+
98706+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98707+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98708+#endif
98709+
98710 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98711 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98712 };
98713@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98714 if (err)
98715 return err;
98716
98717+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98718+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98719+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98720+ return -EOPNOTSUPP;
98721+ if (size > 8)
98722+ return -EINVAL;
98723+ }
98724+#endif
98725+
98726 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98727 }
98728
98729@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98730 int err = -ENOMEM;
98731
98732 /* Round up to L1_CACHE_BYTES to resist false sharing */
98733- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98734- L1_CACHE_BYTES), GFP_KERNEL);
98735+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98736 if (!sbinfo)
98737 return -ENOMEM;
98738
98739diff --git a/mm/slab.c b/mm/slab.c
98740index 65b5dcb..d53d866 100644
98741--- a/mm/slab.c
98742+++ b/mm/slab.c
98743@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98744 if ((x)->max_freeable < i) \
98745 (x)->max_freeable = i; \
98746 } while (0)
98747-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98748-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98749-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98750-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98751+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98752+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98753+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98754+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98755+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98756+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98757 #else
98758 #define STATS_INC_ACTIVE(x) do { } while (0)
98759 #define STATS_DEC_ACTIVE(x) do { } while (0)
98760@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98761 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98762 #define STATS_INC_FREEHIT(x) do { } while (0)
98763 #define STATS_INC_FREEMISS(x) do { } while (0)
98764+#define STATS_INC_SANITIZED(x) do { } while (0)
98765+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98766 #endif
98767
98768 #if DEBUG
98769@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98770 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98771 */
98772 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98773- const struct page *page, void *obj)
98774+ const struct page *page, const void *obj)
98775 {
98776 u32 offset = (obj - page->s_mem);
98777 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98778@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
98779 * structures first. Without this, further allocations will bug.
98780 */
98781 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
98782- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98783+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98784 slab_state = PARTIAL_NODE;
98785
98786 slab_early_init = 0;
98787@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
98788
98789 cachep = find_mergeable(size, align, flags, name, ctor);
98790 if (cachep) {
98791- cachep->refcount++;
98792+ atomic_inc(&cachep->refcount);
98793
98794 /*
98795 * Adjust the object sizes so that we clear
98796@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98797 struct array_cache *ac = cpu_cache_get(cachep);
98798
98799 check_irq_off();
98800+
98801+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98802+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98803+ STATS_INC_NOT_SANITIZED(cachep);
98804+ else {
98805+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98806+
98807+ if (cachep->ctor)
98808+ cachep->ctor(objp);
98809+
98810+ STATS_INC_SANITIZED(cachep);
98811+ }
98812+#endif
98813+
98814 kmemleak_free_recursive(objp, cachep->flags);
98815 objp = cache_free_debugcheck(cachep, objp, caller);
98816
98817@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
98818 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
98819 }
98820
98821-void *__kmalloc_node(size_t size, gfp_t flags, int node)
98822+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
98823 {
98824 return __do_kmalloc_node(size, flags, node, _RET_IP_);
98825 }
98826@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
98827 * @flags: the type of memory to allocate (see kmalloc).
98828 * @caller: function caller for debug tracking of the caller
98829 */
98830-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
98831+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
98832 unsigned long caller)
98833 {
98834 struct kmem_cache *cachep;
98835@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
98836
98837 if (unlikely(ZERO_OR_NULL_PTR(objp)))
98838 return;
98839+ VM_BUG_ON(!virt_addr_valid(objp));
98840 local_irq_save(flags);
98841 kfree_debugcheck(objp);
98842 c = virt_to_cache(objp);
98843@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
98844 }
98845 /* cpu stats */
98846 {
98847- unsigned long allochit = atomic_read(&cachep->allochit);
98848- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98849- unsigned long freehit = atomic_read(&cachep->freehit);
98850- unsigned long freemiss = atomic_read(&cachep->freemiss);
98851+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98852+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98853+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98854+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98855
98856 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98857 allochit, allocmiss, freehit, freemiss);
98858 }
98859+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98860+ {
98861+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
98862+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
98863+
98864+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
98865+ }
98866+#endif
98867 #endif
98868 }
98869
98870@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
98871 static int __init slab_proc_init(void)
98872 {
98873 #ifdef CONFIG_DEBUG_SLAB_LEAK
98874- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98875+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
98876 #endif
98877 return 0;
98878 }
98879 module_init(slab_proc_init);
98880 #endif
98881
98882+bool is_usercopy_object(const void *ptr)
98883+{
98884+ struct page *page;
98885+ struct kmem_cache *cachep;
98886+
98887+ if (ZERO_OR_NULL_PTR(ptr))
98888+ return false;
98889+
98890+ if (!slab_is_available())
98891+ return false;
98892+
98893+ if (!virt_addr_valid(ptr))
98894+ return false;
98895+
98896+ page = virt_to_head_page(ptr);
98897+
98898+ if (!PageSlab(page))
98899+ return false;
98900+
98901+ cachep = page->slab_cache;
98902+ return cachep->flags & SLAB_USERCOPY;
98903+}
98904+
98905+#ifdef CONFIG_PAX_USERCOPY
98906+const char *check_heap_object(const void *ptr, unsigned long n)
98907+{
98908+ struct page *page;
98909+ struct kmem_cache *cachep;
98910+ unsigned int objnr;
98911+ unsigned long offset;
98912+
98913+ if (ZERO_OR_NULL_PTR(ptr))
98914+ return "<null>";
98915+
98916+ if (!virt_addr_valid(ptr))
98917+ return NULL;
98918+
98919+ page = virt_to_head_page(ptr);
98920+
98921+ if (!PageSlab(page))
98922+ return NULL;
98923+
98924+ cachep = page->slab_cache;
98925+ if (!(cachep->flags & SLAB_USERCOPY))
98926+ return cachep->name;
98927+
98928+ objnr = obj_to_index(cachep, page, ptr);
98929+ BUG_ON(objnr >= cachep->num);
98930+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
98931+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
98932+ return NULL;
98933+
98934+ return cachep->name;
98935+}
98936+#endif
98937+
98938 /**
98939 * ksize - get the actual amount of memory allocated for a given object
98940 * @objp: Pointer to the object
98941diff --git a/mm/slab.h b/mm/slab.h
98942index 1cf40054..10ad563 100644
98943--- a/mm/slab.h
98944+++ b/mm/slab.h
98945@@ -22,7 +22,7 @@ struct kmem_cache {
98946 unsigned int align; /* Alignment as calculated */
98947 unsigned long flags; /* Active flags on the slab */
98948 const char *name; /* Slab name for sysfs */
98949- int refcount; /* Use counter */
98950+ atomic_t refcount; /* Use counter */
98951 void (*ctor)(void *); /* Called on object slot creation */
98952 struct list_head list; /* List of all slab caches on the system */
98953 };
98954@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
98955 /* The slab cache that manages slab cache information */
98956 extern struct kmem_cache *kmem_cache;
98957
98958+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98959+#ifdef CONFIG_X86_64
98960+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
98961+#else
98962+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
98963+#endif
98964+enum pax_sanitize_mode {
98965+ PAX_SANITIZE_SLAB_OFF = 0,
98966+ PAX_SANITIZE_SLAB_FAST,
98967+ PAX_SANITIZE_SLAB_FULL,
98968+};
98969+extern enum pax_sanitize_mode pax_sanitize_slab;
98970+#endif
98971+
98972 unsigned long calculate_alignment(unsigned long flags,
98973 unsigned long align, unsigned long size);
98974
98975@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
98976
98977 /* Legal flag mask for kmem_cache_create(), for various configurations */
98978 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
98979- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
98980+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
98981+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
98982
98983 #if defined(CONFIG_DEBUG_SLAB)
98984 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
98985@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
98986 return s;
98987
98988 page = virt_to_head_page(x);
98989+
98990+ BUG_ON(!PageSlab(page));
98991+
98992 cachep = page->slab_cache;
98993 if (slab_equal_or_root(cachep, s))
98994 return cachep;
98995diff --git a/mm/slab_common.c b/mm/slab_common.c
98996index e03dd6f..c475838 100644
98997--- a/mm/slab_common.c
98998+++ b/mm/slab_common.c
98999@@ -25,11 +25,35 @@
99000
99001 #include "slab.h"
99002
99003-enum slab_state slab_state;
99004+enum slab_state slab_state __read_only;
99005 LIST_HEAD(slab_caches);
99006 DEFINE_MUTEX(slab_mutex);
99007 struct kmem_cache *kmem_cache;
99008
99009+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99010+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99011+static int __init pax_sanitize_slab_setup(char *str)
99012+{
99013+ if (!str)
99014+ return 0;
99015+
99016+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99017+ pr_info("PaX slab sanitization: %s\n", "disabled");
99018+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99019+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99020+ pr_info("PaX slab sanitization: %s\n", "fast");
99021+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99022+ } else if (!strcmp(str, "full")) {
99023+ pr_info("PaX slab sanitization: %s\n", "full");
99024+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99025+ } else
99026+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99027+
99028+ return 0;
99029+}
99030+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99031+#endif
99032+
99033 /*
99034 * Set of flags that will prevent slab merging
99035 */
99036@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99037 * Merge control. If this is set then no merging of slab caches will occur.
99038 * (Could be removed. This was introduced to pacify the merge skeptics.)
99039 */
99040-static int slab_nomerge;
99041+static int slab_nomerge = 1;
99042
99043 static int __init setup_slab_nomerge(char *str)
99044 {
99045@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99046 /*
99047 * We may have set a slab to be unmergeable during bootstrap.
99048 */
99049- if (s->refcount < 0)
99050+ if (atomic_read(&s->refcount) < 0)
99051 return 1;
99052
99053 return 0;
99054@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99055 if (err)
99056 goto out_free_cache;
99057
99058- s->refcount = 1;
99059+ atomic_set(&s->refcount, 1);
99060 list_add(&s->list, &slab_caches);
99061 out:
99062 if (err)
99063@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99064 */
99065 flags &= CACHE_CREATE_MASK;
99066
99067+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99068+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99069+ flags |= SLAB_NO_SANITIZE;
99070+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99071+ flags &= ~SLAB_NO_SANITIZE;
99072+#endif
99073+
99074 s = __kmem_cache_alias(name, size, align, flags, ctor);
99075 if (s)
99076 goto out_unlock;
99077@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99078
99079 mutex_lock(&slab_mutex);
99080
99081- s->refcount--;
99082- if (s->refcount)
99083+ if (!atomic_dec_and_test(&s->refcount))
99084 goto out_unlock;
99085
99086 if (memcg_cleanup_cache_params(s) != 0)
99087@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99088 rcu_barrier();
99089
99090 memcg_free_cache_params(s);
99091-#ifdef SLAB_SUPPORTS_SYSFS
99092+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99093 sysfs_slab_remove(s);
99094 #else
99095 slab_kmem_cache_release(s);
99096@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99097 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99098 name, size, err);
99099
99100- s->refcount = -1; /* Exempt from merging for now */
99101+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99102 }
99103
99104 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99105@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99106
99107 create_boot_cache(s, name, size, flags);
99108 list_add(&s->list, &slab_caches);
99109- s->refcount = 1;
99110+ atomic_set(&s->refcount, 1);
99111 return s;
99112 }
99113
99114@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99115 EXPORT_SYMBOL(kmalloc_dma_caches);
99116 #endif
99117
99118+#ifdef CONFIG_PAX_USERCOPY_SLABS
99119+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99120+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99121+#endif
99122+
99123 /*
99124 * Conversion table for small slabs sizes / 8 to the index in the
99125 * kmalloc array. This is necessary for slabs < 192 since we have non power
99126@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99127 return kmalloc_dma_caches[index];
99128
99129 #endif
99130+
99131+#ifdef CONFIG_PAX_USERCOPY_SLABS
99132+ if (unlikely((flags & GFP_USERCOPY)))
99133+ return kmalloc_usercopy_caches[index];
99134+
99135+#endif
99136+
99137 return kmalloc_caches[index];
99138 }
99139
99140@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99141 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99142 if (!kmalloc_caches[i]) {
99143 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99144- 1 << i, flags);
99145+ 1 << i, SLAB_USERCOPY | flags);
99146 }
99147
99148 /*
99149@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99150 * earlier power of two caches
99151 */
99152 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99153- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99154+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99155
99156 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99157- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99158+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99159 }
99160
99161 /* Kmalloc array is now usable */
99162@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99163 }
99164 }
99165 #endif
99166+
99167+#ifdef CONFIG_PAX_USERCOPY_SLABS
99168+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99169+ struct kmem_cache *s = kmalloc_caches[i];
99170+
99171+ if (s) {
99172+ int size = kmalloc_size(i);
99173+ char *n = kasprintf(GFP_NOWAIT,
99174+ "usercopy-kmalloc-%d", size);
99175+
99176+ BUG_ON(!n);
99177+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99178+ size, SLAB_USERCOPY | flags);
99179+ }
99180+ }
99181+#endif
99182+
99183 }
99184 #endif /* !CONFIG_SLOB */
99185
99186@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99187 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99188 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99189 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99190+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99191+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99192+#endif
99193 #endif
99194 seq_putc(m, '\n');
99195 }
99196@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99197 module_init(slab_proc_init);
99198 #endif /* CONFIG_SLABINFO */
99199
99200-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99201+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99202 gfp_t flags)
99203 {
99204 void *ret;
99205diff --git a/mm/slob.c b/mm/slob.c
99206index 96a8620..46b3f12 100644
99207--- a/mm/slob.c
99208+++ b/mm/slob.c
99209@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99210 /*
99211 * Return the size of a slob block.
99212 */
99213-static slobidx_t slob_units(slob_t *s)
99214+static slobidx_t slob_units(const slob_t *s)
99215 {
99216 if (s->units > 0)
99217 return s->units;
99218@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99219 /*
99220 * Return the next free slob block pointer after this one.
99221 */
99222-static slob_t *slob_next(slob_t *s)
99223+static slob_t *slob_next(const slob_t *s)
99224 {
99225 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99226 slobidx_t next;
99227@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99228 /*
99229 * Returns true if s is the last free block in its page.
99230 */
99231-static int slob_last(slob_t *s)
99232+static int slob_last(const slob_t *s)
99233 {
99234 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99235 }
99236
99237-static void *slob_new_pages(gfp_t gfp, int order, int node)
99238+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99239 {
99240- void *page;
99241+ struct page *page;
99242
99243 #ifdef CONFIG_NUMA
99244 if (node != NUMA_NO_NODE)
99245@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99246 if (!page)
99247 return NULL;
99248
99249- return page_address(page);
99250+ __SetPageSlab(page);
99251+ return page;
99252 }
99253
99254-static void slob_free_pages(void *b, int order)
99255+static void slob_free_pages(struct page *sp, int order)
99256 {
99257 if (current->reclaim_state)
99258 current->reclaim_state->reclaimed_slab += 1 << order;
99259- free_pages((unsigned long)b, order);
99260+ __ClearPageSlab(sp);
99261+ page_mapcount_reset(sp);
99262+ sp->private = 0;
99263+ __free_pages(sp, order);
99264 }
99265
99266 /*
99267@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99268
99269 /* Not enough space: must allocate a new page */
99270 if (!b) {
99271- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99272- if (!b)
99273+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99274+ if (!sp)
99275 return NULL;
99276- sp = virt_to_page(b);
99277- __SetPageSlab(sp);
99278+ b = page_address(sp);
99279
99280 spin_lock_irqsave(&slob_lock, flags);
99281 sp->units = SLOB_UNITS(PAGE_SIZE);
99282 sp->freelist = b;
99283+ sp->private = 0;
99284 INIT_LIST_HEAD(&sp->lru);
99285 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99286 set_slob_page_free(sp, slob_list);
99287@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99288 /*
99289 * slob_free: entry point into the slob allocator.
99290 */
99291-static void slob_free(void *block, int size)
99292+static void slob_free(struct kmem_cache *c, void *block, int size)
99293 {
99294 struct page *sp;
99295 slob_t *prev, *next, *b = (slob_t *)block;
99296@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99297 if (slob_page_free(sp))
99298 clear_slob_page_free(sp);
99299 spin_unlock_irqrestore(&slob_lock, flags);
99300- __ClearPageSlab(sp);
99301- page_mapcount_reset(sp);
99302- slob_free_pages(b, 0);
99303+ slob_free_pages(sp, 0);
99304 return;
99305 }
99306
99307+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99308+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99309+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99310+#endif
99311+
99312 if (!slob_page_free(sp)) {
99313 /* This slob page is about to become partially free. Easy! */
99314 sp->units = units;
99315@@ -424,11 +431,10 @@ out:
99316 */
99317
99318 static __always_inline void *
99319-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99320+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99321 {
99322- unsigned int *m;
99323- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99324- void *ret;
99325+ slob_t *m;
99326+ void *ret = NULL;
99327
99328 gfp &= gfp_allowed_mask;
99329
99330@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99331
99332 if (!m)
99333 return NULL;
99334- *m = size;
99335+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99336+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99337+ m[0].units = size;
99338+ m[1].units = align;
99339 ret = (void *)m + align;
99340
99341 trace_kmalloc_node(caller, ret,
99342 size, size + align, gfp, node);
99343 } else {
99344 unsigned int order = get_order(size);
99345+ struct page *page;
99346
99347 if (likely(order))
99348 gfp |= __GFP_COMP;
99349- ret = slob_new_pages(gfp, order, node);
99350+ page = slob_new_pages(gfp, order, node);
99351+ if (page) {
99352+ ret = page_address(page);
99353+ page->private = size;
99354+ }
99355
99356 trace_kmalloc_node(caller, ret,
99357 size, PAGE_SIZE << order, gfp, node);
99358 }
99359
99360- kmemleak_alloc(ret, size, 1, gfp);
99361 return ret;
99362 }
99363
99364-void *__kmalloc(size_t size, gfp_t gfp)
99365+static __always_inline void *
99366+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99367+{
99368+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99369+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99370+
99371+ if (!ZERO_OR_NULL_PTR(ret))
99372+ kmemleak_alloc(ret, size, 1, gfp);
99373+ return ret;
99374+}
99375+
99376+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99377 {
99378 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99379 }
99380@@ -491,34 +515,112 @@ void kfree(const void *block)
99381 return;
99382 kmemleak_free(block);
99383
99384+ VM_BUG_ON(!virt_addr_valid(block));
99385 sp = virt_to_page(block);
99386- if (PageSlab(sp)) {
99387+ VM_BUG_ON(!PageSlab(sp));
99388+ if (!sp->private) {
99389 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99390- unsigned int *m = (unsigned int *)(block - align);
99391- slob_free(m, *m + align);
99392- } else
99393+ slob_t *m = (slob_t *)(block - align);
99394+ slob_free(NULL, m, m[0].units + align);
99395+ } else {
99396+ __ClearPageSlab(sp);
99397+ page_mapcount_reset(sp);
99398+ sp->private = 0;
99399 __free_pages(sp, compound_order(sp));
99400+ }
99401 }
99402 EXPORT_SYMBOL(kfree);
99403
99404+bool is_usercopy_object(const void *ptr)
99405+{
99406+ if (!slab_is_available())
99407+ return false;
99408+
99409+ // PAX: TODO
99410+
99411+ return false;
99412+}
99413+
99414+#ifdef CONFIG_PAX_USERCOPY
99415+const char *check_heap_object(const void *ptr, unsigned long n)
99416+{
99417+ struct page *page;
99418+ const slob_t *free;
99419+ const void *base;
99420+ unsigned long flags;
99421+
99422+ if (ZERO_OR_NULL_PTR(ptr))
99423+ return "<null>";
99424+
99425+ if (!virt_addr_valid(ptr))
99426+ return NULL;
99427+
99428+ page = virt_to_head_page(ptr);
99429+ if (!PageSlab(page))
99430+ return NULL;
99431+
99432+ if (page->private) {
99433+ base = page;
99434+ if (base <= ptr && n <= page->private - (ptr - base))
99435+ return NULL;
99436+ return "<slob>";
99437+ }
99438+
99439+ /* some tricky double walking to find the chunk */
99440+ spin_lock_irqsave(&slob_lock, flags);
99441+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99442+ free = page->freelist;
99443+
99444+ while (!slob_last(free) && (void *)free <= ptr) {
99445+ base = free + slob_units(free);
99446+ free = slob_next(free);
99447+ }
99448+
99449+ while (base < (void *)free) {
99450+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99451+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99452+ int offset;
99453+
99454+ if (ptr < base + align)
99455+ break;
99456+
99457+ offset = ptr - base - align;
99458+ if (offset >= m) {
99459+ base += size;
99460+ continue;
99461+ }
99462+
99463+ if (n > m - offset)
99464+ break;
99465+
99466+ spin_unlock_irqrestore(&slob_lock, flags);
99467+ return NULL;
99468+ }
99469+
99470+ spin_unlock_irqrestore(&slob_lock, flags);
99471+ return "<slob>";
99472+}
99473+#endif
99474+
99475 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99476 size_t ksize(const void *block)
99477 {
99478 struct page *sp;
99479 int align;
99480- unsigned int *m;
99481+ slob_t *m;
99482
99483 BUG_ON(!block);
99484 if (unlikely(block == ZERO_SIZE_PTR))
99485 return 0;
99486
99487 sp = virt_to_page(block);
99488- if (unlikely(!PageSlab(sp)))
99489- return PAGE_SIZE << compound_order(sp);
99490+ VM_BUG_ON(!PageSlab(sp));
99491+ if (sp->private)
99492+ return sp->private;
99493
99494 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99495- m = (unsigned int *)(block - align);
99496- return SLOB_UNITS(*m) * SLOB_UNIT;
99497+ m = (slob_t *)(block - align);
99498+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99499 }
99500 EXPORT_SYMBOL(ksize);
99501
99502@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99503
99504 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99505 {
99506- void *b;
99507+ void *b = NULL;
99508
99509 flags &= gfp_allowed_mask;
99510
99511 lockdep_trace_alloc(flags);
99512
99513+#ifdef CONFIG_PAX_USERCOPY_SLABS
99514+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99515+#else
99516 if (c->size < PAGE_SIZE) {
99517 b = slob_alloc(c->size, flags, c->align, node);
99518 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99519 SLOB_UNITS(c->size) * SLOB_UNIT,
99520 flags, node);
99521 } else {
99522- b = slob_new_pages(flags, get_order(c->size), node);
99523+ struct page *sp;
99524+
99525+ sp = slob_new_pages(flags, get_order(c->size), node);
99526+ if (sp) {
99527+ b = page_address(sp);
99528+ sp->private = c->size;
99529+ }
99530 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99531 PAGE_SIZE << get_order(c->size),
99532 flags, node);
99533 }
99534+#endif
99535
99536 if (b && c->ctor)
99537 c->ctor(b);
99538@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99539 EXPORT_SYMBOL(kmem_cache_alloc);
99540
99541 #ifdef CONFIG_NUMA
99542-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99543+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99544 {
99545 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99546 }
99547@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99548 EXPORT_SYMBOL(kmem_cache_alloc_node);
99549 #endif
99550
99551-static void __kmem_cache_free(void *b, int size)
99552+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99553 {
99554- if (size < PAGE_SIZE)
99555- slob_free(b, size);
99556+ struct page *sp;
99557+
99558+ sp = virt_to_page(b);
99559+ BUG_ON(!PageSlab(sp));
99560+ if (!sp->private)
99561+ slob_free(c, b, size);
99562 else
99563- slob_free_pages(b, get_order(size));
99564+ slob_free_pages(sp, get_order(size));
99565 }
99566
99567 static void kmem_rcu_free(struct rcu_head *head)
99568@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99569 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99570 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99571
99572- __kmem_cache_free(b, slob_rcu->size);
99573+ __kmem_cache_free(NULL, b, slob_rcu->size);
99574 }
99575
99576 void kmem_cache_free(struct kmem_cache *c, void *b)
99577 {
99578+ int size = c->size;
99579+
99580+#ifdef CONFIG_PAX_USERCOPY_SLABS
99581+ if (size + c->align < PAGE_SIZE) {
99582+ size += c->align;
99583+ b -= c->align;
99584+ }
99585+#endif
99586+
99587 kmemleak_free_recursive(b, c->flags);
99588 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99589 struct slob_rcu *slob_rcu;
99590- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99591- slob_rcu->size = c->size;
99592+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99593+ slob_rcu->size = size;
99594 call_rcu(&slob_rcu->head, kmem_rcu_free);
99595 } else {
99596- __kmem_cache_free(b, c->size);
99597+ __kmem_cache_free(c, b, size);
99598 }
99599
99600+#ifdef CONFIG_PAX_USERCOPY_SLABS
99601+ trace_kfree(_RET_IP_, b);
99602+#else
99603 trace_kmem_cache_free(_RET_IP_, b);
99604+#endif
99605+
99606 }
99607 EXPORT_SYMBOL(kmem_cache_free);
99608
99609diff --git a/mm/slub.c b/mm/slub.c
99610index fe376fe..2f5757c 100644
99611--- a/mm/slub.c
99612+++ b/mm/slub.c
99613@@ -197,7 +197,7 @@ struct track {
99614
99615 enum track_item { TRACK_ALLOC, TRACK_FREE };
99616
99617-#ifdef CONFIG_SYSFS
99618+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99619 static int sysfs_slab_add(struct kmem_cache *);
99620 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99621 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99622@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
99623 if (!t->addr)
99624 return;
99625
99626- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99627+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99628 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99629 #ifdef CONFIG_STACKTRACE
99630 {
99631@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99632
99633 slab_free_hook(s, x);
99634
99635+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99636+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99637+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99638+ if (s->ctor)
99639+ s->ctor(x);
99640+ }
99641+#endif
99642+
99643 redo:
99644 /*
99645 * Determine the currently cpus per cpu slab.
99646@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99647 s->inuse = size;
99648
99649 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99650+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99651+ (!(flags & SLAB_NO_SANITIZE)) ||
99652+#endif
99653 s->ctor)) {
99654 /*
99655 * Relocate free pointer after the object if it is not
99656@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
99657
99658 __setup("slub_min_objects=", setup_slub_min_objects);
99659
99660-void *__kmalloc(size_t size, gfp_t flags)
99661+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
99662 {
99663 struct kmem_cache *s;
99664 void *ret;
99665@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
99666 return ptr;
99667 }
99668
99669-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99670+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99671 {
99672 struct kmem_cache *s;
99673 void *ret;
99674@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99675 EXPORT_SYMBOL(__kmalloc_node);
99676 #endif
99677
99678+bool is_usercopy_object(const void *ptr)
99679+{
99680+ struct page *page;
99681+ struct kmem_cache *s;
99682+
99683+ if (ZERO_OR_NULL_PTR(ptr))
99684+ return false;
99685+
99686+ if (!slab_is_available())
99687+ return false;
99688+
99689+ if (!virt_addr_valid(ptr))
99690+ return false;
99691+
99692+ page = virt_to_head_page(ptr);
99693+
99694+ if (!PageSlab(page))
99695+ return false;
99696+
99697+ s = page->slab_cache;
99698+ return s->flags & SLAB_USERCOPY;
99699+}
99700+
99701+#ifdef CONFIG_PAX_USERCOPY
99702+const char *check_heap_object(const void *ptr, unsigned long n)
99703+{
99704+ struct page *page;
99705+ struct kmem_cache *s;
99706+ unsigned long offset;
99707+
99708+ if (ZERO_OR_NULL_PTR(ptr))
99709+ return "<null>";
99710+
99711+ if (!virt_addr_valid(ptr))
99712+ return NULL;
99713+
99714+ page = virt_to_head_page(ptr);
99715+
99716+ if (!PageSlab(page))
99717+ return NULL;
99718+
99719+ s = page->slab_cache;
99720+ if (!(s->flags & SLAB_USERCOPY))
99721+ return s->name;
99722+
99723+ offset = (ptr - page_address(page)) % s->size;
99724+ if (offset <= s->object_size && n <= s->object_size - offset)
99725+ return NULL;
99726+
99727+ return s->name;
99728+}
99729+#endif
99730+
99731 size_t ksize(const void *object)
99732 {
99733 struct page *page;
99734@@ -3336,6 +3400,7 @@ void kfree(const void *x)
99735 if (unlikely(ZERO_OR_NULL_PTR(x)))
99736 return;
99737
99738+ VM_BUG_ON(!virt_addr_valid(x));
99739 page = virt_to_head_page(x);
99740 if (unlikely(!PageSlab(page))) {
99741 BUG_ON(!PageCompound(page));
99742@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99743 int i;
99744 struct kmem_cache *c;
99745
99746- s->refcount++;
99747+ atomic_inc(&s->refcount);
99748
99749 /*
99750 * Adjust the object sizes so that we clear
99751@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99752 }
99753
99754 if (sysfs_slab_alias(s, name)) {
99755- s->refcount--;
99756+ atomic_dec(&s->refcount);
99757 s = NULL;
99758 }
99759 }
99760@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99761 }
99762 #endif
99763
99764-#ifdef CONFIG_SYSFS
99765+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99766 static int count_inuse(struct page *page)
99767 {
99768 return page->inuse;
99769@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99770 len += sprintf(buf + len, "%7ld ", l->count);
99771
99772 if (l->addr)
99773+#ifdef CONFIG_GRKERNSEC_HIDESYM
99774+ len += sprintf(buf + len, "%pS", NULL);
99775+#else
99776 len += sprintf(buf + len, "%pS", (void *)l->addr);
99777+#endif
99778 else
99779 len += sprintf(buf + len, "<not-available>");
99780
99781@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
99782 validate_slab_cache(kmalloc_caches[9]);
99783 }
99784 #else
99785-#ifdef CONFIG_SYSFS
99786+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99787 static void resiliency_test(void) {};
99788 #endif
99789 #endif
99790
99791-#ifdef CONFIG_SYSFS
99792+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99793 enum slab_stat_type {
99794 SL_ALL, /* All slabs */
99795 SL_PARTIAL, /* Only partially allocated slabs */
99796@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99797 {
99798 if (!s->ctor)
99799 return 0;
99800+#ifdef CONFIG_GRKERNSEC_HIDESYM
99801+ return sprintf(buf, "%pS\n", NULL);
99802+#else
99803 return sprintf(buf, "%pS\n", s->ctor);
99804+#endif
99805 }
99806 SLAB_ATTR_RO(ctor);
99807
99808 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99809 {
99810- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99811+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99812 }
99813 SLAB_ATTR_RO(aliases);
99814
99815@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99816 SLAB_ATTR_RO(cache_dma);
99817 #endif
99818
99819+#ifdef CONFIG_PAX_USERCOPY_SLABS
99820+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99821+{
99822+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99823+}
99824+SLAB_ATTR_RO(usercopy);
99825+#endif
99826+
99827+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99828+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99829+{
99830+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99831+}
99832+SLAB_ATTR_RO(sanitize);
99833+#endif
99834+
99835 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99836 {
99837 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
99838@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
99839 * as well as cause other issues like converting a mergeable
99840 * cache into an umergeable one.
99841 */
99842- if (s->refcount > 1)
99843+ if (atomic_read(&s->refcount) > 1)
99844 return -EINVAL;
99845
99846 s->flags &= ~SLAB_TRACE;
99847@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
99848 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
99849 size_t length)
99850 {
99851- if (s->refcount > 1)
99852+ if (atomic_read(&s->refcount) > 1)
99853 return -EINVAL;
99854
99855 s->flags &= ~SLAB_FAILSLAB;
99856@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
99857 #ifdef CONFIG_ZONE_DMA
99858 &cache_dma_attr.attr,
99859 #endif
99860+#ifdef CONFIG_PAX_USERCOPY_SLABS
99861+ &usercopy_attr.attr,
99862+#endif
99863+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99864+ &sanitize_attr.attr,
99865+#endif
99866 #ifdef CONFIG_NUMA
99867 &remote_node_defrag_ratio_attr.attr,
99868 #endif
99869@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
99870 return name;
99871 }
99872
99873+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99874 static int sysfs_slab_add(struct kmem_cache *s)
99875 {
99876 int err;
99877@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
99878 kobject_del(&s->kobj);
99879 kobject_put(&s->kobj);
99880 }
99881+#endif
99882
99883 /*
99884 * Need to buffer aliases during bootup until sysfs becomes
99885@@ -5161,6 +5258,7 @@ struct saved_alias {
99886
99887 static struct saved_alias *alias_list;
99888
99889+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99890 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99891 {
99892 struct saved_alias *al;
99893@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99894 alias_list = al;
99895 return 0;
99896 }
99897+#endif
99898
99899 static int __init slab_sysfs_init(void)
99900 {
99901diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
99902index 4cba9c2..b4f9fcc 100644
99903--- a/mm/sparse-vmemmap.c
99904+++ b/mm/sparse-vmemmap.c
99905@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
99906 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99907 if (!p)
99908 return NULL;
99909- pud_populate(&init_mm, pud, p);
99910+ pud_populate_kernel(&init_mm, pud, p);
99911 }
99912 return pud;
99913 }
99914@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
99915 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
99916 if (!p)
99917 return NULL;
99918- pgd_populate(&init_mm, pgd, p);
99919+ pgd_populate_kernel(&init_mm, pgd, p);
99920 }
99921 return pgd;
99922 }
99923diff --git a/mm/sparse.c b/mm/sparse.c
99924index d1b48b6..6e8590e 100644
99925--- a/mm/sparse.c
99926+++ b/mm/sparse.c
99927@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
99928
99929 for (i = 0; i < PAGES_PER_SECTION; i++) {
99930 if (PageHWPoison(&memmap[i])) {
99931- atomic_long_sub(1, &num_poisoned_pages);
99932+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
99933 ClearPageHWPoison(&memmap[i]);
99934 }
99935 }
99936diff --git a/mm/swap.c b/mm/swap.c
99937index 8a12b33..7068e78 100644
99938--- a/mm/swap.c
99939+++ b/mm/swap.c
99940@@ -31,6 +31,7 @@
99941 #include <linux/memcontrol.h>
99942 #include <linux/gfp.h>
99943 #include <linux/uio.h>
99944+#include <linux/hugetlb.h>
99945
99946 #include "internal.h"
99947
99948@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
99949
99950 __page_cache_release(page);
99951 dtor = get_compound_page_dtor(page);
99952+ if (!PageHuge(page))
99953+ BUG_ON(dtor != free_compound_page);
99954 (*dtor)(page);
99955 }
99956
99957diff --git a/mm/swapfile.c b/mm/swapfile.c
99958index 63f55cc..31874e6 100644
99959--- a/mm/swapfile.c
99960+++ b/mm/swapfile.c
99961@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
99962
99963 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
99964 /* Activity counter to indicate that a swapon or swapoff has occurred */
99965-static atomic_t proc_poll_event = ATOMIC_INIT(0);
99966+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
99967
99968 static inline unsigned char swap_count(unsigned char ent)
99969 {
99970@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
99971 spin_unlock(&swap_lock);
99972
99973 err = 0;
99974- atomic_inc(&proc_poll_event);
99975+ atomic_inc_unchecked(&proc_poll_event);
99976 wake_up_interruptible(&proc_poll_wait);
99977
99978 out_dput:
99979@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
99980
99981 poll_wait(file, &proc_poll_wait, wait);
99982
99983- if (seq->poll_event != atomic_read(&proc_poll_event)) {
99984- seq->poll_event = atomic_read(&proc_poll_event);
99985+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
99986+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
99987 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
99988 }
99989
99990@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
99991 return ret;
99992
99993 seq = file->private_data;
99994- seq->poll_event = atomic_read(&proc_poll_event);
99995+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
99996 return 0;
99997 }
99998
99999@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100000 (frontswap_map) ? "FS" : "");
100001
100002 mutex_unlock(&swapon_mutex);
100003- atomic_inc(&proc_poll_event);
100004+ atomic_inc_unchecked(&proc_poll_event);
100005 wake_up_interruptible(&proc_poll_wait);
100006
100007 if (S_ISREG(inode->i_mode))
100008diff --git a/mm/util.c b/mm/util.c
100009index fec39d4..3e60325 100644
100010--- a/mm/util.c
100011+++ b/mm/util.c
100012@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100013 void arch_pick_mmap_layout(struct mm_struct *mm)
100014 {
100015 mm->mmap_base = TASK_UNMAPPED_BASE;
100016+
100017+#ifdef CONFIG_PAX_RANDMMAP
100018+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100019+ mm->mmap_base += mm->delta_mmap;
100020+#endif
100021+
100022 mm->get_unmapped_area = arch_get_unmapped_area;
100023 }
100024 #endif
100025@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100026 if (!mm->arg_end)
100027 goto out_mm; /* Shh! No looking before we're done */
100028
100029+ if (gr_acl_handle_procpidmem(task))
100030+ goto out_mm;
100031+
100032 len = mm->arg_end - mm->arg_start;
100033
100034 if (len > buflen)
100035diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100036index 39c3388..7d976d4 100644
100037--- a/mm/vmalloc.c
100038+++ b/mm/vmalloc.c
100039@@ -39,20 +39,65 @@ struct vfree_deferred {
100040 struct work_struct wq;
100041 };
100042 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100043+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100044+
100045+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100046+struct stack_deferred_llist {
100047+ struct llist_head list;
100048+ void *stack;
100049+ void *lowmem_stack;
100050+};
100051+
100052+struct stack_deferred {
100053+ struct stack_deferred_llist list;
100054+ struct work_struct wq;
100055+};
100056+
100057+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100058+#endif
100059
100060 static void __vunmap(const void *, int);
100061
100062-static void free_work(struct work_struct *w)
100063+static void vfree_work(struct work_struct *w)
100064+{
100065+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100066+ struct llist_node *llnode = llist_del_all(&p->list);
100067+ while (llnode) {
100068+ void *x = llnode;
100069+ llnode = llist_next(llnode);
100070+ __vunmap(x, 1);
100071+ }
100072+}
100073+
100074+static void vunmap_work(struct work_struct *w)
100075 {
100076 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100077 struct llist_node *llnode = llist_del_all(&p->list);
100078 while (llnode) {
100079 void *p = llnode;
100080 llnode = llist_next(llnode);
100081- __vunmap(p, 1);
100082+ __vunmap(p, 0);
100083 }
100084 }
100085
100086+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100087+static void unmap_work(struct work_struct *w)
100088+{
100089+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100090+ struct llist_node *llnode = llist_del_all(&p->list.list);
100091+ while (llnode) {
100092+ struct stack_deferred_llist *x =
100093+ llist_entry((struct llist_head *)llnode,
100094+ struct stack_deferred_llist, list);
100095+ void *stack = ACCESS_ONCE(x->stack);
100096+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100097+ llnode = llist_next(llnode);
100098+ __vunmap(stack, 0);
100099+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100100+ }
100101+}
100102+#endif
100103+
100104 /*** Page table manipulation functions ***/
100105
100106 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100107@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100108
100109 pte = pte_offset_kernel(pmd, addr);
100110 do {
100111- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100112- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100113+
100114+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100115+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100116+ BUG_ON(!pte_exec(*pte));
100117+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100118+ continue;
100119+ }
100120+#endif
100121+
100122+ {
100123+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100124+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100125+ }
100126 } while (pte++, addr += PAGE_SIZE, addr != end);
100127 }
100128
100129@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100130 pte = pte_alloc_kernel(pmd, addr);
100131 if (!pte)
100132 return -ENOMEM;
100133+
100134+ pax_open_kernel();
100135 do {
100136 struct page *page = pages[*nr];
100137
100138- if (WARN_ON(!pte_none(*pte)))
100139+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100140+ if (pgprot_val(prot) & _PAGE_NX)
100141+#endif
100142+
100143+ if (!pte_none(*pte)) {
100144+ pax_close_kernel();
100145+ WARN_ON(1);
100146 return -EBUSY;
100147- if (WARN_ON(!page))
100148+ }
100149+ if (!page) {
100150+ pax_close_kernel();
100151+ WARN_ON(1);
100152 return -ENOMEM;
100153+ }
100154 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100155 (*nr)++;
100156 } while (pte++, addr += PAGE_SIZE, addr != end);
100157+ pax_close_kernel();
100158 return 0;
100159 }
100160
100161@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100162 pmd_t *pmd;
100163 unsigned long next;
100164
100165- pmd = pmd_alloc(&init_mm, pud, addr);
100166+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100167 if (!pmd)
100168 return -ENOMEM;
100169 do {
100170@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100171 pud_t *pud;
100172 unsigned long next;
100173
100174- pud = pud_alloc(&init_mm, pgd, addr);
100175+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100176 if (!pud)
100177 return -ENOMEM;
100178 do {
100179@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
100180 if (addr >= MODULES_VADDR && addr < MODULES_END)
100181 return 1;
100182 #endif
100183+
100184+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100185+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100186+ return 1;
100187+#endif
100188+
100189 return is_vmalloc_addr(x);
100190 }
100191
100192@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100193
100194 if (!pgd_none(*pgd)) {
100195 pud_t *pud = pud_offset(pgd, addr);
100196+#ifdef CONFIG_X86
100197+ if (!pud_large(*pud))
100198+#endif
100199 if (!pud_none(*pud)) {
100200 pmd_t *pmd = pmd_offset(pud, addr);
100201+#ifdef CONFIG_X86
100202+ if (!pmd_large(*pmd))
100203+#endif
100204 if (!pmd_none(*pmd)) {
100205 pte_t *ptep, pte;
100206
100207@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
100208 * Allocate a region of KVA of the specified size and alignment, within the
100209 * vstart and vend.
100210 */
100211-static struct vmap_area *alloc_vmap_area(unsigned long size,
100212+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100213 unsigned long align,
100214 unsigned long vstart, unsigned long vend,
100215 int node, gfp_t gfp_mask)
100216@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
100217 for_each_possible_cpu(i) {
100218 struct vmap_block_queue *vbq;
100219 struct vfree_deferred *p;
100220+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100221+ struct stack_deferred *p2;
100222+#endif
100223
100224 vbq = &per_cpu(vmap_block_queue, i);
100225 spin_lock_init(&vbq->lock);
100226 INIT_LIST_HEAD(&vbq->free);
100227+
100228 p = &per_cpu(vfree_deferred, i);
100229 init_llist_head(&p->list);
100230- INIT_WORK(&p->wq, free_work);
100231+ INIT_WORK(&p->wq, vfree_work);
100232+
100233+ p = &per_cpu(vunmap_deferred, i);
100234+ init_llist_head(&p->list);
100235+ INIT_WORK(&p->wq, vunmap_work);
100236+
100237+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100238+ p2 = &per_cpu(stack_deferred, i);
100239+ init_llist_head(&p2->list.list);
100240+ INIT_WORK(&p2->wq, unmap_work);
100241+#endif
100242 }
100243
100244 /* Import existing vmlist entries. */
100245@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100246 struct vm_struct *area;
100247
100248 BUG_ON(in_interrupt());
100249+
100250+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100251+ if (flags & VM_KERNEXEC) {
100252+ if (start != VMALLOC_START || end != VMALLOC_END)
100253+ return NULL;
100254+ start = (unsigned long)MODULES_EXEC_VADDR;
100255+ end = (unsigned long)MODULES_EXEC_END;
100256+ }
100257+#endif
100258+
100259 if (flags & VM_IOREMAP)
100260 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100261
100262@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
100263 */
100264 void vunmap(const void *addr)
100265 {
100266- BUG_ON(in_interrupt());
100267- might_sleep();
100268- if (addr)
100269+ if (!addr)
100270+ return;
100271+
100272+ if (unlikely(in_interrupt())) {
100273+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
100274+ if (llist_add((struct llist_node *)addr, &p->list))
100275+ schedule_work(&p->wq);
100276+ } else {
100277+ might_sleep();
100278 __vunmap(addr, 0);
100279+ }
100280 }
100281 EXPORT_SYMBOL(vunmap);
100282
100283+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100284+void unmap_process_stacks(struct task_struct *task)
100285+{
100286+ if (unlikely(in_interrupt())) {
100287+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100288+ struct stack_deferred_llist *list = task->stack;
100289+ list->stack = task->stack;
100290+ list->lowmem_stack = task->lowmem_stack;
100291+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100292+ schedule_work(&p->wq);
100293+ } else {
100294+ __vunmap(task->stack, 0);
100295+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100296+ }
100297+}
100298+#endif
100299+
100300 /**
100301 * vmap - map an array of pages into virtually contiguous space
100302 * @pages: array of page pointers
100303@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
100304 if (count > totalram_pages)
100305 return NULL;
100306
100307+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100308+ if (!(pgprot_val(prot) & _PAGE_NX))
100309+ flags |= VM_KERNEXEC;
100310+#endif
100311+
100312 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100313 __builtin_return_address(0));
100314 if (!area)
100315@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100316 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100317 goto fail;
100318
100319+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100320+ if (!(pgprot_val(prot) & _PAGE_NX))
100321+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100322+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100323+ else
100324+#endif
100325+
100326 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100327 start, end, node, gfp_mask, caller);
100328 if (!area)
100329@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
100330 * For tight control over page level allocator and protection flags
100331 * use __vmalloc() instead.
100332 */
100333-
100334 void *vmalloc_exec(unsigned long size)
100335 {
100336- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100337+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100338 NUMA_NO_NODE, __builtin_return_address(0));
100339 }
100340
100341@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100342 {
100343 struct vm_struct *area;
100344
100345+ BUG_ON(vma->vm_mirror);
100346+
100347 size = PAGE_ALIGN(size);
100348
100349 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100350@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
100351 v->addr, v->addr + v->size, v->size);
100352
100353 if (v->caller)
100354+#ifdef CONFIG_GRKERNSEC_HIDESYM
100355+ seq_printf(m, " %pK", v->caller);
100356+#else
100357 seq_printf(m, " %pS", v->caller);
100358+#endif
100359
100360 if (v->nr_pages)
100361 seq_printf(m, " pages=%d", v->nr_pages);
100362diff --git a/mm/vmstat.c b/mm/vmstat.c
100363index cdac773..7dd324e 100644
100364--- a/mm/vmstat.c
100365+++ b/mm/vmstat.c
100366@@ -24,6 +24,7 @@
100367 #include <linux/mm_inline.h>
100368 #include <linux/page_ext.h>
100369 #include <linux/page_owner.h>
100370+#include <linux/grsecurity.h>
100371
100372 #include "internal.h"
100373
100374@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100375 *
100376 * vm_stat contains the global counters
100377 */
100378-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100379+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100380 EXPORT_SYMBOL(vm_stat);
100381
100382 #ifdef CONFIG_SMP
100383@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100384
100385 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100386 if (diff[i]) {
100387- atomic_long_add(diff[i], &vm_stat[i]);
100388+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100389 changes++;
100390 }
100391 return changes;
100392@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100393 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100394 if (v) {
100395
100396- atomic_long_add(v, &zone->vm_stat[i]);
100397+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100398 global_diff[i] += v;
100399 #ifdef CONFIG_NUMA
100400 /* 3 seconds idle till flush */
100401@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100402
100403 v = p->vm_stat_diff[i];
100404 p->vm_stat_diff[i] = 0;
100405- atomic_long_add(v, &zone->vm_stat[i]);
100406+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100407 global_diff[i] += v;
100408 }
100409 }
100410@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100411 if (pset->vm_stat_diff[i]) {
100412 int v = pset->vm_stat_diff[i];
100413 pset->vm_stat_diff[i] = 0;
100414- atomic_long_add(v, &zone->vm_stat[i]);
100415- atomic_long_add(v, &vm_stat[i]);
100416+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100417+ atomic_long_add_unchecked(v, &vm_stat[i]);
100418 }
100419 }
100420 #endif
100421@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100422 stat_items_size += sizeof(struct vm_event_state);
100423 #endif
100424
100425- v = kmalloc(stat_items_size, GFP_KERNEL);
100426+ v = kzalloc(stat_items_size, GFP_KERNEL);
100427 m->private = v;
100428 if (!v)
100429 return ERR_PTR(-ENOMEM);
100430+
100431+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100432+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100433+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100434+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100435+ && !in_group_p(grsec_proc_gid)
100436+#endif
100437+ )
100438+ return (unsigned long *)m->private + *pos;
100439+#endif
100440+#endif
100441+
100442 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100443 v[i] = global_page_state(i);
100444 v += NR_VM_ZONE_STAT_ITEMS;
100445@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100446 cpu_notifier_register_done();
100447 #endif
100448 #ifdef CONFIG_PROC_FS
100449- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100450- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100451- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100452- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100453+ {
100454+ mode_t gr_mode = S_IRUGO;
100455+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100456+ gr_mode = S_IRUSR;
100457+#endif
100458+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100459+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100460+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100461+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100462+ }
100463 #endif
100464 return 0;
100465 }
100466diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100467index 64c6bed..b79a5de 100644
100468--- a/net/8021q/vlan.c
100469+++ b/net/8021q/vlan.c
100470@@ -481,7 +481,7 @@ out:
100471 return NOTIFY_DONE;
100472 }
100473
100474-static struct notifier_block vlan_notifier_block __read_mostly = {
100475+static struct notifier_block vlan_notifier_block = {
100476 .notifier_call = vlan_device_event,
100477 };
100478
100479@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100480 err = -EPERM;
100481 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100482 break;
100483- if ((args.u.name_type >= 0) &&
100484- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100485+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100486 struct vlan_net *vn;
100487
100488 vn = net_generic(net, vlan_net_id);
100489diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100490index 8ac8a5c..991defc 100644
100491--- a/net/8021q/vlan_netlink.c
100492+++ b/net/8021q/vlan_netlink.c
100493@@ -238,7 +238,7 @@ nla_put_failure:
100494 return -EMSGSIZE;
100495 }
100496
100497-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100498+struct rtnl_link_ops vlan_link_ops = {
100499 .kind = "vlan",
100500 .maxtype = IFLA_VLAN_MAX,
100501 .policy = vlan_policy,
100502diff --git a/net/9p/client.c b/net/9p/client.c
100503index e86a9bea..e91f70e 100644
100504--- a/net/9p/client.c
100505+++ b/net/9p/client.c
100506@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100507 len - inline_len);
100508 } else {
100509 err = copy_from_user(ename + inline_len,
100510- uidata, len - inline_len);
100511+ (char __force_user *)uidata, len - inline_len);
100512 if (err) {
100513 err = -EFAULT;
100514 goto out_err;
100515@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100516 kernel_buf = 1;
100517 indata = data;
100518 } else
100519- indata = (__force char *)udata;
100520+ indata = (__force_kernel char *)udata;
100521 /*
100522 * response header len is 11
100523 * PDU Header(7) + IO Size (4)
100524@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100525 kernel_buf = 1;
100526 odata = data;
100527 } else
100528- odata = (char *)udata;
100529+ odata = (char __force_kernel *)udata;
100530 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100531 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100532 fid->fid, offset, rsize);
100533diff --git a/net/9p/mod.c b/net/9p/mod.c
100534index 6ab36ae..6f1841b 100644
100535--- a/net/9p/mod.c
100536+++ b/net/9p/mod.c
100537@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100538 void v9fs_register_trans(struct p9_trans_module *m)
100539 {
100540 spin_lock(&v9fs_trans_lock);
100541- list_add_tail(&m->list, &v9fs_trans_list);
100542+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100543 spin_unlock(&v9fs_trans_lock);
100544 }
100545 EXPORT_SYMBOL(v9fs_register_trans);
100546@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100547 void v9fs_unregister_trans(struct p9_trans_module *m)
100548 {
100549 spin_lock(&v9fs_trans_lock);
100550- list_del_init(&m->list);
100551+ pax_list_del_init((struct list_head *)&m->list);
100552 spin_unlock(&v9fs_trans_lock);
100553 }
100554 EXPORT_SYMBOL(v9fs_unregister_trans);
100555diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100556index 80d08f6..de63fd1 100644
100557--- a/net/9p/trans_fd.c
100558+++ b/net/9p/trans_fd.c
100559@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100560 oldfs = get_fs();
100561 set_fs(get_ds());
100562 /* The cast to a user pointer is valid due to the set_fs() */
100563- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100564+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100565 set_fs(oldfs);
100566
100567 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100568diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100569index af46bc4..f9adfcd 100644
100570--- a/net/appletalk/atalk_proc.c
100571+++ b/net/appletalk/atalk_proc.c
100572@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100573 struct proc_dir_entry *p;
100574 int rc = -ENOMEM;
100575
100576- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100577+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100578 if (!atalk_proc_dir)
100579 goto out;
100580
100581diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100582index 876fbe8..8bbea9f 100644
100583--- a/net/atm/atm_misc.c
100584+++ b/net/atm/atm_misc.c
100585@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100586 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100587 return 1;
100588 atm_return(vcc, truesize);
100589- atomic_inc(&vcc->stats->rx_drop);
100590+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100591 return 0;
100592 }
100593 EXPORT_SYMBOL(atm_charge);
100594@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100595 }
100596 }
100597 atm_return(vcc, guess);
100598- atomic_inc(&vcc->stats->rx_drop);
100599+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100600 return NULL;
100601 }
100602 EXPORT_SYMBOL(atm_alloc_charge);
100603@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100604
100605 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100606 {
100607-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100608+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100609 __SONET_ITEMS
100610 #undef __HANDLE_ITEM
100611 }
100612@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100613
100614 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100615 {
100616-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100617+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100618 __SONET_ITEMS
100619 #undef __HANDLE_ITEM
100620 }
100621diff --git a/net/atm/lec.c b/net/atm/lec.c
100622index 4b98f89..5a2f6cb 100644
100623--- a/net/atm/lec.c
100624+++ b/net/atm/lec.c
100625@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100626 }
100627
100628 static struct lane2_ops lane2_ops = {
100629- lane2_resolve, /* resolve, spec 3.1.3 */
100630- lane2_associate_req, /* associate_req, spec 3.1.4 */
100631- NULL /* associate indicator, spec 3.1.5 */
100632+ .resolve = lane2_resolve,
100633+ .associate_req = lane2_associate_req,
100634+ .associate_indicator = NULL
100635 };
100636
100637 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100638diff --git a/net/atm/lec.h b/net/atm/lec.h
100639index 4149db1..f2ab682 100644
100640--- a/net/atm/lec.h
100641+++ b/net/atm/lec.h
100642@@ -48,7 +48,7 @@ struct lane2_ops {
100643 const u8 *tlvs, u32 sizeoftlvs);
100644 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100645 const u8 *tlvs, u32 sizeoftlvs);
100646-};
100647+} __no_const;
100648
100649 /*
100650 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100651diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100652index d1b2d9a..d549f7f 100644
100653--- a/net/atm/mpoa_caches.c
100654+++ b/net/atm/mpoa_caches.c
100655@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100656
100657
100658 static struct in_cache_ops ingress_ops = {
100659- in_cache_add_entry, /* add_entry */
100660- in_cache_get, /* get */
100661- in_cache_get_with_mask, /* get_with_mask */
100662- in_cache_get_by_vcc, /* get_by_vcc */
100663- in_cache_put, /* put */
100664- in_cache_remove_entry, /* remove_entry */
100665- cache_hit, /* cache_hit */
100666- clear_count_and_expired, /* clear_count */
100667- check_resolving_entries, /* check_resolving */
100668- refresh_entries, /* refresh */
100669- in_destroy_cache /* destroy_cache */
100670+ .add_entry = in_cache_add_entry,
100671+ .get = in_cache_get,
100672+ .get_with_mask = in_cache_get_with_mask,
100673+ .get_by_vcc = in_cache_get_by_vcc,
100674+ .put = in_cache_put,
100675+ .remove_entry = in_cache_remove_entry,
100676+ .cache_hit = cache_hit,
100677+ .clear_count = clear_count_and_expired,
100678+ .check_resolving = check_resolving_entries,
100679+ .refresh = refresh_entries,
100680+ .destroy_cache = in_destroy_cache
100681 };
100682
100683 static struct eg_cache_ops egress_ops = {
100684- eg_cache_add_entry, /* add_entry */
100685- eg_cache_get_by_cache_id, /* get_by_cache_id */
100686- eg_cache_get_by_tag, /* get_by_tag */
100687- eg_cache_get_by_vcc, /* get_by_vcc */
100688- eg_cache_get_by_src_ip, /* get_by_src_ip */
100689- eg_cache_put, /* put */
100690- eg_cache_remove_entry, /* remove_entry */
100691- update_eg_cache_entry, /* update */
100692- clear_expired, /* clear_expired */
100693- eg_destroy_cache /* destroy_cache */
100694+ .add_entry = eg_cache_add_entry,
100695+ .get_by_cache_id = eg_cache_get_by_cache_id,
100696+ .get_by_tag = eg_cache_get_by_tag,
100697+ .get_by_vcc = eg_cache_get_by_vcc,
100698+ .get_by_src_ip = eg_cache_get_by_src_ip,
100699+ .put = eg_cache_put,
100700+ .remove_entry = eg_cache_remove_entry,
100701+ .update = update_eg_cache_entry,
100702+ .clear_expired = clear_expired,
100703+ .destroy_cache = eg_destroy_cache
100704 };
100705
100706
100707diff --git a/net/atm/proc.c b/net/atm/proc.c
100708index bbb6461..cf04016 100644
100709--- a/net/atm/proc.c
100710+++ b/net/atm/proc.c
100711@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100712 const struct k_atm_aal_stats *stats)
100713 {
100714 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100715- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100716- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100717- atomic_read(&stats->rx_drop));
100718+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
100719+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
100720+ atomic_read_unchecked(&stats->rx_drop));
100721 }
100722
100723 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100724diff --git a/net/atm/resources.c b/net/atm/resources.c
100725index 0447d5d..3cf4728 100644
100726--- a/net/atm/resources.c
100727+++ b/net/atm/resources.c
100728@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100729 static void copy_aal_stats(struct k_atm_aal_stats *from,
100730 struct atm_aal_stats *to)
100731 {
100732-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100733+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100734 __AAL_STAT_ITEMS
100735 #undef __HANDLE_ITEM
100736 }
100737@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100738 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100739 struct atm_aal_stats *to)
100740 {
100741-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100742+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100743 __AAL_STAT_ITEMS
100744 #undef __HANDLE_ITEM
100745 }
100746diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100747index 919a5ce..cc6b444 100644
100748--- a/net/ax25/sysctl_net_ax25.c
100749+++ b/net/ax25/sysctl_net_ax25.c
100750@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100751 {
100752 char path[sizeof("net/ax25/") + IFNAMSIZ];
100753 int k;
100754- struct ctl_table *table;
100755+ ctl_table_no_const *table;
100756
100757 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100758 if (!table)
100759diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100760index 1e80539..676c37a 100644
100761--- a/net/batman-adv/bat_iv_ogm.c
100762+++ b/net/batman-adv/bat_iv_ogm.c
100763@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100764
100765 /* randomize initial seqno to avoid collision */
100766 get_random_bytes(&random_seqno, sizeof(random_seqno));
100767- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100768+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100769
100770 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100771 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100772@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100773 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100774
100775 /* change sequence number to network order */
100776- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100777+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100778 batadv_ogm_packet->seqno = htonl(seqno);
100779- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100780+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100781
100782 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100783
100784@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100785 return;
100786
100787 /* could be changed by schedule_own_packet() */
100788- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100789+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100790
100791 if (ogm_packet->flags & BATADV_DIRECTLINK)
100792 has_directlink_flag = true;
100793diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100794index 00f9e14..e1c7203 100644
100795--- a/net/batman-adv/fragmentation.c
100796+++ b/net/batman-adv/fragmentation.c
100797@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100798 frag_header.packet_type = BATADV_UNICAST_FRAG;
100799 frag_header.version = BATADV_COMPAT_VERSION;
100800 frag_header.ttl = BATADV_TTL;
100801- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100802+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100803 frag_header.reserved = 0;
100804 frag_header.no = 0;
100805 frag_header.total_size = htons(skb->len);
100806diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100807index 5467955..75ad4e3 100644
100808--- a/net/batman-adv/soft-interface.c
100809+++ b/net/batman-adv/soft-interface.c
100810@@ -296,7 +296,7 @@ send:
100811 primary_if->net_dev->dev_addr);
100812
100813 /* set broadcast sequence number */
100814- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100815+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100816 bcast_packet->seqno = htonl(seqno);
100817
100818 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100819@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100820 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100821
100822 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100823- atomic_set(&bat_priv->bcast_seqno, 1);
100824+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100825 atomic_set(&bat_priv->tt.vn, 0);
100826 atomic_set(&bat_priv->tt.local_changes, 0);
100827 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100828@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100829
100830 /* randomize initial seqno to avoid collision */
100831 get_random_bytes(&random_seqno, sizeof(random_seqno));
100832- atomic_set(&bat_priv->frag_seqno, random_seqno);
100833+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100834
100835 bat_priv->primary_if = NULL;
100836 bat_priv->num_ifaces = 0;
100837@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
100838 return 0;
100839 }
100840
100841-struct rtnl_link_ops batadv_link_ops __read_mostly = {
100842+struct rtnl_link_ops batadv_link_ops = {
100843 .kind = "batadv",
100844 .priv_size = sizeof(struct batadv_priv),
100845 .setup = batadv_softif_init_early,
100846diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
100847index 8854c05..ee5d5497 100644
100848--- a/net/batman-adv/types.h
100849+++ b/net/batman-adv/types.h
100850@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
100851 struct batadv_hard_iface_bat_iv {
100852 unsigned char *ogm_buff;
100853 int ogm_buff_len;
100854- atomic_t ogm_seqno;
100855+ atomic_unchecked_t ogm_seqno;
100856 };
100857
100858 /**
100859@@ -768,7 +768,7 @@ struct batadv_priv {
100860 atomic_t bonding;
100861 atomic_t fragmentation;
100862 atomic_t packet_size_max;
100863- atomic_t frag_seqno;
100864+ atomic_unchecked_t frag_seqno;
100865 #ifdef CONFIG_BATMAN_ADV_BLA
100866 atomic_t bridge_loop_avoidance;
100867 #endif
100868@@ -787,7 +787,7 @@ struct batadv_priv {
100869 #endif
100870 uint32_t isolation_mark;
100871 uint32_t isolation_mark_mask;
100872- atomic_t bcast_seqno;
100873+ atomic_unchecked_t bcast_seqno;
100874 atomic_t bcast_queue_left;
100875 atomic_t batman_queue_left;
100876 char num_ifaces;
100877diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
100878index 2c245fd..dccf543 100644
100879--- a/net/bluetooth/hci_sock.c
100880+++ b/net/bluetooth/hci_sock.c
100881@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
100882 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
100883 }
100884
100885- len = min_t(unsigned int, len, sizeof(uf));
100886+ len = min((size_t)len, sizeof(uf));
100887 if (copy_from_user(&uf, optval, len)) {
100888 err = -EFAULT;
100889 break;
100890diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
100891index d04dc00..d25d576 100644
100892--- a/net/bluetooth/l2cap_core.c
100893+++ b/net/bluetooth/l2cap_core.c
100894@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
100895 break;
100896
100897 case L2CAP_CONF_RFC:
100898- if (olen == sizeof(rfc))
100899- memcpy(&rfc, (void *)val, olen);
100900+ if (olen != sizeof(rfc))
100901+ break;
100902+
100903+ memcpy(&rfc, (void *)val, olen);
100904
100905 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
100906 rfc.mode != chan->mode)
100907diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
100908index f65caf4..c07110c 100644
100909--- a/net/bluetooth/l2cap_sock.c
100910+++ b/net/bluetooth/l2cap_sock.c
100911@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100912 struct sock *sk = sock->sk;
100913 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
100914 struct l2cap_options opts;
100915- int len, err = 0;
100916+ int err = 0;
100917+ size_t len = optlen;
100918 u32 opt;
100919
100920 BT_DBG("sk %p", sk);
100921@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100922 opts.max_tx = chan->max_tx;
100923 opts.txwin_size = chan->tx_win;
100924
100925- len = min_t(unsigned int, sizeof(opts), optlen);
100926+ len = min(sizeof(opts), len);
100927 if (copy_from_user((char *) &opts, optval, len)) {
100928 err = -EFAULT;
100929 break;
100930@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100931 struct bt_security sec;
100932 struct bt_power pwr;
100933 struct l2cap_conn *conn;
100934- int len, err = 0;
100935+ int err = 0;
100936+ size_t len = optlen;
100937 u32 opt;
100938
100939 BT_DBG("sk %p", sk);
100940@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100941
100942 sec.level = BT_SECURITY_LOW;
100943
100944- len = min_t(unsigned int, sizeof(sec), optlen);
100945+ len = min(sizeof(sec), len);
100946 if (copy_from_user((char *) &sec, optval, len)) {
100947 err = -EFAULT;
100948 break;
100949@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100950
100951 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
100952
100953- len = min_t(unsigned int, sizeof(pwr), optlen);
100954+ len = min(sizeof(pwr), len);
100955 if (copy_from_user((char *) &pwr, optval, len)) {
100956 err = -EFAULT;
100957 break;
100958diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
100959index 2348176..b9b6cf2 100644
100960--- a/net/bluetooth/rfcomm/sock.c
100961+++ b/net/bluetooth/rfcomm/sock.c
100962@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100963 struct sock *sk = sock->sk;
100964 struct bt_security sec;
100965 int err = 0;
100966- size_t len;
100967+ size_t len = optlen;
100968 u32 opt;
100969
100970 BT_DBG("sk %p", sk);
100971@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
100972
100973 sec.level = BT_SECURITY_LOW;
100974
100975- len = min_t(unsigned int, sizeof(sec), optlen);
100976+ len = min(sizeof(sec), len);
100977 if (copy_from_user((char *) &sec, optval, len)) {
100978 err = -EFAULT;
100979 break;
100980diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
100981index 8e385a0..a5bdd8e 100644
100982--- a/net/bluetooth/rfcomm/tty.c
100983+++ b/net/bluetooth/rfcomm/tty.c
100984@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
100985 BT_DBG("tty %p id %d", tty, tty->index);
100986
100987 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
100988- dev->channel, dev->port.count);
100989+ dev->channel, atomic_read(&dev->port.count));
100990
100991 err = tty_port_open(&dev->port, tty, filp);
100992 if (err)
100993@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
100994 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
100995
100996 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
100997- dev->port.count);
100998+ atomic_read(&dev->port.count));
100999
101000 tty_port_close(&dev->port, tty, filp);
101001 }
101002diff --git a/net/bridge/br.c b/net/bridge/br.c
101003index 44425af..4ee730e 100644
101004--- a/net/bridge/br.c
101005+++ b/net/bridge/br.c
101006@@ -147,6 +147,8 @@ static int __init br_init(void)
101007 {
101008 int err;
101009
101010+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
101011+
101012 err = stp_proto_register(&br_stp_proto);
101013 if (err < 0) {
101014 pr_err("bridge: can't register sap for STP\n");
101015diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101016index 9f5eb55..45ab9c5 100644
101017--- a/net/bridge/br_netlink.c
101018+++ b/net/bridge/br_netlink.c
101019@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
101020 .get_link_af_size = br_get_link_af_size,
101021 };
101022
101023-struct rtnl_link_ops br_link_ops __read_mostly = {
101024+struct rtnl_link_ops br_link_ops = {
101025 .kind = "bridge",
101026 .priv_size = sizeof(struct net_bridge),
101027 .setup = br_dev_setup,
101028diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101029index d9a8c05..8dadc6c6 100644
101030--- a/net/bridge/netfilter/ebtables.c
101031+++ b/net/bridge/netfilter/ebtables.c
101032@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101033 tmp.valid_hooks = t->table->valid_hooks;
101034 }
101035 mutex_unlock(&ebt_mutex);
101036- if (copy_to_user(user, &tmp, *len) != 0) {
101037+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101038 BUGPRINT("c2u Didn't work\n");
101039 ret = -EFAULT;
101040 break;
101041@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101042 goto out;
101043 tmp.valid_hooks = t->valid_hooks;
101044
101045- if (copy_to_user(user, &tmp, *len) != 0) {
101046+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101047 ret = -EFAULT;
101048 break;
101049 }
101050@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101051 tmp.entries_size = t->table->entries_size;
101052 tmp.valid_hooks = t->table->valid_hooks;
101053
101054- if (copy_to_user(user, &tmp, *len) != 0) {
101055+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101056 ret = -EFAULT;
101057 break;
101058 }
101059diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101060index f5afda1..dcf770a 100644
101061--- a/net/caif/cfctrl.c
101062+++ b/net/caif/cfctrl.c
101063@@ -10,6 +10,7 @@
101064 #include <linux/spinlock.h>
101065 #include <linux/slab.h>
101066 #include <linux/pkt_sched.h>
101067+#include <linux/sched.h>
101068 #include <net/caif/caif_layer.h>
101069 #include <net/caif/cfpkt.h>
101070 #include <net/caif/cfctrl.h>
101071@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101072 memset(&dev_info, 0, sizeof(dev_info));
101073 dev_info.id = 0xff;
101074 cfsrvl_init(&this->serv, 0, &dev_info, false);
101075- atomic_set(&this->req_seq_no, 1);
101076- atomic_set(&this->rsp_seq_no, 1);
101077+ atomic_set_unchecked(&this->req_seq_no, 1);
101078+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101079 this->serv.layer.receive = cfctrl_recv;
101080 sprintf(this->serv.layer.name, "ctrl");
101081 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101082@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101083 struct cfctrl_request_info *req)
101084 {
101085 spin_lock_bh(&ctrl->info_list_lock);
101086- atomic_inc(&ctrl->req_seq_no);
101087- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101088+ atomic_inc_unchecked(&ctrl->req_seq_no);
101089+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101090 list_add_tail(&req->list, &ctrl->list);
101091 spin_unlock_bh(&ctrl->info_list_lock);
101092 }
101093@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101094 if (p != first)
101095 pr_warn("Requests are not received in order\n");
101096
101097- atomic_set(&ctrl->rsp_seq_no,
101098+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101099 p->sequence_no);
101100 list_del(&p->list);
101101 goto out;
101102diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101103index 67a4a36..8d28068 100644
101104--- a/net/caif/chnl_net.c
101105+++ b/net/caif/chnl_net.c
101106@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101107 };
101108
101109
101110-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101111+static struct rtnl_link_ops ipcaif_link_ops = {
101112 .kind = "caif",
101113 .priv_size = sizeof(struct chnl_net),
101114 .setup = ipcaif_net_setup,
101115diff --git a/net/can/af_can.c b/net/can/af_can.c
101116index 32d710e..93bcf05 100644
101117--- a/net/can/af_can.c
101118+++ b/net/can/af_can.c
101119@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101120 };
101121
101122 /* notifier block for netdevice event */
101123-static struct notifier_block can_netdev_notifier __read_mostly = {
101124+static struct notifier_block can_netdev_notifier = {
101125 .notifier_call = can_notifier,
101126 };
101127
101128diff --git a/net/can/bcm.c b/net/can/bcm.c
101129index ee9ffd9..dfdf3d4 100644
101130--- a/net/can/bcm.c
101131+++ b/net/can/bcm.c
101132@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101133 }
101134
101135 /* create /proc/net/can-bcm directory */
101136- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101137+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101138 return 0;
101139 }
101140
101141diff --git a/net/can/gw.c b/net/can/gw.c
101142index 295f62e..0c3b09e 100644
101143--- a/net/can/gw.c
101144+++ b/net/can/gw.c
101145@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101146 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101147
101148 static HLIST_HEAD(cgw_list);
101149-static struct notifier_block notifier;
101150
101151 static struct kmem_cache *cgw_cache __read_mostly;
101152
101153@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101154 return err;
101155 }
101156
101157+static struct notifier_block notifier = {
101158+ .notifier_call = cgw_notifier
101159+};
101160+
101161 static __init int cgw_module_init(void)
101162 {
101163 /* sanitize given module parameter */
101164@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101165 return -ENOMEM;
101166
101167 /* set notifier */
101168- notifier.notifier_call = cgw_notifier;
101169 register_netdevice_notifier(&notifier);
101170
101171 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
101172diff --git a/net/can/proc.c b/net/can/proc.c
101173index 1a19b98..df2b4ec 100644
101174--- a/net/can/proc.c
101175+++ b/net/can/proc.c
101176@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101177 void can_init_proc(void)
101178 {
101179 /* create /proc/net/can directory */
101180- can_dir = proc_mkdir("can", init_net.proc_net);
101181+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101182
101183 if (!can_dir) {
101184 printk(KERN_INFO "can: failed to create /proc/net/can . "
101185diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101186index 33a2f20..371bd09 100644
101187--- a/net/ceph/messenger.c
101188+++ b/net/ceph/messenger.c
101189@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101190 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101191
101192 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101193-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101194+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101195
101196 static struct page *zero_page; /* used in certain error cases */
101197
101198@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101199 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101200 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101201
101202- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101203+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101204 s = addr_str[i];
101205
101206 switch (ss->ss_family) {
101207diff --git a/net/compat.c b/net/compat.c
101208index f7bd286..76ea56a 100644
101209--- a/net/compat.c
101210+++ b/net/compat.c
101211@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101212
101213 #define CMSG_COMPAT_FIRSTHDR(msg) \
101214 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101215- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101216+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101217 (struct compat_cmsghdr __user *)NULL)
101218
101219 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101220 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101221 (ucmlen) <= (unsigned long) \
101222 ((mhdr)->msg_controllen - \
101223- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101224+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101225
101226 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101227 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101228 {
101229 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101230- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101231+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101232 msg->msg_controllen)
101233 return NULL;
101234 return (struct compat_cmsghdr __user *)ptr;
101235@@ -203,7 +203,7 @@ Efault:
101236
101237 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101238 {
101239- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101240+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101241 struct compat_cmsghdr cmhdr;
101242 struct compat_timeval ctv;
101243 struct compat_timespec cts[3];
101244@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101245
101246 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101247 {
101248- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101249+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101250 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101251 int fdnum = scm->fp->count;
101252 struct file **fp = scm->fp->fp;
101253@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101254 return -EFAULT;
101255 old_fs = get_fs();
101256 set_fs(KERNEL_DS);
101257- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101258+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101259 set_fs(old_fs);
101260
101261 return err;
101262@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101263 len = sizeof(ktime);
101264 old_fs = get_fs();
101265 set_fs(KERNEL_DS);
101266- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101267+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101268 set_fs(old_fs);
101269
101270 if (!err) {
101271@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101272 case MCAST_JOIN_GROUP:
101273 case MCAST_LEAVE_GROUP:
101274 {
101275- struct compat_group_req __user *gr32 = (void *)optval;
101276+ struct compat_group_req __user *gr32 = (void __user *)optval;
101277 struct group_req __user *kgr =
101278 compat_alloc_user_space(sizeof(struct group_req));
101279 u32 interface;
101280@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101281 case MCAST_BLOCK_SOURCE:
101282 case MCAST_UNBLOCK_SOURCE:
101283 {
101284- struct compat_group_source_req __user *gsr32 = (void *)optval;
101285+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101286 struct group_source_req __user *kgsr = compat_alloc_user_space(
101287 sizeof(struct group_source_req));
101288 u32 interface;
101289@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101290 }
101291 case MCAST_MSFILTER:
101292 {
101293- struct compat_group_filter __user *gf32 = (void *)optval;
101294+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101295 struct group_filter __user *kgf;
101296 u32 interface, fmode, numsrc;
101297
101298@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101299 char __user *optval, int __user *optlen,
101300 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101301 {
101302- struct compat_group_filter __user *gf32 = (void *)optval;
101303+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101304 struct group_filter __user *kgf;
101305 int __user *koptlen;
101306 u32 interface, fmode, numsrc;
101307@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101308
101309 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101310 return -EINVAL;
101311- if (copy_from_user(a, args, nas[call]))
101312+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101313 return -EFAULT;
101314 a0 = a[0];
101315 a1 = a[1];
101316diff --git a/net/core/datagram.c b/net/core/datagram.c
101317index df493d6..1145766 100644
101318--- a/net/core/datagram.c
101319+++ b/net/core/datagram.c
101320@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101321 }
101322
101323 kfree_skb(skb);
101324- atomic_inc(&sk->sk_drops);
101325+ atomic_inc_unchecked(&sk->sk_drops);
101326 sk_mem_reclaim_partial(sk);
101327
101328 return err;
101329diff --git a/net/core/dev.c b/net/core/dev.c
101330index 4ff46f8..e877e78 100644
101331--- a/net/core/dev.c
101332+++ b/net/core/dev.c
101333@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101334 {
101335 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101336 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101337- atomic_long_inc(&dev->rx_dropped);
101338+ atomic_long_inc_unchecked(&dev->rx_dropped);
101339 kfree_skb(skb);
101340 return NET_RX_DROP;
101341 }
101342 }
101343
101344 if (unlikely(!is_skb_forwardable(dev, skb))) {
101345- atomic_long_inc(&dev->rx_dropped);
101346+ atomic_long_inc_unchecked(&dev->rx_dropped);
101347 kfree_skb(skb);
101348 return NET_RX_DROP;
101349 }
101350@@ -2958,7 +2958,7 @@ recursion_alert:
101351 drop:
101352 rcu_read_unlock_bh();
101353
101354- atomic_long_inc(&dev->tx_dropped);
101355+ atomic_long_inc_unchecked(&dev->tx_dropped);
101356 kfree_skb_list(skb);
101357 return rc;
101358 out:
101359@@ -3301,7 +3301,7 @@ enqueue:
101360
101361 local_irq_restore(flags);
101362
101363- atomic_long_inc(&skb->dev->rx_dropped);
101364+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101365 kfree_skb(skb);
101366 return NET_RX_DROP;
101367 }
101368@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101369 }
101370 EXPORT_SYMBOL(netif_rx_ni);
101371
101372-static void net_tx_action(struct softirq_action *h)
101373+static __latent_entropy void net_tx_action(void)
101374 {
101375 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101376
101377@@ -3711,7 +3711,7 @@ ncls:
101378 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101379 } else {
101380 drop:
101381- atomic_long_inc(&skb->dev->rx_dropped);
101382+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101383 kfree_skb(skb);
101384 /* Jamal, now you will not able to escape explaining
101385 * me how you were going to use this. :-)
101386@@ -4599,7 +4599,7 @@ out_unlock:
101387 return work;
101388 }
101389
101390-static void net_rx_action(struct softirq_action *h)
101391+static __latent_entropy void net_rx_action(void)
101392 {
101393 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101394 unsigned long time_limit = jiffies + 2;
101395@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101396 } else {
101397 netdev_stats_to_stats64(storage, &dev->stats);
101398 }
101399- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101400- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101401+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101402+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101403 return storage;
101404 }
101405 EXPORT_SYMBOL(dev_get_stats);
101406diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101407index b94b1d2..da3ed7c 100644
101408--- a/net/core/dev_ioctl.c
101409+++ b/net/core/dev_ioctl.c
101410@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101411 no_module = !dev;
101412 if (no_module && capable(CAP_NET_ADMIN))
101413 no_module = request_module("netdev-%s", name);
101414- if (no_module && capable(CAP_SYS_MODULE))
101415+ if (no_module && capable(CAP_SYS_MODULE)) {
101416+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101417+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101418+#else
101419 request_module("%s", name);
101420+#endif
101421+ }
101422 }
101423 EXPORT_SYMBOL(dev_load);
101424
101425diff --git a/net/core/filter.c b/net/core/filter.c
101426index ec9baea..dd6195d 100644
101427--- a/net/core/filter.c
101428+++ b/net/core/filter.c
101429@@ -533,7 +533,11 @@ do_pass:
101430
101431 /* Unkown instruction. */
101432 default:
101433- goto err;
101434+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
101435+ fp->code, fp->jt, fp->jf, fp->k);
101436+ kfree(addrs);
101437+ BUG();
101438+ return -EINVAL;
101439 }
101440
101441 insn++;
101442@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101443 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101444 int pc, ret = 0;
101445
101446- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101447+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101448
101449 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101450 if (!masks)
101451@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101452 if (!fp)
101453 return -ENOMEM;
101454
101455- memcpy(fp->insns, fprog->filter, fsize);
101456+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101457
101458 fp->len = fprog->len;
101459 /* Since unattached filters are not copied back to user
101460diff --git a/net/core/flow.c b/net/core/flow.c
101461index 1033725..340f65d 100644
101462--- a/net/core/flow.c
101463+++ b/net/core/flow.c
101464@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101465 static int flow_entry_valid(struct flow_cache_entry *fle,
101466 struct netns_xfrm *xfrm)
101467 {
101468- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101469+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101470 return 0;
101471 if (fle->object && !fle->object->ops->check(fle->object))
101472 return 0;
101473@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101474 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101475 fcp->hash_count++;
101476 }
101477- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101478+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101479 flo = fle->object;
101480 if (!flo)
101481 goto ret_object;
101482@@ -263,7 +263,7 @@ nocache:
101483 }
101484 flo = resolver(net, key, family, dir, flo, ctx);
101485 if (fle) {
101486- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101487+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101488 if (!IS_ERR(flo))
101489 fle->object = flo;
101490 else
101491diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101492index 8d614c9..55752ea 100644
101493--- a/net/core/neighbour.c
101494+++ b/net/core/neighbour.c
101495@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101496 void __user *buffer, size_t *lenp, loff_t *ppos)
101497 {
101498 int size, ret;
101499- struct ctl_table tmp = *ctl;
101500+ ctl_table_no_const tmp = *ctl;
101501
101502 tmp.extra1 = &zero;
101503 tmp.extra2 = &unres_qlen_max;
101504@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101505 void __user *buffer,
101506 size_t *lenp, loff_t *ppos)
101507 {
101508- struct ctl_table tmp = *ctl;
101509+ ctl_table_no_const tmp = *ctl;
101510 int ret;
101511
101512 tmp.extra1 = &zero;
101513diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101514index 2bf8329..2eb1423 100644
101515--- a/net/core/net-procfs.c
101516+++ b/net/core/net-procfs.c
101517@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101518 struct rtnl_link_stats64 temp;
101519 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101520
101521- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101522+ if (gr_proc_is_restricted())
101523+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101524+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101525+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101526+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101527+ else
101528+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101529 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101530 dev->name, stats->rx_bytes, stats->rx_packets,
101531 stats->rx_errors,
101532@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101533 return 0;
101534 }
101535
101536-static const struct seq_operations dev_seq_ops = {
101537+const struct seq_operations dev_seq_ops = {
101538 .start = dev_seq_start,
101539 .next = dev_seq_next,
101540 .stop = dev_seq_stop,
101541@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101542
101543 static int softnet_seq_open(struct inode *inode, struct file *file)
101544 {
101545- return seq_open(file, &softnet_seq_ops);
101546+ return seq_open_restrict(file, &softnet_seq_ops);
101547 }
101548
101549 static const struct file_operations softnet_seq_fops = {
101550@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101551 else
101552 seq_printf(seq, "%04x", ntohs(pt->type));
101553
101554+#ifdef CONFIG_GRKERNSEC_HIDESYM
101555+ seq_printf(seq, " %-8s %pf\n",
101556+ pt->dev ? pt->dev->name : "", NULL);
101557+#else
101558 seq_printf(seq, " %-8s %pf\n",
101559 pt->dev ? pt->dev->name : "", pt->func);
101560+#endif
101561 }
101562
101563 return 0;
101564diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101565index 9993412..2a4672b 100644
101566--- a/net/core/net-sysfs.c
101567+++ b/net/core/net-sysfs.c
101568@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101569 {
101570 struct net_device *netdev = to_net_dev(dev);
101571 return sprintf(buf, fmt_dec,
101572- atomic_read(&netdev->carrier_changes));
101573+ atomic_read_unchecked(&netdev->carrier_changes));
101574 }
101575 static DEVICE_ATTR_RO(carrier_changes);
101576
101577diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101578index ce780c7..6d296b3 100644
101579--- a/net/core/net_namespace.c
101580+++ b/net/core/net_namespace.c
101581@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101582 int error;
101583 LIST_HEAD(net_exit_list);
101584
101585- list_add_tail(&ops->list, list);
101586+ pax_list_add_tail((struct list_head *)&ops->list, list);
101587 if (ops->init || (ops->id && ops->size)) {
101588 for_each_net(net) {
101589 error = ops_init(ops, net);
101590@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101591
101592 out_undo:
101593 /* If I have an error cleanup all namespaces I initialized */
101594- list_del(&ops->list);
101595+ pax_list_del((struct list_head *)&ops->list);
101596 ops_exit_list(ops, &net_exit_list);
101597 ops_free_list(ops, &net_exit_list);
101598 return error;
101599@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101600 struct net *net;
101601 LIST_HEAD(net_exit_list);
101602
101603- list_del(&ops->list);
101604+ pax_list_del((struct list_head *)&ops->list);
101605 for_each_net(net)
101606 list_add_tail(&net->exit_list, &net_exit_list);
101607 ops_exit_list(ops, &net_exit_list);
101608@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
101609 mutex_lock(&net_mutex);
101610 error = register_pernet_operations(&pernet_list, ops);
101611 if (!error && (first_device == &pernet_list))
101612- first_device = &ops->list;
101613+ first_device = (struct list_head *)&ops->list;
101614 mutex_unlock(&net_mutex);
101615 return error;
101616 }
101617diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101618index e0ad5d1..04fa7f7 100644
101619--- a/net/core/netpoll.c
101620+++ b/net/core/netpoll.c
101621@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101622 struct udphdr *udph;
101623 struct iphdr *iph;
101624 struct ethhdr *eth;
101625- static atomic_t ip_ident;
101626+ static atomic_unchecked_t ip_ident;
101627 struct ipv6hdr *ip6h;
101628
101629 udp_len = len + sizeof(*udph);
101630@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101631 put_unaligned(0x45, (unsigned char *)iph);
101632 iph->tos = 0;
101633 put_unaligned(htons(ip_len), &(iph->tot_len));
101634- iph->id = htons(atomic_inc_return(&ip_ident));
101635+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101636 iph->frag_off = 0;
101637 iph->ttl = 64;
101638 iph->protocol = IPPROTO_UDP;
101639diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101640index 352d183..1bddfaf 100644
101641--- a/net/core/pktgen.c
101642+++ b/net/core/pktgen.c
101643@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
101644 pn->net = net;
101645 INIT_LIST_HEAD(&pn->pktgen_threads);
101646 pn->pktgen_exiting = false;
101647- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101648+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101649 if (!pn->proc_dir) {
101650 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101651 return -ENODEV;
101652diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101653index 76ec6c5..9cfb81c 100644
101654--- a/net/core/rtnetlink.c
101655+++ b/net/core/rtnetlink.c
101656@@ -60,7 +60,7 @@ struct rtnl_link {
101657 rtnl_doit_func doit;
101658 rtnl_dumpit_func dumpit;
101659 rtnl_calcit_func calcit;
101660-};
101661+} __no_const;
101662
101663 static DEFINE_MUTEX(rtnl_mutex);
101664
101665@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101666 * to use the ops for creating device. So do not
101667 * fill up dellink as well. That disables rtnl_dellink.
101668 */
101669- if (ops->setup && !ops->dellink)
101670- ops->dellink = unregister_netdevice_queue;
101671+ if (ops->setup && !ops->dellink) {
101672+ pax_open_kernel();
101673+ *(void **)&ops->dellink = unregister_netdevice_queue;
101674+ pax_close_kernel();
101675+ }
101676
101677- list_add_tail(&ops->list, &link_ops);
101678+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101679 return 0;
101680 }
101681 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101682@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101683 for_each_net(net) {
101684 __rtnl_kill_links(net, ops);
101685 }
101686- list_del(&ops->list);
101687+ pax_list_del((struct list_head *)&ops->list);
101688 }
101689 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101690
101691@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101692 (dev->ifalias &&
101693 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101694 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101695- atomic_read(&dev->carrier_changes)))
101696+ atomic_read_unchecked(&dev->carrier_changes)))
101697 goto nla_put_failure;
101698
101699 if (1) {
101700@@ -2094,6 +2097,10 @@ replay:
101701 if (IS_ERR(dest_net))
101702 return PTR_ERR(dest_net);
101703
101704+ err = -EPERM;
101705+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
101706+ goto out;
101707+
101708 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
101709 if (IS_ERR(dev)) {
101710 err = PTR_ERR(dev);
101711diff --git a/net/core/scm.c b/net/core/scm.c
101712index 3b6899b..cf36238 100644
101713--- a/net/core/scm.c
101714+++ b/net/core/scm.c
101715@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
101716 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101717 {
101718 struct cmsghdr __user *cm
101719- = (__force struct cmsghdr __user *)msg->msg_control;
101720+ = (struct cmsghdr __force_user *)msg->msg_control;
101721 struct cmsghdr cmhdr;
101722 int cmlen = CMSG_LEN(len);
101723 int err;
101724@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101725 err = -EFAULT;
101726 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101727 goto out;
101728- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101729+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101730 goto out;
101731 cmlen = CMSG_SPACE(len);
101732 if (msg->msg_controllen < cmlen)
101733@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
101734 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101735 {
101736 struct cmsghdr __user *cm
101737- = (__force struct cmsghdr __user*)msg->msg_control;
101738+ = (struct cmsghdr __force_user *)msg->msg_control;
101739
101740 int fdmax = 0;
101741 int fdnum = scm->fp->count;
101742@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101743 if (fdnum < fdmax)
101744 fdmax = fdnum;
101745
101746- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101747+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101748 i++, cmfptr++)
101749 {
101750 struct socket *sock;
101751diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101752index 62c67be..01893a0a 100644
101753--- a/net/core/skbuff.c
101754+++ b/net/core/skbuff.c
101755@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
101756 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101757 int len, __wsum csum)
101758 {
101759- const struct skb_checksum_ops ops = {
101760+ static const struct skb_checksum_ops ops = {
101761 .update = csum_partial_ext,
101762 .combine = csum_block_add_ext,
101763 };
101764@@ -3363,12 +3363,14 @@ void __init skb_init(void)
101765 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101766 sizeof(struct sk_buff),
101767 0,
101768- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101769+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101770+ SLAB_NO_SANITIZE,
101771 NULL);
101772 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101773 sizeof(struct sk_buff_fclones),
101774 0,
101775- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101776+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101777+ SLAB_NO_SANITIZE,
101778 NULL);
101779 }
101780
101781diff --git a/net/core/sock.c b/net/core/sock.c
101782index 1c7a33d..a3817e2 100644
101783--- a/net/core/sock.c
101784+++ b/net/core/sock.c
101785@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101786 struct sk_buff_head *list = &sk->sk_receive_queue;
101787
101788 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101789- atomic_inc(&sk->sk_drops);
101790+ atomic_inc_unchecked(&sk->sk_drops);
101791 trace_sock_rcvqueue_full(sk, skb);
101792 return -ENOMEM;
101793 }
101794@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101795 return err;
101796
101797 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101798- atomic_inc(&sk->sk_drops);
101799+ atomic_inc_unchecked(&sk->sk_drops);
101800 return -ENOBUFS;
101801 }
101802
101803@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101804 skb_dst_force(skb);
101805
101806 spin_lock_irqsave(&list->lock, flags);
101807- skb->dropcount = atomic_read(&sk->sk_drops);
101808+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101809 __skb_queue_tail(list, skb);
101810 spin_unlock_irqrestore(&list->lock, flags);
101811
101812@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101813 skb->dev = NULL;
101814
101815 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101816- atomic_inc(&sk->sk_drops);
101817+ atomic_inc_unchecked(&sk->sk_drops);
101818 goto discard_and_relse;
101819 }
101820 if (nested)
101821@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101822 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101823 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101824 bh_unlock_sock(sk);
101825- atomic_inc(&sk->sk_drops);
101826+ atomic_inc_unchecked(&sk->sk_drops);
101827 goto discard_and_relse;
101828 }
101829
101830@@ -888,6 +888,7 @@ set_rcvbuf:
101831 }
101832 break;
101833
101834+#ifndef GRKERNSEC_BPF_HARDEN
101835 case SO_ATTACH_BPF:
101836 ret = -EINVAL;
101837 if (optlen == sizeof(u32)) {
101838@@ -900,7 +901,7 @@ set_rcvbuf:
101839 ret = sk_attach_bpf(ufd, sk);
101840 }
101841 break;
101842-
101843+#endif
101844 case SO_DETACH_FILTER:
101845 ret = sk_detach_filter(sk);
101846 break;
101847@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101848 struct timeval tm;
101849 } v;
101850
101851- int lv = sizeof(int);
101852- int len;
101853+ unsigned int lv = sizeof(int);
101854+ unsigned int len;
101855
101856 if (get_user(len, optlen))
101857 return -EFAULT;
101858- if (len < 0)
101859+ if (len > INT_MAX)
101860 return -EINVAL;
101861
101862 memset(&v, 0, sizeof(v));
101863@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101864
101865 case SO_PEERNAME:
101866 {
101867- char address[128];
101868+ char address[_K_SS_MAXSIZE];
101869
101870 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
101871 return -ENOTCONN;
101872- if (lv < len)
101873+ if (lv < len || sizeof address < len)
101874 return -EINVAL;
101875 if (copy_to_user(optval, address, len))
101876 return -EFAULT;
101877@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101878
101879 if (len > lv)
101880 len = lv;
101881- if (copy_to_user(optval, &v, len))
101882+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
101883 return -EFAULT;
101884 lenout:
101885 if (put_user(len, optlen))
101886@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
101887 */
101888 smp_wmb();
101889 atomic_set(&sk->sk_refcnt, 1);
101890- atomic_set(&sk->sk_drops, 0);
101891+ atomic_set_unchecked(&sk->sk_drops, 0);
101892 }
101893 EXPORT_SYMBOL(sock_init_data);
101894
101895@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
101896 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101897 int level, int type)
101898 {
101899+ struct sock_extended_err ee;
101900 struct sock_exterr_skb *serr;
101901 struct sk_buff *skb;
101902 int copied, err;
101903@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
101904 sock_recv_timestamp(msg, sk, skb);
101905
101906 serr = SKB_EXT_ERR(skb);
101907- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
101908+ ee = serr->ee;
101909+ put_cmsg(msg, level, type, sizeof ee, &ee);
101910
101911 msg->msg_flags |= MSG_ERRQUEUE;
101912 err = copied;
101913diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
101914index ad704c7..ca48aff 100644
101915--- a/net/core/sock_diag.c
101916+++ b/net/core/sock_diag.c
101917@@ -9,26 +9,33 @@
101918 #include <linux/inet_diag.h>
101919 #include <linux/sock_diag.h>
101920
101921-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
101922+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
101923 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
101924 static DEFINE_MUTEX(sock_diag_table_mutex);
101925
101926 int sock_diag_check_cookie(void *sk, __u32 *cookie)
101927 {
101928+#ifndef CONFIG_GRKERNSEC_HIDESYM
101929 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
101930 cookie[1] != INET_DIAG_NOCOOKIE) &&
101931 ((u32)(unsigned long)sk != cookie[0] ||
101932 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
101933 return -ESTALE;
101934 else
101935+#endif
101936 return 0;
101937 }
101938 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
101939
101940 void sock_diag_save_cookie(void *sk, __u32 *cookie)
101941 {
101942+#ifdef CONFIG_GRKERNSEC_HIDESYM
101943+ cookie[0] = 0;
101944+ cookie[1] = 0;
101945+#else
101946 cookie[0] = (u32)(unsigned long)sk;
101947 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
101948+#endif
101949 }
101950 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
101951
101952@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
101953 mutex_lock(&sock_diag_table_mutex);
101954 if (sock_diag_handlers[hndl->family])
101955 err = -EBUSY;
101956- else
101957+ else {
101958+ pax_open_kernel();
101959 sock_diag_handlers[hndl->family] = hndl;
101960+ pax_close_kernel();
101961+ }
101962 mutex_unlock(&sock_diag_table_mutex);
101963
101964 return err;
101965@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
101966
101967 mutex_lock(&sock_diag_table_mutex);
101968 BUG_ON(sock_diag_handlers[family] != hnld);
101969+ pax_open_kernel();
101970 sock_diag_handlers[family] = NULL;
101971+ pax_close_kernel();
101972 mutex_unlock(&sock_diag_table_mutex);
101973 }
101974 EXPORT_SYMBOL_GPL(sock_diag_unregister);
101975diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
101976index bbb1d5a..754e2e5 100644
101977--- a/net/core/sysctl_net_core.c
101978+++ b/net/core/sysctl_net_core.c
101979@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
101980 {
101981 unsigned int orig_size, size;
101982 int ret, i;
101983- struct ctl_table tmp = {
101984+ ctl_table_no_const tmp = {
101985 .data = &size,
101986 .maxlen = sizeof(size),
101987 .mode = table->mode
101988@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
101989 void __user *buffer, size_t *lenp, loff_t *ppos)
101990 {
101991 char id[IFNAMSIZ];
101992- struct ctl_table tbl = {
101993+ ctl_table_no_const tbl = {
101994 .data = id,
101995 .maxlen = IFNAMSIZ,
101996 };
101997@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
101998 static int proc_do_rss_key(struct ctl_table *table, int write,
101999 void __user *buffer, size_t *lenp, loff_t *ppos)
102000 {
102001- struct ctl_table fake_table;
102002+ ctl_table_no_const fake_table;
102003 char buf[NETDEV_RSS_KEY_LEN * 3];
102004
102005 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102006@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
102007 .mode = 0444,
102008 .proc_handler = proc_do_rss_key,
102009 },
102010-#ifdef CONFIG_BPF_JIT
102011+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102012 {
102013 .procname = "bpf_jit_enable",
102014 .data = &bpf_jit_enable,
102015@@ -402,13 +402,12 @@ static struct ctl_table netns_core_table[] = {
102016
102017 static __net_init int sysctl_core_net_init(struct net *net)
102018 {
102019- struct ctl_table *tbl;
102020+ ctl_table_no_const *tbl = NULL;
102021
102022 net->core.sysctl_somaxconn = SOMAXCONN;
102023
102024- tbl = netns_core_table;
102025 if (!net_eq(net, &init_net)) {
102026- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102027+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102028 if (tbl == NULL)
102029 goto err_dup;
102030
102031@@ -418,17 +417,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102032 if (net->user_ns != &init_user_ns) {
102033 tbl[0].procname = NULL;
102034 }
102035- }
102036-
102037- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102038+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102039+ } else
102040+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102041 if (net->core.sysctl_hdr == NULL)
102042 goto err_reg;
102043
102044 return 0;
102045
102046 err_reg:
102047- if (tbl != netns_core_table)
102048- kfree(tbl);
102049+ kfree(tbl);
102050 err_dup:
102051 return -ENOMEM;
102052 }
102053@@ -443,7 +441,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102054 kfree(tbl);
102055 }
102056
102057-static __net_initdata struct pernet_operations sysctl_core_ops = {
102058+static __net_initconst struct pernet_operations sysctl_core_ops = {
102059 .init = sysctl_core_net_init,
102060 .exit = sysctl_core_net_exit,
102061 };
102062diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102063index 8102286..a0c2755 100644
102064--- a/net/decnet/af_decnet.c
102065+++ b/net/decnet/af_decnet.c
102066@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102067 .sysctl_rmem = sysctl_decnet_rmem,
102068 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102069 .obj_size = sizeof(struct dn_sock),
102070+ .slab_flags = SLAB_USERCOPY,
102071 };
102072
102073 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102074diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102075index 4400da7..3429972 100644
102076--- a/net/decnet/dn_dev.c
102077+++ b/net/decnet/dn_dev.c
102078@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102079 .extra1 = &min_t3,
102080 .extra2 = &max_t3
102081 },
102082- {0}
102083+ { }
102084 },
102085 };
102086
102087diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102088index 5325b54..a0d4d69 100644
102089--- a/net/decnet/sysctl_net_decnet.c
102090+++ b/net/decnet/sysctl_net_decnet.c
102091@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102092
102093 if (len > *lenp) len = *lenp;
102094
102095- if (copy_to_user(buffer, addr, len))
102096+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102097 return -EFAULT;
102098
102099 *lenp = len;
102100@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102101
102102 if (len > *lenp) len = *lenp;
102103
102104- if (copy_to_user(buffer, devname, len))
102105+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102106 return -EFAULT;
102107
102108 *lenp = len;
102109diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102110index a2c7e4c..3dc9f67 100644
102111--- a/net/hsr/hsr_netlink.c
102112+++ b/net/hsr/hsr_netlink.c
102113@@ -102,7 +102,7 @@ nla_put_failure:
102114 return -EMSGSIZE;
102115 }
102116
102117-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102118+static struct rtnl_link_ops hsr_link_ops = {
102119 .kind = "hsr",
102120 .maxtype = IFLA_HSR_MAX,
102121 .policy = hsr_policy,
102122diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102123index 27eaa65..7083217 100644
102124--- a/net/ieee802154/6lowpan_rtnl.c
102125+++ b/net/ieee802154/6lowpan_rtnl.c
102126@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102127 dev_put(real_dev);
102128 }
102129
102130-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102131+static struct rtnl_link_ops lowpan_link_ops = {
102132 .kind = "lowpan",
102133 .priv_size = sizeof(struct lowpan_dev_info),
102134 .setup = lowpan_setup,
102135diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102136index 9d980ed..7d01e12 100644
102137--- a/net/ieee802154/reassembly.c
102138+++ b/net/ieee802154/reassembly.c
102139@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102140
102141 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102142 {
102143- struct ctl_table *table;
102144+ ctl_table_no_const *table = NULL;
102145 struct ctl_table_header *hdr;
102146 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102147 net_ieee802154_lowpan(net);
102148
102149- table = lowpan_frags_ns_ctl_table;
102150 if (!net_eq(net, &init_net)) {
102151- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102152+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102153 GFP_KERNEL);
102154 if (table == NULL)
102155 goto err_alloc;
102156@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102157 /* Don't export sysctls to unprivileged users */
102158 if (net->user_ns != &init_user_ns)
102159 table[0].procname = NULL;
102160- }
102161-
102162- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102163+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102164+ } else
102165+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102166 if (hdr == NULL)
102167 goto err_reg;
102168
102169@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102170 return 0;
102171
102172 err_reg:
102173- if (!net_eq(net, &init_net))
102174- kfree(table);
102175+ kfree(table);
102176 err_alloc:
102177 return -ENOMEM;
102178 }
102179diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102180index a44773c..a6ae415 100644
102181--- a/net/ipv4/af_inet.c
102182+++ b/net/ipv4/af_inet.c
102183@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102184 return ip_recv_error(sk, msg, len, addr_len);
102185 #if IS_ENABLED(CONFIG_IPV6)
102186 if (sk->sk_family == AF_INET6)
102187- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102188+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102189 #endif
102190 return -EINVAL;
102191 }
102192diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102193index 214882e..ec032f6 100644
102194--- a/net/ipv4/devinet.c
102195+++ b/net/ipv4/devinet.c
102196@@ -69,7 +69,8 @@
102197
102198 static struct ipv4_devconf ipv4_devconf = {
102199 .data = {
102200- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102201+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102202+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102203 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102204 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102205 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102206@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102207
102208 static struct ipv4_devconf ipv4_devconf_dflt = {
102209 .data = {
102210- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102211+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102212+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102213 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102214 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102215 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102216@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102217 idx = 0;
102218 head = &net->dev_index_head[h];
102219 rcu_read_lock();
102220- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102221+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102222 net->dev_base_seq;
102223 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102224 if (idx < s_idx)
102225@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102226 idx = 0;
102227 head = &net->dev_index_head[h];
102228 rcu_read_lock();
102229- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102230+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102231 net->dev_base_seq;
102232 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102233 if (idx < s_idx)
102234@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102235 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102236 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102237
102238-static struct devinet_sysctl_table {
102239+static const struct devinet_sysctl_table {
102240 struct ctl_table_header *sysctl_header;
102241 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102242 } devinet_sysctl = {
102243@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102244 int err;
102245 struct ipv4_devconf *all, *dflt;
102246 #ifdef CONFIG_SYSCTL
102247- struct ctl_table *tbl = ctl_forward_entry;
102248+ ctl_table_no_const *tbl = NULL;
102249 struct ctl_table_header *forw_hdr;
102250 #endif
102251
102252@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102253 goto err_alloc_dflt;
102254
102255 #ifdef CONFIG_SYSCTL
102256- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102257+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102258 if (tbl == NULL)
102259 goto err_alloc_ctl;
102260
102261@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102262 goto err_reg_dflt;
102263
102264 err = -ENOMEM;
102265- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102266+ if (!net_eq(net, &init_net))
102267+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102268+ else
102269+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102270 if (forw_hdr == NULL)
102271 goto err_reg_ctl;
102272 net->ipv4.forw_hdr = forw_hdr;
102273@@ -2287,8 +2292,7 @@ err_reg_ctl:
102274 err_reg_dflt:
102275 __devinet_sysctl_unregister(all);
102276 err_reg_all:
102277- if (tbl != ctl_forward_entry)
102278- kfree(tbl);
102279+ kfree(tbl);
102280 err_alloc_ctl:
102281 #endif
102282 if (dflt != &ipv4_devconf_dflt)
102283diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102284index 23104a3..9f5570b 100644
102285--- a/net/ipv4/fib_frontend.c
102286+++ b/net/ipv4/fib_frontend.c
102287@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102288 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102289 fib_sync_up(dev);
102290 #endif
102291- atomic_inc(&net->ipv4.dev_addr_genid);
102292+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102293 rt_cache_flush(dev_net(dev));
102294 break;
102295 case NETDEV_DOWN:
102296 fib_del_ifaddr(ifa, NULL);
102297- atomic_inc(&net->ipv4.dev_addr_genid);
102298+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102299 if (ifa->ifa_dev->ifa_list == NULL) {
102300 /* Last address was deleted from this interface.
102301 * Disable IP.
102302@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102303 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102304 fib_sync_up(dev);
102305 #endif
102306- atomic_inc(&net->ipv4.dev_addr_genid);
102307+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102308 rt_cache_flush(net);
102309 break;
102310 case NETDEV_DOWN:
102311diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102312index f99f41b..1879da9 100644
102313--- a/net/ipv4/fib_semantics.c
102314+++ b/net/ipv4/fib_semantics.c
102315@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102316 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102317 nh->nh_gw,
102318 nh->nh_parent->fib_scope);
102319- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102320+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102321
102322 return nh->nh_saddr;
102323 }
102324diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102325index 9111a4e..3576905 100644
102326--- a/net/ipv4/inet_hashtables.c
102327+++ b/net/ipv4/inet_hashtables.c
102328@@ -18,6 +18,7 @@
102329 #include <linux/sched.h>
102330 #include <linux/slab.h>
102331 #include <linux/wait.h>
102332+#include <linux/security.h>
102333
102334 #include <net/inet_connection_sock.h>
102335 #include <net/inet_hashtables.h>
102336@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102337 return inet_ehashfn(net, laddr, lport, faddr, fport);
102338 }
102339
102340+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102341+
102342 /*
102343 * Allocate and initialize a new local port bind bucket.
102344 * The bindhash mutex for snum's hash chain must be held here.
102345@@ -554,6 +557,8 @@ ok:
102346 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102347 spin_unlock(&head->lock);
102348
102349+ gr_update_task_in_ip_table(inet_sk(sk));
102350+
102351 if (tw) {
102352 inet_twsk_deschedule(tw, death_row);
102353 while (twrefcnt) {
102354diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102355index 241afd7..31b95d5 100644
102356--- a/net/ipv4/inetpeer.c
102357+++ b/net/ipv4/inetpeer.c
102358@@ -461,7 +461,7 @@ relookup:
102359 if (p) {
102360 p->daddr = *daddr;
102361 atomic_set(&p->refcnt, 1);
102362- atomic_set(&p->rid, 0);
102363+ atomic_set_unchecked(&p->rid, 0);
102364 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102365 p->rate_tokens = 0;
102366 /* 60*HZ is arbitrary, but chosen enough high so that the first
102367diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102368index 145a50c..5dd8cc5 100644
102369--- a/net/ipv4/ip_fragment.c
102370+++ b/net/ipv4/ip_fragment.c
102371@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102372 return 0;
102373
102374 start = qp->rid;
102375- end = atomic_inc_return(&peer->rid);
102376+ end = atomic_inc_return_unchecked(&peer->rid);
102377 qp->rid = end;
102378
102379 rc = qp->q.fragments && (end - start) > max;
102380@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102381
102382 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102383 {
102384- struct ctl_table *table;
102385+ ctl_table_no_const *table = NULL;
102386 struct ctl_table_header *hdr;
102387
102388- table = ip4_frags_ns_ctl_table;
102389 if (!net_eq(net, &init_net)) {
102390- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102391+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102392 if (table == NULL)
102393 goto err_alloc;
102394
102395@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102396 /* Don't export sysctls to unprivileged users */
102397 if (net->user_ns != &init_user_ns)
102398 table[0].procname = NULL;
102399- }
102400+ hdr = register_net_sysctl(net, "net/ipv4", table);
102401+ } else
102402+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102403
102404- hdr = register_net_sysctl(net, "net/ipv4", table);
102405 if (hdr == NULL)
102406 goto err_reg;
102407
102408@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102409 return 0;
102410
102411 err_reg:
102412- if (!net_eq(net, &init_net))
102413- kfree(table);
102414+ kfree(table);
102415 err_alloc:
102416 return -ENOMEM;
102417 }
102418diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102419index 4f4bf5b..2c936fe 100644
102420--- a/net/ipv4/ip_gre.c
102421+++ b/net/ipv4/ip_gre.c
102422@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102423 module_param(log_ecn_error, bool, 0644);
102424 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102425
102426-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102427+static struct rtnl_link_ops ipgre_link_ops;
102428 static int ipgre_tunnel_init(struct net_device *dev);
102429
102430 static int ipgre_net_id __read_mostly;
102431@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102432 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102433 };
102434
102435-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102436+static struct rtnl_link_ops ipgre_link_ops = {
102437 .kind = "gre",
102438 .maxtype = IFLA_GRE_MAX,
102439 .policy = ipgre_policy,
102440@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102441 .fill_info = ipgre_fill_info,
102442 };
102443
102444-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102445+static struct rtnl_link_ops ipgre_tap_ops = {
102446 .kind = "gretap",
102447 .maxtype = IFLA_GRE_MAX,
102448 .policy = ipgre_policy,
102449diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102450index 3d4da2c..40f9c29 100644
102451--- a/net/ipv4/ip_input.c
102452+++ b/net/ipv4/ip_input.c
102453@@ -147,6 +147,10 @@
102454 #include <linux/mroute.h>
102455 #include <linux/netlink.h>
102456
102457+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102458+extern int grsec_enable_blackhole;
102459+#endif
102460+
102461 /*
102462 * Process Router Attention IP option (RFC 2113)
102463 */
102464@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102465 if (!raw) {
102466 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102467 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102468+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102469+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102470+#endif
102471 icmp_send(skb, ICMP_DEST_UNREACH,
102472 ICMP_PROT_UNREACH, 0);
102473 }
102474diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102475index 6b85adb..cd7e5d3 100644
102476--- a/net/ipv4/ip_sockglue.c
102477+++ b/net/ipv4/ip_sockglue.c
102478@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102479 len = min_t(unsigned int, len, opt->optlen);
102480 if (put_user(len, optlen))
102481 return -EFAULT;
102482- if (copy_to_user(optval, opt->__data, len))
102483+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102484+ copy_to_user(optval, opt->__data, len))
102485 return -EFAULT;
102486 return 0;
102487 }
102488@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102489 if (sk->sk_type != SOCK_STREAM)
102490 return -ENOPROTOOPT;
102491
102492- msg.msg_control = (__force void *) optval;
102493+ msg.msg_control = (__force_kernel void *) optval;
102494 msg.msg_controllen = len;
102495 msg.msg_flags = flags;
102496
102497diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102498index 1a7e979..fd05aa4 100644
102499--- a/net/ipv4/ip_vti.c
102500+++ b/net/ipv4/ip_vti.c
102501@@ -45,7 +45,7 @@
102502 #include <net/net_namespace.h>
102503 #include <net/netns/generic.h>
102504
102505-static struct rtnl_link_ops vti_link_ops __read_mostly;
102506+static struct rtnl_link_ops vti_link_ops;
102507
102508 static int vti_net_id __read_mostly;
102509 static int vti_tunnel_init(struct net_device *dev);
102510@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102511 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102512 };
102513
102514-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102515+static struct rtnl_link_ops vti_link_ops = {
102516 .kind = "vti",
102517 .maxtype = IFLA_VTI_MAX,
102518 .policy = vti_policy,
102519diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102520index 7fa18bc..bea16af 100644
102521--- a/net/ipv4/ipconfig.c
102522+++ b/net/ipv4/ipconfig.c
102523@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102524
102525 mm_segment_t oldfs = get_fs();
102526 set_fs(get_ds());
102527- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102528+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102529 set_fs(oldfs);
102530 return res;
102531 }
102532@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102533
102534 mm_segment_t oldfs = get_fs();
102535 set_fs(get_ds());
102536- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102537+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102538 set_fs(oldfs);
102539 return res;
102540 }
102541@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102542
102543 mm_segment_t oldfs = get_fs();
102544 set_fs(get_ds());
102545- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102546+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102547 set_fs(oldfs);
102548 return res;
102549 }
102550diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102551index 40403114..c35c647 100644
102552--- a/net/ipv4/ipip.c
102553+++ b/net/ipv4/ipip.c
102554@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102555 static int ipip_net_id __read_mostly;
102556
102557 static int ipip_tunnel_init(struct net_device *dev);
102558-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102559+static struct rtnl_link_ops ipip_link_ops;
102560
102561 static int ipip_err(struct sk_buff *skb, u32 info)
102562 {
102563@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102564 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
102565 };
102566
102567-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102568+static struct rtnl_link_ops ipip_link_ops = {
102569 .kind = "ipip",
102570 .maxtype = IFLA_IPTUN_MAX,
102571 .policy = ipip_policy,
102572diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102573index f95b6f9..2ee2097 100644
102574--- a/net/ipv4/netfilter/arp_tables.c
102575+++ b/net/ipv4/netfilter/arp_tables.c
102576@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102577 #endif
102578
102579 static int get_info(struct net *net, void __user *user,
102580- const int *len, int compat)
102581+ int len, int compat)
102582 {
102583 char name[XT_TABLE_MAXNAMELEN];
102584 struct xt_table *t;
102585 int ret;
102586
102587- if (*len != sizeof(struct arpt_getinfo)) {
102588- duprintf("length %u != %Zu\n", *len,
102589+ if (len != sizeof(struct arpt_getinfo)) {
102590+ duprintf("length %u != %Zu\n", len,
102591 sizeof(struct arpt_getinfo));
102592 return -EINVAL;
102593 }
102594@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102595 info.size = private->size;
102596 strcpy(info.name, name);
102597
102598- if (copy_to_user(user, &info, *len) != 0)
102599+ if (copy_to_user(user, &info, len) != 0)
102600 ret = -EFAULT;
102601 else
102602 ret = 0;
102603@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102604
102605 switch (cmd) {
102606 case ARPT_SO_GET_INFO:
102607- ret = get_info(sock_net(sk), user, len, 1);
102608+ ret = get_info(sock_net(sk), user, *len, 1);
102609 break;
102610 case ARPT_SO_GET_ENTRIES:
102611 ret = compat_get_entries(sock_net(sk), user, len);
102612@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102613
102614 switch (cmd) {
102615 case ARPT_SO_GET_INFO:
102616- ret = get_info(sock_net(sk), user, len, 0);
102617+ ret = get_info(sock_net(sk), user, *len, 0);
102618 break;
102619
102620 case ARPT_SO_GET_ENTRIES:
102621diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102622index 99e810f..3711b81 100644
102623--- a/net/ipv4/netfilter/ip_tables.c
102624+++ b/net/ipv4/netfilter/ip_tables.c
102625@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102626 #endif
102627
102628 static int get_info(struct net *net, void __user *user,
102629- const int *len, int compat)
102630+ int len, int compat)
102631 {
102632 char name[XT_TABLE_MAXNAMELEN];
102633 struct xt_table *t;
102634 int ret;
102635
102636- if (*len != sizeof(struct ipt_getinfo)) {
102637- duprintf("length %u != %zu\n", *len,
102638+ if (len != sizeof(struct ipt_getinfo)) {
102639+ duprintf("length %u != %zu\n", len,
102640 sizeof(struct ipt_getinfo));
102641 return -EINVAL;
102642 }
102643@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102644 info.size = private->size;
102645 strcpy(info.name, name);
102646
102647- if (copy_to_user(user, &info, *len) != 0)
102648+ if (copy_to_user(user, &info, len) != 0)
102649 ret = -EFAULT;
102650 else
102651 ret = 0;
102652@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102653
102654 switch (cmd) {
102655 case IPT_SO_GET_INFO:
102656- ret = get_info(sock_net(sk), user, len, 1);
102657+ ret = get_info(sock_net(sk), user, *len, 1);
102658 break;
102659 case IPT_SO_GET_ENTRIES:
102660 ret = compat_get_entries(sock_net(sk), user, len);
102661@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102662
102663 switch (cmd) {
102664 case IPT_SO_GET_INFO:
102665- ret = get_info(sock_net(sk), user, len, 0);
102666+ ret = get_info(sock_net(sk), user, *len, 0);
102667 break;
102668
102669 case IPT_SO_GET_ENTRIES:
102670diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102671index e90f83a..3e6acca 100644
102672--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102673+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102674@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102675 spin_lock_init(&cn->lock);
102676
102677 #ifdef CONFIG_PROC_FS
102678- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102679+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102680 if (!cn->procdir) {
102681 pr_err("Unable to proc dir entry\n");
102682 return -ENOMEM;
102683diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102684index 0ae28f5..d32b565 100644
102685--- a/net/ipv4/ping.c
102686+++ b/net/ipv4/ping.c
102687@@ -59,7 +59,7 @@ struct ping_table {
102688 };
102689
102690 static struct ping_table ping_table;
102691-struct pingv6_ops pingv6_ops;
102692+struct pingv6_ops *pingv6_ops;
102693 EXPORT_SYMBOL_GPL(pingv6_ops);
102694
102695 static u16 ping_port_rover;
102696@@ -358,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102697 return -ENODEV;
102698 }
102699 }
102700- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102701+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102702 scoped);
102703 rcu_read_unlock();
102704
102705@@ -566,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102706 }
102707 #if IS_ENABLED(CONFIG_IPV6)
102708 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102709- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102710+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102711 #endif
102712 }
102713
102714@@ -584,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102715 info, (u8 *)icmph);
102716 #if IS_ENABLED(CONFIG_IPV6)
102717 } else if (family == AF_INET6) {
102718- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102719+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102720 info, (u8 *)icmph);
102721 #endif
102722 }
102723@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102724 }
102725
102726 if (inet6_sk(sk)->rxopt.all)
102727- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102728+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102729 if (skb->protocol == htons(ETH_P_IPV6) &&
102730 inet6_sk(sk)->rxopt.all)
102731- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102732+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102733 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102734 ip_cmsg_recv(msg, skb);
102735 #endif
102736@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102737 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102738 0, sock_i_ino(sp),
102739 atomic_read(&sp->sk_refcnt), sp,
102740- atomic_read(&sp->sk_drops));
102741+ atomic_read_unchecked(&sp->sk_drops));
102742 }
102743
102744 static int ping_v4_seq_show(struct seq_file *seq, void *v)
102745diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102746index 0bb68df..59405fc 100644
102747--- a/net/ipv4/raw.c
102748+++ b/net/ipv4/raw.c
102749@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102750 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102751 {
102752 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102753- atomic_inc(&sk->sk_drops);
102754+ atomic_inc_unchecked(&sk->sk_drops);
102755 kfree_skb(skb);
102756 return NET_RX_DROP;
102757 }
102758@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
102759
102760 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102761 {
102762+ struct icmp_filter filter;
102763+
102764 if (optlen > sizeof(struct icmp_filter))
102765 optlen = sizeof(struct icmp_filter);
102766- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102767+ if (copy_from_user(&filter, optval, optlen))
102768 return -EFAULT;
102769+ raw_sk(sk)->filter = filter;
102770 return 0;
102771 }
102772
102773 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102774 {
102775 int len, ret = -EFAULT;
102776+ struct icmp_filter filter;
102777
102778 if (get_user(len, optlen))
102779 goto out;
102780@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102781 if (len > sizeof(struct icmp_filter))
102782 len = sizeof(struct icmp_filter);
102783 ret = -EFAULT;
102784- if (put_user(len, optlen) ||
102785- copy_to_user(optval, &raw_sk(sk)->filter, len))
102786+ filter = raw_sk(sk)->filter;
102787+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102788 goto out;
102789 ret = 0;
102790 out: return ret;
102791@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102792 0, 0L, 0,
102793 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102794 0, sock_i_ino(sp),
102795- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102796+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102797 }
102798
102799 static int raw_seq_show(struct seq_file *seq, void *v)
102800diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102801index 52e1f2b..e736cb4 100644
102802--- a/net/ipv4/route.c
102803+++ b/net/ipv4/route.c
102804@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
102805
102806 static int rt_cache_seq_open(struct inode *inode, struct file *file)
102807 {
102808- return seq_open(file, &rt_cache_seq_ops);
102809+ return seq_open_restrict(file, &rt_cache_seq_ops);
102810 }
102811
102812 static const struct file_operations rt_cache_seq_fops = {
102813@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
102814
102815 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
102816 {
102817- return seq_open(file, &rt_cpu_seq_ops);
102818+ return seq_open_restrict(file, &rt_cpu_seq_ops);
102819 }
102820
102821 static const struct file_operations rt_cpu_seq_fops = {
102822@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
102823
102824 static int rt_acct_proc_open(struct inode *inode, struct file *file)
102825 {
102826- return single_open(file, rt_acct_proc_show, NULL);
102827+ return single_open_restrict(file, rt_acct_proc_show, NULL);
102828 }
102829
102830 static const struct file_operations rt_acct_proc_fops = {
102831@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
102832
102833 #define IP_IDENTS_SZ 2048u
102834 struct ip_ident_bucket {
102835- atomic_t id;
102836+ atomic_unchecked_t id;
102837 u32 stamp32;
102838 };
102839
102840-static struct ip_ident_bucket *ip_idents __read_mostly;
102841+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
102842
102843 /* In order to protect privacy, we add a perturbation to identifiers
102844 * if one generator is seldom used. This makes hard for an attacker
102845@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
102846 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
102847 delta = prandom_u32_max(now - old);
102848
102849- return atomic_add_return(segs + delta, &bucket->id) - segs;
102850+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
102851 }
102852 EXPORT_SYMBOL(ip_idents_reserve);
102853
102854@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
102855 .maxlen = sizeof(int),
102856 .mode = 0200,
102857 .proc_handler = ipv4_sysctl_rtcache_flush,
102858+ .extra1 = &init_net,
102859 },
102860 { },
102861 };
102862
102863 static __net_init int sysctl_route_net_init(struct net *net)
102864 {
102865- struct ctl_table *tbl;
102866+ ctl_table_no_const *tbl = NULL;
102867
102868- tbl = ipv4_route_flush_table;
102869 if (!net_eq(net, &init_net)) {
102870- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102871+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
102872 if (tbl == NULL)
102873 goto err_dup;
102874
102875 /* Don't export sysctls to unprivileged users */
102876 if (net->user_ns != &init_user_ns)
102877 tbl[0].procname = NULL;
102878- }
102879- tbl[0].extra1 = net;
102880+ tbl[0].extra1 = net;
102881+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102882+ } else
102883+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
102884
102885- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
102886 if (net->ipv4.route_hdr == NULL)
102887 goto err_reg;
102888 return 0;
102889
102890 err_reg:
102891- if (tbl != ipv4_route_flush_table)
102892- kfree(tbl);
102893+ kfree(tbl);
102894 err_dup:
102895 return -ENOMEM;
102896 }
102897@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
102898
102899 static __net_init int rt_genid_init(struct net *net)
102900 {
102901- atomic_set(&net->ipv4.rt_genid, 0);
102902- atomic_set(&net->fnhe_genid, 0);
102903+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
102904+ atomic_set_unchecked(&net->fnhe_genid, 0);
102905 get_random_bytes(&net->ipv4.dev_addr_genid,
102906 sizeof(net->ipv4.dev_addr_genid));
102907 return 0;
102908@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
102909 {
102910 int rc = 0;
102911
102912- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
102913- if (!ip_idents)
102914- panic("IP: failed to allocate ip_idents\n");
102915-
102916- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
102917+ prandom_bytes(ip_idents, sizeof(ip_idents));
102918
102919 #ifdef CONFIG_IP_ROUTE_CLASSID
102920 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
102921diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
102922index e0ee384..e2688d9 100644
102923--- a/net/ipv4/sysctl_net_ipv4.c
102924+++ b/net/ipv4/sysctl_net_ipv4.c
102925@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
102926 container_of(table->data, struct net, ipv4.ip_local_ports.range);
102927 int ret;
102928 int range[2];
102929- struct ctl_table tmp = {
102930+ ctl_table_no_const tmp = {
102931 .data = &range,
102932 .maxlen = sizeof(range),
102933 .mode = table->mode,
102934@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
102935 int ret;
102936 gid_t urange[2];
102937 kgid_t low, high;
102938- struct ctl_table tmp = {
102939+ ctl_table_no_const tmp = {
102940 .data = &urange,
102941 .maxlen = sizeof(urange),
102942 .mode = table->mode,
102943@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
102944 void __user *buffer, size_t *lenp, loff_t *ppos)
102945 {
102946 char val[TCP_CA_NAME_MAX];
102947- struct ctl_table tbl = {
102948+ ctl_table_no_const tbl = {
102949 .data = val,
102950 .maxlen = TCP_CA_NAME_MAX,
102951 };
102952@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
102953 void __user *buffer, size_t *lenp,
102954 loff_t *ppos)
102955 {
102956- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
102957+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
102958 int ret;
102959
102960 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
102961@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
102962 void __user *buffer, size_t *lenp,
102963 loff_t *ppos)
102964 {
102965- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
102966+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
102967 int ret;
102968
102969 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
102970@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
102971 void __user *buffer, size_t *lenp,
102972 loff_t *ppos)
102973 {
102974- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
102975+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
102976 struct tcp_fastopen_context *ctxt;
102977 int ret;
102978 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
102979@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
102980
102981 static __net_init int ipv4_sysctl_init_net(struct net *net)
102982 {
102983- struct ctl_table *table;
102984+ ctl_table_no_const *table = NULL;
102985
102986- table = ipv4_net_table;
102987 if (!net_eq(net, &init_net)) {
102988 int i;
102989
102990- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
102991+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
102992 if (table == NULL)
102993 goto err_alloc;
102994
102995@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
102996 table[i].data += (void *)net - (void *)&init_net;
102997 }
102998
102999- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103000+ if (!net_eq(net, &init_net))
103001+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103002+ else
103003+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103004 if (net->ipv4.ipv4_hdr == NULL)
103005 goto err_reg;
103006
103007diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103008index 075ab4d..623bb9d 100644
103009--- a/net/ipv4/tcp_input.c
103010+++ b/net/ipv4/tcp_input.c
103011@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103012 * without any lock. We want to make sure compiler wont store
103013 * intermediate values in this location.
103014 */
103015- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103016+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103017 sk->sk_max_pacing_rate);
103018 }
103019
103020@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103021 * simplifies code)
103022 */
103023 static void
103024-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103025+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103026 struct sk_buff *head, struct sk_buff *tail,
103027 u32 start, u32 end)
103028 {
103029@@ -5506,6 +5506,7 @@ discard:
103030 tcp_paws_reject(&tp->rx_opt, 0))
103031 goto discard_and_undo;
103032
103033+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103034 if (th->syn) {
103035 /* We see SYN without ACK. It is attempt of
103036 * simultaneous connect with crossed SYNs.
103037@@ -5556,6 +5557,7 @@ discard:
103038 goto discard;
103039 #endif
103040 }
103041+#endif
103042 /* "fifth, if neither of the SYN or RST bits is set then
103043 * drop the segment and return."
103044 */
103045@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103046 goto discard;
103047
103048 if (th->syn) {
103049- if (th->fin)
103050+ if (th->fin || th->urg || th->psh)
103051 goto discard;
103052 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103053 return 1;
103054diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103055index d22f544..62f6787 100644
103056--- a/net/ipv4/tcp_ipv4.c
103057+++ b/net/ipv4/tcp_ipv4.c
103058@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103059 int sysctl_tcp_low_latency __read_mostly;
103060 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103061
103062+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103063+extern int grsec_enable_blackhole;
103064+#endif
103065+
103066 #ifdef CONFIG_TCP_MD5SIG
103067 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103068 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103069@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103070 return 0;
103071
103072 reset:
103073+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103074+ if (!grsec_enable_blackhole)
103075+#endif
103076 tcp_v4_send_reset(rsk, skb);
103077 discard:
103078 kfree_skb(skb);
103079@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103080 TCP_SKB_CB(skb)->sacked = 0;
103081
103082 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103083- if (!sk)
103084+ if (!sk) {
103085+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103086+ ret = 1;
103087+#endif
103088 goto no_tcp_socket;
103089-
103090+ }
103091 process:
103092- if (sk->sk_state == TCP_TIME_WAIT)
103093+ if (sk->sk_state == TCP_TIME_WAIT) {
103094+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103095+ ret = 2;
103096+#endif
103097 goto do_time_wait;
103098+ }
103099
103100 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103101 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103102@@ -1698,6 +1712,10 @@ csum_error:
103103 bad_packet:
103104 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103105 } else {
103106+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103107+ if (!grsec_enable_blackhole || (ret == 1 &&
103108+ (skb->dev->flags & IFF_LOOPBACK)))
103109+#endif
103110 tcp_v4_send_reset(NULL, skb);
103111 }
103112
103113diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103114index 63d2680..2db9d6b 100644
103115--- a/net/ipv4/tcp_minisocks.c
103116+++ b/net/ipv4/tcp_minisocks.c
103117@@ -27,6 +27,10 @@
103118 #include <net/inet_common.h>
103119 #include <net/xfrm.h>
103120
103121+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103122+extern int grsec_enable_blackhole;
103123+#endif
103124+
103125 int sysctl_tcp_syncookies __read_mostly = 1;
103126 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103127
103128@@ -739,7 +743,10 @@ embryonic_reset:
103129 * avoid becoming vulnerable to outside attack aiming at
103130 * resetting legit local connections.
103131 */
103132- req->rsk_ops->send_reset(sk, skb);
103133+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103134+ if (!grsec_enable_blackhole)
103135+#endif
103136+ req->rsk_ops->send_reset(sk, skb);
103137 } else if (fastopen) { /* received a valid RST pkt */
103138 reqsk_fastopen_remove(sk, req, true);
103139 tcp_reset(sk);
103140diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103141index ebf5ff5..4d1ff32 100644
103142--- a/net/ipv4/tcp_probe.c
103143+++ b/net/ipv4/tcp_probe.c
103144@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103145 if (cnt + width >= len)
103146 break;
103147
103148- if (copy_to_user(buf + cnt, tbuf, width))
103149+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103150 return -EFAULT;
103151 cnt += width;
103152 }
103153diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103154index 1829c7f..c0b3d52 100644
103155--- a/net/ipv4/tcp_timer.c
103156+++ b/net/ipv4/tcp_timer.c
103157@@ -22,6 +22,10 @@
103158 #include <linux/gfp.h>
103159 #include <net/tcp.h>
103160
103161+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103162+extern int grsec_lastack_retries;
103163+#endif
103164+
103165 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103166 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103167 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103168@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103169 }
103170 }
103171
103172+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103173+ if ((sk->sk_state == TCP_LAST_ACK) &&
103174+ (grsec_lastack_retries > 0) &&
103175+ (grsec_lastack_retries < retry_until))
103176+ retry_until = grsec_lastack_retries;
103177+#endif
103178+
103179 if (retransmits_timed_out(sk, retry_until,
103180 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103181 /* Has it gone just too far? */
103182diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103183index 13b4dcf..b866a2a 100644
103184--- a/net/ipv4/udp.c
103185+++ b/net/ipv4/udp.c
103186@@ -87,6 +87,7 @@
103187 #include <linux/types.h>
103188 #include <linux/fcntl.h>
103189 #include <linux/module.h>
103190+#include <linux/security.h>
103191 #include <linux/socket.h>
103192 #include <linux/sockios.h>
103193 #include <linux/igmp.h>
103194@@ -114,6 +115,10 @@
103195 #include <net/busy_poll.h>
103196 #include "udp_impl.h"
103197
103198+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103199+extern int grsec_enable_blackhole;
103200+#endif
103201+
103202 struct udp_table udp_table __read_mostly;
103203 EXPORT_SYMBOL(udp_table);
103204
103205@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103206 return true;
103207 }
103208
103209+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103210+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103211+
103212 /*
103213 * This routine is called by the ICMP module when it gets some
103214 * sort of error condition. If err < 0 then the socket should
103215@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103216 dport = usin->sin_port;
103217 if (dport == 0)
103218 return -EINVAL;
103219+
103220+ err = gr_search_udp_sendmsg(sk, usin);
103221+ if (err)
103222+ return err;
103223 } else {
103224 if (sk->sk_state != TCP_ESTABLISHED)
103225 return -EDESTADDRREQ;
103226+
103227+ err = gr_search_udp_sendmsg(sk, NULL);
103228+ if (err)
103229+ return err;
103230+
103231 daddr = inet->inet_daddr;
103232 dport = inet->inet_dport;
103233 /* Open fast path for connected socket.
103234@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103235 IS_UDPLITE(sk));
103236 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103237 IS_UDPLITE(sk));
103238- atomic_inc(&sk->sk_drops);
103239+ atomic_inc_unchecked(&sk->sk_drops);
103240 __skb_unlink(skb, rcvq);
103241 __skb_queue_tail(&list_kill, skb);
103242 }
103243@@ -1275,6 +1292,10 @@ try_again:
103244 if (!skb)
103245 goto out;
103246
103247+ err = gr_search_udp_recvmsg(sk, skb);
103248+ if (err)
103249+ goto out_free;
103250+
103251 ulen = skb->len - sizeof(struct udphdr);
103252 copied = len;
103253 if (copied > ulen)
103254@@ -1307,7 +1328,7 @@ try_again:
103255 if (unlikely(err)) {
103256 trace_kfree_skb(skb, udp_recvmsg);
103257 if (!peeked) {
103258- atomic_inc(&sk->sk_drops);
103259+ atomic_inc_unchecked(&sk->sk_drops);
103260 UDP_INC_STATS_USER(sock_net(sk),
103261 UDP_MIB_INERRORS, is_udplite);
103262 }
103263@@ -1605,7 +1626,7 @@ csum_error:
103264 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103265 drop:
103266 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103267- atomic_inc(&sk->sk_drops);
103268+ atomic_inc_unchecked(&sk->sk_drops);
103269 kfree_skb(skb);
103270 return -1;
103271 }
103272@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103273 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103274
103275 if (!skb1) {
103276- atomic_inc(&sk->sk_drops);
103277+ atomic_inc_unchecked(&sk->sk_drops);
103278 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103279 IS_UDPLITE(sk));
103280 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103281@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103282 goto csum_error;
103283
103284 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103285+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103286+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103287+#endif
103288 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103289
103290 /*
103291@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103292 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103293 0, sock_i_ino(sp),
103294 atomic_read(&sp->sk_refcnt), sp,
103295- atomic_read(&sp->sk_drops));
103296+ atomic_read_unchecked(&sp->sk_drops));
103297 }
103298
103299 int udp4_seq_show(struct seq_file *seq, void *v)
103300diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103301index 6156f68..d6ab46d 100644
103302--- a/net/ipv4/xfrm4_policy.c
103303+++ b/net/ipv4/xfrm4_policy.c
103304@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103305 fl4->flowi4_tos = iph->tos;
103306 }
103307
103308-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103309+static int xfrm4_garbage_collect(struct dst_ops *ops)
103310 {
103311 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103312
103313- xfrm4_policy_afinfo.garbage_collect(net);
103314+ xfrm_garbage_collect_deferred(net);
103315 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103316 }
103317
103318@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103319
103320 static int __net_init xfrm4_net_init(struct net *net)
103321 {
103322- struct ctl_table *table;
103323+ ctl_table_no_const *table = NULL;
103324 struct ctl_table_header *hdr;
103325
103326- table = xfrm4_policy_table;
103327 if (!net_eq(net, &init_net)) {
103328- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103329+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103330 if (!table)
103331 goto err_alloc;
103332
103333 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103334- }
103335-
103336- hdr = register_net_sysctl(net, "net/ipv4", table);
103337+ hdr = register_net_sysctl(net, "net/ipv4", table);
103338+ } else
103339+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103340 if (!hdr)
103341 goto err_reg;
103342
103343@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103344 return 0;
103345
103346 err_reg:
103347- if (!net_eq(net, &init_net))
103348- kfree(table);
103349+ kfree(table);
103350 err_alloc:
103351 return -ENOMEM;
103352 }
103353diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103354index dac9419..534fa31 100644
103355--- a/net/ipv6/addrconf.c
103356+++ b/net/ipv6/addrconf.c
103357@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103358 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103359 .mtu6 = IPV6_MIN_MTU,
103360 .accept_ra = 1,
103361- .accept_redirects = 1,
103362+ .accept_redirects = 0,
103363 .autoconf = 1,
103364 .force_mld_version = 0,
103365 .mldv1_unsolicited_report_interval = 10 * HZ,
103366@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103367 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103368 .mtu6 = IPV6_MIN_MTU,
103369 .accept_ra = 1,
103370- .accept_redirects = 1,
103371+ .accept_redirects = 0,
103372 .autoconf = 1,
103373 .force_mld_version = 0,
103374 .mldv1_unsolicited_report_interval = 10 * HZ,
103375@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103376 idx = 0;
103377 head = &net->dev_index_head[h];
103378 rcu_read_lock();
103379- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103380+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103381 net->dev_base_seq;
103382 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103383 if (idx < s_idx)
103384@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103385 p.iph.ihl = 5;
103386 p.iph.protocol = IPPROTO_IPV6;
103387 p.iph.ttl = 64;
103388- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103389+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103390
103391 if (ops->ndo_do_ioctl) {
103392 mm_segment_t oldfs = get_fs();
103393@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103394 .release = seq_release_net,
103395 };
103396
103397+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103398+extern void unregister_ipv6_seq_ops_addr(void);
103399+
103400 static int __net_init if6_proc_net_init(struct net *net)
103401 {
103402- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103403+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103404+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103405+ unregister_ipv6_seq_ops_addr();
103406 return -ENOMEM;
103407+ }
103408 return 0;
103409 }
103410
103411 static void __net_exit if6_proc_net_exit(struct net *net)
103412 {
103413 remove_proc_entry("if_inet6", net->proc_net);
103414+ unregister_ipv6_seq_ops_addr();
103415 }
103416
103417 static struct pernet_operations if6_proc_net_ops = {
103418@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103419 s_ip_idx = ip_idx = cb->args[2];
103420
103421 rcu_read_lock();
103422- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103423+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103424 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103425 idx = 0;
103426 head = &net->dev_index_head[h];
103427@@ -4840,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103428 rt_genid_bump_ipv6(net);
103429 break;
103430 }
103431- atomic_inc(&net->ipv6.dev_addr_genid);
103432+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103433 }
103434
103435 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103436@@ -4860,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103437 int *valp = ctl->data;
103438 int val = *valp;
103439 loff_t pos = *ppos;
103440- struct ctl_table lctl;
103441+ ctl_table_no_const lctl;
103442 int ret;
103443
103444 /*
103445@@ -4945,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103446 int *valp = ctl->data;
103447 int val = *valp;
103448 loff_t pos = *ppos;
103449- struct ctl_table lctl;
103450+ ctl_table_no_const lctl;
103451 int ret;
103452
103453 /*
103454diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103455index e8c4400..a4cd5da 100644
103456--- a/net/ipv6/af_inet6.c
103457+++ b/net/ipv6/af_inet6.c
103458@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103459 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103460 net->ipv6.sysctl.flowlabel_consistency = 1;
103461 net->ipv6.sysctl.auto_flowlabels = 0;
103462- atomic_set(&net->ipv6.fib6_sernum, 1);
103463+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103464
103465 err = ipv6_init_mibs(net);
103466 if (err)
103467diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103468index 49f5e73..ae02d54 100644
103469--- a/net/ipv6/datagram.c
103470+++ b/net/ipv6/datagram.c
103471@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103472 0,
103473 sock_i_ino(sp),
103474 atomic_read(&sp->sk_refcnt), sp,
103475- atomic_read(&sp->sk_drops));
103476+ atomic_read_unchecked(&sp->sk_drops));
103477 }
103478diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103479index d674152..fb5a01d 100644
103480--- a/net/ipv6/icmp.c
103481+++ b/net/ipv6/icmp.c
103482@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103483
103484 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103485 {
103486- struct ctl_table *table;
103487+ ctl_table_no_const *table;
103488
103489 table = kmemdup(ipv6_icmp_table_template,
103490 sizeof(ipv6_icmp_table_template),
103491diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103492index f1c6d5e..faabef6 100644
103493--- a/net/ipv6/ip6_fib.c
103494+++ b/net/ipv6/ip6_fib.c
103495@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103496 int new, old;
103497
103498 do {
103499- old = atomic_read(&net->ipv6.fib6_sernum);
103500+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103501 new = old < INT_MAX ? old + 1 : 1;
103502- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103503+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103504 old, new) != old);
103505 return new;
103506 }
103507diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103508index 01ccc28..66861c7 100644
103509--- a/net/ipv6/ip6_gre.c
103510+++ b/net/ipv6/ip6_gre.c
103511@@ -71,8 +71,8 @@ struct ip6gre_net {
103512 struct net_device *fb_tunnel_dev;
103513 };
103514
103515-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103516-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103517+static struct rtnl_link_ops ip6gre_link_ops;
103518+static struct rtnl_link_ops ip6gre_tap_ops;
103519 static int ip6gre_tunnel_init(struct net_device *dev);
103520 static void ip6gre_tunnel_setup(struct net_device *dev);
103521 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103522@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103523 }
103524
103525
103526-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103527+static struct inet6_protocol ip6gre_protocol = {
103528 .handler = ip6gre_rcv,
103529 .err_handler = ip6gre_err,
103530 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103531@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103532 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103533 };
103534
103535-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103536+static struct rtnl_link_ops ip6gre_link_ops = {
103537 .kind = "ip6gre",
103538 .maxtype = IFLA_GRE_MAX,
103539 .policy = ip6gre_policy,
103540@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103541 .fill_info = ip6gre_fill_info,
103542 };
103543
103544-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103545+static struct rtnl_link_ops ip6gre_tap_ops = {
103546 .kind = "ip6gretap",
103547 .maxtype = IFLA_GRE_MAX,
103548 .policy = ip6gre_policy,
103549diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103550index 92b3da5..77837b8 100644
103551--- a/net/ipv6/ip6_tunnel.c
103552+++ b/net/ipv6/ip6_tunnel.c
103553@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103554
103555 static int ip6_tnl_dev_init(struct net_device *dev);
103556 static void ip6_tnl_dev_setup(struct net_device *dev);
103557-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103558+static struct rtnl_link_ops ip6_link_ops;
103559
103560 static int ip6_tnl_net_id __read_mostly;
103561 struct ip6_tnl_net {
103562@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103563 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103564 };
103565
103566-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103567+static struct rtnl_link_ops ip6_link_ops = {
103568 .kind = "ip6tnl",
103569 .maxtype = IFLA_IPTUN_MAX,
103570 .policy = ip6_tnl_policy,
103571diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103572index ace10d0..97a8b49 100644
103573--- a/net/ipv6/ip6_vti.c
103574+++ b/net/ipv6/ip6_vti.c
103575@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103576
103577 static int vti6_dev_init(struct net_device *dev);
103578 static void vti6_dev_setup(struct net_device *dev);
103579-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103580+static struct rtnl_link_ops vti6_link_ops;
103581
103582 static int vti6_net_id __read_mostly;
103583 struct vti6_net {
103584@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103585 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103586 };
103587
103588-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103589+static struct rtnl_link_ops vti6_link_ops = {
103590 .kind = "vti6",
103591 .maxtype = IFLA_VTI_MAX,
103592 .policy = vti6_policy,
103593diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103594index 66980d8d..8aef0d1 100644
103595--- a/net/ipv6/ipv6_sockglue.c
103596+++ b/net/ipv6/ipv6_sockglue.c
103597@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103598 if (sk->sk_type != SOCK_STREAM)
103599 return -ENOPROTOOPT;
103600
103601- msg.msg_control = optval;
103602+ msg.msg_control = (void __force_kernel *)optval;
103603 msg.msg_controllen = len;
103604 msg.msg_flags = flags;
103605
103606diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103607index e080fbb..412b3cf 100644
103608--- a/net/ipv6/netfilter/ip6_tables.c
103609+++ b/net/ipv6/netfilter/ip6_tables.c
103610@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103611 #endif
103612
103613 static int get_info(struct net *net, void __user *user,
103614- const int *len, int compat)
103615+ int len, int compat)
103616 {
103617 char name[XT_TABLE_MAXNAMELEN];
103618 struct xt_table *t;
103619 int ret;
103620
103621- if (*len != sizeof(struct ip6t_getinfo)) {
103622- duprintf("length %u != %zu\n", *len,
103623+ if (len != sizeof(struct ip6t_getinfo)) {
103624+ duprintf("length %u != %zu\n", len,
103625 sizeof(struct ip6t_getinfo));
103626 return -EINVAL;
103627 }
103628@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103629 info.size = private->size;
103630 strcpy(info.name, name);
103631
103632- if (copy_to_user(user, &info, *len) != 0)
103633+ if (copy_to_user(user, &info, len) != 0)
103634 ret = -EFAULT;
103635 else
103636 ret = 0;
103637@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103638
103639 switch (cmd) {
103640 case IP6T_SO_GET_INFO:
103641- ret = get_info(sock_net(sk), user, len, 1);
103642+ ret = get_info(sock_net(sk), user, *len, 1);
103643 break;
103644 case IP6T_SO_GET_ENTRIES:
103645 ret = compat_get_entries(sock_net(sk), user, len);
103646@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103647
103648 switch (cmd) {
103649 case IP6T_SO_GET_INFO:
103650- ret = get_info(sock_net(sk), user, len, 0);
103651+ ret = get_info(sock_net(sk), user, *len, 0);
103652 break;
103653
103654 case IP6T_SO_GET_ENTRIES:
103655diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103656index 6f187c8..34b367f 100644
103657--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103658+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103659@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103660
103661 static int nf_ct_frag6_sysctl_register(struct net *net)
103662 {
103663- struct ctl_table *table;
103664+ ctl_table_no_const *table = NULL;
103665 struct ctl_table_header *hdr;
103666
103667- table = nf_ct_frag6_sysctl_table;
103668 if (!net_eq(net, &init_net)) {
103669- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103670+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103671 GFP_KERNEL);
103672 if (table == NULL)
103673 goto err_alloc;
103674@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103675 table[2].data = &net->nf_frag.frags.high_thresh;
103676 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103677 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103678- }
103679-
103680- hdr = register_net_sysctl(net, "net/netfilter", table);
103681+ hdr = register_net_sysctl(net, "net/netfilter", table);
103682+ } else
103683+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103684 if (hdr == NULL)
103685 goto err_reg;
103686
103687@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103688 return 0;
103689
103690 err_reg:
103691- if (!net_eq(net, &init_net))
103692- kfree(table);
103693+ kfree(table);
103694 err_alloc:
103695 return -ENOMEM;
103696 }
103697diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103698index fe7e3e4..47aba96 100644
103699--- a/net/ipv6/ping.c
103700+++ b/net/ipv6/ping.c
103701@@ -242,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
103702 };
103703 #endif
103704
103705+static struct pingv6_ops real_pingv6_ops = {
103706+ .ipv6_recv_error = ipv6_recv_error,
103707+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103708+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103709+ .icmpv6_err_convert = icmpv6_err_convert,
103710+ .ipv6_icmp_error = ipv6_icmp_error,
103711+ .ipv6_chk_addr = ipv6_chk_addr,
103712+};
103713+
103714+static struct pingv6_ops dummy_pingv6_ops = {
103715+ .ipv6_recv_error = dummy_ipv6_recv_error,
103716+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103717+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103718+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103719+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103720+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103721+};
103722+
103723 int __init pingv6_init(void)
103724 {
103725 #ifdef CONFIG_PROC_FS
103726@@ -249,13 +267,7 @@ int __init pingv6_init(void)
103727 if (ret)
103728 return ret;
103729 #endif
103730- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103731- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103732- pingv6_ops.ip6_datagram_recv_specific_ctl =
103733- ip6_datagram_recv_specific_ctl;
103734- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103735- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103736- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103737+ pingv6_ops = &real_pingv6_ops;
103738 return inet6_register_protosw(&pingv6_protosw);
103739 }
103740
103741@@ -264,14 +276,9 @@ int __init pingv6_init(void)
103742 */
103743 void pingv6_exit(void)
103744 {
103745- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103746- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103747- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103748- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103749- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103750- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103751 #ifdef CONFIG_PROC_FS
103752 unregister_pernet_subsys(&ping_v6_net_ops);
103753 #endif
103754+ pingv6_ops = &dummy_pingv6_ops;
103755 inet6_unregister_protosw(&pingv6_protosw);
103756 }
103757diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103758index 679253d0..70b653c 100644
103759--- a/net/ipv6/proc.c
103760+++ b/net/ipv6/proc.c
103761@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
103762 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
103763 goto proc_snmp6_fail;
103764
103765- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
103766+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
103767 if (!net->mib.proc_net_devsnmp6)
103768 goto proc_dev_snmp6_fail;
103769 return 0;
103770diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
103771index ee25631..3c3ac5d 100644
103772--- a/net/ipv6/raw.c
103773+++ b/net/ipv6/raw.c
103774@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
103775 {
103776 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
103777 skb_checksum_complete(skb)) {
103778- atomic_inc(&sk->sk_drops);
103779+ atomic_inc_unchecked(&sk->sk_drops);
103780 kfree_skb(skb);
103781 return NET_RX_DROP;
103782 }
103783@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103784 struct raw6_sock *rp = raw6_sk(sk);
103785
103786 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
103787- atomic_inc(&sk->sk_drops);
103788+ atomic_inc_unchecked(&sk->sk_drops);
103789 kfree_skb(skb);
103790 return NET_RX_DROP;
103791 }
103792@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103793
103794 if (inet->hdrincl) {
103795 if (skb_checksum_complete(skb)) {
103796- atomic_inc(&sk->sk_drops);
103797+ atomic_inc_unchecked(&sk->sk_drops);
103798 kfree_skb(skb);
103799 return NET_RX_DROP;
103800 }
103801@@ -609,7 +609,7 @@ out:
103802 return err;
103803 }
103804
103805-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
103806+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
103807 struct flowi6 *fl6, struct dst_entry **dstp,
103808 unsigned int flags)
103809 {
103810@@ -916,12 +916,15 @@ do_confirm:
103811 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
103812 char __user *optval, int optlen)
103813 {
103814+ struct icmp6_filter filter;
103815+
103816 switch (optname) {
103817 case ICMPV6_FILTER:
103818 if (optlen > sizeof(struct icmp6_filter))
103819 optlen = sizeof(struct icmp6_filter);
103820- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
103821+ if (copy_from_user(&filter, optval, optlen))
103822 return -EFAULT;
103823+ raw6_sk(sk)->filter = filter;
103824 return 0;
103825 default:
103826 return -ENOPROTOOPT;
103827@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103828 char __user *optval, int __user *optlen)
103829 {
103830 int len;
103831+ struct icmp6_filter filter;
103832
103833 switch (optname) {
103834 case ICMPV6_FILTER:
103835@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103836 len = sizeof(struct icmp6_filter);
103837 if (put_user(len, optlen))
103838 return -EFAULT;
103839- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
103840+ filter = raw6_sk(sk)->filter;
103841+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
103842 return -EFAULT;
103843 return 0;
103844 default:
103845diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
103846index d7d70e6..bd5e9fc 100644
103847--- a/net/ipv6/reassembly.c
103848+++ b/net/ipv6/reassembly.c
103849@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
103850
103851 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103852 {
103853- struct ctl_table *table;
103854+ ctl_table_no_const *table = NULL;
103855 struct ctl_table_header *hdr;
103856
103857- table = ip6_frags_ns_ctl_table;
103858 if (!net_eq(net, &init_net)) {
103859- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103860+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
103861 if (table == NULL)
103862 goto err_alloc;
103863
103864@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103865 /* Don't export sysctls to unprivileged users */
103866 if (net->user_ns != &init_user_ns)
103867 table[0].procname = NULL;
103868- }
103869+ hdr = register_net_sysctl(net, "net/ipv6", table);
103870+ } else
103871+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
103872
103873- hdr = register_net_sysctl(net, "net/ipv6", table);
103874 if (hdr == NULL)
103875 goto err_reg;
103876
103877@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103878 return 0;
103879
103880 err_reg:
103881- if (!net_eq(net, &init_net))
103882- kfree(table);
103883+ kfree(table);
103884 err_alloc:
103885 return -ENOMEM;
103886 }
103887diff --git a/net/ipv6/route.c b/net/ipv6/route.c
103888index 1528d84..f393960 100644
103889--- a/net/ipv6/route.c
103890+++ b/net/ipv6/route.c
103891@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
103892
103893 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
103894 {
103895- struct ctl_table *table;
103896+ ctl_table_no_const *table;
103897
103898 table = kmemdup(ipv6_route_table_template,
103899 sizeof(ipv6_route_table_template),
103900diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
103901index cdbfe5a..e13eb31 100644
103902--- a/net/ipv6/sit.c
103903+++ b/net/ipv6/sit.c
103904@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
103905 static void ipip6_dev_free(struct net_device *dev);
103906 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
103907 __be32 *v4dst);
103908-static struct rtnl_link_ops sit_link_ops __read_mostly;
103909+static struct rtnl_link_ops sit_link_ops;
103910
103911 static int sit_net_id __read_mostly;
103912 struct sit_net {
103913@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
103914 unregister_netdevice_queue(dev, head);
103915 }
103916
103917-static struct rtnl_link_ops sit_link_ops __read_mostly = {
103918+static struct rtnl_link_ops sit_link_ops = {
103919 .kind = "sit",
103920 .maxtype = IFLA_IPTUN_MAX,
103921 .policy = ipip6_policy,
103922diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
103923index c5c10fa..2577d51 100644
103924--- a/net/ipv6/sysctl_net_ipv6.c
103925+++ b/net/ipv6/sysctl_net_ipv6.c
103926@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
103927
103928 static int __net_init ipv6_sysctl_net_init(struct net *net)
103929 {
103930- struct ctl_table *ipv6_table;
103931+ ctl_table_no_const *ipv6_table;
103932 struct ctl_table *ipv6_route_table;
103933 struct ctl_table *ipv6_icmp_table;
103934 int err;
103935diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
103936index 9c0b54e..5e7bd8f 100644
103937--- a/net/ipv6/tcp_ipv6.c
103938+++ b/net/ipv6/tcp_ipv6.c
103939@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
103940 }
103941 }
103942
103943+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103944+extern int grsec_enable_blackhole;
103945+#endif
103946+
103947 static void tcp_v6_hash(struct sock *sk)
103948 {
103949 if (sk->sk_state != TCP_CLOSE) {
103950@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
103951 return 0;
103952
103953 reset:
103954+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103955+ if (!grsec_enable_blackhole)
103956+#endif
103957 tcp_v6_send_reset(sk, skb);
103958 discard:
103959 if (opt_skb)
103960@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
103961
103962 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
103963 inet6_iif(skb));
103964- if (!sk)
103965+ if (!sk) {
103966+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103967+ ret = 1;
103968+#endif
103969 goto no_tcp_socket;
103970+ }
103971
103972 process:
103973- if (sk->sk_state == TCP_TIME_WAIT)
103974+ if (sk->sk_state == TCP_TIME_WAIT) {
103975+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103976+ ret = 2;
103977+#endif
103978 goto do_time_wait;
103979+ }
103980
103981 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
103982 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103983@@ -1499,6 +1514,10 @@ csum_error:
103984 bad_packet:
103985 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103986 } else {
103987+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103988+ if (!grsec_enable_blackhole || (ret == 1 &&
103989+ (skb->dev->flags & IFF_LOOPBACK)))
103990+#endif
103991 tcp_v6_send_reset(NULL, skb);
103992 }
103993
103994diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
103995index 189dc4a..458bec0 100644
103996--- a/net/ipv6/udp.c
103997+++ b/net/ipv6/udp.c
103998@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
103999 udp_ipv6_hash_secret + net_hash_mix(net));
104000 }
104001
104002+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104003+extern int grsec_enable_blackhole;
104004+#endif
104005+
104006 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104007 {
104008 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104009@@ -448,7 +452,7 @@ try_again:
104010 if (unlikely(err)) {
104011 trace_kfree_skb(skb, udpv6_recvmsg);
104012 if (!peeked) {
104013- atomic_inc(&sk->sk_drops);
104014+ atomic_inc_unchecked(&sk->sk_drops);
104015 if (is_udp4)
104016 UDP_INC_STATS_USER(sock_net(sk),
104017 UDP_MIB_INERRORS,
104018@@ -714,7 +718,7 @@ csum_error:
104019 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104020 drop:
104021 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104022- atomic_inc(&sk->sk_drops);
104023+ atomic_inc_unchecked(&sk->sk_drops);
104024 kfree_skb(skb);
104025 return -1;
104026 }
104027@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104028 if (likely(skb1 == NULL))
104029 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104030 if (!skb1) {
104031- atomic_inc(&sk->sk_drops);
104032+ atomic_inc_unchecked(&sk->sk_drops);
104033 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104034 IS_UDPLITE(sk));
104035 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104036@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104037 goto csum_error;
104038
104039 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104040+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104041+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104042+#endif
104043 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104044
104045 kfree_skb(skb);
104046diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104047index 48bf5a0..691985a 100644
104048--- a/net/ipv6/xfrm6_policy.c
104049+++ b/net/ipv6/xfrm6_policy.c
104050@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104051 }
104052 }
104053
104054-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104055+static int xfrm6_garbage_collect(struct dst_ops *ops)
104056 {
104057 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104058
104059- xfrm6_policy_afinfo.garbage_collect(net);
104060+ xfrm_garbage_collect_deferred(net);
104061 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104062 }
104063
104064@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104065
104066 static int __net_init xfrm6_net_init(struct net *net)
104067 {
104068- struct ctl_table *table;
104069+ ctl_table_no_const *table = NULL;
104070 struct ctl_table_header *hdr;
104071
104072- table = xfrm6_policy_table;
104073 if (!net_eq(net, &init_net)) {
104074- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104075+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104076 if (!table)
104077 goto err_alloc;
104078
104079 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104080- }
104081+ hdr = register_net_sysctl(net, "net/ipv6", table);
104082+ } else
104083+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104084
104085- hdr = register_net_sysctl(net, "net/ipv6", table);
104086 if (!hdr)
104087 goto err_reg;
104088
104089@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104090 return 0;
104091
104092 err_reg:
104093- if (!net_eq(net, &init_net))
104094- kfree(table);
104095+ kfree(table);
104096 err_alloc:
104097 return -ENOMEM;
104098 }
104099diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104100index c1d247e..9e5949d 100644
104101--- a/net/ipx/ipx_proc.c
104102+++ b/net/ipx/ipx_proc.c
104103@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104104 struct proc_dir_entry *p;
104105 int rc = -ENOMEM;
104106
104107- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104108+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104109
104110 if (!ipx_proc_dir)
104111 goto out;
104112diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104113index 4efe486..dee966e 100644
104114--- a/net/irda/ircomm/ircomm_tty.c
104115+++ b/net/irda/ircomm/ircomm_tty.c
104116@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104117 add_wait_queue(&port->open_wait, &wait);
104118
104119 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104120- __FILE__, __LINE__, tty->driver->name, port->count);
104121+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104122
104123 spin_lock_irqsave(&port->lock, flags);
104124- port->count--;
104125+ atomic_dec(&port->count);
104126 port->blocked_open++;
104127 spin_unlock_irqrestore(&port->lock, flags);
104128
104129@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104130 }
104131
104132 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104133- __FILE__, __LINE__, tty->driver->name, port->count);
104134+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104135
104136 schedule();
104137 }
104138@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104139
104140 spin_lock_irqsave(&port->lock, flags);
104141 if (!tty_hung_up_p(filp))
104142- port->count++;
104143+ atomic_inc(&port->count);
104144 port->blocked_open--;
104145 spin_unlock_irqrestore(&port->lock, flags);
104146
104147 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104148- __FILE__, __LINE__, tty->driver->name, port->count);
104149+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104150
104151 if (!retval)
104152 port->flags |= ASYNC_NORMAL_ACTIVE;
104153@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104154
104155 /* ++ is not atomic, so this should be protected - Jean II */
104156 spin_lock_irqsave(&self->port.lock, flags);
104157- self->port.count++;
104158+ atomic_inc(&self->port.count);
104159 spin_unlock_irqrestore(&self->port.lock, flags);
104160 tty_port_tty_set(&self->port, tty);
104161
104162 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104163- self->line, self->port.count);
104164+ self->line, atomic_read(&self->port.count));
104165
104166 /* Not really used by us, but lets do it anyway */
104167 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104168@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104169 tty_kref_put(port->tty);
104170 }
104171 port->tty = NULL;
104172- port->count = 0;
104173+ atomic_set(&port->count, 0);
104174 spin_unlock_irqrestore(&port->lock, flags);
104175
104176 wake_up_interruptible(&port->open_wait);
104177@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104178 seq_putc(m, '\n');
104179
104180 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104181- seq_printf(m, "Open count: %d\n", self->port.count);
104182+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104183 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104184 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104185
104186diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104187index b9ac598..f88cc56 100644
104188--- a/net/irda/irproc.c
104189+++ b/net/irda/irproc.c
104190@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104191 {
104192 int i;
104193
104194- proc_irda = proc_mkdir("irda", init_net.proc_net);
104195+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104196 if (proc_irda == NULL)
104197 return;
104198
104199diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104200index 2e9953b..ed06350 100644
104201--- a/net/iucv/af_iucv.c
104202+++ b/net/iucv/af_iucv.c
104203@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104204 {
104205 char name[12];
104206
104207- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104208+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104209 while (__iucv_get_sock_by_name(name)) {
104210 sprintf(name, "%08x",
104211- atomic_inc_return(&iucv_sk_list.autobind_name));
104212+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104213 }
104214 memcpy(iucv->src_name, name, 8);
104215 }
104216diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104217index 2a6a1fd..6c112b0 100644
104218--- a/net/iucv/iucv.c
104219+++ b/net/iucv/iucv.c
104220@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104221 return NOTIFY_OK;
104222 }
104223
104224-static struct notifier_block __refdata iucv_cpu_notifier = {
104225+static struct notifier_block iucv_cpu_notifier = {
104226 .notifier_call = iucv_cpu_notify,
104227 };
104228
104229diff --git a/net/key/af_key.c b/net/key/af_key.c
104230index f8ac939..1e189bf 100644
104231--- a/net/key/af_key.c
104232+++ b/net/key/af_key.c
104233@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104234 static u32 get_acqseq(void)
104235 {
104236 u32 res;
104237- static atomic_t acqseq;
104238+ static atomic_unchecked_t acqseq;
104239
104240 do {
104241- res = atomic_inc_return(&acqseq);
104242+ res = atomic_inc_return_unchecked(&acqseq);
104243 } while (!res);
104244 return res;
104245 }
104246diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104247index 781b3a2..73a7434 100644
104248--- a/net/l2tp/l2tp_eth.c
104249+++ b/net/l2tp/l2tp_eth.c
104250@@ -42,12 +42,12 @@ struct l2tp_eth {
104251 struct sock *tunnel_sock;
104252 struct l2tp_session *session;
104253 struct list_head list;
104254- atomic_long_t tx_bytes;
104255- atomic_long_t tx_packets;
104256- atomic_long_t tx_dropped;
104257- atomic_long_t rx_bytes;
104258- atomic_long_t rx_packets;
104259- atomic_long_t rx_errors;
104260+ atomic_long_unchecked_t tx_bytes;
104261+ atomic_long_unchecked_t tx_packets;
104262+ atomic_long_unchecked_t tx_dropped;
104263+ atomic_long_unchecked_t rx_bytes;
104264+ atomic_long_unchecked_t rx_packets;
104265+ atomic_long_unchecked_t rx_errors;
104266 };
104267
104268 /* via l2tp_session_priv() */
104269@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104270 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104271
104272 if (likely(ret == NET_XMIT_SUCCESS)) {
104273- atomic_long_add(len, &priv->tx_bytes);
104274- atomic_long_inc(&priv->tx_packets);
104275+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104276+ atomic_long_inc_unchecked(&priv->tx_packets);
104277 } else {
104278- atomic_long_inc(&priv->tx_dropped);
104279+ atomic_long_inc_unchecked(&priv->tx_dropped);
104280 }
104281 return NETDEV_TX_OK;
104282 }
104283@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104284 {
104285 struct l2tp_eth *priv = netdev_priv(dev);
104286
104287- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104288- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104289- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104290- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104291- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104292- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104293+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104294+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104295+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104296+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104297+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104298+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104299 return stats;
104300 }
104301
104302@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104303 nf_reset(skb);
104304
104305 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104306- atomic_long_inc(&priv->rx_packets);
104307- atomic_long_add(data_len, &priv->rx_bytes);
104308+ atomic_long_inc_unchecked(&priv->rx_packets);
104309+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104310 } else {
104311- atomic_long_inc(&priv->rx_errors);
104312+ atomic_long_inc_unchecked(&priv->rx_errors);
104313 }
104314 return;
104315
104316 error:
104317- atomic_long_inc(&priv->rx_errors);
104318+ atomic_long_inc_unchecked(&priv->rx_errors);
104319 kfree_skb(skb);
104320 }
104321
104322diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104323index 1a3c7e0..80f8b0c 100644
104324--- a/net/llc/llc_proc.c
104325+++ b/net/llc/llc_proc.c
104326@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104327 int rc = -ENOMEM;
104328 struct proc_dir_entry *p;
104329
104330- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104331+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104332 if (!llc_proc_dir)
104333 goto out;
104334
104335diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104336index e75d5c5..429fc95 100644
104337--- a/net/mac80211/cfg.c
104338+++ b/net/mac80211/cfg.c
104339@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104340 ret = ieee80211_vif_use_channel(sdata, chandef,
104341 IEEE80211_CHANCTX_EXCLUSIVE);
104342 }
104343- } else if (local->open_count == local->monitors) {
104344+ } else if (local_read(&local->open_count) == local->monitors) {
104345 local->_oper_chandef = *chandef;
104346 ieee80211_hw_config(local, 0);
104347 }
104348@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104349 else
104350 local->probe_req_reg--;
104351
104352- if (!local->open_count)
104353+ if (!local_read(&local->open_count))
104354 break;
104355
104356 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104357@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104358 if (chanctx_conf) {
104359 *chandef = sdata->vif.bss_conf.chandef;
104360 ret = 0;
104361- } else if (local->open_count > 0 &&
104362- local->open_count == local->monitors &&
104363+ } else if (local_read(&local->open_count) > 0 &&
104364+ local_read(&local->open_count) == local->monitors &&
104365 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104366 if (local->use_chanctx)
104367 *chandef = local->monitor_chandef;
104368diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104369index cc6e964..029a3a3 100644
104370--- a/net/mac80211/ieee80211_i.h
104371+++ b/net/mac80211/ieee80211_i.h
104372@@ -29,6 +29,7 @@
104373 #include <net/ieee80211_radiotap.h>
104374 #include <net/cfg80211.h>
104375 #include <net/mac80211.h>
104376+#include <asm/local.h>
104377 #include "key.h"
104378 #include "sta_info.h"
104379 #include "debug.h"
104380@@ -1114,7 +1115,7 @@ struct ieee80211_local {
104381 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104382 spinlock_t queue_stop_reason_lock;
104383
104384- int open_count;
104385+ local_t open_count;
104386 int monitors, cooked_mntrs;
104387 /* number of interfaces with corresponding FIF_ flags */
104388 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
104389diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104390index 4173553..e3b5a3f 100644
104391--- a/net/mac80211/iface.c
104392+++ b/net/mac80211/iface.c
104393@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104394 break;
104395 }
104396
104397- if (local->open_count == 0) {
104398+ if (local_read(&local->open_count) == 0) {
104399 res = drv_start(local);
104400 if (res)
104401 goto err_del_bss;
104402@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104403 res = drv_add_interface(local, sdata);
104404 if (res)
104405 goto err_stop;
104406- } else if (local->monitors == 0 && local->open_count == 0) {
104407+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104408 res = ieee80211_add_virtual_monitor(local);
104409 if (res)
104410 goto err_stop;
104411@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104412 atomic_inc(&local->iff_promiscs);
104413
104414 if (coming_up)
104415- local->open_count++;
104416+ local_inc(&local->open_count);
104417
104418 if (hw_reconf_flags)
104419 ieee80211_hw_config(local, hw_reconf_flags);
104420@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104421 err_del_interface:
104422 drv_remove_interface(local, sdata);
104423 err_stop:
104424- if (!local->open_count)
104425+ if (!local_read(&local->open_count))
104426 drv_stop(local);
104427 err_del_bss:
104428 sdata->bss = NULL;
104429@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104430 }
104431
104432 if (going_down)
104433- local->open_count--;
104434+ local_dec(&local->open_count);
104435
104436 switch (sdata->vif.type) {
104437 case NL80211_IFTYPE_AP_VLAN:
104438@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104439 }
104440 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104441
104442- if (local->open_count == 0)
104443+ if (local_read(&local->open_count) == 0)
104444 ieee80211_clear_tx_pending(local);
104445
104446 /*
104447@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104448 if (cancel_scan)
104449 flush_delayed_work(&local->scan_work);
104450
104451- if (local->open_count == 0) {
104452+ if (local_read(&local->open_count) == 0) {
104453 ieee80211_stop_device(local);
104454
104455 /* no reconfiguring after stop! */
104456@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104457 ieee80211_configure_filter(local);
104458 ieee80211_hw_config(local, hw_reconf_flags);
104459
104460- if (local->monitors == local->open_count)
104461+ if (local->monitors == local_read(&local->open_count))
104462 ieee80211_add_virtual_monitor(local);
104463 }
104464
104465diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104466index 6ab99da..f9502d4 100644
104467--- a/net/mac80211/main.c
104468+++ b/net/mac80211/main.c
104469@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104470 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104471 IEEE80211_CONF_CHANGE_POWER);
104472
104473- if (changed && local->open_count) {
104474+ if (changed && local_read(&local->open_count)) {
104475 ret = drv_config(local, changed);
104476 /*
104477 * Goal:
104478diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104479index 4a95fe3..0bfd713 100644
104480--- a/net/mac80211/pm.c
104481+++ b/net/mac80211/pm.c
104482@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104483 struct ieee80211_sub_if_data *sdata;
104484 struct sta_info *sta;
104485
104486- if (!local->open_count)
104487+ if (!local_read(&local->open_count))
104488 goto suspend;
104489
104490 ieee80211_scan_cancel(local);
104491@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104492 cancel_work_sync(&local->dynamic_ps_enable_work);
104493 del_timer_sync(&local->dynamic_ps_timer);
104494
104495- local->wowlan = wowlan && local->open_count;
104496+ local->wowlan = wowlan && local_read(&local->open_count);
104497 if (local->wowlan) {
104498 int err = drv_suspend(local, wowlan);
104499 if (err < 0) {
104500@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104501 WARN_ON(!list_empty(&local->chanctx_list));
104502
104503 /* stop hardware - this must stop RX */
104504- if (local->open_count)
104505+ if (local_read(&local->open_count))
104506 ieee80211_stop_device(local);
104507
104508 suspend:
104509diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104510index d53355b..21f583a 100644
104511--- a/net/mac80211/rate.c
104512+++ b/net/mac80211/rate.c
104513@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104514
104515 ASSERT_RTNL();
104516
104517- if (local->open_count)
104518+ if (local_read(&local->open_count))
104519 return -EBUSY;
104520
104521 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104522diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104523index 974ebe7..57bcd3c 100644
104524--- a/net/mac80211/util.c
104525+++ b/net/mac80211/util.c
104526@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104527 }
104528 #endif
104529 /* everything else happens only if HW was up & running */
104530- if (!local->open_count)
104531+ if (!local_read(&local->open_count))
104532 goto wake_up;
104533
104534 /*
104535@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104536 local->in_reconfig = false;
104537 barrier();
104538
104539- if (local->monitors == local->open_count && local->monitors > 0)
104540+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104541 ieee80211_add_virtual_monitor(local);
104542
104543 /*
104544diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104545index b02660f..c0f791c 100644
104546--- a/net/netfilter/Kconfig
104547+++ b/net/netfilter/Kconfig
104548@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
104549
104550 To compile it as a module, choose M here. If unsure, say N.
104551
104552+config NETFILTER_XT_MATCH_GRADM
104553+ tristate '"gradm" match support'
104554+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104555+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104556+ ---help---
104557+ The gradm match allows to match on grsecurity RBAC being enabled.
104558+ It is useful when iptables rules are applied early on bootup to
104559+ prevent connections to the machine (except from a trusted host)
104560+ while the RBAC system is disabled.
104561+
104562 config NETFILTER_XT_MATCH_HASHLIMIT
104563 tristate '"hashlimit" match support'
104564 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
104565diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104566index 89f73a9..e4e5bd9 100644
104567--- a/net/netfilter/Makefile
104568+++ b/net/netfilter/Makefile
104569@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104570 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104571 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104572 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104573+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104574 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104575 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104576 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104577diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104578index d259da3..6a32b2c 100644
104579--- a/net/netfilter/ipset/ip_set_core.c
104580+++ b/net/netfilter/ipset/ip_set_core.c
104581@@ -1952,7 +1952,7 @@ done:
104582 return ret;
104583 }
104584
104585-static struct nf_sockopt_ops so_set __read_mostly = {
104586+static struct nf_sockopt_ops so_set = {
104587 .pf = PF_INET,
104588 .get_optmin = SO_IP_SET,
104589 .get_optmax = SO_IP_SET + 1,
104590diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104591index b0f7b62..0541842 100644
104592--- a/net/netfilter/ipvs/ip_vs_conn.c
104593+++ b/net/netfilter/ipvs/ip_vs_conn.c
104594@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104595 /* Increase the refcnt counter of the dest */
104596 ip_vs_dest_hold(dest);
104597
104598- conn_flags = atomic_read(&dest->conn_flags);
104599+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104600 if (cp->protocol != IPPROTO_UDP)
104601 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104602 flags = cp->flags;
104603@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
104604
104605 cp->control = NULL;
104606 atomic_set(&cp->n_control, 0);
104607- atomic_set(&cp->in_pkts, 0);
104608+ atomic_set_unchecked(&cp->in_pkts, 0);
104609
104610 cp->packet_xmit = NULL;
104611 cp->app = NULL;
104612@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104613
104614 /* Don't drop the entry if its number of incoming packets is not
104615 located in [0, 8] */
104616- i = atomic_read(&cp->in_pkts);
104617+ i = atomic_read_unchecked(&cp->in_pkts);
104618 if (i > 8 || i < 0) return 0;
104619
104620 if (!todrop_rate[i]) return 0;
104621diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104622index b87ca32..76c7799 100644
104623--- a/net/netfilter/ipvs/ip_vs_core.c
104624+++ b/net/netfilter/ipvs/ip_vs_core.c
104625@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104626 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104627 /* do not touch skb anymore */
104628
104629- atomic_inc(&cp->in_pkts);
104630+ atomic_inc_unchecked(&cp->in_pkts);
104631 ip_vs_conn_put(cp);
104632 return ret;
104633 }
104634@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104635 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104636 pkts = sysctl_sync_threshold(ipvs);
104637 else
104638- pkts = atomic_add_return(1, &cp->in_pkts);
104639+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104640
104641 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104642 ip_vs_sync_conn(net, cp, pkts);
104643diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104644index fdcda8b..dbc1979 100644
104645--- a/net/netfilter/ipvs/ip_vs_ctl.c
104646+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104647@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104648 */
104649 ip_vs_rs_hash(ipvs, dest);
104650 }
104651- atomic_set(&dest->conn_flags, conn_flags);
104652+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104653
104654 /* bind the service */
104655 old_svc = rcu_dereference_protected(dest->svc, 1);
104656@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104657 * align with netns init in ip_vs_control_net_init()
104658 */
104659
104660-static struct ctl_table vs_vars[] = {
104661+static ctl_table_no_const vs_vars[] __read_only = {
104662 {
104663 .procname = "amemthresh",
104664 .maxlen = sizeof(int),
104665@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104666 " %-7s %-6d %-10d %-10d\n",
104667 &dest->addr.in6,
104668 ntohs(dest->port),
104669- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104670+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104671 atomic_read(&dest->weight),
104672 atomic_read(&dest->activeconns),
104673 atomic_read(&dest->inactconns));
104674@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104675 "%-7s %-6d %-10d %-10d\n",
104676 ntohl(dest->addr.ip),
104677 ntohs(dest->port),
104678- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104679+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104680 atomic_read(&dest->weight),
104681 atomic_read(&dest->activeconns),
104682 atomic_read(&dest->inactconns));
104683@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104684
104685 entry.addr = dest->addr.ip;
104686 entry.port = dest->port;
104687- entry.conn_flags = atomic_read(&dest->conn_flags);
104688+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104689 entry.weight = atomic_read(&dest->weight);
104690 entry.u_threshold = dest->u_threshold;
104691 entry.l_threshold = dest->l_threshold;
104692@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104693 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104694 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104695 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104696- (atomic_read(&dest->conn_flags) &
104697+ (atomic_read_unchecked(&dest->conn_flags) &
104698 IP_VS_CONN_F_FWD_MASK)) ||
104699 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104700 atomic_read(&dest->weight)) ||
104701@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104702 {
104703 int idx;
104704 struct netns_ipvs *ipvs = net_ipvs(net);
104705- struct ctl_table *tbl;
104706+ ctl_table_no_const *tbl;
104707
104708 atomic_set(&ipvs->dropentry, 0);
104709 spin_lock_init(&ipvs->dropentry_lock);
104710diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104711index 127f140..553d652 100644
104712--- a/net/netfilter/ipvs/ip_vs_lblc.c
104713+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104714@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104715 * IPVS LBLC sysctl table
104716 */
104717 #ifdef CONFIG_SYSCTL
104718-static struct ctl_table vs_vars_table[] = {
104719+static ctl_table_no_const vs_vars_table[] __read_only = {
104720 {
104721 .procname = "lblc_expiration",
104722 .data = NULL,
104723diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104724index 2229d2d..b32b785 100644
104725--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104726+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104727@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104728 * IPVS LBLCR sysctl table
104729 */
104730
104731-static struct ctl_table vs_vars_table[] = {
104732+static ctl_table_no_const vs_vars_table[] __read_only = {
104733 {
104734 .procname = "lblcr_expiration",
104735 .data = NULL,
104736diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104737index d93ceeb..4556144 100644
104738--- a/net/netfilter/ipvs/ip_vs_sync.c
104739+++ b/net/netfilter/ipvs/ip_vs_sync.c
104740@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104741 cp = cp->control;
104742 if (cp) {
104743 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104744- pkts = atomic_add_return(1, &cp->in_pkts);
104745+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104746 else
104747 pkts = sysctl_sync_threshold(ipvs);
104748 ip_vs_sync_conn(net, cp->control, pkts);
104749@@ -771,7 +771,7 @@ control:
104750 if (!cp)
104751 return;
104752 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104753- pkts = atomic_add_return(1, &cp->in_pkts);
104754+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104755 else
104756 pkts = sysctl_sync_threshold(ipvs);
104757 goto sloop;
104758@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
104759
104760 if (opt)
104761 memcpy(&cp->in_seq, opt, sizeof(*opt));
104762- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104763+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104764 cp->state = state;
104765 cp->old_state = cp->state;
104766 /*
104767diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
104768index 3aedbda..6a63567 100644
104769--- a/net/netfilter/ipvs/ip_vs_xmit.c
104770+++ b/net/netfilter/ipvs/ip_vs_xmit.c
104771@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
104772 else
104773 rc = NF_ACCEPT;
104774 /* do not touch skb anymore */
104775- atomic_inc(&cp->in_pkts);
104776+ atomic_inc_unchecked(&cp->in_pkts);
104777 goto out;
104778 }
104779
104780@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
104781 else
104782 rc = NF_ACCEPT;
104783 /* do not touch skb anymore */
104784- atomic_inc(&cp->in_pkts);
104785+ atomic_inc_unchecked(&cp->in_pkts);
104786 goto out;
104787 }
104788
104789diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
104790index a4b5e2a..13b1de3 100644
104791--- a/net/netfilter/nf_conntrack_acct.c
104792+++ b/net/netfilter/nf_conntrack_acct.c
104793@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
104794 #ifdef CONFIG_SYSCTL
104795 static int nf_conntrack_acct_init_sysctl(struct net *net)
104796 {
104797- struct ctl_table *table;
104798+ ctl_table_no_const *table;
104799
104800 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
104801 GFP_KERNEL);
104802diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
104803index 46d1b26..b7f3b76 100644
104804--- a/net/netfilter/nf_conntrack_core.c
104805+++ b/net/netfilter/nf_conntrack_core.c
104806@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
104807 #define DYING_NULLS_VAL ((1<<30)+1)
104808 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
104809
104810+#ifdef CONFIG_GRKERNSEC_HIDESYM
104811+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
104812+#endif
104813+
104814 int nf_conntrack_init_net(struct net *net)
104815 {
104816 int ret = -ENOMEM;
104817@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
104818 if (!net->ct.stat)
104819 goto err_pcpu_lists;
104820
104821+#ifdef CONFIG_GRKERNSEC_HIDESYM
104822+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
104823+#else
104824 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
104825+#endif
104826 if (!net->ct.slabname)
104827 goto err_slabname;
104828
104829diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
104830index 4e78c57..ec8fb74 100644
104831--- a/net/netfilter/nf_conntrack_ecache.c
104832+++ b/net/netfilter/nf_conntrack_ecache.c
104833@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
104834 #ifdef CONFIG_SYSCTL
104835 static int nf_conntrack_event_init_sysctl(struct net *net)
104836 {
104837- struct ctl_table *table;
104838+ ctl_table_no_const *table;
104839
104840 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
104841 GFP_KERNEL);
104842diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
104843index bd9d315..989947e 100644
104844--- a/net/netfilter/nf_conntrack_helper.c
104845+++ b/net/netfilter/nf_conntrack_helper.c
104846@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
104847
104848 static int nf_conntrack_helper_init_sysctl(struct net *net)
104849 {
104850- struct ctl_table *table;
104851+ ctl_table_no_const *table;
104852
104853 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
104854 GFP_KERNEL);
104855diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
104856index b65d586..beec902 100644
104857--- a/net/netfilter/nf_conntrack_proto.c
104858+++ b/net/netfilter/nf_conntrack_proto.c
104859@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
104860
104861 static void
104862 nf_ct_unregister_sysctl(struct ctl_table_header **header,
104863- struct ctl_table **table,
104864+ ctl_table_no_const **table,
104865 unsigned int users)
104866 {
104867 if (users > 0)
104868diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
104869index fc823fa..8311af3 100644
104870--- a/net/netfilter/nf_conntrack_standalone.c
104871+++ b/net/netfilter/nf_conntrack_standalone.c
104872@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
104873
104874 static int nf_conntrack_standalone_init_sysctl(struct net *net)
104875 {
104876- struct ctl_table *table;
104877+ ctl_table_no_const *table;
104878
104879 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
104880 GFP_KERNEL);
104881diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
104882index 7a394df..bd91a8a 100644
104883--- a/net/netfilter/nf_conntrack_timestamp.c
104884+++ b/net/netfilter/nf_conntrack_timestamp.c
104885@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
104886 #ifdef CONFIG_SYSCTL
104887 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
104888 {
104889- struct ctl_table *table;
104890+ ctl_table_no_const *table;
104891
104892 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
104893 GFP_KERNEL);
104894diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
104895index 43c926c..a5731d8 100644
104896--- a/net/netfilter/nf_log.c
104897+++ b/net/netfilter/nf_log.c
104898@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
104899
104900 #ifdef CONFIG_SYSCTL
104901 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
104902-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
104903+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
104904
104905 static int nf_log_proc_dostring(struct ctl_table *table, int write,
104906 void __user *buffer, size_t *lenp, loff_t *ppos)
104907@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
104908 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
104909 mutex_unlock(&nf_log_mutex);
104910 } else {
104911+ ctl_table_no_const nf_log_table = *table;
104912+
104913 mutex_lock(&nf_log_mutex);
104914 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
104915 if (!logger)
104916- table->data = "NONE";
104917+ nf_log_table.data = "NONE";
104918 else
104919- table->data = logger->name;
104920- r = proc_dostring(table, write, buffer, lenp, ppos);
104921+ nf_log_table.data = logger->name;
104922+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
104923 mutex_unlock(&nf_log_mutex);
104924 }
104925
104926diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
104927index c68c1e5..8b5d670 100644
104928--- a/net/netfilter/nf_sockopt.c
104929+++ b/net/netfilter/nf_sockopt.c
104930@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
104931 }
104932 }
104933
104934- list_add(&reg->list, &nf_sockopts);
104935+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
104936 out:
104937 mutex_unlock(&nf_sockopt_mutex);
104938 return ret;
104939@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
104940 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
104941 {
104942 mutex_lock(&nf_sockopt_mutex);
104943- list_del(&reg->list);
104944+ pax_list_del((struct list_head *)&reg->list);
104945 mutex_unlock(&nf_sockopt_mutex);
104946 }
104947 EXPORT_SYMBOL(nf_unregister_sockopt);
104948diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
104949index 11d85b3..7fcc420 100644
104950--- a/net/netfilter/nfnetlink_log.c
104951+++ b/net/netfilter/nfnetlink_log.c
104952@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
104953 struct nfnl_log_net {
104954 spinlock_t instances_lock;
104955 struct hlist_head instance_table[INSTANCE_BUCKETS];
104956- atomic_t global_seq;
104957+ atomic_unchecked_t global_seq;
104958 };
104959
104960 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
104961@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
104962 /* global sequence number */
104963 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
104964 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
104965- htonl(atomic_inc_return(&log->global_seq))))
104966+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
104967 goto nla_put_failure;
104968
104969 if (data_len) {
104970diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
104971new file mode 100644
104972index 0000000..c566332
104973--- /dev/null
104974+++ b/net/netfilter/xt_gradm.c
104975@@ -0,0 +1,51 @@
104976+/*
104977+ * gradm match for netfilter
104978