diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 9de9813..1462492 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -3,9 +3,11 @@
 *.bc
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -15,6 +17,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -51,14 +54,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -72,9 +78,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -83,6 +91,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -95,32 +104,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -148,14 +168,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -165,14 +185,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -188,6 +209,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -197,6 +220,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -206,7 +230,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -216,8 +245,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -227,6 +260,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -238,13 +272,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -252,9 +290,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 74b6c6d..eac0e77 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
 === 4 Host Program support
 --- 4.1 Simple Host Program
 --- 4.2 Composite Host Programs
- --- 4.3 Using C++ for host programs
- --- 4.4 Controlling compiler options for host programs
- --- 4.5 When host programs are actually built
- --- 4.6 Using hostprogs-$(CONFIG_FOO)
+ --- 4.3 Defining shared libraries
+ --- 4.4 Using C++ for host programs
+ --- 4.5 Controlling compiler options for host programs
+ --- 4.6 When host programs are actually built
+ --- 4.7 Using hostprogs-$(CONFIG_FOO)

 === 5 Kbuild clean infrastructure

@@ -643,7 +644,29 @@ Both possibilities are described in the following.
 Finally, the two .o files are linked to the executable, lxdialog.
 Note: The syntax <executable>-y is not permitted for host-programs.

---- 4.3 Using C++ for host programs
+--- 4.3 Defining shared libraries
+
+ Objects with extension .so are considered shared libraries, and
+ will be compiled as position independent objects.
+ Kbuild provides support for shared libraries, but the usage
+ shall be restricted.
+ In the following example the libkconfig.so shared library is used
+ to link the executable conf.
+
+ Example:
+ #scripts/kconfig/Makefile
+ hostprogs-y := conf
+ conf-objs := conf.o libkconfig.so
+ libkconfig-objs := expr.o type.o
+
+ Shared libraries always require a corresponding -objs line, and
+ in the example above the shared library libkconfig is composed by
+ the two objects expr.o and type.o.
+ expr.o and type.o will be built as position independent code and
+ linked as a shared library libkconfig.so. C++ is not supported for
+ shared libraries.
+
+--- 4.4 Using C++ for host programs

 kbuild offers support for host programs written in C++. This was
 introduced solely to support kconfig, and is not recommended
@@ -666,7 +689,7 @@ Both possibilities are described in the following.
 qconf-cxxobjs := qconf.o
 qconf-objs := check.o

---- 4.4 Controlling compiler options for host programs
+--- 4.5 Controlling compiler options for host programs

 When compiling host programs, it is possible to set specific flags.
 The programs will always be compiled utilising $(HOSTCC) passed
@@ -694,7 +717,7 @@ Both possibilities are described in the following.
 When linking qconf, it will be passed the extra option
 "-L$(QTDIR)/lib".

---- 4.5 When host programs are actually built
+--- 4.6 When host programs are actually built

 Kbuild will only build host-programs when they are referenced
 as a prerequisite.
@@ -725,7 +748,7 @@ Both possibilities are described in the following.
 This will tell kbuild to build lxdialog even if not referenced in
 any rule.

---- 4.6 Using hostprogs-$(CONFIG_FOO)
+--- 4.7 Using hostprogs-$(CONFIG_FOO)

 A typical pattern in a Kbuild file looks like this:

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4d68ec8..9546b75 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+ grsec_sysfs_restrict= Format: 0 | 1
+ Default: 1
+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ Format: { 0 | 1 | off | fast | full }
+ Options '0' and '1' are only provided for backward
+ compatibility, 'off' or 'fast' should be used instead.
+ 0|off : disable slab object sanitization
+ 1|fast: enable slab object sanitization excluding
+ whitelisted slabs (default)
+ full : sanitize all slabs, even the whitelisted ones
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
diff --git a/Makefile b/Makefile
index 3d16bcc..a3b342e 100644
--- a/Makefile
+++ b/Makefile
@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 HOSTCC = gcc
 HOSTCXX = g++
 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds

 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -622,6 +624,72 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
 else
 KBUILD_CFLAGS += -g
 endif
-KBUILD_AFLAGS += -Wa,-gdwarf-2
+KBUILD_AFLAGS += -Wa,--gdwarf-2
 endif
 ifdef CONFIG_DEBUG_INFO_DWARF4
 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
@@ -884,7 +952,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -934,6 +1002,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -943,7 +1013,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 define filechk_kernel.release
@@ -986,10 +1056,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \

 archprepare: archheaders archscripts prepare1 scripts_basic

+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -1103,6 +1176,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -1118,7 +1193,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1184,7 +1259,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer vmlinux-gdb.py
+ signing_key.x509.signer vmlinux-gdb.py \
+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1223,7 +1301,7 @@ distclean: mrproper
 @find $(srctree) $(RCS_FIND_IGNORE) \
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
 -type f -print | xargs rm -f


@@ -1389,6 +1467,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1529,17 +1609,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1551,11 +1635,15 @@ endif
 $(build)=$(build-dir)
 # Make sure the latest headers are built for Documentation
 Documentation/: headers_install
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafb..3405f46 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index a9a1195..e9b8417 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -101,6 +101,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index e51f578..16c64a3 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 9d0ac09..479a962 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf4c0c9..a87ecf5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..abe7041 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#ifdef CONFIG_THUMB2_KERNEL
+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
+#else
+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
+#endif
+
+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) ACCESS_ONCE((v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return ACCESS_ONCE(v->counter);
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -38,26 +62,50 @@
 * to ensure that the update happens.
 */

-#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+#ifdef CONFIG_PAX_REFCOUNT
+#define __OVERFLOW_POST \
+ " bvc 3f\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+#define __OVERFLOW_POST_RETURN \
+ " bvc 3f\n" \
+" mov %0, %1\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+#define __OVERFLOW_EXTABLE \
+ "4:\n" \
+ _ASM_EXTABLE(2b, 4b)
+#else
+#define __OVERFLOW_POST
+#define __OVERFLOW_POST_RETURN
+#define __OVERFLOW_EXTABLE
+#endif
+
+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
 { \
 unsigned long tmp; \
 int result; \
 \
 prefetchw(&v->counter); \
- __asm__ __volatile__("@ atomic_" #op "\n" \
+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
 "1: ldrex %0, [%3]\n" \
 " " #asm_op " %0, %0, %4\n" \
+ post_op \
 " strex %1, %0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "Ir" (i) \
 : "cc"); \
 } \

-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 { \
 unsigned long tmp; \
 int result; \
@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 smp_mb(); \
 prefetchw(&v->counter); \
 \
- __asm__ __volatile__("@ atomic_" #op "_return\n" \
+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
 "1: ldrex %0, [%3]\n" \
 " " #asm_op " %0, %0, %4\n" \
+ post_op \
 " strex %1, %0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "Ir" (i) \
 : "cc"); \
@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 return result; \
 }

+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
 int oldval;
@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 __asm__ __volatile__ ("@ atomic_add_unless\n"
 "1: ldrex %0, [%4]\n"
 " teq %0, %5\n"
-" beq 2f\n"
-" add %1, %0, %6\n"
+" beq 4f\n"
+" adds %1, %0, %6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " strex %2, %1, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
 #error SMP not supported on pre-ARMv6 CPUs
 #endif

-#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
 { \
 unsigned long flags; \
 \
@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
 raw_local_irq_restore(flags); \
 } \

-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 { \
 unsigned long flags; \
 int val; \
@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 return val; \
 }

+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
+
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 int ret;
@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg((atomic_t *)v, old, new);
+}
+
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 int c, old;
@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)

 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -216,6 +336,14 @@ typedef struct {
 long long counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 long long tmp;
@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ long long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
 #endif

-#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
+#undef __OVERFLOW_POST_RETURN
+#define __OVERFLOW_POST_RETURN \
+ " bvc 3f\n" \
+" mov %0, %1\n" \
+" mov %H0, %H1\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+
+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
 { \
 long long result; \
 unsigned long tmp; \
 \
 prefetchw(&v->counter); \
- __asm__ __volatile__("@ atomic64_" #op "\n" \
+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
 "1: ldrexd %0, %H0, [%3]\n" \
 " " #op1 " %Q0, %Q0, %Q4\n" \
 " " #op2 " %R0, %R0, %R4\n" \
+ post_op \
 " strexd %1, %0, %H0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "r" (i) \
 : "cc"); \
 } \

-#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
 { \
 long long result; \
 unsigned long tmp; \
@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 smp_mb(); \
 prefetchw(&v->counter); \
 \
- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
 "1: ldrexd %0, %H0, [%3]\n" \
 " " #op1 " %Q0, %Q0, %Q4\n" \
 " " #op2 " %R0, %R0, %R4\n" \
+ post_op \
 " strexd %1, %0, %H0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "r" (i) \
 : "cc"); \
@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 return result; \
 }

+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 #define ATOMIC64_OPS(op, op1, op2) \
 ATOMIC64_OP(op, op1, op2) \
 ATOMIC64_OP_RETURN(op, op1, op2)
@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)

 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
+#undef __ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef __ATOMIC64_OP
+#undef __OVERFLOW_EXTABLE
+#undef __OVERFLOW_POST_RETURN
+#undef __OVERFLOW_POST

 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 long long new)
@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 return oldval;
 }

+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+ long long new)
+{
+ long long oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
 long long result;
@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 long long result;
- unsigned long tmp;
+ u64 tmp;

 smp_mb();
 prefetchw(&v->counter);

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, #1\n"
-" sbc %R0, %R0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %Q0, %Q1, #1\n"
+" sbcs %R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " teq %R0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
+" adcs %R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index d2f81e6..3c4dba5 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -67,7 +67,7 @@
 do { \
 compiletime_assert_atomic_type(*p); \
 smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ ACCESS_ONCE_RW(*p) = (v); \
 } while (0)

 #define smp_load_acquire(p) \
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 2d46862..a35415b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 5233151..87a71fa 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index abb2c37..96db950 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1a 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 674d03f..9a0bac0 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
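
Each futex primitive above gains a pax_open_userland()/pax_close_userland() pair so the user domain is accessible only for the atomic access itself. A userspace model of the bracketing (the stubs stand in for the real DACR writes, and C11 atomics stand in for the LDREX/STREX loop):

#include <stdatomic.h>
#include <stdio.h>

/* stand-ins for the DACR switches; the real ones rewrite the user
 * domain's 2-bit access type */
static void pax_open_userland(void)  { }
static void pax_close_userland(void) { }

static int futex_cmpxchg(atomic_int *uaddr, int oldval, int newval, int *uval)
{
    int cur = oldval;

    pax_open_userland();                /* user memory legal from here... */
    atomic_compare_exchange_strong(uaddr, &cur, newval);
    pax_close_userland();               /* ...to here, matching the hunk */

    *uval = cur;
    return 0;
}

int main(void)
{
    atomic_int word = 42;
    int seen;

    futex_cmpxchg(&word, 42, 7, &seen);
    printf("old=%d new=%d\n", seen, atomic_load(&word));
    return 0;
}
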
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 563b92f..689d58e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -39,7 +39,7 @@ struct outer_cache_fns {
1842 /* This is an ARM L2C thing */
1843 void (*write_sec)(unsigned long, unsigned);
1844 void (*configure)(const struct l2x0_regs *);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
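
The new __section_update() helper has to cope with the 2-level case, where one Linux pmd entry covers a pair of 1 MiB hardware sections; bit 20 of the address picks the half to update before the entry is flushed. A userspace model (SECTION_SIZE of 1 MiB is assumed, and the flush is omitted):

#include <stdio.h>

#define SECTION_SIZE (1UL << 20)  /* assumed 1 MiB hardware sections */

/* a 2-level Linux pmd entry is a pair of hardware sections, so bit 20
 * of the address selects which half gets the new protection bits */
static void section_update(unsigned long pmdp[2], unsigned long addr,
                           unsigned long prot)
{
    pmdp[(addr & SECTION_SIZE) ? 1 : 0] |= prot;
}

int main(void)
{
    unsigned long pmd[2] = { 0x100, 0x100 };

    section_update(pmd, 0xc0100000UL, 0x200);  /* bit 20 set -> pmdp[1] */
    printf("%#lx %#lx\n", pmd[0], pmd[1]);     /* 0x100 0x300 */
    return 0;
}
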
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index bfd662e..f6cbb02 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -127,6 +127,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a745a2a..481350a 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -80,6 +80,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -91,10 +92,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index f403541..b10df68 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be a 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
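
pax_open_kernel()/pax_close_kernel() are meant to bracket exactly one privileged write with preemption held off, which is why the inlines pair preempt_disable() with preempt_enable_no_resched() and BUG on re-entry. A loose userspace analogue using mprotect() in place of the DACR switch (4 KiB page size assumed, error handling elided for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *ro_page;

/* "open" makes a read-only page writable for one short critical
 * section; single-threaded here, preemption-free in the kernel */
static void open_kernel(void)  { mprotect(ro_page, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(ro_page, 4096, PROT_READ); }

int main(void)
{
    ro_page = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    open_kernel();
    memcpy(ro_page, "patched", 8);   /* the lone write, as in tls.h/fiq.c */
    close_kernel();

    printf("%s\n", (char *)ro_page);
    return 0;
}
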
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index 72812a1..335f4f3 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -77,9 +77,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 }
2125
2126 #define init_thread_info (init_thread_union.thread_info)
2127@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* must stay within 8 bits of TIF_SYSCALL_TRACE so the combined mask
2133+ * still encodes as an ARM flexible second operand (8-bit rotated immediate)
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
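
The comment about staying within 8 bits of TIF_SYSCALL_TRACE refers to the ARM flexible second operand: a data-processing immediate is an 8-bit value rotated right by an even count, so _TIF_SYSCALL_WORK is only usable as a single tst/bic immediate if all its bits fit one such window. A small checker (my own sketch, not from the patch):

#include <stdio.h>

/* encodable iff some even rotation brings the value into the low byte */
static int encodable_arm_imm(unsigned int v)
{
    for (int rot = 0; rot < 32; rot += 2) {
        unsigned int r = rot ? ((v << rot) | (v >> (32 - rot))) : v;
        if (r <= 0xff)
            return 1;
    }
    return 0;
}

int main(void)
{
    unsigned int work = (1u << 8) | (1u << 9) | (1u << 10) |
                        (1u << 11) | (1u << 12);

    printf("_TIF_SYSCALL_WORK-style mask encodable: %d\n",
           encodable_arm_imm(work));                  /* 1: bits 8..12 fit */
    printf("bits 8 and 17 together: %d\n",
           encodable_arm_imm((1u << 8) | (1u << 17)));  /* 0: span too wide */
    return 0;
}
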
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index ce0786e..a80c264 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a, b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x, p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x, p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check((x), (p)); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x, p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x, p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check((x), (p)); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2260 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x, ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x), (ptr), __gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x, ptr, err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x), (ptr), err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x, ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x), (ptr), __pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x, ptr, err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x), (ptr), err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
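
The added "(long)n < 0" guards in copy_from_user()/copy_to_user() catch size values produced by signed underflow before they ever reach access_ok(), returning the bogus count as "bytes not copied". A minimal demonstration of the wraparound being rejected:

#include <stdio.h>

int main(void)
{
    unsigned long len = 4;
    unsigned long n = len - 8;   /* underflow: wraps to a huge count */

    /* the guard: treat "negative" sizes as already-failed copies
     * instead of passing them on to access_ok() */
    if ((long)n < 0) {
        printf("rejected bogus %lu-byte copy\n", n);
        return 0;
    }
    return 1;
}
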
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 672b219..4aa120a 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -48,6 +48,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_UDEREF (client access)
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -90,11 +171,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -479,7 +576,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -513,11 +612,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -767,7 +871,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -776,7 +880,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
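
All of the pax_* macros above recover current_thread_info() from sp with two BIC instructions, because the combined mask 0x1fff (an 8 KiB THREAD_SIZE minus one) is too wide to encode as one ARM immediate while 0x1fc0 and 0x3f both fit. A C model of the masking:

#include <stdio.h>

#define THREAD_SIZE 8192UL  /* the "assume 8K pages" comments above */

int main(void)
{
    unsigned long sp = 0xc0a13f28UL;  /* hypothetical kernel stack pointer */

    /* 0x1fff spans more than 8 bits, so the macros clear it as
     * 0x1fc0 | 0x3f in two BICs */
    unsigned long ti = sp & ~0x1fc0UL & ~0x3fUL;

    printf("%d\n", ti == (sp & ~(THREAD_SIZE - 1)));  /* prints 1 */
    return 0;
}
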
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here to avoid the performance hit of wrapping the code above,
2663+ * which directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 0196327..50ac8895 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -444,7 +444,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index 2e11961..07f0704 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
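
Under KERNEXEC the module loader is split W^X style: module_alloc() now returns non-executable PAGE_KERNEL memory, while module_alloc_exec() hands out the executable mapping. A userspace sketch of the same split using mmap() (the kernel, of course, uses vmalloc ranges and pgprots instead):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *alloc_rw(size_t n)  /* module data / relocation scratch */
{
    return mmap(NULL, n, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void *alloc_rx(size_t n)  /* module text: never writable here */
{
    return mmap(NULL, n, PROT_READ | PROT_EXEC,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
    void *data = alloc_rw(4096);
    void *text = alloc_rx(4096);

    strcpy(data, "ok");                    /* fine: RW mapping */
    printf("%s %p %p\n", (char *)data, data, text);
    return 0;
}
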
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 69bda1a..755113a 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index 2bf1a16..d959d40 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -213,6 +213,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -226,7 +227,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -252,8 +253,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -430,12 +431,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -451,7 +446,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -480,81 +475,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f90fdf4..24e8c84 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -26,7 +26,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index 1d60beb..4aa25d5 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3058 * Register 0 and check for VMSAv7 or PMSAv7 */
3059 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
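
The setup.c hunk reads the VMSA support field in ID_MMFR0 (bits [3:0]) and, per its check, treats values 4 and 5 as VMSAv7 implementations that also provide PXN, at which point L_PTE_PXN and PMD_PXNTABLE are folded into the supported masks. A sketch of the decode (the register value is invented, and the gloss on values 4/5 is my reading of the check):

#include <stdio.h>

int main(void)
{
    unsigned int mmfr0 = 0x00100005;  /* hypothetical CP15 ID_MMFR0 */
    unsigned int vmsa  = mmfr0 & 0xf; /* VMSA support field, bits [3:0] */

    if (vmsa >= 3)
        printf("VMSAv7\n");
    if (vmsa == 4 || vmsa == 5)
        printf("PXN available: set L_PTE_PXN and PMD_PXNTABLE\n");
    return 0;
}
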
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 023ac90..0a69950 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index b652af5..60231ab 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1087,7 +1087,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
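
kvm_vmid_gen is switched to the atomic64_unchecked_t flavour because, under PAX_REFCOUNT, ordinary atomic increments trap on overflow; a generation counter may legitimately wrap, since readers only compare generations for equality. A plain C11 model of that usage pattern:

#include <stdatomic.h>
#include <stdio.h>

/* generation counter: readers only ever test for equality, so
 * wraparound is harmless and overflow checking would be noise */
static atomic_ullong vmid_gen = 1;

int main(void)
{
    unsigned long long cached = atomic_load(&vmid_gen);

    atomic_fetch_add(&vmid_gen, 1);  /* start a new VMID generation */
    printf("stale: %d\n", cached != atomic_load(&vmid_gen));  /* 1 */
    return 0;
}
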
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 /* See rationale for this in __copy_to_user() above. */
3436 if (n < 64)
3437diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3438index 318d127..9aab0d1 100644
3439--- a/arch/arm/mach-exynos/suspend.c
3440+++ b/arch/arm/mach-exynos/suspend.c
3441@@ -18,6 +18,7 @@
3442 #include <linux/syscore_ops.h>
3443 #include <linux/cpu_pm.h>
3444 #include <linux/io.h>
3445+#include <linux/irq.h>
3446 #include <linux/irqchip/arm-gic.h>
3447 #include <linux/err.h>
3448 #include <linux/regulator/machine.h>
3449@@ -632,8 +633,10 @@ void __init exynos_pm_init(void)
3450 tmp |= pm_data->wake_disable_mask;
3451 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3452
3453- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3454- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3455+ pax_open_kernel();
3456+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3457+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3458+ pax_close_kernel();
3459
3460 register_syscore_ops(&exynos_pm_syscore_ops);
3461 suspend_set_ops(&exynos_suspend_ops);
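
The hunk above shows the pattern this patch applies throughout: structures holding function pointers (here the syscore ops) live in read-only memory, and the few legitimate runtime writes are bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection. Below is a minimal userspace sketch of the idea using mprotect(); the names and single-page layout are illustrative, not the kernel implementation:

    /* cc -o ro_ops ro_ops.c && ./ro_ops */
    #include <stdio.h>
    #include <sys/mman.h>

    struct pm_ops { void (*suspend)(void); void (*resume)(void); };

    static void demo_suspend(void) { puts("suspend hook ran"); }

    int main(void)
    {
        /* one anonymous page standing in for a __read_only ops table */
        struct pm_ops *ops = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;

        mprotect(ops, 4096, PROT_READ);               /* steady state: read-only */

        mprotect(ops, 4096, PROT_READ | PROT_WRITE);  /* pax_open_kernel() analogue */
        *(void **)&ops->suspend = (void *)demo_suspend; /* same cast idiom as above */
        mprotect(ops, 4096, PROT_READ);               /* pax_close_kernel() analogue */

        ops->suspend();
        return 0;
    }
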
3462diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3463index 0662087..004d163 100644
3464--- a/arch/arm/mach-keystone/keystone.c
3465+++ b/arch/arm/mach-keystone/keystone.c
3466@@ -27,7 +27,7 @@
3467
3468 #include "keystone.h"
3469
3470-static struct notifier_block platform_nb;
3471+static notifier_block_no_const platform_nb;
3472 static unsigned long keystone_dma_pfn_offset __read_mostly;
3473
3474 static int keystone_platform_notifier(struct notifier_block *nb,
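
keystone's notifier gets its callback assigned at runtime, so the patch switches the variable to notifier_block_no_const: under the constify GCC plugin, structures made up of function pointers become const by default, and a _no_const typedef opts a writable instance out. A userspace model of the convention, with plain const standing in for the plugin and all names illustrative:

    #include <stdio.h>

    struct notifier_block {
        int (*notifier_call)(unsigned long event, void *data);
    };

    /* the deliberately writable variant, as notifier_block_no_const expresses */
    typedef struct notifier_block notifier_block_no_const;

    static int platform_notifier(unsigned long event, void *data)
    {
        (void)data;
        printf("notifier ran for event %lu\n", event);
        return 0;
    }

    /* default case: fully initialized at build time, can live in .rodata */
    static const struct notifier_block static_nb = {
        .notifier_call = platform_notifier,
    };

    /* keystone case: callback chosen at runtime, so the object must stay writable */
    static notifier_block_no_const platform_nb;

    int main(void)
    {
        platform_nb.notifier_call = platform_notifier;
        static_nb.notifier_call(1, NULL);
        platform_nb.notifier_call(2, NULL);
        return 0;
    }
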
3475diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3476index e46e9ea..9141c83 100644
3477--- a/arch/arm/mach-mvebu/coherency.c
3478+++ b/arch/arm/mach-mvebu/coherency.c
3479@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3480
3481 /*
3482 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3483- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3484+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3485 * is needed as a workaround for a deadlock issue between the PCIe
3486 * interface and the cache controller.
3487 */
3488@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3489 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3490
3491 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3492- mtype = MT_UNCACHED;
3493+ mtype = MT_UNCACHED_RW;
3494
3495 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3496 }
3497diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3498index b6443a4..20a0b74 100644
3499--- a/arch/arm/mach-omap2/board-n8x0.c
3500+++ b/arch/arm/mach-omap2/board-n8x0.c
3501@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3502 }
3503 #endif
3504
3505-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3506+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3507 .late_init = n8x0_menelaus_late_init,
3508 };
3509
3510diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3511index 79f49d9..70bf184 100644
3512--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3513+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3514@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3515 void (*resume)(void);
3516 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3517 void (*hotplug_restart)(void);
3518-};
3519+} __no_const;
3520
3521 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3522 static struct powerdomain *mpuss_pd;
3523@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3524 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3525 {}
3526
3527-struct cpu_pm_ops omap_pm_ops = {
3528+static struct cpu_pm_ops omap_pm_ops __read_only = {
3529 .finish_suspend = default_finish_suspend,
3530 .resume = dummy_cpu_resume,
3531 .scu_prepare = dummy_scu_prepare,
3532diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3533index 5305ec7..6d74045 100644
3534--- a/arch/arm/mach-omap2/omap-smp.c
3535+++ b/arch/arm/mach-omap2/omap-smp.c
3536@@ -19,6 +19,7 @@
3537 #include <linux/device.h>
3538 #include <linux/smp.h>
3539 #include <linux/io.h>
3540+#include <linux/irq.h>
3541 #include <linux/irqchip/arm-gic.h>
3542
3543 #include <asm/smp_scu.h>
3544diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3545index f961c46..4a453dc 100644
3546--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3547+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3548@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3549 return NOTIFY_OK;
3550 }
3551
3552-static struct notifier_block __refdata irq_hotplug_notifier = {
3553+static struct notifier_block irq_hotplug_notifier = {
3554 .notifier_call = irq_cpu_hotplug_notify,
3555 };
3556
3557diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3558index be9541e..821805f 100644
3559--- a/arch/arm/mach-omap2/omap_device.c
3560+++ b/arch/arm/mach-omap2/omap_device.c
3561@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3562 struct platform_device __init *omap_device_build(const char *pdev_name,
3563 int pdev_id,
3564 struct omap_hwmod *oh,
3565- void *pdata, int pdata_len)
3566+ const void *pdata, int pdata_len)
3567 {
3568 struct omap_hwmod *ohs[] = { oh };
3569
3570@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3571 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3572 int pdev_id,
3573 struct omap_hwmod **ohs,
3574- int oh_cnt, void *pdata,
3575+ int oh_cnt, const void *pdata,
3576 int pdata_len)
3577 {
3578 int ret = -ENOMEM;
3579diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3580index 78c02b3..c94109a 100644
3581--- a/arch/arm/mach-omap2/omap_device.h
3582+++ b/arch/arm/mach-omap2/omap_device.h
3583@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3584 /* Core code interface */
3585
3586 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3587- struct omap_hwmod *oh, void *pdata,
3588+ struct omap_hwmod *oh, const void *pdata,
3589 int pdata_len);
3590
3591 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3592 struct omap_hwmod **oh, int oh_cnt,
3593- void *pdata, int pdata_len);
3594+ const void *pdata, int pdata_len);
3595
3596 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3597 struct omap_hwmod **ohs, int oh_cnt);
3598diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3599index 355b089..2c9d7c3 100644
3600--- a/arch/arm/mach-omap2/omap_hwmod.c
3601+++ b/arch/arm/mach-omap2/omap_hwmod.c
3602@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3603 int (*init_clkdm)(struct omap_hwmod *oh);
3604 void (*update_context_lost)(struct omap_hwmod *oh);
3605 int (*get_context_lost)(struct omap_hwmod *oh);
3606-};
3607+} __no_const;
3608
3609 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3610-static struct omap_hwmod_soc_ops soc_ops;
3611+static struct omap_hwmod_soc_ops soc_ops __read_only;
3612
3613 /* omap_hwmod_list contains all registered struct omap_hwmods */
3614 static LIST_HEAD(omap_hwmod_list);
3615diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3616index 95fee54..cfa9cf1 100644
3617--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3618+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3619@@ -10,6 +10,7 @@
3620
3621 #include <linux/kernel.h>
3622 #include <linux/init.h>
3623+#include <asm/pgtable.h>
3624
3625 #include "powerdomain.h"
3626
3627@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3628
3629 void __init am43xx_powerdomains_init(void)
3630 {
3631- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3632+ pax_open_kernel();
3633+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3634+ pax_close_kernel();
3635 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3636 pwrdm_register_pwrdms(powerdomains_am43xx);
3637 pwrdm_complete_init();
3638diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3639index ff0a68c..b312aa0 100644
3640--- a/arch/arm/mach-omap2/wd_timer.c
3641+++ b/arch/arm/mach-omap2/wd_timer.c
3642@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3643 struct omap_hwmod *oh;
3644 char *oh_name = "wd_timer2";
3645 char *dev_name = "omap_wdt";
3646- struct omap_wd_timer_platform_data pdata;
3647+ static struct omap_wd_timer_platform_data pdata = {
3648+ .read_reset_sources = prm_read_reset_sources
3649+ };
3650
3651 if (!cpu_class_is_omap2() || of_have_populated_dt())
3652 return 0;
3653@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3654 return -EINVAL;
3655 }
3656
3657- pdata.read_reset_sources = prm_read_reset_sources;
3658-
3659 pdev = omap_device_build(dev_name, id, oh, &pdata,
3660 sizeof(struct omap_wd_timer_platform_data));
3661 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
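
With omap_device_build() now taking const void *pdata (see the omap_device.c/h hunks above), the on-stack pdata that was patched at runtime becomes a static, compile-time-initialized object. A sketch of why that works, on the assumption that the callee copies the platform data rather than keeping the pointer (as the platform-device core does); all names below are stand-ins:

    #include <stdio.h>
    #include <string.h>

    struct wd_pdata { int (*read_reset_sources)(void); };

    static int prm_read_reset_sources(void) { return 0x2; }

    /* stand-in for the constified builder: it copies the platform data,
     * so a const, compile-time-initialized object is enough */
    static int device_build(const char *name, const void *pdata, size_t len)
    {
        struct wd_pdata copy;
        memcpy(&copy, pdata, len);
        printf("%s reset sources: %#x\n", name, copy.read_reset_sources());
        return 0;
    }

    int main(void)
    {
        static const struct wd_pdata pdata = {
            .read_reset_sources = prm_read_reset_sources,
        };
        return device_build("omap_wdt", &pdata, sizeof(pdata));
    }
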
3662diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3663index 4f25a7c..a81be85 100644
3664--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3665+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3666@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3667 bool entered_lp2 = false;
3668
3669 if (tegra_pending_sgi())
3670- ACCESS_ONCE(abort_flag) = true;
3671+ ACCESS_ONCE_RW(abort_flag) = true;
3672
3673 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3674
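ACCESS_ONCE_RW is the write-side counterpart PaX introduces when it const-qualifies ACCESS_ONCE: reads keep working everywhere, but a write through the plain macro stops compiling, so every intentional writer (like abort_flag above) has to opt in. A compilable sketch of that split; the exact kernel definitions may differ:

    #include <stdio.h>

    /* the const qualifier is what rejects stray writes at compile time */
    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static int abort_flag;

    int main(void)
    {
        ACCESS_ONCE_RW(abort_flag) = 1;    /* writer explicitly opted in */
        /* ACCESS_ONCE(abort_flag) = 1;       would fail: assignment to const */
        printf("abort_flag=%d\n", ACCESS_ONCE(abort_flag));
        return 0;
    }
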
3675diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3676index ab95f53..4b977a7 100644
3677--- a/arch/arm/mach-tegra/irq.c
3678+++ b/arch/arm/mach-tegra/irq.c
3679@@ -20,6 +20,7 @@
3680 #include <linux/cpu_pm.h>
3681 #include <linux/interrupt.h>
3682 #include <linux/io.h>
3683+#include <linux/irq.h>
3684 #include <linux/irqchip/arm-gic.h>
3685 #include <linux/irq.h>
3686 #include <linux/kernel.h>
3687diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3688index 2cb587b..6ddfebf 100644
3689--- a/arch/arm/mach-ux500/pm.c
3690+++ b/arch/arm/mach-ux500/pm.c
3691@@ -10,6 +10,7 @@
3692 */
3693
3694 #include <linux/kernel.h>
3695+#include <linux/irq.h>
3696 #include <linux/irqchip/arm-gic.h>
3697 #include <linux/delay.h>
3698 #include <linux/io.h>
3699diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3700index 2dea8b5..6499da2 100644
3701--- a/arch/arm/mach-ux500/setup.h
3702+++ b/arch/arm/mach-ux500/setup.h
3703@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3704 .type = MT_DEVICE, \
3705 }
3706
3707-#define __MEM_DEV_DESC(x, sz) { \
3708- .virtual = IO_ADDRESS(x), \
3709- .pfn = __phys_to_pfn(x), \
3710- .length = sz, \
3711- .type = MT_MEMORY_RWX, \
3712-}
3713-
3714 extern struct smp_operations ux500_smp_ops;
3715 extern void ux500_cpu_die(unsigned int cpu);
3716
3717diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3718index 52d768f..5f93180 100644
3719--- a/arch/arm/mach-zynq/platsmp.c
3720+++ b/arch/arm/mach-zynq/platsmp.c
3721@@ -24,6 +24,7 @@
3722 #include <linux/io.h>
3723 #include <asm/cacheflush.h>
3724 #include <asm/smp_scu.h>
3725+#include <linux/irq.h>
3726 #include <linux/irqchip/arm-gic.h>
3727 #include "common.h"
3728
3729diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3730index 9b4f29e..bbf3bfa 100644
3731--- a/arch/arm/mm/Kconfig
3732+++ b/arch/arm/mm/Kconfig
3733@@ -446,6 +446,7 @@ config CPU_32v5
3734
3735 config CPU_32v6
3736 bool
3737+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3738 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3739
3740 config CPU_32v6K
3741@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3742
3743 config CPU_USE_DOMAINS
3744 bool
3745+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3746 help
3747 This option enables or disables the use of domain switching
3748 via the set_fs() function.
3749@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3750
3751 config KUSER_HELPERS
3752 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3753- depends on MMU
3754+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3755 default y
3756 help
3757 Warning: disabling this option may break user programs.
3758@@ -812,7 +814,7 @@ config KUSER_HELPERS
3759 See Documentation/arm/kernel_user_helpers.txt for details.
3760
3761 However, the fixed address nature of these helpers can be used
3762- by ROP (return orientated programming) authors when creating
3763+ by ROP (Return Oriented Programming) authors when creating
3764 exploits.
3765
3766 If all of the binaries and libraries which run on your platform
3767diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3768index 2c0c541..4585df9 100644
3769--- a/arch/arm/mm/alignment.c
3770+++ b/arch/arm/mm/alignment.c
3771@@ -216,10 +216,12 @@ union offset_union {
3772 #define __get16_unaligned_check(ins,val,addr) \
3773 do { \
3774 unsigned int err = 0, v, a = addr; \
3775+ pax_open_userland(); \
3776 __get8_unaligned_check(ins,v,a,err); \
3777 val = v << ((BE) ? 8 : 0); \
3778 __get8_unaligned_check(ins,v,a,err); \
3779 val |= v << ((BE) ? 0 : 8); \
3780+ pax_close_userland(); \
3781 if (err) \
3782 goto fault; \
3783 } while (0)
3784@@ -233,6 +235,7 @@ union offset_union {
3785 #define __get32_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 24 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792@@ -241,6 +244,7 @@ union offset_union {
3793 val |= v << ((BE) ? 8 : 16); \
3794 __get8_unaligned_check(ins,v,a,err); \
3795 val |= v << ((BE) ? 0 : 24); \
3796+ pax_close_userland(); \
3797 if (err) \
3798 goto fault; \
3799 } while (0)
3800@@ -254,6 +258,7 @@ union offset_union {
3801 #define __put16_unaligned_check(ins,val,addr) \
3802 do { \
3803 unsigned int err = 0, v = val, a = addr; \
3804+ pax_open_userland(); \
3805 __asm__( FIRST_BYTE_16 \
3806 ARM( "1: "ins" %1, [%2], #1\n" ) \
3807 THUMB( "1: "ins" %1, [%2]\n" ) \
3808@@ -273,6 +278,7 @@ union offset_union {
3809 " .popsection\n" \
3810 : "=r" (err), "=&r" (v), "=&r" (a) \
3811 : "0" (err), "1" (v), "2" (a)); \
3812+ pax_close_userland(); \
3813 if (err) \
3814 goto fault; \
3815 } while (0)
3816@@ -286,6 +292,7 @@ union offset_union {
3817 #define __put32_unaligned_check(ins,val,addr) \
3818 do { \
3819 unsigned int err = 0, v = val, a = addr; \
3820+ pax_open_userland(); \
3821 __asm__( FIRST_BYTE_32 \
3822 ARM( "1: "ins" %1, [%2], #1\n" ) \
3823 THUMB( "1: "ins" %1, [%2]\n" ) \
3824@@ -315,6 +322,7 @@ union offset_union {
3825 " .popsection\n" \
3826 : "=r" (err), "=&r" (v), "=&r" (a) \
3827 : "0" (err), "1" (v), "2" (a)); \
3828+ pax_close_userland(); \
3829 if (err) \
3830 goto fault; \
3831 } while (0)
3832diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3833index 8f15f70..d599a2b 100644
3834--- a/arch/arm/mm/cache-l2x0.c
3835+++ b/arch/arm/mm/cache-l2x0.c
3836@@ -43,7 +43,7 @@ struct l2c_init_data {
3837 void (*save)(void __iomem *);
3838 void (*configure)(void __iomem *);
3839 struct outer_cache_fns outer_cache;
3840-};
3841+} __do_const;
3842
3843 #define CACHE_LINE_SIZE 32
3844
3845diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3846index 845769e..4278fd7 100644
3847--- a/arch/arm/mm/context.c
3848+++ b/arch/arm/mm/context.c
3849@@ -43,7 +43,7 @@
3850 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3851
3852 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3853-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3854+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3855 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3856
3857 static DEFINE_PER_CPU(atomic64_t, active_asids);
3858@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3859 {
3860 static u32 cur_idx = 1;
3861 u64 asid = atomic64_read(&mm->context.id);
3862- u64 generation = atomic64_read(&asid_generation);
3863+ u64 generation = atomic64_read_unchecked(&asid_generation);
3864
3865 if (asid != 0) {
3866 /*
3867@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3868 */
3869 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3870 if (asid == NUM_USER_ASIDS) {
3871- generation = atomic64_add_return(ASID_FIRST_VERSION,
3872+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3873 &asid_generation);
3874 flush_context(cpu);
3875 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3876@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3877 cpu_set_reserved_ttbr0();
3878
3879 asid = atomic64_read(&mm->context.id);
3880- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3881+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3882 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3883 goto switch_mm_fastpath;
3884
3885 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3886 /* Check that our ASID belongs to the current generation. */
3887 asid = atomic64_read(&mm->context.id);
3888- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3889+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3890 asid = new_context(mm, cpu);
3891 atomic64_set(&mm->context.id, asid);
3892 }
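
atomic64_unchecked_t exists because PAX_REFCOUNT instruments the ordinary atomics to trap on signed overflow; counters that are supposed to wrap, like asid_generation here, move to the _unchecked variants, which keep plain wrapping semantics. A userspace model of the distinction, with abort() standing in for the kernel's trap-and-report path:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int64_t add_checked(int64_t *v, int64_t a)
    {
        int64_t r;
        if (__builtin_add_overflow(*v, a, &r))
            abort();    /* kernel analogue: pax_report_refcount_overflow() */
        return *v = r;
    }

    static int64_t add_unchecked(int64_t *v, int64_t a)
    {
        return *v = (int64_t)((uint64_t)*v + (uint64_t)a);  /* wraps silently */
    }

    int main(void)
    {
        int64_t gen = INT64_MAX;
        printf("unchecked wrap: %lld\n", (long long)add_unchecked(&gen, 1));
        gen = INT64_MAX;
        add_checked(&gen, 1);   /* aborts: overflow detected */
        return 0;
    }
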
3893diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3894index 6333d9c..fd09b46 100644
3895--- a/arch/arm/mm/fault.c
3896+++ b/arch/arm/mm/fault.c
3897@@ -25,6 +25,7 @@
3898 #include <asm/system_misc.h>
3899 #include <asm/system_info.h>
3900 #include <asm/tlbflush.h>
3901+#include <asm/sections.h>
3902
3903 #include "fault.h"
3904
3905@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3906 if (fixup_exception(regs))
3907 return;
3908
3909+#ifdef CONFIG_PAX_MEMORY_UDEREF
3910+ if (addr < TASK_SIZE) {
3911+ if (current->signal->curr_ip)
3912+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3913+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3914+ else
3915+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3916+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3917+ }
3918+#endif
3919+
3920+#ifdef CONFIG_PAX_KERNEXEC
3921+ if ((fsr & FSR_WRITE) &&
3922+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3923+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3924+ {
3925+ if (current->signal->curr_ip)
3926+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3927+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3928+ else
3929+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3930+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3931+ }
3932+#endif
3933+
3934 /*
3935 * No handler, we'll have to terminate things with extreme prejudice.
3936 */
3937@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3938 }
3939 #endif
3940
3941+#ifdef CONFIG_PAX_PAGEEXEC
3942+ if (fsr & FSR_LNX_PF) {
3943+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3944+ do_group_exit(SIGKILL);
3945+ }
3946+#endif
3947+
3948 tsk->thread.address = addr;
3949 tsk->thread.error_code = fsr;
3950 tsk->thread.trap_no = 14;
3951@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3952 }
3953 #endif /* CONFIG_MMU */
3954
3955+#ifdef CONFIG_PAX_PAGEEXEC
3956+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3957+{
3958+ long i;
3959+
3960+ printk(KERN_ERR "PAX: bytes at PC: ");
3961+ for (i = 0; i < 20; i++) {
3962+ unsigned char c;
3963+ if (get_user(c, (__force unsigned char __user *)pc+i))
3964+ printk(KERN_CONT "?? ");
3965+ else
3966+ printk(KERN_CONT "%02x ", c);
3967+ }
3968+ printk("\n");
3969+
3970+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3971+ for (i = -1; i < 20; i++) {
3972+ unsigned long c;
3973+ if (get_user(c, (__force unsigned long __user *)sp+i))
3974+ printk(KERN_CONT "???????? ");
3975+ else
3976+ printk(KERN_CONT "%08lx ", c);
3977+ }
3978+ printk("\n");
3979+}
3980+#endif
3981+
3982 /*
3983 * First Level Translation Fault Handler
3984 *
3985@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3986 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3987 struct siginfo info;
3988
3989+#ifdef CONFIG_PAX_MEMORY_UDEREF
3990+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3991+ if (current->signal->curr_ip)
3992+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3993+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3994+ else
3995+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3996+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3997+ goto die;
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, fsr, addr);
4007 show_pte(current->mm, addr);
4008@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4009 ifsr_info[nr].name = name;
4010 }
4011
4012+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4013+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4014+
4015 asmlinkage void __exception
4016 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4017 {
4018 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4019 struct siginfo info;
4020+ unsigned long pc = instruction_pointer(regs);
4021+
4022+ if (user_mode(regs)) {
4023+ unsigned long sigpage = current->mm->context.sigpage;
4024+
4025+ if (sigpage <= pc && pc < sigpage + 7*4) {
4026+ if (pc < sigpage + 3*4)
4027+ sys_sigreturn(regs);
4028+ else
4029+ sys_rt_sigreturn(regs);
4030+ return;
4031+ }
4032+ if (pc == 0xffff0f60UL) {
4033+ /*
4034+ * PaX: __kuser_cmpxchg64 emulation
4035+ */
4036+ // TODO
4037+ //regs->ARM_pc = regs->ARM_lr;
4038+ //return;
4039+ }
4040+ if (pc == 0xffff0fa0UL) {
4041+ /*
4042+ * PaX: __kuser_memory_barrier emulation
4043+ */
4044+ // dmb(); implied by the exception
4045+ regs->ARM_pc = regs->ARM_lr;
4046+ return;
4047+ }
4048+ if (pc == 0xffff0fc0UL) {
4049+ /*
4050+ * PaX: __kuser_cmpxchg emulation
4051+ */
4052+ // TODO
4053+ //long new;
4054+ //int op;
4055+
4056+ //op = FUTEX_OP_SET << 28;
4057+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4058+ //regs->ARM_r0 = old != new;
4059+ //regs->ARM_pc = regs->ARM_lr;
4060+ //return;
4061+ }
4062+ if (pc == 0xffff0fe0UL) {
4063+ /*
4064+ * PaX: __kuser_get_tls emulation
4065+ */
4066+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4067+ regs->ARM_pc = regs->ARM_lr;
4068+ return;
4069+ }
4070+ }
4071+
4072+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4073+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4074+ if (current->signal->curr_ip)
4075+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4077+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4078+ else
4079+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4080+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4081+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4082+ goto die;
4083+ }
4084+#endif
4085+
4086+#ifdef CONFIG_PAX_REFCOUNT
4087+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4088+#ifdef CONFIG_THUMB2_KERNEL
4089+ unsigned short bkpt;
4090+
4091+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4092+#else
4093+ unsigned int bkpt;
4094+
4095+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4096+#endif
4097+ current->thread.error_code = ifsr;
4098+ current->thread.trap_no = 0;
4099+ pax_report_refcount_overflow(regs);
4100+ fixup_exception(regs);
4101+ return;
4102+ }
4103+ }
4104+#endif
4105
4106 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4107 return;
4108
4109+die:
4110 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4111 inf->name, ifsr, addr);
4112
4113diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4114index cf08bdf..772656c 100644
4115--- a/arch/arm/mm/fault.h
4116+++ b/arch/arm/mm/fault.h
4117@@ -3,6 +3,7 @@
4118
4119 /*
4120 * Fault status register encodings. We steal bit 31 for our own purposes.
4121+ * Set when the FSR value is from an instruction fault.
4122 */
4123 #define FSR_LNX_PF (1 << 31)
4124 #define FSR_WRITE (1 << 11)
4125@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4126 }
4127 #endif
4128
4129+/* valid for LPAE and !LPAE */
4130+static inline int is_xn_fault(unsigned int fsr)
4131+{
4132+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4133+}
4134+
4135+static inline int is_domain_fault(unsigned int fsr)
4136+{
4137+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4138+}
4139+
4140 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4141 unsigned long search_exception_table(unsigned long addr);
4142
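The two predicates added to fault.h are pure bit tests on the extracted fault status. Assuming the architectural encodings (short-descriptor: 13/15 are section/page permission faults, 9/11 domain faults; LPAE: 12-15 are level-0..3 permission faults), the masks fold each family into one comparison, which is what the "valid for LPAE and !LPAE" comment is claiming. A table of the low statuses, generated by the checks themselves:

    #include <stdio.h>

    static int is_xn_fault_fs(unsigned int fs)     { return (fs & 0x3c) == 0x0c; }
    static int is_domain_fault_fs(unsigned int fs) { return (fs & 0x0d) == 0x09; }

    int main(void)
    {
        for (unsigned int fs = 0; fs < 16; fs++)
            printf("fs=%2u  xn=%d  domain=%d\n",
                   fs, is_xn_fault_fs(fs), is_domain_fault_fs(fs));
        return 0;   /* xn fires for 12..15, domain for 9 and 11 */
    }
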
4143diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4144index 1609b02..def0785 100644
4145--- a/arch/arm/mm/init.c
4146+++ b/arch/arm/mm/init.c
4147@@ -755,7 +755,46 @@ void free_tcmmem(void)
4148 {
4149 #ifdef CONFIG_HAVE_TCM
4150 extern char __tcm_start, __tcm_end;
4151+#endif
4152
4153+#ifdef CONFIG_PAX_KERNEXEC
4154+ unsigned long addr;
4155+ pgd_t *pgd;
4156+ pud_t *pud;
4157+ pmd_t *pmd;
4158+ int cpu_arch = cpu_architecture();
4159+ unsigned int cr = get_cr();
4160+
4161+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4162+ /* make page tables, etc. before .text NX */
4163+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4164+ pgd = pgd_offset_k(addr);
4165+ pud = pud_offset(pgd, addr);
4166+ pmd = pmd_offset(pud, addr);
4167+ __section_update(pmd, addr, PMD_SECT_XN);
4168+ }
4169+ /* make init NX */
4170+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4171+ pgd = pgd_offset_k(addr);
4172+ pud = pud_offset(pgd, addr);
4173+ pmd = pmd_offset(pud, addr);
4174+ __section_update(pmd, addr, PMD_SECT_XN);
4175+ }
4176+ /* make kernel code/rodata RX */
4177+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4178+ pgd = pgd_offset_k(addr);
4179+ pud = pud_offset(pgd, addr);
4180+ pmd = pmd_offset(pud, addr);
4181+#ifdef CONFIG_ARM_LPAE
4182+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4183+#else
4184+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4185+#endif
4186+ }
4187+ }
4188+#endif
4189+
4190+#ifdef CONFIG_HAVE_TCM
4191 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4192 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4193 #endif
4194diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4195index d1e5ad7..84dcbf2 100644
4196--- a/arch/arm/mm/ioremap.c
4197+++ b/arch/arm/mm/ioremap.c
4198@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4199 unsigned int mtype;
4200
4201 if (cached)
4202- mtype = MT_MEMORY_RWX;
4203+ mtype = MT_MEMORY_RX;
4204 else
4205- mtype = MT_MEMORY_RWX_NONCACHED;
4206+ mtype = MT_MEMORY_RX_NONCACHED;
4207
4208 return __arm_ioremap_caller(phys_addr, size, mtype,
4209 __builtin_return_address(0));
4210diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4211index 5e85ed3..b10a7ed 100644
4212--- a/arch/arm/mm/mmap.c
4213+++ b/arch/arm/mm/mmap.c
4214@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4215 struct vm_area_struct *vma;
4216 int do_align = 0;
4217 int aliasing = cache_is_vipt_aliasing();
4218+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4219 struct vm_unmapped_area_info info;
4220
4221 /*
4222@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4223 if (len > TASK_SIZE)
4224 return -ENOMEM;
4225
4226+#ifdef CONFIG_PAX_RANDMMAP
4227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4228+#endif
4229+
4230 if (addr) {
4231 if (do_align)
4232 addr = COLOUR_ALIGN(addr, pgoff);
4233@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4234 addr = PAGE_ALIGN(addr);
4235
4236 vma = find_vma(mm, addr);
4237- if (TASK_SIZE - len >= addr &&
4238- (!vma || addr + len <= vma->vm_start))
4239+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4240 return addr;
4241 }
4242
4243@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 info.high_limit = TASK_SIZE;
4245 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4246 info.align_offset = pgoff << PAGE_SHIFT;
4247+ info.threadstack_offset = offset;
4248 return vm_unmapped_area(&info);
4249 }
4250
4251@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4252 unsigned long addr = addr0;
4253 int do_align = 0;
4254 int aliasing = cache_is_vipt_aliasing();
4255+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4256 struct vm_unmapped_area_info info;
4257
4258 /*
4259@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4260 return addr;
4261 }
4262
4263+#ifdef CONFIG_PAX_RANDMMAP
4264+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4265+#endif
4266+
4267 /* requesting a specific address */
4268 if (addr) {
4269 if (do_align)
4270@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 else
4272 addr = PAGE_ALIGN(addr);
4273 vma = find_vma(mm, addr);
4274- if (TASK_SIZE - len >= addr &&
4275- (!vma || addr + len <= vma->vm_start))
4276+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4277 return addr;
4278 }
4279
4280@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 info.high_limit = mm->mmap_base;
4282 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4283 info.align_offset = pgoff << PAGE_SHIFT;
4284+ info.threadstack_offset = offset;
4285 addr = vm_unmapped_area(&info);
4286
4287 /*
4288@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4289 {
4290 unsigned long random_factor = 0UL;
4291
4292+#ifdef CONFIG_PAX_RANDMMAP
4293+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4294+#endif
4295+
4296 /* 8 bits of randomness in 20 address space bits */
4297 if ((current->flags & PF_RANDOMIZE) &&
4298 !(current->personality & ADDR_NO_RANDOMIZE))
4299@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4300
4301 if (mmap_is_legacy()) {
4302 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4303+
4304+#ifdef CONFIG_PAX_RANDMMAP
4305+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4306+ mm->mmap_base += mm->delta_mmap;
4307+#endif
4308+
4309 mm->get_unmapped_area = arch_get_unmapped_area;
4310 } else {
4311 mm->mmap_base = mmap_base(random_factor);
4312+
4313+#ifdef CONFIG_PAX_RANDMMAP
4314+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4315+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4316+#endif
4317+
4318 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4319 }
4320 }
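check_heap_stack_gap() (a grsecurity helper defined outside this excerpt) subsumes the open-coded "!vma || addr + len <= vma->vm_start" test and additionally demands slack between the candidate range and the neighbouring mapping, with info.threadstack_offset feeding in the per-thread random component. A deliberately simplified model of the predicate, covering only the next-mapping case; the real helper also special-cases stack and heap VMAs:

    #include <stdbool.h>
    #include <stdio.h>

    struct vma_model { unsigned long vm_start, vm_end; };

    static bool fits_with_gap(const struct vma_model *next,
                              unsigned long addr, unsigned long len,
                              unsigned long gap)
    {
        if (!next)
            return true;                    /* nothing mapped above the range */
        return addr + len + gap <= next->vm_start;
    }

    int main(void)
    {
        struct vma_model stack = { 0x7f000000UL, 0x7f100000UL };
        /* 1 MiB request just below the mapping, without and with a 64 KiB gap */
        printf("%d %d\n",
               fits_with_gap(&stack, 0x7ef00000UL, 0x100000UL, 0),
               fits_with_gap(&stack, 0x7ef00000UL, 0x100000UL, 0x10000UL));
        return 0;   /* prints "1 0" */
    }
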
4321diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4322index 4e6ef89..21c27f2 100644
4323--- a/arch/arm/mm/mmu.c
4324+++ b/arch/arm/mm/mmu.c
4325@@ -41,6 +41,22 @@
4326 #include "mm.h"
4327 #include "tcm.h"
4328
4329+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4330+void modify_domain(unsigned int dom, unsigned int type)
4331+{
4332+ struct thread_info *thread = current_thread_info();
4333+ unsigned int domain = thread->cpu_domain;
4334+ /*
4335+ * DOMAIN_MANAGER might be defined to some other value,
4336+ * use the arch-defined constant
4337+ */
4338+ domain &= ~domain_val(dom, 3);
4339+ thread->cpu_domain = domain | domain_val(dom, type);
4340+ set_domain(thread->cpu_domain);
4341+}
4342+EXPORT_SYMBOL(modify_domain);
4343+#endif
4344+
4345 /*
4346 * empty_zero_page is a special page that is used for
4347 * zero-initialized data and COW.
4348@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4349 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4350 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4351
4352-static struct mem_type mem_types[] = {
4353+#ifdef CONFIG_PAX_KERNEXEC
4354+#define L_PTE_KERNEXEC L_PTE_RDONLY
4355+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4356+#else
4357+#define L_PTE_KERNEXEC L_PTE_DIRTY
4358+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4359+#endif
4360+
4361+static struct mem_type mem_types[] __read_only = {
4362 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4363 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4364 L_PTE_SHARED,
4365@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4366 .prot_sect = PROT_SECT_DEVICE,
4367 .domain = DOMAIN_IO,
4368 },
4369- [MT_UNCACHED] = {
4370+ [MT_UNCACHED_RW] = {
4371 .prot_pte = PROT_PTE_DEVICE,
4372 .prot_l1 = PMD_TYPE_TABLE,
4373 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4374 .domain = DOMAIN_IO,
4375 },
4376- [MT_CACHECLEAN] = {
4377- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4378+ [MT_CACHECLEAN_RO] = {
4379+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4380 .domain = DOMAIN_KERNEL,
4381 },
4382 #ifndef CONFIG_ARM_LPAE
4383- [MT_MINICLEAN] = {
4384- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4385+ [MT_MINICLEAN_RO] = {
4386+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4387 .domain = DOMAIN_KERNEL,
4388 },
4389 #endif
4390@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4391 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4392 L_PTE_RDONLY,
4393 .prot_l1 = PMD_TYPE_TABLE,
4394- .domain = DOMAIN_USER,
4395+ .domain = DOMAIN_VECTORS,
4396 },
4397 [MT_HIGH_VECTORS] = {
4398 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4399 L_PTE_USER | L_PTE_RDONLY,
4400 .prot_l1 = PMD_TYPE_TABLE,
4401- .domain = DOMAIN_USER,
4402+ .domain = DOMAIN_VECTORS,
4403 },
4404- [MT_MEMORY_RWX] = {
4405+ [__MT_MEMORY_RWX] = {
4406 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4407 .prot_l1 = PMD_TYPE_TABLE,
4408 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4409@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4410 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4411 .domain = DOMAIN_KERNEL,
4412 },
4413- [MT_ROM] = {
4414- .prot_sect = PMD_TYPE_SECT,
4415+ [MT_MEMORY_RX] = {
4416+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4417+ .prot_l1 = PMD_TYPE_TABLE,
4418+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4419+ .domain = DOMAIN_KERNEL,
4420+ },
4421+ [MT_ROM_RX] = {
4422+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4423 .domain = DOMAIN_KERNEL,
4424 },
4425- [MT_MEMORY_RWX_NONCACHED] = {
4426+ [MT_MEMORY_RW_NONCACHED] = {
4427 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4428 L_PTE_MT_BUFFERABLE,
4429 .prot_l1 = PMD_TYPE_TABLE,
4430 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4431 .domain = DOMAIN_KERNEL,
4432 },
4433+ [MT_MEMORY_RX_NONCACHED] = {
4434+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4435+ L_PTE_MT_BUFFERABLE,
4436+ .prot_l1 = PMD_TYPE_TABLE,
4437+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4438+ .domain = DOMAIN_KERNEL,
4439+ },
4440 [MT_MEMORY_RW_DTCM] = {
4441 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4442 L_PTE_XN,
4443@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4444 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4445 .domain = DOMAIN_KERNEL,
4446 },
4447- [MT_MEMORY_RWX_ITCM] = {
4448- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4449+ [MT_MEMORY_RX_ITCM] = {
4450+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4451 .prot_l1 = PMD_TYPE_TABLE,
4452+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4453 .domain = DOMAIN_KERNEL,
4454 },
4455 [MT_MEMORY_RW_SO] = {
4456@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4457 * Mark cache clean areas and XIP ROM read only
4458 * from SVC mode and no access from userspace.
4459 */
4460- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4461- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4462- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4463+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4464+#ifdef CONFIG_PAX_KERNEXEC
4465+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4466+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4467+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4468+#endif
4469+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4470+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471 #endif
4472
4473 /*
4474@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4475 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4476 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4477 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4478- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4479- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4480+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4481+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4482 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4483 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4484+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4485+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4487- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4488- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4489+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4490+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4493 }
4494 }
4495
4496@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4497 if (cpu_arch >= CPU_ARCH_ARMv6) {
4498 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4499 /* Non-cacheable Normal is XCB = 001 */
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4501+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4502+ PMD_SECT_BUFFERED;
4503+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4504 PMD_SECT_BUFFERED;
4505 } else {
4506 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4507- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4508+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4509+ PMD_SECT_TEX(1);
4510+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4511 PMD_SECT_TEX(1);
4512 }
4513 } else {
4514- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4515+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4517 }
4518
4519 #ifdef CONFIG_ARM_LPAE
4520@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4521 user_pgprot |= PTE_EXT_PXN;
4522 #endif
4523
4524+ user_pgprot |= __supported_pte_mask;
4525+
4526 for (i = 0; i < 16; i++) {
4527 pteval_t v = pgprot_val(protection_map[i]);
4528 protection_map[i] = __pgprot(v | user_pgprot);
4529@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4530
4531 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4532 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4533- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4534- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4535+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4536+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4537 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4538 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4539+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4540+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4541 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4542- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4543- mem_types[MT_ROM].prot_sect |= cp->pmd;
4544+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4545+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4546+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4547
4548 switch (cp->pmd) {
4549 case PMD_SECT_WT:
4550- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4551+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4552 break;
4553 case PMD_SECT_WB:
4554 case PMD_SECT_WBWA:
4555- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4556+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4557 break;
4558 }
4559 pr_info("Memory policy: %sData cache %s\n",
4560@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4561 return;
4562 }
4563
4564- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4565+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4566 md->virtual >= PAGE_OFFSET &&
4567 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4568 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4569@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4570 * called function. This means you can't use any function or debugging
4571 * method which may touch any device, otherwise the kernel _will_ crash.
4572 */
4573+
4574+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4575+
4576 static void __init devicemaps_init(const struct machine_desc *mdesc)
4577 {
4578 struct map_desc map;
4579 unsigned long addr;
4580- void *vectors;
4581
4582- /*
4583- * Allocate the vector page early.
4584- */
4585- vectors = early_alloc(PAGE_SIZE * 2);
4586-
4587- early_trap_init(vectors);
4588+ early_trap_init(&vectors);
4589
4590 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4591 pmd_clear(pmd_off_k(addr));
4592@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4593 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4594 map.virtual = MODULES_VADDR;
4595 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4596- map.type = MT_ROM;
4597+ map.type = MT_ROM_RX;
4598 create_mapping(&map);
4599 #endif
4600
4601@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4602 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4603 map.virtual = FLUSH_BASE;
4604 map.length = SZ_1M;
4605- map.type = MT_CACHECLEAN;
4606+ map.type = MT_CACHECLEAN_RO;
4607 create_mapping(&map);
4608 #endif
4609 #ifdef FLUSH_BASE_MINICACHE
4610 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4611 map.virtual = FLUSH_BASE_MINICACHE;
4612 map.length = SZ_1M;
4613- map.type = MT_MINICLEAN;
4614+ map.type = MT_MINICLEAN_RO;
4615 create_mapping(&map);
4616 #endif
4617
4618@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4619 * location (0xffff0000). If we aren't using high-vectors, also
4620 * create a mapping at the low-vectors virtual address.
4621 */
4622- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4623+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4624 map.virtual = 0xffff0000;
4625 map.length = PAGE_SIZE;
4626 #ifdef CONFIG_KUSER_HELPERS
4627@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4628 static void __init map_lowmem(void)
4629 {
4630 struct memblock_region *reg;
4631+#ifndef CONFIG_PAX_KERNEXEC
4632 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4633 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4634+#endif
4635
4636 /* Map all the lowmem memory banks. */
4637 for_each_memblock(memory, reg) {
4638@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4639 if (start >= end)
4640 break;
4641
4642+#ifdef CONFIG_PAX_KERNEXEC
4643+ map.pfn = __phys_to_pfn(start);
4644+ map.virtual = __phys_to_virt(start);
4645+ map.length = end - start;
4646+
4647+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4648+ struct map_desc kernel;
4649+ struct map_desc initmap;
4650+
4651+ /* when freeing initmem we will make this RW */
4652+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4653+ initmap.virtual = (unsigned long)__init_begin;
4654+ initmap.length = _sdata - __init_begin;
4655+ initmap.type = __MT_MEMORY_RWX;
4656+ create_mapping(&initmap);
4657+
4658+ /* when freeing initmem we will make this RX */
4659+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4660+ kernel.virtual = (unsigned long)_stext;
4661+ kernel.length = __init_begin - _stext;
4662+ kernel.type = __MT_MEMORY_RWX;
4663+ create_mapping(&kernel);
4664+
4665+ if (map.virtual < (unsigned long)_stext) {
4666+ map.length = (unsigned long)_stext - map.virtual;
4667+ map.type = __MT_MEMORY_RWX;
4668+ create_mapping(&map);
4669+ }
4670+
4671+ map.pfn = __phys_to_pfn(__pa(_sdata));
4672+ map.virtual = (unsigned long)_sdata;
4673+ map.length = end - __pa(_sdata);
4674+ }
4675+
4676+ map.type = MT_MEMORY_RW;
4677+ create_mapping(&map);
4678+#else
4679 if (end < kernel_x_start) {
4680 map.pfn = __phys_to_pfn(start);
4681 map.virtual = __phys_to_virt(start);
4682 map.length = end - start;
4683- map.type = MT_MEMORY_RWX;
4684+ map.type = __MT_MEMORY_RWX;
4685
4686 create_mapping(&map);
4687 } else if (start >= kernel_x_end) {
4688@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4689 map.pfn = __phys_to_pfn(kernel_x_start);
4690 map.virtual = __phys_to_virt(kernel_x_start);
4691 map.length = kernel_x_end - kernel_x_start;
4692- map.type = MT_MEMORY_RWX;
4693+ map.type = __MT_MEMORY_RWX;
4694
4695 create_mapping(&map);
4696
4697@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4698 create_mapping(&map);
4699 }
4700 }
4701+#endif
4702 }
4703 }
4704
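The modify_domain() helper added at the top of this mmu.c diff is plain 2-bit field surgery on the per-thread copy of the ARM domain access control register: domain_val(dom, type) positions an access type in domain dom's field, and the helper replaces exactly that field before loading the register via set_domain(). A standalone model using the architectural encodings (00 no access, 01 client, 11 manager); the domain numbers in main() are illustrative:

    #include <stdio.h>

    #define DOMAIN_NOACCESS 0U
    #define DOMAIN_CLIENT   1U
    #define DOMAIN_MANAGER  3U
    #define domain_val(dom, type) ((type) << (2 * (dom)))

    static unsigned int dacr;   /* stand-in for thread->cpu_domain */

    static void modify_domain(unsigned int dom, unsigned int type)
    {
        dacr &= ~domain_val(dom, 3);    /* clear the domain's 2-bit field */
        dacr |= domain_val(dom, type);  /* install the new access type */
    }

    int main(void)
    {
        modify_domain(0, DOMAIN_CLIENT);
        modify_domain(1, DOMAIN_MANAGER);
        printf("dacr=%08x\n", dacr);    /* 0000000d */
        modify_domain(1, DOMAIN_NOACCESS);
        printf("dacr=%08x\n", dacr);    /* 00000001 */
        return 0;
    }
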
4705diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4706index f412b53..fc89433 100644
4707--- a/arch/arm/net/bpf_jit_32.c
4708+++ b/arch/arm/net/bpf_jit_32.c
4709@@ -20,6 +20,7 @@
4710 #include <asm/cacheflush.h>
4711 #include <asm/hwcap.h>
4712 #include <asm/opcodes.h>
4713+#include <asm/pgtable.h>
4714
4715 #include "bpf_jit_32.h"
4716
4717@@ -71,7 +72,11 @@ struct jit_ctx {
4718 #endif
4719 };
4720
4721+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4722+int bpf_jit_enable __read_only;
4723+#else
4724 int bpf_jit_enable __read_mostly;
4725+#endif
4726
4727 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4728 {
4729@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4730 {
4731 u32 *ptr;
4732 /* We are guaranteed to have aligned memory. */
4733+ pax_open_kernel();
4734 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4735 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4736+ pax_close_kernel();
4737 }
4738
4739 static void build_prologue(struct jit_ctx *ctx)
4740diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4741index 5b217f4..c23f40e 100644
4742--- a/arch/arm/plat-iop/setup.c
4743+++ b/arch/arm/plat-iop/setup.c
4744@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4745 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4746 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4747 .length = IOP3XX_PERIPHERAL_SIZE,
4748- .type = MT_UNCACHED,
4749+ .type = MT_UNCACHED_RW,
4750 },
4751 };
4752
4753diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4754index a5bc92d..0bb4730 100644
4755--- a/arch/arm/plat-omap/sram.c
4756+++ b/arch/arm/plat-omap/sram.c
4757@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4758 * Looks like we need to preserve some bootloader code at the
4759 * beginning of SRAM for jumping to flash for reboot to work...
4760 */
4761+ pax_open_kernel();
4762 memset_io(omap_sram_base + omap_sram_skip, 0,
4763 omap_sram_size - omap_sram_skip);
4764+ pax_close_kernel();
4765 }
4766diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4767index 7047051..44e8675 100644
4768--- a/arch/arm64/include/asm/atomic.h
4769+++ b/arch/arm64/include/asm/atomic.h
4770@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4771 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4772 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 #endif
4785 #endif
4786diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4787index a5abb00..9cbca9a 100644
4788--- a/arch/arm64/include/asm/barrier.h
4789+++ b/arch/arm64/include/asm/barrier.h
4790@@ -44,7 +44,7 @@
4791 do { \
4792 compiletime_assert_atomic_type(*p); \
4793 barrier(); \
4794- ACCESS_ONCE(*p) = (v); \
4795+ ACCESS_ONCE_RW(*p) = (v); \
4796 } while (0)
4797
4798 #define smp_load_acquire(p) \
4799diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4800index 4fde8c1..441f84f 100644
4801--- a/arch/arm64/include/asm/percpu.h
4802+++ b/arch/arm64/include/asm/percpu.h
4803@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4804 {
4805 switch (size) {
4806 case 1:
4807- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4808+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4809 break;
4810 case 2:
4811- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4812+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4813 break;
4814 case 4:
4815- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4816+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4817 break;
4818 case 8:
4819- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4820+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4821 break;
4822 default:
4823 BUILD_BUG();
4824diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4825index e20df38..027ede3 100644
4826--- a/arch/arm64/include/asm/pgalloc.h
4827+++ b/arch/arm64/include/asm/pgalloc.h
4828@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4829 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4830 }
4831
4832+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4833+{
4834+ pud_populate(mm, pud, pmd);
4835+}
4836+
4837 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4838
4839 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4840diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4841index 07e1ba44..ec8cbbb 100644
4842--- a/arch/arm64/include/asm/uaccess.h
4843+++ b/arch/arm64/include/asm/uaccess.h
4844@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4845 flag; \
4846 })
4847
4848+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4849 #define access_ok(type, addr, size) __range_ok(addr, size)
4850 #define user_addr_max get_fs
4851
4852diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4853index b0bd4e5..54e82f6 100644
4854--- a/arch/arm64/mm/dma-mapping.c
4855+++ b/arch/arm64/mm/dma-mapping.c
4856@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4857 phys_to_page(paddr),
4858 size >> PAGE_SHIFT);
4859 if (!freed)
4860- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4861+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4862 }
4863
4864 static void *__dma_alloc(struct device *dev, size_t size,
4865diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4866index c3a58a1..78fbf54 100644
4867--- a/arch/avr32/include/asm/cache.h
4868+++ b/arch/avr32/include/asm/cache.h
4869@@ -1,8 +1,10 @@
4870 #ifndef __ASM_AVR32_CACHE_H
4871 #define __ASM_AVR32_CACHE_H
4872
4873+#include <linux/const.h>
4874+
4875 #define L1_CACHE_SHIFT 5
4876-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4877+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4878
4879 /*
4880 * Memory returned by kmalloc() may be used for DMA, so we must make
4881diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4882index d232888..87c8df1 100644
4883--- a/arch/avr32/include/asm/elf.h
4884+++ b/arch/avr32/include/asm/elf.h
4885@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4886 the loader. We need to make sure that it is out of the way of the program
4887 that it will "exec", and that there is sufficient room for the brk. */
4888
4889-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4890+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4891
4892+#ifdef CONFIG_PAX_ASLR
4893+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4894+
4895+#define PAX_DELTA_MMAP_LEN 15
4896+#define PAX_DELTA_STACK_LEN 15
4897+#endif
4898
4899 /* This yields a mask that user programs can use to figure out what
4900 instruction set this CPU supports. This could be done in user space,
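
For scale: PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN give the number of random bits applied in page units, so 15 bits with avr32's 4 KiB pages slide the mmap base by up to 128 MiB. A one-liner to check the arithmetic; the page size and the pages-then-shift application are assumptions about the PaX core, which is not shown in this hunk:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;  /* 4 KiB pages (assumption) */
        unsigned long delta_bits = 15;  /* PAX_DELTA_MMAP_LEN above */
        printf("max mmap slide: %lu MiB\n",
               ((1UL << delta_bits) << page_shift) >> 20);  /* 128 */
        return 0;
    }
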
4901diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4902index 479330b..53717a8 100644
4903--- a/arch/avr32/include/asm/kmap_types.h
4904+++ b/arch/avr32/include/asm/kmap_types.h
4905@@ -2,9 +2,9 @@
4906 #define __ASM_AVR32_KMAP_TYPES_H
4907
4908 #ifdef CONFIG_DEBUG_HIGHMEM
4909-# define KM_TYPE_NR 29
4910+# define KM_TYPE_NR 30
4911 #else
4912-# define KM_TYPE_NR 14
4913+# define KM_TYPE_NR 15
4914 #endif
4915
4916 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4917diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4918index d223a8b..69c5210 100644
4919--- a/arch/avr32/mm/fault.c
4920+++ b/arch/avr32/mm/fault.c
4921@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4922
4923 int exception_trace = 1;
4924
4925+#ifdef CONFIG_PAX_PAGEEXEC
4926+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4927+{
4928+ unsigned long i;
4929+
4930+ printk(KERN_ERR "PAX: bytes at PC: ");
4931+ for (i = 0; i < 20; i++) {
4932+ unsigned char c;
4933+ if (get_user(c, (unsigned char *)pc+i))
4934+ printk(KERN_CONT "???????? ");
4935+ else
4936+ printk(KERN_CONT "%02x ", c);
4937+ }
4938+ printk("\n");
4939+}
4940+#endif
4941+
4942 /*
4943 * This routine handles page faults. It determines the address and the
4944 * problem, and then passes it off to one of the appropriate routines.
4945@@ -178,6 +195,16 @@ bad_area:
4946 up_read(&mm->mmap_sem);
4947
4948 if (user_mode(regs)) {
4949+
4950+#ifdef CONFIG_PAX_PAGEEXEC
4951+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4952+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4953+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4954+ do_group_exit(SIGKILL);
4955+ }
4956+ }
4957+#endif
4958+
4959 if (exception_trace && printk_ratelimit())
4960 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4961 "sp %08lx ecr %lu\n",
4962diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4963index 568885a..f8008df 100644
4964--- a/arch/blackfin/include/asm/cache.h
4965+++ b/arch/blackfin/include/asm/cache.h
4966@@ -7,6 +7,7 @@
4967 #ifndef __ARCH_BLACKFIN_CACHE_H
4968 #define __ARCH_BLACKFIN_CACHE_H
4969
4970+#include <linux/const.h>
4971 #include <linux/linkage.h> /* for asmlinkage */
4972
4973 /*
4974@@ -14,7 +15,7 @@
4975 * Blackfin loads 32 bytes for cache
4976 */
4977 #define L1_CACHE_SHIFT 5
4978-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4979+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4980 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4981
4982 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4983diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4984index aea2718..3639a60 100644
4985--- a/arch/cris/include/arch-v10/arch/cache.h
4986+++ b/arch/cris/include/arch-v10/arch/cache.h
4987@@ -1,8 +1,9 @@
4988 #ifndef _ASM_ARCH_CACHE_H
4989 #define _ASM_ARCH_CACHE_H
4990
4991+#include <linux/const.h>
4992 /* Etrax 100LX have 32-byte cache-lines. */
4993-#define L1_CACHE_BYTES 32
4994 #define L1_CACHE_SHIFT 5
4995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4996
4997 #endif /* _ASM_ARCH_CACHE_H */
4998diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4999index 7caf25d..ee65ac5 100644
5000--- a/arch/cris/include/arch-v32/arch/cache.h
5001+++ b/arch/cris/include/arch-v32/arch/cache.h
5002@@ -1,11 +1,12 @@
5003 #ifndef _ASM_CRIS_ARCH_CACHE_H
5004 #define _ASM_CRIS_ARCH_CACHE_H
5005
5006+#include <linux/const.h>
5007 #include <arch/hwregs/dma.h>
5008
5009 /* A cache-line is 32 bytes. */
5010-#define L1_CACHE_BYTES 32
5011 #define L1_CACHE_SHIFT 5
5012+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5013
5014 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5015
5016diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5017index 102190a..5334cea 100644
5018--- a/arch/frv/include/asm/atomic.h
5019+++ b/arch/frv/include/asm/atomic.h
5020@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5021 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5022 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5023
5024+#define atomic64_read_unchecked(v) atomic64_read(v)
5025+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5026+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5027+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5028+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5029+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5030+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5031+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5032+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5033+
5034 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5035 {
5036 int c, old;
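
The block of `_unchecked` aliases exists because PAX_REFCOUNT makes the ordinary atomic_t/atomic64_t operations trap on signed overflow; counters that are allowed to wrap (statistics, error tallies) are converted to the `_unchecked` API, and on architectures such as frv and ia64 that carry no overflow instrumentation the aliases simply collapse back onto the plain ops. A self-contained toy model of the split, with a saturating check standing in for the hardware trap:

    #include <stdio.h>
    #include <limits.h>

    typedef struct { int counter; } atomic_t;            /* checked  */
    typedef struct { int counter; } atomic_unchecked_t;  /* may wrap */

    static void atomic_inc(atomic_t *v)
    {
        if (v->counter == INT_MAX) {      /* REFCOUNT analogue: refuse */
            fprintf(stderr, "refcount overflow detected\n");
            return;
        }
        v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        v->counter++;                     /* wrapping is acceptable    */
    }

    int main(void)
    {
        atomic_t ref = { INT_MAX };
        atomic_unchecked_t stat = { 0 };
        atomic_inc(&ref);                 /* detected, value preserved */
        atomic_inc_unchecked(&stat);      /* just counts               */
        printf("ref=%d stat=%d\n", ref.counter, stat.counter);
        return 0;
    }
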
5037diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5038index 2797163..c2a401df9 100644
5039--- a/arch/frv/include/asm/cache.h
5040+++ b/arch/frv/include/asm/cache.h
5041@@ -12,10 +12,11 @@
5042 #ifndef __ASM_CACHE_H
5043 #define __ASM_CACHE_H
5044
5045+#include <linux/const.h>
5046
5047 /* bytes per L1 cache line */
5048 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5049-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5050+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5051
5052 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5053 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5054diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5055index 43901f2..0d8b865 100644
5056--- a/arch/frv/include/asm/kmap_types.h
5057+++ b/arch/frv/include/asm/kmap_types.h
5058@@ -2,6 +2,6 @@
5059 #ifndef _ASM_KMAP_TYPES_H
5060 #define _ASM_KMAP_TYPES_H
5061
5062-#define KM_TYPE_NR 17
5063+#define KM_TYPE_NR 18
5064
5065 #endif
5066diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5067index 836f147..4cf23f5 100644
5068--- a/arch/frv/mm/elf-fdpic.c
5069+++ b/arch/frv/mm/elf-fdpic.c
5070@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5071 {
5072 struct vm_area_struct *vma;
5073 struct vm_unmapped_area_info info;
5074+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5075
5076 if (len > TASK_SIZE)
5077 return -ENOMEM;
5078@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5079 if (addr) {
5080 addr = PAGE_ALIGN(addr);
5081 vma = find_vma(current->mm, addr);
5082- if (TASK_SIZE - len >= addr &&
5083- (!vma || addr + len <= vma->vm_start))
5084+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5085 goto success;
5086 }
5087
5088@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5089 info.high_limit = (current->mm->start_stack - 0x00200000);
5090 info.align_mask = 0;
5091 info.align_offset = 0;
5092+ info.threadstack_offset = offset;
5093 addr = vm_unmapped_area(&info);
5094 if (!(addr & ~PAGE_MASK))
5095 goto success;
5096diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5097index 69952c18..4fa2908 100644
5098--- a/arch/hexagon/include/asm/cache.h
5099+++ b/arch/hexagon/include/asm/cache.h
5100@@ -21,9 +21,11 @@
5101 #ifndef __ASM_CACHE_H
5102 #define __ASM_CACHE_H
5103
5104+#include <linux/const.h>
5105+
5106 /* Bytes per L1 cache line */
5107-#define L1_CACHE_SHIFT (5)
5108-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5109+#define L1_CACHE_SHIFT 5
5110+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5111
5112 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5113
5114diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5115index 074e52b..76afdac 100644
5116--- a/arch/ia64/Kconfig
5117+++ b/arch/ia64/Kconfig
5118@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5119 config KEXEC
5120 bool "kexec system call"
5121 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5122+ depends on !GRKERNSEC_KMEM
5123 help
5124 kexec is a system call that implements the ability to shutdown your
5125 current kernel, and to start another kernel. It is like a reboot
5126diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5127index 970d0bd..e750b9b 100644
5128--- a/arch/ia64/Makefile
5129+++ b/arch/ia64/Makefile
5130@@ -98,5 +98,6 @@ endef
5131 archprepare: make_nr_irqs_h FORCE
5132 PHONY += make_nr_irqs_h FORCE
5133
5134+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5135 make_nr_irqs_h: FORCE
5136 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5137diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5138index 0bf0350..2ad1957 100644
5139--- a/arch/ia64/include/asm/atomic.h
5140+++ b/arch/ia64/include/asm/atomic.h
5141@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5142 #define atomic64_inc(v) atomic64_add(1, (v))
5143 #define atomic64_dec(v) atomic64_sub(1, (v))
5144
5145+#define atomic64_read_unchecked(v) atomic64_read(v)
5146+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5147+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5148+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5149+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5150+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5151+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5152+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5153+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5154+
5155 #endif /* _ASM_IA64_ATOMIC_H */
5156diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5157index f6769eb..1cdb590 100644
5158--- a/arch/ia64/include/asm/barrier.h
5159+++ b/arch/ia64/include/asm/barrier.h
5160@@ -66,7 +66,7 @@
5161 do { \
5162 compiletime_assert_atomic_type(*p); \
5163 barrier(); \
5164- ACCESS_ONCE(*p) = (v); \
5165+ ACCESS_ONCE_RW(*p) = (v); \
5166 } while (0)
5167
5168 #define smp_load_acquire(p) \
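
ACCESS_ONCE_RW is the write-side counterpart grsecurity introduces when it const-qualifies ACCESS_ONCE: a store through the const-qualified lvalue no longer compiles, so intentional stores (as in smp_store_release here) must opt in explicitly. The essential shape of the pair, roughly as the patch defines them in compiler.h (typeof is the GNU extension the kernel uses):

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))  /* read  */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* write */

    /* usage:
     *   v = ACCESS_ONCE(*p);      // fine
     *   ACCESS_ONCE(*p) = v;      // compile error: const lvalue
     *   ACCESS_ONCE_RW(*p) = v;   // explicit, audited store
     */
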
5169diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5170index 988254a..e1ee885 100644
5171--- a/arch/ia64/include/asm/cache.h
5172+++ b/arch/ia64/include/asm/cache.h
5173@@ -1,6 +1,7 @@
5174 #ifndef _ASM_IA64_CACHE_H
5175 #define _ASM_IA64_CACHE_H
5176
5177+#include <linux/const.h>
5178
5179 /*
5180 * Copyright (C) 1998-2000 Hewlett-Packard Co
5181@@ -9,7 +10,7 @@
5182
5183 /* Bytes per L1 (data) cache line. */
5184 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5185-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5186+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5187
5188 #ifdef CONFIG_SMP
5189 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5190diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5191index 5a83c5c..4d7f553 100644
5192--- a/arch/ia64/include/asm/elf.h
5193+++ b/arch/ia64/include/asm/elf.h
5194@@ -42,6 +42,13 @@
5195 */
5196 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5197
5198+#ifdef CONFIG_PAX_ASLR
5199+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5200+
5201+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5202+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5203+#endif
5204+
5205 #define PT_IA_64_UNWIND 0x70000001
5206
5207 /* IA-64 relocations: */
5208diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5209index 5767cdf..7462574 100644
5210--- a/arch/ia64/include/asm/pgalloc.h
5211+++ b/arch/ia64/include/asm/pgalloc.h
5212@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5213 pgd_val(*pgd_entry) = __pa(pud);
5214 }
5215
5216+static inline void
5217+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5218+{
5219+ pgd_populate(mm, pgd_entry, pud);
5220+}
5221+
5222 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5223 {
5224 return quicklist_alloc(0, GFP_KERNEL, NULL);
5225@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5226 pud_val(*pud_entry) = __pa(pmd);
5227 }
5228
5229+static inline void
5230+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5231+{
5232+ pud_populate(mm, pud_entry, pmd);
5233+}
5234+
5235 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5236 {
5237 return quicklist_alloc(0, GFP_KERNEL, NULL);
5238diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5239index 7b6f880..ac8e008 100644
5240--- a/arch/ia64/include/asm/pgtable.h
5241+++ b/arch/ia64/include/asm/pgtable.h
5242@@ -12,7 +12,7 @@
5243 * David Mosberger-Tang <davidm@hpl.hp.com>
5244 */
5245
5246-
5247+#include <linux/const.h>
5248 #include <asm/mman.h>
5249 #include <asm/page.h>
5250 #include <asm/processor.h>
5251@@ -139,6 +139,17 @@
5252 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5253 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5254 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5255+
5256+#ifdef CONFIG_PAX_PAGEEXEC
5257+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5258+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5259+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5260+#else
5261+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5262+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5263+# define PAGE_COPY_NOEXEC PAGE_COPY
5264+#endif
5265+
5266 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5267 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5268 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5269diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5270index 45698cd..e8e2dbc 100644
5271--- a/arch/ia64/include/asm/spinlock.h
5272+++ b/arch/ia64/include/asm/spinlock.h
5273@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5274 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5275
5276 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5277- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5278+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5279 }
5280
5281 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5282diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5283index 4f3fb6cc..254055e 100644
5284--- a/arch/ia64/include/asm/uaccess.h
5285+++ b/arch/ia64/include/asm/uaccess.h
5286@@ -70,6 +70,7 @@
5287 && ((segment).seg == KERNEL_DS.seg \
5288 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5289 })
5290+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5291 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5292
5293 /*
5294@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5295 static inline unsigned long
5296 __copy_to_user (void __user *to, const void *from, unsigned long count)
5297 {
5298+ if (count > INT_MAX)
5299+ return count;
5300+
5301+ if (!__builtin_constant_p(count))
5302+ check_object_size(from, count, true);
5303+
5304 return __copy_user(to, (__force void __user *) from, count);
5305 }
5306
5307 static inline unsigned long
5308 __copy_from_user (void *to, const void __user *from, unsigned long count)
5309 {
5310+ if (count > INT_MAX)
5311+ return count;
5312+
5313+ if (!__builtin_constant_p(count))
5314+ check_object_size(to, count, false);
5315+
5316 return __copy_user((__force void __user *) to, from, count);
5317 }
5318
5319@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5320 ({ \
5321 void __user *__cu_to = (to); \
5322 const void *__cu_from = (from); \
5323- long __cu_len = (n); \
5324+ unsigned long __cu_len = (n); \
5325 \
5326- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5327+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5328+ if (!__builtin_constant_p(n)) \
5329+ check_object_size(__cu_from, __cu_len, true); \
5330 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5331+ } \
5332 __cu_len; \
5333 })
5334
5335@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5336 ({ \
5337 void *__cu_to = (to); \
5338 const void __user *__cu_from = (from); \
5339- long __cu_len = (n); \
5340+ unsigned long __cu_len = (n); \
5341 \
5342 __chk_user_ptr(__cu_from); \
5343- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5344+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5345+ if (!__builtin_constant_p(n)) \
5346+ check_object_size(__cu_to, __cu_len, false); \
5347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5348+ } \
5349 __cu_len; \
5350 })
5351
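
Two independent hardenings land in these copy helpers: lengths above INT_MAX are refused outright (catching negative sizes that became huge after conversion to unsigned), and when the length is not a compile-time constant, check_object_size() (PAX_USERCOPY) validates that the kernel buffer really spans the requested range. A userspace sketch of that control flow, with hardened_copy as a hypothetical helper and a plain bounds argument standing in for the slab/stack walk check_object_size() actually performs:

    #include <limits.h>
    #include <stddef.h>
    #include <string.h>

    /* returns bytes NOT copied, mirroring the kernel convention */
    static size_t hardened_copy(void *dst, const void *src, size_t n,
                                size_t src_object_size)
    {
        if (n > INT_MAX)            /* bogus (likely negative) length   */
            return n;
        if (n > src_object_size)    /* stand-in for check_object_size() */
            return n;
        memcpy(dst, src, n);
        return 0;
    }

    int main(void)
    {
        char src[8] = "secret", dst[8];
        return (int)hardened_copy(dst, src, sizeof(src), sizeof(src));
    }
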
5352diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5353index 29754aa..06d2838 100644
5354--- a/arch/ia64/kernel/module.c
5355+++ b/arch/ia64/kernel/module.c
5356@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5357 }
5358
5359 static inline int
5360+in_init_rx (const struct module *mod, uint64_t addr)
5361+{
5362+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5363+}
5364+
5365+static inline int
5366+in_init_rw (const struct module *mod, uint64_t addr)
5367+{
5368+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5369+}
5370+
5371+static inline int
5372 in_init (const struct module *mod, uint64_t addr)
5373 {
5374- return addr - (uint64_t) mod->module_init < mod->init_size;
5375+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5376+}
5377+
5378+static inline int
5379+in_core_rx (const struct module *mod, uint64_t addr)
5380+{
5381+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5382+}
5383+
5384+static inline int
5385+in_core_rw (const struct module *mod, uint64_t addr)
5386+{
5387+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5388 }
5389
5390 static inline int
5391 in_core (const struct module *mod, uint64_t addr)
5392 {
5393- return addr - (uint64_t) mod->module_core < mod->core_size;
5394+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5395 }
5396
5397 static inline int
5398@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5399 break;
5400
5401 case RV_BDREL:
5402- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5403+ if (in_init_rx(mod, val))
5404+ val -= (uint64_t) mod->module_init_rx;
5405+ else if (in_init_rw(mod, val))
5406+ val -= (uint64_t) mod->module_init_rw;
5407+ else if (in_core_rx(mod, val))
5408+ val -= (uint64_t) mod->module_core_rx;
5409+ else if (in_core_rw(mod, val))
5410+ val -= (uint64_t) mod->module_core_rw;
5411 break;
5412
5413 case RV_LTV:
5414@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5415 * addresses have been selected...
5416 */
5417 uint64_t gp;
5418- if (mod->core_size > MAX_LTOFF)
5419+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5420 /*
5421 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5422 * at the end of the module.
5423 */
5424- gp = mod->core_size - MAX_LTOFF / 2;
5425+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5426 else
5427- gp = mod->core_size / 2;
5428- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5429+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5430+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5431 mod->arch.gp = gp;
5432 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5433 }
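
Under KERNEXEC a module no longer occupies one init and one core allocation; each is split into an RX (code) half and an RW (data) half, so the old two membership tests become four, and the gp placement has to budget for core_size_rx plus core_size_rw. Each in_*_rx/rw helper uses the classic single-comparison bounds test, which works because the subtraction is unsigned; an illustrative standalone version:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* addr below base wraps to a huge value, so one compare
     * checks both ends of the range */
    static bool in_range(uint64_t addr, uint64_t base, uint64_t size)
    {
        return addr - base < size;
    }

    int main(void)
    {
        printf("%d %d\n", in_range(0x1008, 0x1000, 0x100),   /* 1 */
                          in_range(0x0fff, 0x1000, 0x100));  /* 0 */
        return 0;
    }
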
5434diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5435index c39c3cd..3c77738 100644
5436--- a/arch/ia64/kernel/palinfo.c
5437+++ b/arch/ia64/kernel/palinfo.c
5438@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5439 return NOTIFY_OK;
5440 }
5441
5442-static struct notifier_block __refdata palinfo_cpu_notifier =
5443+static struct notifier_block palinfo_cpu_notifier =
5444 {
5445 .notifier_call = palinfo_cpu_callback,
5446 .priority = 0,
5447diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5448index 41e33f8..65180b2a 100644
5449--- a/arch/ia64/kernel/sys_ia64.c
5450+++ b/arch/ia64/kernel/sys_ia64.c
5451@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5452 unsigned long align_mask = 0;
5453 struct mm_struct *mm = current->mm;
5454 struct vm_unmapped_area_info info;
5455+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5456
5457 if (len > RGN_MAP_LIMIT)
5458 return -ENOMEM;
5459@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5460 if (REGION_NUMBER(addr) == RGN_HPAGE)
5461 addr = 0;
5462 #endif
5463+
5464+#ifdef CONFIG_PAX_RANDMMAP
5465+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5466+ addr = mm->free_area_cache;
5467+ else
5468+#endif
5469+
5470 if (!addr)
5471 addr = TASK_UNMAPPED_BASE;
5472
5473@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5474 info.high_limit = TASK_SIZE;
5475 info.align_mask = align_mask;
5476 info.align_offset = 0;
5477+ info.threadstack_offset = offset;
5478 return vm_unmapped_area(&info);
5479 }
5480
5481diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5482index 84f8a52..7c76178 100644
5483--- a/arch/ia64/kernel/vmlinux.lds.S
5484+++ b/arch/ia64/kernel/vmlinux.lds.S
5485@@ -192,7 +192,7 @@ SECTIONS {
5486 /* Per-cpu data: */
5487 . = ALIGN(PERCPU_PAGE_SIZE);
5488 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5489- __phys_per_cpu_start = __per_cpu_load;
5490+ __phys_per_cpu_start = per_cpu_load;
5491 /*
5492 * ensure percpu data fits
5493 * into percpu page size
5494diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5495index ba5ba7a..36e9d3a 100644
5496--- a/arch/ia64/mm/fault.c
5497+++ b/arch/ia64/mm/fault.c
5498@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5499 return pte_present(pte);
5500 }
5501
5502+#ifdef CONFIG_PAX_PAGEEXEC
5503+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5504+{
5505+ unsigned long i;
5506+
5507+ printk(KERN_ERR "PAX: bytes at PC: ");
5508+ for (i = 0; i < 8; i++) {
5509+ unsigned int c;
5510+ if (get_user(c, (unsigned int *)pc+i))
5511+ printk(KERN_CONT "???????? ");
5512+ else
5513+ printk(KERN_CONT "%08x ", c);
5514+ }
5515+ printk("\n");
5516+}
5517+#endif
5518+
5519 # define VM_READ_BIT 0
5520 # define VM_WRITE_BIT 1
5521 # define VM_EXEC_BIT 2
5522@@ -151,8 +168,21 @@ retry:
5523 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5524 goto bad_area;
5525
5526- if ((vma->vm_flags & mask) != mask)
5527+ if ((vma->vm_flags & mask) != mask) {
5528+
5529+#ifdef CONFIG_PAX_PAGEEXEC
5530+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5531+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5532+ goto bad_area;
5533+
5534+ up_read(&mm->mmap_sem);
5535+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5536+ do_group_exit(SIGKILL);
5537+ }
5538+#endif
5539+
5540 goto bad_area;
5541+ }
5542
5543 /*
5544 * If for any reason at all we couldn't handle the fault, make
5545diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5546index 52b7604b..455cb85 100644
5547--- a/arch/ia64/mm/hugetlbpage.c
5548+++ b/arch/ia64/mm/hugetlbpage.c
5549@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5550 unsigned long pgoff, unsigned long flags)
5551 {
5552 struct vm_unmapped_area_info info;
5553+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5554
5555 if (len > RGN_MAP_LIMIT)
5556 return -ENOMEM;
5557@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5558 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5559 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5560 info.align_offset = 0;
5561+ info.threadstack_offset = offset;
5562 return vm_unmapped_area(&info);
5563 }
5564
5565diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5566index 6b33457..88b5124 100644
5567--- a/arch/ia64/mm/init.c
5568+++ b/arch/ia64/mm/init.c
5569@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5570 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5571 vma->vm_end = vma->vm_start + PAGE_SIZE;
5572 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5573+
5574+#ifdef CONFIG_PAX_PAGEEXEC
5575+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5576+ vma->vm_flags &= ~VM_EXEC;
5577+
5578+#ifdef CONFIG_PAX_MPROTECT
5579+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5580+ vma->vm_flags &= ~VM_MAYEXEC;
5581+#endif
5582+
5583+ }
5584+#endif
5585+
5586 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5587 down_write(&current->mm->mmap_sem);
5588 if (insert_vm_struct(current->mm, vma)) {
5589@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5590 gate_vma.vm_start = FIXADDR_USER_START;
5591 gate_vma.vm_end = FIXADDR_USER_END;
5592 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5593- gate_vma.vm_page_prot = __P101;
5594+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5595
5596 return 0;
5597 }
5598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5599index 40b3ee98..8c2c112 100644
5600--- a/arch/m32r/include/asm/cache.h
5601+++ b/arch/m32r/include/asm/cache.h
5602@@ -1,8 +1,10 @@
5603 #ifndef _ASM_M32R_CACHE_H
5604 #define _ASM_M32R_CACHE_H
5605
5606+#include <linux/const.h>
5607+
5608 /* L1 cache line size */
5609 #define L1_CACHE_SHIFT 4
5610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5612
5613 #endif /* _ASM_M32R_CACHE_H */
5614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5615index 82abd15..d95ae5d 100644
5616--- a/arch/m32r/lib/usercopy.c
5617+++ b/arch/m32r/lib/usercopy.c
5618@@ -14,6 +14,9 @@
5619 unsigned long
5620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5621 {
5622+ if ((long)n < 0)
5623+ return n;
5624+
5625 prefetch(from);
5626 if (access_ok(VERIFY_WRITE, to, n))
5627 __copy_user(to,from,n);
5628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5629 unsigned long
5630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5631 {
5632+ if ((long)n < 0)
5633+ return n;
5634+
5635 prefetchw(to);
5636 if (access_ok(VERIFY_READ, from, n))
5637 __copy_user_zeroing(to,from,n);
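
The `(long)n < 0` guard is the cheap version of the INT_MAX clamp seen on ia64: a length that was negative in signed arithmetic arrives here as an enormous unsigned value, and reinterpreting it as long exposes that before access_ok() and the copy run. Demonstrably:

    #include <stdio.h>

    int main(void)
    {
        unsigned long n = (unsigned long)-12;  /* negative length gone huge */
        if ((long)n < 0)
            printf("rejected bogus length %lu\n", n);
        return 0;
    }
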
5638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5639index 0395c51..5f26031 100644
5640--- a/arch/m68k/include/asm/cache.h
5641+++ b/arch/m68k/include/asm/cache.h
5642@@ -4,9 +4,11 @@
5643 #ifndef __ARCH_M68K_CACHE_H
5644 #define __ARCH_M68K_CACHE_H
5645
5646+#include <linux/const.h>
5647+
5648 /* bytes per L1 cache line */
5649 #define L1_CACHE_SHIFT 4
5650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5652
5653 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5654
5655diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5656index d703d8e..a8e2d70 100644
5657--- a/arch/metag/include/asm/barrier.h
5658+++ b/arch/metag/include/asm/barrier.h
5659@@ -90,7 +90,7 @@ static inline void fence(void)
5660 do { \
5661 compiletime_assert_atomic_type(*p); \
5662 smp_mb(); \
5663- ACCESS_ONCE(*p) = (v); \
5664+ ACCESS_ONCE_RW(*p) = (v); \
5665 } while (0)
5666
5667 #define smp_load_acquire(p) \
5668diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5669index 7ca80ac..794ba72 100644
5670--- a/arch/metag/mm/hugetlbpage.c
5671+++ b/arch/metag/mm/hugetlbpage.c
5672@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5673 info.high_limit = TASK_SIZE;
5674 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5675 info.align_offset = 0;
5676+ info.threadstack_offset = 0;
5677 return vm_unmapped_area(&info);
5678 }
5679
5680diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5681index 4efe96a..60e8699 100644
5682--- a/arch/microblaze/include/asm/cache.h
5683+++ b/arch/microblaze/include/asm/cache.h
5684@@ -13,11 +13,12 @@
5685 #ifndef _ASM_MICROBLAZE_CACHE_H
5686 #define _ASM_MICROBLAZE_CACHE_H
5687
5688+#include <linux/const.h>
5689 #include <asm/registers.h>
5690
5691 #define L1_CACHE_SHIFT 5
5692 /* word-granular cache in microblaze */
5693-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5694+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5695
5696 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5697
5698diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5699index 1a313c4..f27b613 100644
5700--- a/arch/mips/Kconfig
5701+++ b/arch/mips/Kconfig
5702@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
5703
5704 config KEXEC
5705 bool "Kexec system call"
5706+ depends on !GRKERNSEC_KMEM
5707 help
5708 kexec is a system call that implements the ability to shutdown your
5709 current kernel, and to start another kernel. It is like a reboot
5710diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5711index d8960d4..77dbd31 100644
5712--- a/arch/mips/cavium-octeon/dma-octeon.c
5713+++ b/arch/mips/cavium-octeon/dma-octeon.c
5714@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5715 if (dma_release_from_coherent(dev, order, vaddr))
5716 return;
5717
5718- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5719+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5720 }
5721
5722 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5723diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5724index 26d4363..3c9a82e 100644
5725--- a/arch/mips/include/asm/atomic.h
5726+++ b/arch/mips/include/asm/atomic.h
5727@@ -22,15 +22,39 @@
5728 #include <asm/cmpxchg.h>
5729 #include <asm/war.h>
5730
5731+#ifdef CONFIG_GENERIC_ATOMIC64
5732+#include <asm-generic/atomic64.h>
5733+#endif
5734+
5735 #define ATOMIC_INIT(i) { (i) }
5736
5737+#ifdef CONFIG_64BIT
5738+#define _ASM_EXTABLE(from, to) \
5739+" .section __ex_table,\"a\"\n" \
5740+" .dword " #from ", " #to"\n" \
5741+" .previous\n"
5742+#else
5743+#define _ASM_EXTABLE(from, to) \
5744+" .section __ex_table,\"a\"\n" \
5745+" .word " #from ", " #to"\n" \
5746+" .previous\n"
5747+#endif
5748+
5749 /*
5750 * atomic_read - read atomic variable
5751 * @v: pointer of type atomic_t
5752 *
5753 * Atomically reads the value of @v.
5754 */
5755-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5756+static inline int atomic_read(const atomic_t *v)
5757+{
5758+ return ACCESS_ONCE(v->counter);
5759+}
5760+
5761+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5762+{
5763+ return ACCESS_ONCE(v->counter);
5764+}
5765
5766 /*
5767 * atomic_set - set atomic variable
5768@@ -39,47 +63,77 @@
5769 *
5770 * Atomically sets the value of @v to @i.
5771 */
5772-#define atomic_set(v, i) ((v)->counter = (i))
5773+static inline void atomic_set(atomic_t *v, int i)
5774+{
5775+ v->counter = i;
5776+}
5777
5778-#define ATOMIC_OP(op, c_op, asm_op) \
5779-static __inline__ void atomic_##op(int i, atomic_t * v) \
5780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5781+{
5782+ v->counter = i;
5783+}
5784+
5785+#ifdef CONFIG_PAX_REFCOUNT
5786+#define __OVERFLOW_POST \
5787+ " b 4f \n" \
5788+ " .set noreorder \n" \
5789+ "3: b 5f \n" \
5790+ " move %0, %1 \n" \
5791+ " .set reorder \n"
5792+#define __OVERFLOW_EXTABLE \
5793+ "3:\n" \
5794+ _ASM_EXTABLE(2b, 3b)
5795+#else
5796+#define __OVERFLOW_POST
5797+#define __OVERFLOW_EXTABLE
5798+#endif
5799+
5800+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5801+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5802 { \
5803 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5804 int temp; \
5805 \
5806 __asm__ __volatile__( \
5807- " .set arch=r4000 \n" \
5808- "1: ll %0, %1 # atomic_" #op " \n" \
5809- " " #asm_op " %0, %2 \n" \
5810+ " .set mips3 \n" \
5811+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5812+ "2: " #asm_op " %0, %2 \n" \
5813 " sc %0, %1 \n" \
5814 " beqzl %0, 1b \n" \
5815+ extable \
5816 " .set mips0 \n" \
5817 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5818 : "Ir" (i)); \
5819 } else if (kernel_uses_llsc) { \
5820 int temp; \
5821 \
5822- do { \
5823- __asm__ __volatile__( \
5824- " .set "MIPS_ISA_LEVEL" \n" \
5825- " ll %0, %1 # atomic_" #op "\n" \
5826- " " #asm_op " %0, %2 \n" \
5827- " sc %0, %1 \n" \
5828- " .set mips0 \n" \
5829- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5830- : "Ir" (i)); \
5831- } while (unlikely(!temp)); \
5832+ __asm__ __volatile__( \
5833+ " .set "MIPS_ISA_LEVEL" \n" \
5834+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5835+ "2: " #asm_op " %0, %2 \n" \
5836+ " sc %0, %1 \n" \
5837+ " beqz %0, 1b \n" \
5838+ extable \
5839+ " .set mips0 \n" \
5840+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5841+ : "Ir" (i)); \
5842 } else { \
5843 unsigned long flags; \
5844 \
5845 raw_local_irq_save(flags); \
5846- v->counter c_op i; \
5847+ __asm__ __volatile__( \
5848+ "2: " #asm_op " %0, %1 \n" \
5849+ extable \
5850+ : "+r" (v->counter) : "Ir" (i)); \
5851 raw_local_irq_restore(flags); \
5852 } \
5853 }
5854
5855-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5856-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5857+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5858+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5859+
5860+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5861+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5862 { \
5863 int result; \
5864 \
5865@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5866 int temp; \
5867 \
5868 __asm__ __volatile__( \
5869- " .set arch=r4000 \n" \
5870- "1: ll %1, %2 # atomic_" #op "_return \n" \
5871- " " #asm_op " %0, %1, %3 \n" \
5872+ " .set mips3 \n" \
5873+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5874+ "2: " #asm_op " %0, %1, %3 \n" \
5875 " sc %0, %2 \n" \
5876 " beqzl %0, 1b \n" \
5877- " " #asm_op " %0, %1, %3 \n" \
5878+ post_op \
5879+ extable \
5880+ "4: " #asm_op " %0, %1, %3 \n" \
5881+ "5: \n" \
5882 " .set mips0 \n" \
5883 : "=&r" (result), "=&r" (temp), \
5884 "+" GCC_OFF_SMALL_ASM() (v->counter) \
5885@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5886 } else if (kernel_uses_llsc) { \
5887 int temp; \
5888 \
5889- do { \
5890- __asm__ __volatile__( \
5891- " .set "MIPS_ISA_LEVEL" \n" \
5892- " ll %1, %2 # atomic_" #op "_return \n" \
5893- " " #asm_op " %0, %1, %3 \n" \
5894- " sc %0, %2 \n" \
5895- " .set mips0 \n" \
5896- : "=&r" (result), "=&r" (temp), \
5897- "+" GCC_OFF_SMALL_ASM() (v->counter) \
5898- : "Ir" (i)); \
5899- } while (unlikely(!result)); \
5900+ __asm__ __volatile__( \
5901+ " .set "MIPS_ISA_LEVEL" \n" \
5902+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5903+ "2: " #asm_op " %0, %1, %3 \n" \
5904+ " sc %0, %2 \n" \
5905+ post_op \
5906+ extable \
5907+ "4: " #asm_op " %0, %1, %3 \n" \
5908+ "5: \n" \
5909+ " .set mips0 \n" \
5910+ : "=&r" (result), "=&r" (temp), \
5911+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
5912+ : "Ir" (i)); \
5913 \
5914 result = temp; result c_op i; \
5915 } else { \
5916 unsigned long flags; \
5917 \
5918 raw_local_irq_save(flags); \
5919- result = v->counter; \
5920- result c_op i; \
5921- v->counter = result; \
5922+ __asm__ __volatile__( \
5923+ " lw %0, %1 \n" \
5924+ "2: " #asm_op " %0, %1, %2 \n" \
5925+ " sw %0, %1 \n" \
5926+ "3: \n" \
5927+ extable \
5928+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5929+ : "Ir" (i)); \
5930 raw_local_irq_restore(flags); \
5931 } \
5932 \
5933@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5934 return result; \
5935 }
5936
5937-#define ATOMIC_OPS(op, c_op, asm_op) \
5938- ATOMIC_OP(op, c_op, asm_op) \
5939- ATOMIC_OP_RETURN(op, c_op, asm_op)
5940+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5941+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5942
5943-ATOMIC_OPS(add, +=, addu)
5944-ATOMIC_OPS(sub, -=, subu)
5945+#define ATOMIC_OPS(op, asm_op) \
5946+ ATOMIC_OP(op, asm_op) \
5947+ ATOMIC_OP_RETURN(op, asm_op)
5948+
5949+ATOMIC_OPS(add, add)
5950+ATOMIC_OPS(sub, sub)
5951
5952 #undef ATOMIC_OPS
5953 #undef ATOMIC_OP_RETURN
5954+#undef __ATOMIC_OP_RETURN
5955 #undef ATOMIC_OP
5956+#undef __ATOMIC_OP
5957
5958 /*
5959 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5960@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5961 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5962 * The function returns the old value of @v minus @i.
5963 */
5964-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5965+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5966 {
5967 int result;
5968
5969@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5970 int temp;
5971
5972 __asm__ __volatile__(
5973- " .set arch=r4000 \n"
5974+ " .set "MIPS_ISA_LEVEL" \n"
5975 "1: ll %1, %2 # atomic_sub_if_positive\n"
5976 " subu %0, %1, %3 \n"
5977 " bltz %0, 1f \n"
5978@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5979 return result;
5980 }
5981
5982-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5983-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5984+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5985+{
5986+ return cmpxchg(&v->counter, old, new);
5987+}
5988+
5989+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5990+ int new)
5991+{
5992+ return cmpxchg(&(v->counter), old, new);
5993+}
5994+
5995+static inline int atomic_xchg(atomic_t *v, int new)
5996+{
5997+ return xchg(&v->counter, new);
5998+}
5999+
6000+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6001+{
6002+ return xchg(&(v->counter), new);
6003+}
6004
6005 /**
6006 * __atomic_add_unless - add unless the number is a given value
6007@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6008
6009 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6010 #define atomic_inc_return(v) atomic_add_return(1, (v))
6011+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6012+{
6013+ return atomic_add_return_unchecked(1, v);
6014+}
6015
6016 /*
6017 * atomic_sub_and_test - subtract value from variable and test result
6018@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6019 * other cases.
6020 */
6021 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6022+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6023+{
6024+ return atomic_add_return_unchecked(1, v) == 0;
6025+}
6026
6027 /*
6028 * atomic_dec_and_test - decrement by 1 and test
6029@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6030 * Atomically increments @v by 1.
6031 */
6032 #define atomic_inc(v) atomic_add(1, (v))
6033+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6034+{
6035+ atomic_add_unchecked(1, v);
6036+}
6037
6038 /*
6039 * atomic_dec - decrement and test
6040@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6041 * Atomically decrements @v by 1.
6042 */
6043 #define atomic_dec(v) atomic_sub(1, (v))
6044+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6045+{
6046+ atomic_sub_unchecked(1, v);
6047+}
6048
6049 /*
6050 * atomic_add_negative - add and test if negative
6051@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6052 * @v: pointer of type atomic64_t
6053 *
6054 */
6055-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6056+static inline long atomic64_read(const atomic64_t *v)
6057+{
6058+ return ACCESS_ONCE(v->counter);
6059+}
6060+
6061+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6062+{
6063+ return ACCESS_ONCE(v->counter);
6064+}
6065
6066 /*
6067 * atomic64_set - set atomic variable
6068 * @v: pointer of type atomic64_t
6069 * @i: required value
6070 */
6071-#define atomic64_set(v, i) ((v)->counter = (i))
6072+static inline void atomic64_set(atomic64_t *v, long i)
6073+{
6074+ v->counter = i;
6075+}
6076
6077-#define ATOMIC64_OP(op, c_op, asm_op) \
6078-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6079+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6080+{
6081+ v->counter = i;
6082+}
6083+
6084+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6085+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6086 { \
6087 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6088 long temp; \
6089 \
6090 __asm__ __volatile__( \
6091- " .set arch=r4000 \n" \
6092- "1: lld %0, %1 # atomic64_" #op " \n" \
6093- " " #asm_op " %0, %2 \n" \
6094+ " .set "MIPS_ISA_LEVEL" \n" \
6095+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6096+ "2: " #asm_op " %0, %2 \n" \
6097 " scd %0, %1 \n" \
6098 " beqzl %0, 1b \n" \
6099+ extable \
6100 " .set mips0 \n" \
6101 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6102 : "Ir" (i)); \
6103 } else if (kernel_uses_llsc) { \
6104 long temp; \
6105 \
6106- do { \
6107- __asm__ __volatile__( \
6108- " .set "MIPS_ISA_LEVEL" \n" \
6109- " lld %0, %1 # atomic64_" #op "\n" \
6110- " " #asm_op " %0, %2 \n" \
6111- " scd %0, %1 \n" \
6112- " .set mips0 \n" \
6113- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6114- : "Ir" (i)); \
6115- } while (unlikely(!temp)); \
6116+ __asm__ __volatile__( \
6117+ " .set "MIPS_ISA_LEVEL" \n" \
6118+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6119+ "2: " #asm_op " %0, %2 \n" \
6120+ " scd %0, %1 \n" \
6121+ " beqz %0, 1b \n" \
6122+ extable \
6123+ " .set mips0 \n" \
6124+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6125+ : "Ir" (i)); \
6126 } else { \
6127 unsigned long flags; \
6128 \
6129 raw_local_irq_save(flags); \
6130- v->counter c_op i; \
6131+ __asm__ __volatile__( \
6132+ "2: " #asm_op " %0, %1 \n" \
6133+ extable \
6134+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6135 raw_local_irq_restore(flags); \
6136 } \
6137 }
6138
6139-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6140-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6141+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6142+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6143+
6144+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6145+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6146 { \
6147 long result; \
6148 \
6149@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6150 long temp; \
6151 \
6152 __asm__ __volatile__( \
6153- " .set arch=r4000 \n" \
6154+ " .set mips3 \n" \
6155 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6156- " " #asm_op " %0, %1, %3 \n" \
6157+ "2: " #asm_op " %0, %1, %3 \n" \
6158 " scd %0, %2 \n" \
6159 " beqzl %0, 1b \n" \
6160- " " #asm_op " %0, %1, %3 \n" \
6161+ post_op \
6162+ extable \
6163+ "4: " #asm_op " %0, %1, %3 \n" \
6164+ "5: \n" \
6165 " .set mips0 \n" \
6166 : "=&r" (result), "=&r" (temp), \
6167 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6168@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6169 } else if (kernel_uses_llsc) { \
6170 long temp; \
6171 \
6172- do { \
6173- __asm__ __volatile__( \
6174- " .set "MIPS_ISA_LEVEL" \n" \
6175- " lld %1, %2 # atomic64_" #op "_return\n" \
6176- " " #asm_op " %0, %1, %3 \n" \
6177- " scd %0, %2 \n" \
6178- " .set mips0 \n" \
6179- : "=&r" (result), "=&r" (temp), \
6180- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6181- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6182- : "memory"); \
6183- } while (unlikely(!result)); \
6184+ __asm__ __volatile__( \
6185+ " .set "MIPS_ISA_LEVEL" \n" \
6186+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6187+ "2: " #asm_op " %0, %1, %3 \n" \
6188+ " scd %0, %2 \n" \
6189+ " beqz %0, 1b \n" \
6190+ post_op \
6191+ extable \
6192+ "4: " #asm_op " %0, %1, %3 \n" \
6193+ "5: \n" \
6194+ " .set mips0 \n" \
6195+ : "=&r" (result), "=&r" (temp), \
6196+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6197+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6198+ : "memory"); \
6199 \
6200 result = temp; result c_op i; \
6201 } else { \
6202 unsigned long flags; \
6203 \
6204 raw_local_irq_save(flags); \
6205- result = v->counter; \
6206- result c_op i; \
6207- v->counter = result; \
6208+ __asm__ __volatile__( \
6209+ " ld %0, %1 \n" \
6210+ "2: " #asm_op " %0, %1, %2 \n" \
6211+ " sd %0, %1 \n" \
6212+ "3: \n" \
6213+ extable \
6214+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6215+ : "Ir" (i)); \
6216 raw_local_irq_restore(flags); \
6217 } \
6218 \
6219@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6220 return result; \
6221 }
6222
6223-#define ATOMIC64_OPS(op, c_op, asm_op) \
6224- ATOMIC64_OP(op, c_op, asm_op) \
6225- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6226+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6227+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6228
6229-ATOMIC64_OPS(add, +=, daddu)
6230-ATOMIC64_OPS(sub, -=, dsubu)
6231+#define ATOMIC64_OPS(op, asm_op) \
6232+ ATOMIC64_OP(op, asm_op) \
6233+ ATOMIC64_OP_RETURN(op, asm_op)
6234+
6235+ATOMIC64_OPS(add, dadd)
6236+ATOMIC64_OPS(sub, dsub)
6237
6238 #undef ATOMIC64_OPS
6239 #undef ATOMIC64_OP_RETURN
6240+#undef __ATOMIC64_OP_RETURN
6241 #undef ATOMIC64_OP
6242+#undef __ATOMIC64_OP
6243+#undef __OVERFLOW_EXTABLE
6244+#undef __OVERFLOW_POST
6245
6246 /*
6247 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6248@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6249 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6250 * The function returns the old value of @v minus @i.
6251 */
6252-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6253+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6254 {
6255 long result;
6256
6257@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6258 long temp;
6259
6260 __asm__ __volatile__(
6261- " .set arch=r4000 \n"
6262+ " .set "MIPS_ISA_LEVEL" \n"
6263 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6264 " dsubu %0, %1, %3 \n"
6265 " bltz %0, 1f \n"
6266@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6267 return result;
6268 }
6269
6270-#define atomic64_cmpxchg(v, o, n) \
6271- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6272-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6273+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6274+{
6275+ return cmpxchg(&v->counter, old, new);
6276+}
6277+
6278+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6279+ long new)
6280+{
6281+ return cmpxchg(&(v->counter), old, new);
6282+}
6283+
6284+static inline long atomic64_xchg(atomic64_t *v, long new)
6285+{
6286+ return xchg(&v->counter, new);
6287+}
6288+
6289+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6290+{
6291+ return xchg(&(v->counter), new);
6292+}
6293
6294 /**
6295 * atomic64_add_unless - add unless the number is a given value
6296@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6297
6298 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6299 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6300+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6301
6302 /*
6303 * atomic64_sub_and_test - subtract value from variable and test result
6304@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305 * other cases.
6306 */
6307 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6308+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6309
6310 /*
6311 * atomic64_dec_and_test - decrement by 1 and test
6312@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * Atomically increments @v by 1.
6314 */
6315 #define atomic64_inc(v) atomic64_add(1, (v))
6316+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6317
6318 /*
6319 * atomic64_dec - decrement and test
6320@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically decrements @v by 1.
6322 */
6323 #define atomic64_dec(v) atomic64_sub(1, (v))
6324+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_add_negative - add and test if negative
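
This is the heart of PAX_REFCOUNT on MIPS: the checked variants drop the wrapping `addu`/`daddu` for the trapping signed `add`/`dadd` (the `asm_op##u` paste keeps the `_unchecked` variants on the wrapping forms), a signed overflow raises an integer-overflow exception, and the `_ASM_EXTABLE` entry is meant to steer the fixup into the `__OVERFLOW_POST` recovery code so the counter is left at its pre-overflow value. A portable analogue of that detect-and-refuse behaviour, using the GCC/Clang overflow builtin in place of the trap:

    #include <stdio.h>

    /* trapping dadd + exception-table fixup, rendered portably */
    static int checked_add(int *counter, int i)
    {
        int new;
        if (__builtin_add_overflow(*counter, i, &new))
            return *counter;   /* overflow: keep old value, report */
        *counter = new;
        return new;
    }

    int main(void)
    {
        int c = 0x7fffffff;
        printf("%d\n", checked_add(&c, 1));  /* stays at INT_MAX */
        return 0;
    }
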
6328diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6329index 2b8bbbc..4556df6 100644
6330--- a/arch/mips/include/asm/barrier.h
6331+++ b/arch/mips/include/asm/barrier.h
6332@@ -133,7 +133,7 @@
6333 do { \
6334 compiletime_assert_atomic_type(*p); \
6335 smp_mb(); \
6336- ACCESS_ONCE(*p) = (v); \
6337+ ACCESS_ONCE_RW(*p) = (v); \
6338 } while (0)
6339
6340 #define smp_load_acquire(p) \
6341diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6342index b4db69f..8f3b093 100644
6343--- a/arch/mips/include/asm/cache.h
6344+++ b/arch/mips/include/asm/cache.h
6345@@ -9,10 +9,11 @@
6346 #ifndef _ASM_CACHE_H
6347 #define _ASM_CACHE_H
6348
6349+#include <linux/const.h>
6350 #include <kmalloc.h>
6351
6352 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6353-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6354+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6355
6356 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6357 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6358diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6359index 694925a..990fa62 100644
6360--- a/arch/mips/include/asm/elf.h
6361+++ b/arch/mips/include/asm/elf.h
6362@@ -410,15 +410,18 @@ extern const char *__elf_platform;
6363 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6364 #endif
6365
6366+#ifdef CONFIG_PAX_ASLR
6367+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6368+
6369+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6370+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6371+#endif
6372+
6373 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6374 struct linux_binprm;
6375 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6376 int uses_interp);
6377
6378-struct mm_struct;
6379-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6380-#define arch_randomize_brk arch_randomize_brk
6381-
6382 struct arch_elf_state {
6383 int fp_abi;
6384 int interp_fp_abi;
6385diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6386index c1f6afa..38cc6e9 100644
6387--- a/arch/mips/include/asm/exec.h
6388+++ b/arch/mips/include/asm/exec.h
6389@@ -12,6 +12,6 @@
6390 #ifndef _ASM_EXEC_H
6391 #define _ASM_EXEC_H
6392
6393-extern unsigned long arch_align_stack(unsigned long sp);
6394+#define arch_align_stack(x) ((x) & ~0xfUL)
6395
6396 #endif /* _ASM_EXEC_H */
6397diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6398index 9e8ef59..1139d6b 100644
6399--- a/arch/mips/include/asm/hw_irq.h
6400+++ b/arch/mips/include/asm/hw_irq.h
6401@@ -10,7 +10,7 @@
6402
6403 #include <linux/atomic.h>
6404
6405-extern atomic_t irq_err_count;
6406+extern atomic_unchecked_t irq_err_count;
6407
6408 /*
6409 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6410diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6411index 8feaed6..1bd8a64 100644
6412--- a/arch/mips/include/asm/local.h
6413+++ b/arch/mips/include/asm/local.h
6414@@ -13,15 +13,25 @@ typedef struct
6415 atomic_long_t a;
6416 } local_t;
6417
6418+typedef struct {
6419+ atomic_long_unchecked_t a;
6420+} local_unchecked_t;
6421+
6422 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6423
6424 #define local_read(l) atomic_long_read(&(l)->a)
6425+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6426 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6427+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6428
6429 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6430+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6431 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6432+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6433 #define local_inc(l) atomic_long_inc(&(l)->a)
6434+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6435 #define local_dec(l) atomic_long_dec(&(l)->a)
6436+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6437
6438 /*
6439 * Same as above, but return the result value
6440@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6441 return result;
6442 }
6443
6444+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6445+{
6446+ unsigned long result;
6447+
6448+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6449+ unsigned long temp;
6450+
6451+ __asm__ __volatile__(
6452+ " .set mips3 \n"
6453+ "1:" __LL "%1, %2 # local_add_return \n"
6454+ " addu %0, %1, %3 \n"
6455+ __SC "%0, %2 \n"
6456+ " beqzl %0, 1b \n"
6457+ " addu %0, %1, %3 \n"
6458+ " .set mips0 \n"
6459+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6460+ : "Ir" (i), "m" (l->a.counter)
6461+ : "memory");
6462+ } else if (kernel_uses_llsc) {
6463+ unsigned long temp;
6464+
6465+ __asm__ __volatile__(
6466+ " .set mips3 \n"
6467+ "1:" __LL "%1, %2 # local_add_return \n"
6468+ " addu %0, %1, %3 \n"
6469+ __SC "%0, %2 \n"
6470+ " beqz %0, 1b \n"
6471+ " addu %0, %1, %3 \n"
6472+ " .set mips0 \n"
6473+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6474+ : "Ir" (i), "m" (l->a.counter)
6475+ : "memory");
6476+ } else {
6477+ unsigned long flags;
6478+
6479+ local_irq_save(flags);
6480+ result = l->a.counter;
6481+ result += i;
6482+ l->a.counter = result;
6483+ local_irq_restore(flags);
6484+ }
6485+
6486+ return result;
6487+}
6488+
6489 static __inline__ long local_sub_return(long i, local_t * l)
6490 {
6491 unsigned long result;
6492@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6493
6494 #define local_cmpxchg(l, o, n) \
6495 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6496+#define local_cmpxchg_unchecked(l, o, n) \
6497+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6498 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6499
6500 /**
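
local_add_return_unchecked() follows the canonical LL/SC shape used all over this file: load-linked the old value, compute into a scratch register, store-conditional, loop back on failure (`beqz`, or the branch-likely `beqzl` on R10000-workaround kernels), then recompute the return value from the saved old value. The same retry loop in portable C11, with compare-and-swap standing in for ll/sc:

    #include <stdatomic.h>
    #include <stdio.h>

    static long local_add_return(long i, _Atomic long *l)
    {
        long old = atomic_load_explicit(l, memory_order_relaxed);
        /* CAS failure reloads 'old' and retries, like "beqz %0, 1b" */
        while (!atomic_compare_exchange_weak(l, &old, old + i))
            ;
        return old + i;
    }

    int main(void)
    {
        _Atomic long v = 40;
        printf("%ld\n", local_add_return(2, &v));  /* 42 */
        return 0;
    }
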
6501diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6502index 154b70a..426ae3d 100644
6503--- a/arch/mips/include/asm/page.h
6504+++ b/arch/mips/include/asm/page.h
6505@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6506 #ifdef CONFIG_CPU_MIPS32
6507 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6508 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6509- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6510+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6511 #else
6512 typedef struct { unsigned long long pte; } pte_t;
6513 #define pte_val(x) ((x).pte)
6514diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6515index b336037..5b874cc 100644
6516--- a/arch/mips/include/asm/pgalloc.h
6517+++ b/arch/mips/include/asm/pgalloc.h
6518@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6519 {
6520 set_pud(pud, __pud((unsigned long)pmd));
6521 }
6522+
6523+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6524+{
6525+ pud_populate(mm, pud, pmd);
6526+}
6527 #endif
6528
6529 /*
6530diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6531index f8f809f..b5f3fa4 100644
6532--- a/arch/mips/include/asm/pgtable.h
6533+++ b/arch/mips/include/asm/pgtable.h
6534@@ -20,6 +20,9 @@
6535 #include <asm/io.h>
6536 #include <asm/pgtable-bits.h>
6537
6538+#define ktla_ktva(addr) (addr)
6539+#define ktva_ktla(addr) (addr)
6540+
6541 struct mm_struct;
6542 struct vm_area_struct;
6543
6544diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6545index 55ed660..3dc9422 100644
6546--- a/arch/mips/include/asm/thread_info.h
6547+++ b/arch/mips/include/asm/thread_info.h
6548@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
6549 #define TIF_SECCOMP 4 /* secure computing */
6550 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6551 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6552+/* li takes a 32bit immediate */
6553+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6554+
6555 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6556 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6557 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6558@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
6559 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6560 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6561 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6562+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6563
6564 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6565 _TIF_SYSCALL_AUDIT | \
6566- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6567+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6568+ _TIF_GRSEC_SETXID)
6569
6570 /* work to do in syscall_trace_leave() */
6571 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6572- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6573+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6574
6575 /* work to do on interrupt/exception return */
6576 #define _TIF_WORK_MASK \
6577@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
6578 /* work to do on any return to u-space */
6579 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6580 _TIF_WORK_SYSCALL_EXIT | \
6581- _TIF_SYSCALL_TRACEPOINT)
6582+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6583
6584 /*
6585 * We stash processor id into a COP0 register to retrieve it fast
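
TIF_GRSEC_SETXID sits at bit 10 because, per the comment, the flag must stay reachable by a MIPS `li` immediate; OR-ing it into the syscall entry, exit and allwork masks forces any flagged thread through the traced slow path, which is where grsecurity applies a deferred uid/gid change to every thread of a process. Schematically, how one flag bit routes a thread onto the slow path:

    #include <stdio.h>

    #define TIF_SYSCALL_TRACE   0
    #define TIF_GRSEC_SETXID    10   /* li takes a 32bit immediate */
    #define _TIF_SYSCALL_TRACE  (1 << TIF_SYSCALL_TRACE)
    #define _TIF_GRSEC_SETXID   (1 << TIF_GRSEC_SETXID)
    #define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)

    int main(void)
    {
        unsigned long ti_flags = _TIF_GRSEC_SETXID;  /* set when a setuid()
                                                        must propagate     */
        if (ti_flags & _TIF_WORK_SYSCALL_ENTRY)
            printf("slow path: apply deferred credential change\n");
        return 0;
    }
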
6586diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6587index bf8b324..cec5705 100644
6588--- a/arch/mips/include/asm/uaccess.h
6589+++ b/arch/mips/include/asm/uaccess.h
6590@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6591 __ok == 0; \
6592 })
6593
6594+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6595 #define access_ok(type, addr, size) \
6596 likely(__access_ok((addr), (size), __access_mask))
6597
6598diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6599index 1188e00..41cf144 100644
6600--- a/arch/mips/kernel/binfmt_elfn32.c
6601+++ b/arch/mips/kernel/binfmt_elfn32.c
6602@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6603 #undef ELF_ET_DYN_BASE
6604 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6605
6606+#ifdef CONFIG_PAX_ASLR
6607+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6608+
6609+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6610+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6611+#endif
6612+
6613 #include <asm/processor.h>
6614 #include <linux/module.h>
6615 #include <linux/elfcore.h>
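The PAX_DELTA_*_LEN values above are counts of random bits applied at page granularity, so the amount of address-space jitter they buy depends on the page size. A quick back-of-envelope check, assuming 4 KiB pages (PAGE_SHIFT = 12):

#include <stdio.h>

int main(void)
{
    const int page_shift = 12;
    int len32 = 27 - page_shift;            /* 15 random bits */
    int len64 = 36 - page_shift;            /* 24 random bits */
    printf("32-bit: %d bits -> %llu MiB of mmap jitter\n",
           len32, (1ULL << (len32 + page_shift)) >> 20);   /* 128 MiB */
    printf("64-bit: %d bits -> %llu GiB of mmap jitter\n",
           len64, (1ULL << (len64 + page_shift)) >> 30);   /* 64 GiB */
    return 0;
}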
6616diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6617index 9287678..f870e47 100644
6618--- a/arch/mips/kernel/binfmt_elfo32.c
6619+++ b/arch/mips/kernel/binfmt_elfo32.c
6620@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6621 #undef ELF_ET_DYN_BASE
6622 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6623
6624+#ifdef CONFIG_PAX_ASLR
6625+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6626+
6627+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6628+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6629+#endif
6630+
6631 #include <asm/processor.h>
6632
6633 #include <linux/module.h>
6634diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6635index a74ec3a..4f06f18 100644
6636--- a/arch/mips/kernel/i8259.c
6637+++ b/arch/mips/kernel/i8259.c
6638@@ -202,7 +202,7 @@ spurious_8259A_irq:
6639 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6640 spurious_irq_mask |= irqmask;
6641 }
6642- atomic_inc(&irq_err_count);
6643+ atomic_inc_unchecked(&irq_err_count);
6644 /*
6645 * Theoretically we do not have to handle this IRQ,
6646 * but in Linux this does not cause problems and is
6647diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6648index 44a1f79..2bd6aa3 100644
6649--- a/arch/mips/kernel/irq-gt641xx.c
6650+++ b/arch/mips/kernel/irq-gt641xx.c
6651@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6652 }
6653 }
6654
6655- atomic_inc(&irq_err_count);
6656+ atomic_inc_unchecked(&irq_err_count);
6657 }
6658
6659 void __init gt641xx_irq_init(void)
6660diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6661index d2bfbc2..a8eacd2 100644
6662--- a/arch/mips/kernel/irq.c
6663+++ b/arch/mips/kernel/irq.c
6664@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6665 printk("unexpected IRQ # %d\n", irq);
6666 }
6667
6668-atomic_t irq_err_count;
6669+atomic_unchecked_t irq_err_count;
6670
6671 int arch_show_interrupts(struct seq_file *p, int prec)
6672 {
6673- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6674+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6675 return 0;
6676 }
6677
6678 asmlinkage void spurious_interrupt(void)
6679 {
6680- atomic_inc(&irq_err_count);
6681+ atomic_inc_unchecked(&irq_err_count);
6682 }
6683
6684 void __init init_IRQ(void)
6685@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6686 #endif
6687 }
6688
6689+
6690 #ifdef DEBUG_STACKOVERFLOW
6691+extern void gr_handle_kernel_exploit(void);
6692+
6693 static inline void check_stack_overflow(void)
6694 {
6695 unsigned long sp;
6696@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6697 printk("do_IRQ: stack overflow: %ld\n",
6698 sp - sizeof(struct thread_info));
6699 dump_stack();
6700+ gr_handle_kernel_exploit();
6701 }
6702 }
6703 #else
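Under PAX_REFCOUNT the ordinary atomic_t operations trap on signed overflow, so counters that are pure statistics, such as irq_err_count here, are converted to atomic_unchecked_t, whose operations keep the plain wrapping behaviour. A user-space sketch of the unchecked flavour (demo types and GCC __atomic builtins standing in for the real kernel primitives):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_demo_t;

static inline void atomic_inc_unchecked_demo(atomic_unchecked_demo_t *v)
{
    /* plain wrapping add: fine for a statistic where a rare wrap is noise */
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_demo(const atomic_unchecked_demo_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_unchecked_demo_t err = { 0 };
    atomic_inc_unchecked_demo(&err);
    printf("ERR: %d\n", atomic_read_unchecked_demo(&err));
    return 0;
}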
6704diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6705index 0614717..002fa43 100644
6706--- a/arch/mips/kernel/pm-cps.c
6707+++ b/arch/mips/kernel/pm-cps.c
6708@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6709 nc_core_ready_count = nc_addr;
6710
6711 /* Ensure ready_count is zero-initialised before the assembly runs */
6712- ACCESS_ONCE(*nc_core_ready_count) = 0;
6713+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6714 coupled_barrier(&per_cpu(pm_barrier, core), online);
6715
6716 /* Run the generated entry code */
6717diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6718index bf85cc1..b365c61 100644
6719--- a/arch/mips/kernel/process.c
6720+++ b/arch/mips/kernel/process.c
6721@@ -535,18 +535,6 @@ out:
6722 return pc;
6723 }
6724
6725-/*
6726- * Don't forget that the stack pointer must be aligned on a 8 bytes
6727- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6728- */
6729-unsigned long arch_align_stack(unsigned long sp)
6730-{
6731- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6732- sp -= get_random_int() & ~PAGE_MASK;
6733-
6734- return sp & ALMASK;
6735-}
6736-
6737 static void arch_dump_stack(void *info)
6738 {
6739 struct pt_regs *regs;
6740diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6741index 5104528..950bbdc 100644
6742--- a/arch/mips/kernel/ptrace.c
6743+++ b/arch/mips/kernel/ptrace.c
6744@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6745 return ret;
6746 }
6747
6748+#ifdef CONFIG_GRKERNSEC_SETXID
6749+extern void gr_delayed_cred_worker(void);
6750+#endif
6751+
6752 /*
6753 * Notification of system call entry/exit
6754 * - triggered by current->work.syscall_trace
6755@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6756 tracehook_report_syscall_entry(regs))
6757 ret = -1;
6758
6759+#ifdef CONFIG_GRKERNSEC_SETXID
6760+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6761+ gr_delayed_cred_worker();
6762+#endif
6763+
6764 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6765 trace_sys_enter(regs, regs->regs[2]);
6766
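The hook above makes delayed credential updates piggyback on syscall entry: a thread flagged with TIF_GRSEC_SETXID picks up its pending credentials the next time it crosses into the kernel, before the syscall proper runs. A minimal user-space simulation of that hand-off (all names illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct task { bool tif_grsec_setxid; int uid, pending_uid; };

static void mark_for_cred_update(struct task *t, int new_uid)
{
    t->pending_uid = new_uid;
    t->tif_grsec_setxid = true;          /* like set_tsk_thread_flag() */
}

static void syscall_trace_enter(struct task *t)
{
    if (t->tif_grsec_setxid) {           /* test_and_clear_thread_flag() */
        t->tif_grsec_setxid = false;
        t->uid = t->pending_uid;         /* gr_delayed_cred_worker() role */
    }
}

int main(void)
{
    struct task t = { .uid = 1000 };
    mark_for_cred_update(&t, 0);
    syscall_trace_enter(&t);             /* next syscall entry */
    printf("uid now %d\n", t.uid);       /* 0 */
    return 0;
}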
6767diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6768index 07fc524..b9d7f28 100644
6769--- a/arch/mips/kernel/reset.c
6770+++ b/arch/mips/kernel/reset.c
6771@@ -13,6 +13,7 @@
6772 #include <linux/reboot.h>
6773
6774 #include <asm/reboot.h>
6775+#include <asm/bug.h>
6776
6777 /*
6778 * Urgs ... Too many MIPS machines to handle this in a generic way.
6779@@ -29,16 +30,19 @@ void machine_restart(char *command)
6780 {
6781 if (_machine_restart)
6782 _machine_restart(command);
6783+ BUG();
6784 }
6785
6786 void machine_halt(void)
6787 {
6788 if (_machine_halt)
6789 _machine_halt();
6790+ BUG();
6791 }
6792
6793 void machine_power_off(void)
6794 {
6795 if (pm_power_off)
6796 pm_power_off();
6797+ BUG();
6798 }
6799diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6800index 2242bdd..b284048 100644
6801--- a/arch/mips/kernel/sync-r4k.c
6802+++ b/arch/mips/kernel/sync-r4k.c
6803@@ -18,8 +18,8 @@
6804 #include <asm/mipsregs.h>
6805
6806 static atomic_t count_start_flag = ATOMIC_INIT(0);
6807-static atomic_t count_count_start = ATOMIC_INIT(0);
6808-static atomic_t count_count_stop = ATOMIC_INIT(0);
6809+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6810+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6811 static atomic_t count_reference = ATOMIC_INIT(0);
6812
6813 #define COUNTON 100
6814@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6815
6816 for (i = 0; i < NR_LOOPS; i++) {
6817 /* slaves loop on '!= 2' */
6818- while (atomic_read(&count_count_start) != 1)
6819+ while (atomic_read_unchecked(&count_count_start) != 1)
6820 mb();
6821- atomic_set(&count_count_stop, 0);
6822+ atomic_set_unchecked(&count_count_stop, 0);
6823 smp_wmb();
6824
6825 /* this lets the slaves write their count register */
6826- atomic_inc(&count_count_start);
6827+ atomic_inc_unchecked(&count_count_start);
6828
6829 /*
6830 * Everyone initialises count in the last loop:
6831@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6832 /*
6833 * Wait for all slaves to leave the synchronization point:
6834 */
6835- while (atomic_read(&count_count_stop) != 1)
6836+ while (atomic_read_unchecked(&count_count_stop) != 1)
6837 mb();
6838- atomic_set(&count_count_start, 0);
6839+ atomic_set_unchecked(&count_count_start, 0);
6840 smp_wmb();
6841- atomic_inc(&count_count_stop);
6842+ atomic_inc_unchecked(&count_count_stop);
6843 }
6844 /* Arrange for an interrupt in a short while */
6845 write_c0_compare(read_c0_count() + COUNTON);
6846@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6847 initcount = atomic_read(&count_reference);
6848
6849 for (i = 0; i < NR_LOOPS; i++) {
6850- atomic_inc(&count_count_start);
6851- while (atomic_read(&count_count_start) != 2)
6852+ atomic_inc_unchecked(&count_count_start);
6853+ while (atomic_read_unchecked(&count_count_start) != 2)
6854 mb();
6855
6856 /*
6857@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6858 if (i == NR_LOOPS-1)
6859 write_c0_count(initcount);
6860
6861- atomic_inc(&count_count_stop);
6862- while (atomic_read(&count_count_stop) != 2)
6863+ atomic_inc_unchecked(&count_count_stop);
6864+ while (atomic_read_unchecked(&count_count_stop) != 2)
6865 mb();
6866 }
6867 /* Arrange for an interrupt in a short while */
6868diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6869index 33984c0..666a96d 100644
6870--- a/arch/mips/kernel/traps.c
6871+++ b/arch/mips/kernel/traps.c
6872@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6873 siginfo_t info;
6874
6875 prev_state = exception_enter();
6876- die_if_kernel("Integer overflow", regs);
6877+ if (unlikely(!user_mode(regs))) {
6878+
6879+#ifdef CONFIG_PAX_REFCOUNT
6880+ if (fixup_exception(regs)) {
6881+ pax_report_refcount_overflow(regs);
6882+ exception_exit(prev_state);
6883+ return;
6884+ }
6885+#endif
6886+
6887+ die("Integer overflow", regs);
6888+ }
6889
6890 info.si_code = FPE_INTOVF;
6891 info.si_signo = SIGFPE;
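The reworked do_ov() lets a PAX_REFCOUNT trap taken in kernel mode be resolved through the exception table instead of dying: the overflow is reported and execution resumes past the trapping instruction, leaving the counter saturated rather than wrapped. A user-space sketch of the same saturate-and-report idea, with __builtin_add_overflow standing in for the hardware trap:

#include <limits.h>
#include <stdio.h>

static int refcount_inc_checked(int *v)
{
    int out;
    if (__builtin_add_overflow(*v, 1, &out)) {
        fprintf(stderr, "refcount overflow detected, saturating\n");
        *v = INT_MAX;                    /* leave the object pinned */
        return 0;
    }
    *v = out;
    return 1;
}

int main(void)
{
    int ref = INT_MAX;
    refcount_inc_checked(&ref);          /* triggers the report */
    return 0;
}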
6892diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6893index f5e7dda..47198ec 100644
6894--- a/arch/mips/kvm/mips.c
6895+++ b/arch/mips/kvm/mips.c
6896@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6897 return r;
6898 }
6899
6900-int kvm_arch_init(void *opaque)
6901+int kvm_arch_init(const void *opaque)
6902 {
6903 if (kvm_mips_callbacks) {
6904 kvm_err("kvm: module already exists\n");
6905diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6906index 7ff8637..6004edb 100644
6907--- a/arch/mips/mm/fault.c
6908+++ b/arch/mips/mm/fault.c
6909@@ -31,6 +31,23 @@
6910
6911 int show_unhandled_signals = 1;
6912
6913+#ifdef CONFIG_PAX_PAGEEXEC
6914+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6915+{
6916+ unsigned long i;
6917+
6918+ printk(KERN_ERR "PAX: bytes at PC: ");
6919+ for (i = 0; i < 5; i++) {
6920+ unsigned int c;
6921+ if (get_user(c, (unsigned int *)pc+i))
6922+ printk(KERN_CONT "???????? ");
6923+ else
6924+ printk(KERN_CONT "%08x ", c);
6925+ }
6926+ printk("\n");
6927+}
6928+#endif
6929+
6930 /*
6931 * This routine handles page faults. It determines the address,
6932 * and the problem, and then passes it off to one of the appropriate
6933@@ -206,6 +223,14 @@ bad_area:
6934 bad_area_nosemaphore:
6935 /* User mode accesses just cause a SIGSEGV */
6936 if (user_mode(regs)) {
6937+
6938+#ifdef CONFIG_PAX_PAGEEXEC
6939+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6940+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6941+ do_group_exit(SIGKILL);
6942+ }
6943+#endif
6944+
6945 tsk->thread.cp0_badvaddr = address;
6946 tsk->thread.error_code = write;
6947 if (show_unhandled_signals &&
6948diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6949index f1baadd..5472dca 100644
6950--- a/arch/mips/mm/mmap.c
6951+++ b/arch/mips/mm/mmap.c
6952@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6953 struct vm_area_struct *vma;
6954 unsigned long addr = addr0;
6955 int do_color_align;
6956+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6957 struct vm_unmapped_area_info info;
6958
6959 if (unlikely(len > TASK_SIZE))
6960@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 do_color_align = 1;
6962
6963 /* requesting a specific address */
6964+
6965+#ifdef CONFIG_PAX_RANDMMAP
6966+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6967+#endif
6968+
6969 if (addr) {
6970 if (do_color_align)
6971 addr = COLOUR_ALIGN(addr, pgoff);
6972@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6973 addr = PAGE_ALIGN(addr);
6974
6975 vma = find_vma(mm, addr);
6976- if (TASK_SIZE - len >= addr &&
6977- (!vma || addr + len <= vma->vm_start))
6978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6979 return addr;
6980 }
6981
6982 info.length = len;
6983 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6984 info.align_offset = pgoff << PAGE_SHIFT;
6985+ info.threadstack_offset = offset;
6986
6987 if (dir == DOWN) {
6988 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6989@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6990 {
6991 unsigned long random_factor = 0UL;
6992
6993+#ifdef CONFIG_PAX_RANDMMAP
6994+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6995+#endif
6996+
6997 if (current->flags & PF_RANDOMIZE) {
6998 random_factor = get_random_int();
6999 random_factor = random_factor << PAGE_SHIFT;
7000@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7001
7002 if (mmap_is_legacy()) {
7003 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7004+
7005+#ifdef CONFIG_PAX_RANDMMAP
7006+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7007+ mm->mmap_base += mm->delta_mmap;
7008+#endif
7009+
7010 mm->get_unmapped_area = arch_get_unmapped_area;
7011 } else {
7012 mm->mmap_base = mmap_base(random_factor);
7013+
7014+#ifdef CONFIG_PAX_RANDMMAP
7015+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7016+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7017+#endif
7018+
7019 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7020 }
7021 }
7022
7023-static inline unsigned long brk_rnd(void)
7024-{
7025- unsigned long rnd = get_random_int();
7026-
7027- rnd = rnd << PAGE_SHIFT;
7028- /* 8MB for 32bit, 256MB for 64bit */
7029- if (TASK_IS_32BIT_ADDR)
7030- rnd = rnd & 0x7ffffful;
7031- else
7032- rnd = rnd & 0xffffffful;
7033-
7034- return rnd;
7035-}
7036-
7037-unsigned long arch_randomize_brk(struct mm_struct *mm)
7038-{
7039- unsigned long base = mm->brk;
7040- unsigned long ret;
7041-
7042- ret = PAGE_ALIGN(base + brk_rnd());
7043-
7044- if (ret < mm->brk)
7045- return mm->brk;
7046-
7047- return ret;
7048-}
7049-
7050 int __virt_addr_valid(const volatile void *kaddr)
7051 {
7052 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
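With MF_PAX_RANDMMAP set, the hunk above shifts the legacy (bottom-up) search base up by delta_mmap and pulls the top-down base down by delta_mmap + delta_stack. A small arithmetic sketch with illustrative values (delta_mmap and delta_stack are per-mm random, page-aligned offsets):

#include <stdio.h>

int main(void)
{
    unsigned long task_unmapped_base = 0x2aaa8000UL;  /* example value */
    unsigned long topdown_base       = 0x7f000000UL;  /* example value */
    unsigned long delta_mmap  = 0x00450000UL;
    unsigned long delta_stack = 0x00023000UL;

    /* legacy (bottom-up): push the search base up */
    printf("legacy base:  %#lx\n", task_unmapped_base + delta_mmap);
    /* topdown: pull the base down, keeping room for the stack gap */
    printf("topdown base: %#lx\n", topdown_base - (delta_mmap + delta_stack));
    return 0;
}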
7053diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7054index a2358b4..7cead4f 100644
7055--- a/arch/mips/sgi-ip27/ip27-nmi.c
7056+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7057@@ -187,9 +187,9 @@ void
7058 cont_nmi_dump(void)
7059 {
7060 #ifndef REAL_NMI_SIGNAL
7061- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7062+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7063
7064- atomic_inc(&nmied_cpus);
7065+ atomic_inc_unchecked(&nmied_cpus);
7066 #endif
7067 /*
7068 * Only allow 1 cpu to proceed
7069@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7070 udelay(10000);
7071 }
7072 #else
7073- while (atomic_read(&nmied_cpus) != num_online_cpus());
7074+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7075 #endif
7076
7077 /*
7078diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7079index a046b30..6799527 100644
7080--- a/arch/mips/sni/rm200.c
7081+++ b/arch/mips/sni/rm200.c
7082@@ -270,7 +270,7 @@ spurious_8259A_irq:
7083 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7084 spurious_irq_mask |= irqmask;
7085 }
7086- atomic_inc(&irq_err_count);
7087+ atomic_inc_unchecked(&irq_err_count);
7088 /*
7089 * Theoretically we do not have to handle this IRQ,
7090 * but in Linux this does not cause problems and is
7091diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7092index 41e873b..34d33a7 100644
7093--- a/arch/mips/vr41xx/common/icu.c
7094+++ b/arch/mips/vr41xx/common/icu.c
7095@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7096
7097 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7098
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101
7102 return -1;
7103 }
7104diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7105index ae0e4ee..e8f0692 100644
7106--- a/arch/mips/vr41xx/common/irq.c
7107+++ b/arch/mips/vr41xx/common/irq.c
7108@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7109 irq_cascade_t *cascade;
7110
7111 if (irq >= NR_IRQS) {
7112- atomic_inc(&irq_err_count);
7113+ atomic_inc_unchecked(&irq_err_count);
7114 return;
7115 }
7116
7117@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7118 ret = cascade->get_irq(irq);
7119 irq = ret;
7120 if (ret < 0)
7121- atomic_inc(&irq_err_count);
7122+ atomic_inc_unchecked(&irq_err_count);
7123 else
7124 irq_dispatch(irq);
7125 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7126diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7127index 967d144..db12197 100644
7128--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7129+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7130@@ -11,12 +11,14 @@
7131 #ifndef _ASM_PROC_CACHE_H
7132 #define _ASM_PROC_CACHE_H
7133
7134+#include <linux/const.h>
7135+
7136 /* L1 cache */
7137
7138 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7139 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7140-#define L1_CACHE_BYTES 16 /* bytes per entry */
7141 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7143 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7144
7145 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7146diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7147index bcb5df2..84fabd2 100644
7148--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7149+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7150@@ -16,13 +16,15 @@
7151 #ifndef _ASM_PROC_CACHE_H
7152 #define _ASM_PROC_CACHE_H
7153
7154+#include <linux/const.h>
7155+
7156 /*
7157 * L1 cache
7158 */
7159 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7160 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7161-#define L1_CACHE_BYTES 32 /* bytes per entry */
7162 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7164 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7165
7166 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7167diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7168index 4ce7a01..449202a 100644
7169--- a/arch/openrisc/include/asm/cache.h
7170+++ b/arch/openrisc/include/asm/cache.h
7171@@ -19,11 +19,13 @@
7172 #ifndef __ASM_OPENRISC_CACHE_H
7173 #define __ASM_OPENRISC_CACHE_H
7174
7175+#include <linux/const.h>
7176+
7177 /* FIXME: How can we replace these with values from the CPU...
7178 * they shouldn't be hard-coded!
7179 */
7180
7181-#define L1_CACHE_BYTES 16
7182 #define L1_CACHE_SHIFT 4
7183+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7184
7185 #endif /* __ASM_OPENRISC_CACHE_H */
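Deriving L1_CACHE_BYTES from L1_CACHE_SHIFT via _AC(1,UL) keeps the two constants consistent by construction and lets the same header be included from both C and assembler. A simplified sketch of what linux/const.h's _AC() does:

/* In C, _AC() pastes the suffix onto the literal; under __ASSEMBLY__ it
 * leaves the bare number so the same expression works in .S files. */
#ifdef __ASSEMBLY__
# define _AC(X, Y)   X
#else
# define __AC(X, Y)  (X##Y)
# define _AC(X, Y)   __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)   /* 16UL in C */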
7186diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7187index 226f8ca..9d9b87d 100644
7188--- a/arch/parisc/include/asm/atomic.h
7189+++ b/arch/parisc/include/asm/atomic.h
7190@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7191 return dec;
7192 }
7193
7194+#define atomic64_read_unchecked(v) atomic64_read(v)
7195+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7196+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7197+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7198+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7199+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7200+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7201+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7202+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7203+
7204 #endif /* !CONFIG_64BIT */
7205
7206
7207diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7208index 47f11c7..3420df2 100644
7209--- a/arch/parisc/include/asm/cache.h
7210+++ b/arch/parisc/include/asm/cache.h
7211@@ -5,6 +5,7 @@
7212 #ifndef __ARCH_PARISC_CACHE_H
7213 #define __ARCH_PARISC_CACHE_H
7214
7215+#include <linux/const.h>
7216
7217 /*
7218 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7219@@ -15,13 +16,13 @@
7220 * just ruin performance.
7221 */
7222 #ifdef CONFIG_PA20
7223-#define L1_CACHE_BYTES 64
7224 #define L1_CACHE_SHIFT 6
7225 #else
7226-#define L1_CACHE_BYTES 32
7227 #define L1_CACHE_SHIFT 5
7228 #endif
7229
7230+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7231+
7232 #ifndef __ASSEMBLY__
7233
7234 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7235diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7236index 3391d06..c23a2cc 100644
7237--- a/arch/parisc/include/asm/elf.h
7238+++ b/arch/parisc/include/asm/elf.h
7239@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7240
7241 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7242
7243+#ifdef CONFIG_PAX_ASLR
7244+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7245+
7246+#define PAX_DELTA_MMAP_LEN 16
7247+#define PAX_DELTA_STACK_LEN 16
7248+#endif
7249+
7250 /* This yields a mask that user programs can use to figure out what
7251 instruction set this CPU supports. This could be done in user space,
7252 but it's not easy, and we've already done it here. */
7253diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7254index d174372..f27fe5c 100644
7255--- a/arch/parisc/include/asm/pgalloc.h
7256+++ b/arch/parisc/include/asm/pgalloc.h
7257@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7258 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7259 }
7260
7261+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7262+{
7263+ pgd_populate(mm, pgd, pmd);
7264+}
7265+
7266 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7267 {
7268 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7269@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7270 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7271 #define pmd_free(mm, x) do { } while (0)
7272 #define pgd_populate(mm, pmd, pte) BUG()
7273+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7274
7275 #endif
7276
7277diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7278index 15207b9..3209e65 100644
7279--- a/arch/parisc/include/asm/pgtable.h
7280+++ b/arch/parisc/include/asm/pgtable.h
7281@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7282 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7283 #define PAGE_COPY PAGE_EXECREAD
7284 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7288+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7289+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7290+#else
7291+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7292+# define PAGE_COPY_NOEXEC PAGE_COPY
7293+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7294+#endif
7295+
7296 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7297 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7298 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7299diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7300index 0abdd4c..1af92f0 100644
7301--- a/arch/parisc/include/asm/uaccess.h
7302+++ b/arch/parisc/include/asm/uaccess.h
7303@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7304 const void __user *from,
7305 unsigned long n)
7306 {
7307- int sz = __compiletime_object_size(to);
7308+ size_t sz = __compiletime_object_size(to);
7309 int ret = -EFAULT;
7310
7311- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7312+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7313 ret = __copy_from_user(to, from, n);
7314 else
7315 copy_from_user_overflow();
7316diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7317index 3c63a82..b1d6ee9 100644
7318--- a/arch/parisc/kernel/module.c
7319+++ b/arch/parisc/kernel/module.c
7320@@ -98,16 +98,38 @@
7321
7322 /* three functions to determine where in the module core
7323 * or init pieces the location is */
7324+static inline int in_init_rx(struct module *me, void *loc)
7325+{
7326+ return (loc >= me->module_init_rx &&
7327+ loc < (me->module_init_rx + me->init_size_rx));
7328+}
7329+
7330+static inline int in_init_rw(struct module *me, void *loc)
7331+{
7332+ return (loc >= me->module_init_rw &&
7333+ loc < (me->module_init_rw + me->init_size_rw));
7334+}
7335+
7336 static inline int in_init(struct module *me, void *loc)
7337 {
7338- return (loc >= me->module_init &&
7339- loc <= (me->module_init + me->init_size));
7340+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7341+}
7342+
7343+static inline int in_core_rx(struct module *me, void *loc)
7344+{
7345+ return (loc >= me->module_core_rx &&
7346+ loc < (me->module_core_rx + me->core_size_rx));
7347+}
7348+
7349+static inline int in_core_rw(struct module *me, void *loc)
7350+{
7351+ return (loc >= me->module_core_rw &&
7352+ loc < (me->module_core_rw + me->core_size_rw));
7353 }
7354
7355 static inline int in_core(struct module *me, void *loc)
7356 {
7357- return (loc >= me->module_core &&
7358- loc <= (me->module_core + me->core_size));
7359+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7360 }
7361
7362 static inline int in_local(struct module *me, void *loc)
7363@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7364 }
7365
7366 /* align things a bit */
7367- me->core_size = ALIGN(me->core_size, 16);
7368- me->arch.got_offset = me->core_size;
7369- me->core_size += gots * sizeof(struct got_entry);
7370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7371+ me->arch.got_offset = me->core_size_rw;
7372+ me->core_size_rw += gots * sizeof(struct got_entry);
7373
7374- me->core_size = ALIGN(me->core_size, 16);
7375- me->arch.fdesc_offset = me->core_size;
7376- me->core_size += fdescs * sizeof(Elf_Fdesc);
7377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7378+ me->arch.fdesc_offset = me->core_size_rw;
7379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7380
7381 me->arch.got_max = gots;
7382 me->arch.fdesc_max = fdescs;
7383@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7384
7385 BUG_ON(value == 0);
7386
7387- got = me->module_core + me->arch.got_offset;
7388+ got = me->module_core_rw + me->arch.got_offset;
7389 for (i = 0; got[i].addr; i++)
7390 if (got[i].addr == value)
7391 goto out;
7392@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7393 #ifdef CONFIG_64BIT
7394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7395 {
7396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7398
7399 if (!value) {
7400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7401@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7402
7403 /* Create new one */
7404 fdesc->addr = value;
7405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7407 return (Elf_Addr)fdesc;
7408 }
7409 #endif /* CONFIG_64BIT */
7410@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7411
7412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7413 end = table + sechdrs[me->arch.unwind_section].sh_size;
7414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7416
7417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7418 me->arch.unwind_section, table, end, gp);
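With PaX the module loader keeps separate read-execute and read-write regions, so the single module_core/core_size pair becomes an rx/rw pair and address classification has to test both halves; note the rewrite also tightens the range check from an inclusive to an exclusive end. A reduced sketch of the membership test (stand-alone types; the real fields live in struct module):

#include <stdbool.h>
#include <stddef.h>

struct region { char *base; size_t size; };

static bool in_region(const struct region *r, const void *loc)
{
    const char *p = loc;
    return p >= r->base && p < r->base + r->size;   /* exclusive end */
}

static bool in_core(const struct region *rx, const struct region *rw,
                    const void *loc)
{
    return in_region(rx, loc) || in_region(rw, loc);
}

int main(void)
{
    char code[64], data[128];
    struct region rx = { code, sizeof code }, rw = { data, sizeof data };
    return in_core(&rx, &rw, data + 5) ? 0 : 1;     /* 0: hit in the rw half */
}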
7419diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7420index e1ffea2..46ed66e 100644
7421--- a/arch/parisc/kernel/sys_parisc.c
7422+++ b/arch/parisc/kernel/sys_parisc.c
7423@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7424 unsigned long task_size = TASK_SIZE;
7425 int do_color_align, last_mmap;
7426 struct vm_unmapped_area_info info;
7427+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7428
7429 if (len > task_size)
7430 return -ENOMEM;
7431@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 goto found_addr;
7433 }
7434
7435+#ifdef CONFIG_PAX_RANDMMAP
7436+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7437+#endif
7438+
7439 if (addr) {
7440 if (do_color_align && last_mmap)
7441 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7442@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7443 info.high_limit = mmap_upper_limit();
7444 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7445 info.align_offset = shared_align_offset(last_mmap, pgoff);
7446+ info.threadstack_offset = offset;
7447 addr = vm_unmapped_area(&info);
7448
7449 found_addr:
7450@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7451 unsigned long addr = addr0;
7452 int do_color_align, last_mmap;
7453 struct vm_unmapped_area_info info;
7454+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7455
7456 #ifdef CONFIG_64BIT
7457 /* This should only ever run for 32-bit processes. */
7458@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 }
7460
7461 /* requesting a specific address */
7462+#ifdef CONFIG_PAX_RANDMMAP
7463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7464+#endif
7465+
7466 if (addr) {
7467 if (do_color_align && last_mmap)
7468 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7469@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7470 info.high_limit = mm->mmap_base;
7471 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7472 info.align_offset = shared_align_offset(last_mmap, pgoff);
7473+ info.threadstack_offset = offset;
7474 addr = vm_unmapped_area(&info);
7475 if (!(addr & ~PAGE_MASK))
7476 goto found_addr;
7477@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7478 mm->mmap_legacy_base = mmap_legacy_base();
7479 mm->mmap_base = mmap_upper_limit();
7480
7481+#ifdef CONFIG_PAX_RANDMMAP
7482+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7483+ mm->mmap_legacy_base += mm->delta_mmap;
7484+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7485+ }
7486+#endif
7487+
7488 if (mmap_is_legacy()) {
7489 mm->mmap_base = mm->mmap_legacy_base;
7490 mm->get_unmapped_area = arch_get_unmapped_area;
7491diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7492index 47ee620..1107387 100644
7493--- a/arch/parisc/kernel/traps.c
7494+++ b/arch/parisc/kernel/traps.c
7495@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7496
7497 down_read(&current->mm->mmap_sem);
7498 vma = find_vma(current->mm,regs->iaoq[0]);
7499- if (vma && (regs->iaoq[0] >= vma->vm_start)
7500- && (vma->vm_flags & VM_EXEC)) {
7501-
7502+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7503 fault_address = regs->iaoq[0];
7504 fault_space = regs->iasq[0];
7505
7506diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7507index e5120e6..8ddb5cc 100644
7508--- a/arch/parisc/mm/fault.c
7509+++ b/arch/parisc/mm/fault.c
7510@@ -15,6 +15,7 @@
7511 #include <linux/sched.h>
7512 #include <linux/interrupt.h>
7513 #include <linux/module.h>
7514+#include <linux/unistd.h>
7515
7516 #include <asm/uaccess.h>
7517 #include <asm/traps.h>
7518@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7519 static unsigned long
7520 parisc_acctyp(unsigned long code, unsigned int inst)
7521 {
7522- if (code == 6 || code == 16)
7523+ if (code == 6 || code == 7 || code == 16)
7524 return VM_EXEC;
7525
7526 switch (inst & 0xf0000000) {
7527@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7528 }
7529 #endif
7530
7531+#ifdef CONFIG_PAX_PAGEEXEC
7532+/*
7533+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7534+ *
7535+ * returns 1 when task should be killed
7536+ * 2 when rt_sigreturn trampoline was detected
7537+ * 3 when unpatched PLT trampoline was detected
7538+ */
7539+static int pax_handle_fetch_fault(struct pt_regs *regs)
7540+{
7541+
7542+#ifdef CONFIG_PAX_EMUPLT
7543+ int err;
7544+
7545+ do { /* PaX: unpatched PLT emulation */
7546+ unsigned int bl, depwi;
7547+
7548+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7549+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7550+
7551+ if (err)
7552+ break;
7553+
7554+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7555+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7556+
7557+ err = get_user(ldw, (unsigned int *)addr);
7558+ err |= get_user(bv, (unsigned int *)(addr+4));
7559+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7560+
7561+ if (err)
7562+ break;
7563+
7564+ if (ldw == 0x0E801096U &&
7565+ bv == 0xEAC0C000U &&
7566+ ldw2 == 0x0E881095U)
7567+ {
7568+ unsigned int resolver, map;
7569+
7570+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7571+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7572+ if (err)
7573+ break;
7574+
7575+ regs->gr[20] = instruction_pointer(regs)+8;
7576+ regs->gr[21] = map;
7577+ regs->gr[22] = resolver;
7578+ regs->iaoq[0] = resolver | 3UL;
7579+ regs->iaoq[1] = regs->iaoq[0] + 4;
7580+ return 3;
7581+ }
7582+ }
7583+ } while (0);
7584+#endif
7585+
7586+#ifdef CONFIG_PAX_EMUTRAMP
7587+
7588+#ifndef CONFIG_PAX_EMUSIGRT
7589+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7590+ return 1;
7591+#endif
7592+
7593+ do { /* PaX: rt_sigreturn emulation */
7594+ unsigned int ldi1, ldi2, bel, nop;
7595+
7596+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7597+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7598+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7599+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7600+
7601+ if (err)
7602+ break;
7603+
7604+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7605+ ldi2 == 0x3414015AU &&
7606+ bel == 0xE4008200U &&
7607+ nop == 0x08000240U)
7608+ {
7609+ regs->gr[25] = (ldi1 & 2) >> 1;
7610+ regs->gr[20] = __NR_rt_sigreturn;
7611+ regs->gr[31] = regs->iaoq[1] + 16;
7612+ regs->sr[0] = regs->iasq[1];
7613+ regs->iaoq[0] = 0x100UL;
7614+ regs->iaoq[1] = regs->iaoq[0] + 4;
7615+ regs->iasq[0] = regs->sr[2];
7616+ regs->iasq[1] = regs->sr[2];
7617+ return 2;
7618+ }
7619+ } while (0);
7620+#endif
7621+
7622+ return 1;
7623+}
7624+
7625+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7626+{
7627+ unsigned long i;
7628+
7629+ printk(KERN_ERR "PAX: bytes at PC: ");
7630+ for (i = 0; i < 5; i++) {
7631+ unsigned int c;
7632+ if (get_user(c, (unsigned int *)pc+i))
7633+ printk(KERN_CONT "???????? ");
7634+ else
7635+ printk(KERN_CONT "%08x ", c);
7636+ }
7637+ printk("\n");
7638+}
7639+#endif
7640+
7641 int fixup_exception(struct pt_regs *regs)
7642 {
7643 const struct exception_table_entry *fix;
7644@@ -234,8 +345,33 @@ retry:
7645
7646 good_area:
7647
7648- if ((vma->vm_flags & acc_type) != acc_type)
7649+ if ((vma->vm_flags & acc_type) != acc_type) {
7650+
7651+#ifdef CONFIG_PAX_PAGEEXEC
7652+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7653+ (address & ~3UL) == instruction_pointer(regs))
7654+ {
7655+ up_read(&mm->mmap_sem);
7656+ switch (pax_handle_fetch_fault(regs)) {
7657+
7658+#ifdef CONFIG_PAX_EMUPLT
7659+ case 3:
7660+ return;
7661+#endif
7662+
7663+#ifdef CONFIG_PAX_EMUTRAMP
7664+ case 2:
7665+ return;
7666+#endif
7667+
7668+ }
7669+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7670+ do_group_exit(SIGKILL);
7671+ }
7672+#endif
7673+
7674 goto bad_area;
7675+ }
7676
7677 /*
7678 * If for any reason at all we couldn't handle the fault, make
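The fetch-fault handler above recognises known trampolines by reading a few instruction words at the faulting PC and comparing them against fixed templates. Reduced to a user-space sketch (the kernel fetches each word with get_user() since the PC is a user address; the opcodes are the parisc PLT words from the code above, used purely as data here):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool is_unpatched_plt(const uint32_t *pc)
{
    /* the two words the fault handler looks for first (bl + depwi) */
    const uint32_t tmpl[2] = { 0xEA9F1FDDU, 0xD6801C1EU };
    return memcmp(pc, tmpl, sizeof tmpl) == 0;
}

int main(void)
{
    const uint32_t fake_plt[2] = { 0xEA9F1FDDU, 0xD6801C1EU };
    return is_unpatched_plt(fake_plt) ? 0 : 1;
}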
7679diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7680index 22b0940..309f790 100644
7681--- a/arch/powerpc/Kconfig
7682+++ b/arch/powerpc/Kconfig
7683@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7684 config KEXEC
7685 bool "kexec system call"
7686 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7687+ depends on !GRKERNSEC_KMEM
7688 help
7689 kexec is a system call that implements the ability to shutdown your
7690 current kernel, and to start another kernel. It is like a reboot
7691diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7692index 512d278..d31fadd 100644
7693--- a/arch/powerpc/include/asm/atomic.h
7694+++ b/arch/powerpc/include/asm/atomic.h
7695@@ -12,6 +12,11 @@
7696
7697 #define ATOMIC_INIT(i) { (i) }
7698
7699+#define _ASM_EXTABLE(from, to) \
7700+" .section __ex_table,\"a\"\n" \
7701+ PPC_LONG" " #from ", " #to"\n" \
7702+" .previous\n"
7703+
7704 static __inline__ int atomic_read(const atomic_t *v)
7705 {
7706 int t;
7707@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7708 return t;
7709 }
7710
7711+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7712+{
7713+ int t;
7714+
7715+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7716+
7717+ return t;
7718+}
7719+
7720 static __inline__ void atomic_set(atomic_t *v, int i)
7721 {
7722 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7723 }
7724
7725-#define ATOMIC_OP(op, asm_op) \
7726-static __inline__ void atomic_##op(int a, atomic_t *v) \
7727+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7728+{
7729+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7730+}
7731+
7732+#ifdef CONFIG_PAX_REFCOUNT
7733+#define __REFCOUNT_OP(op) op##o.
7734+#define __OVERFLOW_PRE \
7735+ " mcrxr cr0\n"
7736+#define __OVERFLOW_POST \
7737+ " bf 4*cr0+so, 3f\n" \
7738+ "2: .long 0x00c00b00\n" \
7739+ "3:\n"
7740+#define __OVERFLOW_EXTABLE \
7741+ "\n4:\n"
7742+ "\n4:\n" \
7743+#else
7744+#define __REFCOUNT_OP(op) op
7745+#define __OVERFLOW_PRE
7746+#define __OVERFLOW_POST
7747+#define __OVERFLOW_EXTABLE
7748+#endif
7749+
7750+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7751+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7752 { \
7753 int t; \
7754 \
7755 __asm__ __volatile__( \
7756-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7757+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7758+ pre_op \
7759 #asm_op " %0,%2,%0\n" \
7760+ post_op \
7761 PPC405_ERR77(0,%3) \
7762 " stwcx. %0,0,%3 \n" \
7763 " bne- 1b\n" \
7764+ extable \
7765 : "=&r" (t), "+m" (v->counter) \
7766 : "r" (a), "r" (&v->counter) \
7767 : "cc"); \
7768 } \
7769
7770-#define ATOMIC_OP_RETURN(op, asm_op) \
7771-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7772+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7773+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7774+
7775+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7776+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7777 { \
7778 int t; \
7779 \
7780 __asm__ __volatile__( \
7781 PPC_ATOMIC_ENTRY_BARRIER \
7782-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7783+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7784+ pre_op \
7785 #asm_op " %0,%1,%0\n" \
7786+ post_op \
7787 PPC405_ERR77(0,%2) \
7788 " stwcx. %0,0,%2 \n" \
7789 " bne- 1b\n" \
7790+ extable \
7791 PPC_ATOMIC_EXIT_BARRIER \
7792 : "=&r" (t) \
7793 : "r" (a), "r" (&v->counter) \
7794@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7795 return t; \
7796 }
7797
7798+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7799+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7800+
7801 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7802
7803 ATOMIC_OPS(add, add)
7804@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7805
7806 #undef ATOMIC_OPS
7807 #undef ATOMIC_OP_RETURN
7808+#undef __ATOMIC_OP_RETURN
7809 #undef ATOMIC_OP
7810+#undef __ATOMIC_OP
7811
7812 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7813
7814-static __inline__ void atomic_inc(atomic_t *v)
7815-{
7816- int t;
7817+/*
7818+ * atomic_inc - increment atomic variable
7819+ * @v: pointer of type atomic_t
7820+ *
7821+ * Automatically increments @v by 1
7822+ */
7823+#define atomic_inc(v) atomic_add(1, (v))
7824+#define atomic_inc_return(v) atomic_add_return(1, (v))
7825
7826- __asm__ __volatile__(
7827-"1: lwarx %0,0,%2 # atomic_inc\n\
7828- addic %0,%0,1\n"
7829- PPC405_ERR77(0,%2)
7830-" stwcx. %0,0,%2 \n\
7831- bne- 1b"
7832- : "=&r" (t), "+m" (v->counter)
7833- : "r" (&v->counter)
7834- : "cc", "xer");
7835+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7836+{
7837+ atomic_add_unchecked(1, v);
7838 }
7839
7840-static __inline__ int atomic_inc_return(atomic_t *v)
7841+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7842 {
7843- int t;
7844-
7845- __asm__ __volatile__(
7846- PPC_ATOMIC_ENTRY_BARRIER
7847-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7848- addic %0,%0,1\n"
7849- PPC405_ERR77(0,%1)
7850-" stwcx. %0,0,%1 \n\
7851- bne- 1b"
7852- PPC_ATOMIC_EXIT_BARRIER
7853- : "=&r" (t)
7854- : "r" (&v->counter)
7855- : "cc", "xer", "memory");
7856-
7857- return t;
7858+ return atomic_add_return_unchecked(1, v);
7859 }
7860
7861 /*
7862@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7863 */
7864 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7865
7866-static __inline__ void atomic_dec(atomic_t *v)
7867+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7868 {
7869- int t;
7870-
7871- __asm__ __volatile__(
7872-"1: lwarx %0,0,%2 # atomic_dec\n\
7873- addic %0,%0,-1\n"
7874- PPC405_ERR77(0,%2)\
7875-" stwcx. %0,0,%2\n\
7876- bne- 1b"
7877- : "=&r" (t), "+m" (v->counter)
7878- : "r" (&v->counter)
7879- : "cc", "xer");
7880+ return atomic_add_return_unchecked(1, v) == 0;
7881 }
7882
7883-static __inline__ int atomic_dec_return(atomic_t *v)
7884+/*
7885+ * atomic_dec - decrement atomic variable
7886+ * @v: pointer of type atomic_t
7887+ *
7888+ * Atomically decrements @v by 1
7889+ */
7890+#define atomic_dec(v) atomic_sub(1, (v))
7891+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7892+
7893+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7894 {
7895- int t;
7896-
7897- __asm__ __volatile__(
7898- PPC_ATOMIC_ENTRY_BARRIER
7899-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7900- addic %0,%0,-1\n"
7901- PPC405_ERR77(0,%1)
7902-" stwcx. %0,0,%1\n\
7903- bne- 1b"
7904- PPC_ATOMIC_EXIT_BARRIER
7905- : "=&r" (t)
7906- : "r" (&v->counter)
7907- : "cc", "xer", "memory");
7908-
7909- return t;
7910+ atomic_sub_unchecked(1, v);
7911 }
7912
7913 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7915
7916+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7917+{
7918+ return cmpxchg(&(v->counter), old, new);
7919+}
7920+
7921+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7922+{
7923+ return xchg(&(v->counter), new);
7924+}
7925+
7926 /**
7927 * __atomic_add_unless - add unless the number is a given value
7928 * @v: pointer of type atomic_t
7929@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7930 PPC_ATOMIC_ENTRY_BARRIER
7931 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7932 cmpw 0,%0,%3 \n\
7933- beq- 2f \n\
7934- add %0,%2,%0 \n"
7935+ beq- 2f \n"
7936+
7937+#ifdef CONFIG_PAX_REFCOUNT
7938+" mcrxr cr0\n"
7939+" addo. %0,%2,%0\n"
7940+" bf 4*cr0+so, 4f\n"
7941+"3:.long " "0x00c00b00""\n"
7942+"4:\n"
7943+#else
7944+ "add %0,%2,%0 \n"
7945+#endif
7946+
7947 PPC405_ERR77(0,%2)
7948 " stwcx. %0,0,%1 \n\
7949 bne- 1b \n"
7950+"5:"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ _ASM_EXTABLE(3b, 5b)
7954+#endif
7955+
7956 PPC_ATOMIC_EXIT_BARRIER
7957 " subf %0,%2,%0 \n\
7958 2:"
7959@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7960 }
7961 #define atomic_dec_if_positive atomic_dec_if_positive
7962
7963+#define smp_mb__before_atomic_dec() smp_mb()
7964+#define smp_mb__after_atomic_dec() smp_mb()
7965+#define smp_mb__before_atomic_inc() smp_mb()
7966+#define smp_mb__after_atomic_inc() smp_mb()
7967+
7968 #ifdef __powerpc64__
7969
7970 #define ATOMIC64_INIT(i) { (i) }
7971@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7972 return t;
7973 }
7974
7975+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7976+{
7977+ long t;
7978+
7979+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7980+
7981+ return t;
7982+}
7983+
7984 static __inline__ void atomic64_set(atomic64_t *v, long i)
7985 {
7986 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7987 }
7988
7989-#define ATOMIC64_OP(op, asm_op) \
7990-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7991+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7992+{
7993+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7994+}
7995+
7996+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7997+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
7998 { \
7999 long t; \
8000 \
8001 __asm__ __volatile__( \
8002 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8003+ pre_op \
8004 #asm_op " %0,%2,%0\n" \
8005+ post_op \
8006 " stdcx. %0,0,%3 \n" \
8007 " bne- 1b\n" \
8008+ extable \
8009 : "=&r" (t), "+m" (v->counter) \
8010 : "r" (a), "r" (&v->counter) \
8011 : "cc"); \
8012 }
8013
8014-#define ATOMIC64_OP_RETURN(op, asm_op) \
8015-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8016+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8017+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8018+
8019+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8020+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8021 { \
8022 long t; \
8023 \
8024 __asm__ __volatile__( \
8025 PPC_ATOMIC_ENTRY_BARRIER \
8026 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8027+ pre_op \
8028 #asm_op " %0,%1,%0\n" \
8029+ post_op \
8030 " stdcx. %0,0,%2 \n" \
8031 " bne- 1b\n" \
8032+ extable \
8033 PPC_ATOMIC_EXIT_BARRIER \
8034 : "=&r" (t) \
8035 : "r" (a), "r" (&v->counter) \
8036@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8037 return t; \
8038 }
8039
8040+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8041+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8042+
8043 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8044
8045 ATOMIC64_OPS(add, add)
8046@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8047
8048 #undef ATOMIC64_OPS
8049 #undef ATOMIC64_OP_RETURN
8050+#undef __ATOMIC64_OP_RETURN
8051 #undef ATOMIC64_OP
8052+#undef __ATOMIC64_OP
8053+#undef __OVERFLOW_EXTABLE
8054+#undef __OVERFLOW_POST
8055+#undef __OVERFLOW_PRE
8056+#undef __REFCOUNT_OP
8057
8058 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8059
8060-static __inline__ void atomic64_inc(atomic64_t *v)
8061-{
8062- long t;
8063+/*
8064+ * atomic64_inc - increment atomic variable
8065+ * @v: pointer of type atomic64_t
8066+ *
8067+ * Atomically increments @v by 1
8068+ */
8069+#define atomic64_inc(v) atomic64_add(1, (v))
8070+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8071
8072- __asm__ __volatile__(
8073-"1: ldarx %0,0,%2 # atomic64_inc\n\
8074- addic %0,%0,1\n\
8075- stdcx. %0,0,%2 \n\
8076- bne- 1b"
8077- : "=&r" (t), "+m" (v->counter)
8078- : "r" (&v->counter)
8079- : "cc", "xer");
8080+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8081+{
8082+ atomic64_add_unchecked(1, v);
8083 }
8084
8085-static __inline__ long atomic64_inc_return(atomic64_t *v)
8086+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8087 {
8088- long t;
8089-
8090- __asm__ __volatile__(
8091- PPC_ATOMIC_ENTRY_BARRIER
8092-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8093- addic %0,%0,1\n\
8094- stdcx. %0,0,%1 \n\
8095- bne- 1b"
8096- PPC_ATOMIC_EXIT_BARRIER
8097- : "=&r" (t)
8098- : "r" (&v->counter)
8099- : "cc", "xer", "memory");
8100-
8101- return t;
8102+ return atomic64_add_return_unchecked(1, v);
8103 }
8104
8105 /*
8106@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8107 */
8108 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8109
8110-static __inline__ void atomic64_dec(atomic64_t *v)
8111+/*
8112+ * atomic64_dec - decrement atomic variable
8113+ * @v: pointer of type atomic64_t
8114+ *
8115+ * Atomically decrements @v by 1
8116+ */
8117+#define atomic64_dec(v) atomic64_sub(1, (v))
8118+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8119+
8120+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8121 {
8122- long t;
8123-
8124- __asm__ __volatile__(
8125-"1: ldarx %0,0,%2 # atomic64_dec\n\
8126- addic %0,%0,-1\n\
8127- stdcx. %0,0,%2\n\
8128- bne- 1b"
8129- : "=&r" (t), "+m" (v->counter)
8130- : "r" (&v->counter)
8131- : "cc", "xer");
8132-}
8133-
8134-static __inline__ long atomic64_dec_return(atomic64_t *v)
8135-{
8136- long t;
8137-
8138- __asm__ __volatile__(
8139- PPC_ATOMIC_ENTRY_BARRIER
8140-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8141- addic %0,%0,-1\n\
8142- stdcx. %0,0,%1\n\
8143- bne- 1b"
8144- PPC_ATOMIC_EXIT_BARRIER
8145- : "=&r" (t)
8146- : "r" (&v->counter)
8147- : "cc", "xer", "memory");
8148-
8149- return t;
8150+ atomic64_sub_unchecked(1, v);
8151 }
8152
8153 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8154@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8155 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8156 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8157
8158+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8159+{
8160+ return cmpxchg(&(v->counter), old, new);
8161+}
8162+
8163+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8164+{
8165+ return xchg(&(v->counter), new);
8166+}
8167+
8168 /**
8169 * atomic64_add_unless - add unless the number is a given value
8170 * @v: pointer of type atomic64_t
8171@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8172
8173 __asm__ __volatile__ (
8174 PPC_ATOMIC_ENTRY_BARRIER
8175-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8176+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8177 cmpd 0,%0,%3 \n\
8178- beq- 2f \n\
8179- add %0,%2,%0 \n"
8180+ beq- 2f \n"
8181+
8182+#ifdef CONFIG_PAX_REFCOUNT
8183+" mcrxr cr0\n"
8184+" addo. %0,%2,%0\n"
8185+" bf 4*cr0+so, 4f\n"
8186+"3:.long " "0x00c00b00""\n"
8187+"4:\n"
8188+#else
8189+ "add %0,%2,%0 \n"
8190+#endif
8191+
8192 " stdcx. %0,0,%1 \n\
8193 bne- 1b \n"
8194 PPC_ATOMIC_EXIT_BARRIER
8195+"5:"
8196+
8197+#ifdef CONFIG_PAX_REFCOUNT
8198+ _ASM_EXTABLE(3b, 5b)
8199+#endif
8200+
8201 " subf %0,%2,%0 \n\
8202 2:"
8203 : "=&r" (t)
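The re-parameterised __ATOMIC_OP* templates above generate each operation twice: a checked variant whose mcrxr/addo./bf sequence executes a trapping word on overflow (with an __ex_table entry to resume past it), and an _unchecked variant with a plain add. The same generate-both-variants-from-one-template trick, reduced to portable C (demo_* names are illustrative; __builtin_add_overflow stands in for the trapping asm):

#include <stdio.h>

#define __GEN_ADD(suffix, checked)                                      \
static int demo_add##suffix(int a, int *v)                              \
{                                                                       \
    if (checked) {                                                      \
        if (__builtin_add_overflow(*v, a, v))                           \
            __builtin_trap();  /* kernel: trap word + extable fixup */  \
    } else {                                                            \
        *v += a;               /* plain wrapping add */                 \
    }                                                                   \
    return *v;                                                          \
}

__GEN_ADD(, 1)              /* demo_add(): overflow-checked */
__GEN_ADD(_unchecked, 0)    /* demo_add_unchecked(): may wrap */

int main(void)
{
    int x = 40;
    printf("%d\n", demo_add(2, &x));    /* 42 */
    return 0;
}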
8204diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8205index a3bf5be..e03ba81 100644
8206--- a/arch/powerpc/include/asm/barrier.h
8207+++ b/arch/powerpc/include/asm/barrier.h
8208@@ -76,7 +76,7 @@
8209 do { \
8210 compiletime_assert_atomic_type(*p); \
8211 smp_lwsync(); \
8212- ACCESS_ONCE(*p) = (v); \
8213+ ACCESS_ONCE_RW(*p) = (v); \
8214 } while (0)
8215
8216 #define smp_load_acquire(p) \
8217diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8218index 34a05a1..a1f2c67 100644
8219--- a/arch/powerpc/include/asm/cache.h
8220+++ b/arch/powerpc/include/asm/cache.h
8221@@ -4,6 +4,7 @@
8222 #ifdef __KERNEL__
8223
8224 #include <asm/reg.h>
8225+#include <linux/const.h>
8226
8227 /* bytes per L1 cache line */
8228 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8229@@ -23,7 +24,7 @@
8230 #define L1_CACHE_SHIFT 7
8231 #endif
8232
8233-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8234+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8235
8236 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8237
8238diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8239index 57d289a..b36c98c 100644
8240--- a/arch/powerpc/include/asm/elf.h
8241+++ b/arch/powerpc/include/asm/elf.h
8242@@ -30,6 +30,18 @@
8243
8244 #define ELF_ET_DYN_BASE 0x20000000
8245
8246+#ifdef CONFIG_PAX_ASLR
8247+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8248+
8249+#ifdef __powerpc64__
8250+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8251+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8252+#else
8253+#define PAX_DELTA_MMAP_LEN 15
8254+#define PAX_DELTA_STACK_LEN 15
8255+#endif
8256+#endif
8257+
8258 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8259
8260 /*
8261@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8262 (0x7ff >> (PAGE_SHIFT - 12)) : \
8263 (0x3ffff >> (PAGE_SHIFT - 12)))
8264
8265-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8266-#define arch_randomize_brk arch_randomize_brk
8267-
8268-
8269 #ifdef CONFIG_SPU_BASE
8270 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8271 #define NT_SPU 1
8272diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8273index 8196e9c..d83a9f3 100644
8274--- a/arch/powerpc/include/asm/exec.h
8275+++ b/arch/powerpc/include/asm/exec.h
8276@@ -4,6 +4,6 @@
8277 #ifndef _ASM_POWERPC_EXEC_H
8278 #define _ASM_POWERPC_EXEC_H
8279
8280-extern unsigned long arch_align_stack(unsigned long sp);
8281+#define arch_align_stack(x) ((x) & ~0xfUL)
8282
8283 #endif /* _ASM_POWERPC_EXEC_H */
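arch_align_stack() thus loses its randomisation and becomes a pure 16-byte alignment mask, with stack randomisation presumably left to PaX's own mechanisms. A quick check of the new behaviour:

#include <assert.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    assert(arch_align_stack(0xdeadbeefUL) == 0xdeadbee0UL);
    return 0;
}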
8284diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8285index 5acabbd..7ea14fa 100644
8286--- a/arch/powerpc/include/asm/kmap_types.h
8287+++ b/arch/powerpc/include/asm/kmap_types.h
8288@@ -10,7 +10,7 @@
8289 * 2 of the License, or (at your option) any later version.
8290 */
8291
8292-#define KM_TYPE_NR 16
8293+#define KM_TYPE_NR 17
8294
8295 #endif /* __KERNEL__ */
8296 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8297diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8298index b8da913..c02b593 100644
8299--- a/arch/powerpc/include/asm/local.h
8300+++ b/arch/powerpc/include/asm/local.h
8301@@ -9,21 +9,65 @@ typedef struct
8302 atomic_long_t a;
8303 } local_t;
8304
8305+typedef struct
8306+{
8307+ atomic_long_unchecked_t a;
8308+} local_unchecked_t;
8309+
8310 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8311
8312 #define local_read(l) atomic_long_read(&(l)->a)
8313+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8314 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8315+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8316
8317 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8318+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8319 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8320+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8321 #define local_inc(l) atomic_long_inc(&(l)->a)
8322+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8323 #define local_dec(l) atomic_long_dec(&(l)->a)
8324+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8325
8326 static __inline__ long local_add_return(long a, local_t *l)
8327 {
8328 long t;
8329
8330 __asm__ __volatile__(
8331+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8332+
8333+#ifdef CONFIG_PAX_REFCOUNT
8334+" mcrxr cr0\n"
8335+" addo. %0,%1,%0\n"
8336+" bf 4*cr0+so, 3f\n"
8337+"2:.long " "0x00c00b00""\n"
8338+#else
8339+" add %0,%1,%0\n"
8340+#endif
8341+
8342+"3:\n"
8343+ PPC405_ERR77(0,%2)
8344+ PPC_STLCX "%0,0,%2 \n\
8345+ bne- 1b"
8346+
8347+#ifdef CONFIG_PAX_REFCOUNT
8348+"\n4:\n"
8349+ _ASM_EXTABLE(2b, 4b)
8350+#endif
8351+
8352+ : "=&r" (t)
8353+ : "r" (a), "r" (&(l->a.counter))
8354+ : "cc", "memory");
8355+
8356+ return t;
8357+}
8358+
8359+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8360+{
8361+ long t;
8362+
8363+ __asm__ __volatile__(
8364 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8365 add %0,%1,%0\n"
8366 PPC405_ERR77(0,%2)
8367@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8368
8369 #define local_cmpxchg(l, o, n) \
8370 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8371+#define local_cmpxchg_unchecked(l, o, n) \
8372+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8373 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8374
8375 /**
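The CONFIG_PAX_REFCOUNT branch above adds with overflow recording (addo.), branches past a planted invalid instruction word (0x00c00b00) when no overflow occurred, and otherwise traps into the exception-table fixup so the wrapped value is never stored. A plain-C analogue of that detect-and-refuse-to-wrap idea — not the atomic larx/stcx. version, and the reporting call is a stand-in:

    #include <stdio.h>

    /* Illustrative only: on signed overflow, report and leave the
     * counter untouched, roughly what the fixup path achieves. */
    static long checked_add_return(long a, long *counter)
    {
        long new;

        if (__builtin_add_overflow(*counter, a, &new)) {
            fprintf(stderr, "refcount overflow\n"); /* stand-in for pax_report_refcount_overflow() */
            return *counter;   /* counter unchanged, as after the trap */
        }
        *counter = new;
        return new;
    }
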
8376diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8377index 8565c25..2865190 100644
8378--- a/arch/powerpc/include/asm/mman.h
8379+++ b/arch/powerpc/include/asm/mman.h
8380@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8381 }
8382 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8383
8384-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8385+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8386 {
8387 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8388 }
8389diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8390index 69c0598..2c56964 100644
8391--- a/arch/powerpc/include/asm/page.h
8392+++ b/arch/powerpc/include/asm/page.h
8393@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8394 * and needs to be executable. This means the whole heap ends
8395 * up being executable.
8396 */
8397-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8398- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8399+#define VM_DATA_DEFAULT_FLAGS32 \
8400+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8401+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8402
8403 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8404 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8405@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8406 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8407 #endif
8408
8409+#define ktla_ktva(addr) (addr)
8410+#define ktva_ktla(addr) (addr)
8411+
8412 #ifndef CONFIG_PPC_BOOK3S_64
8413 /*
8414 * Use the top bit of the higher-level page table entries to indicate whether
8415diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8416index d908a46..3753f71 100644
8417--- a/arch/powerpc/include/asm/page_64.h
8418+++ b/arch/powerpc/include/asm/page_64.h
8419@@ -172,15 +172,18 @@ do { \
8420 * stack by default, so in the absence of a PT_GNU_STACK program header
8421 * we turn execute permission off.
8422 */
8423-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8424- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8425+#define VM_STACK_DEFAULT_FLAGS32 \
8426+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8427+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8428
8429 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8430 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8431
8432+#ifndef CONFIG_PAX_PAGEEXEC
8433 #define VM_STACK_DEFAULT_FLAGS \
8434 (is_32bit_task() ? \
8435 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8436+#endif
8437
8438 #include <asm-generic/getorder.h>
8439
8440diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8441index 4b0be20..c15a27d 100644
8442--- a/arch/powerpc/include/asm/pgalloc-64.h
8443+++ b/arch/powerpc/include/asm/pgalloc-64.h
8444@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8445 #ifndef CONFIG_PPC_64K_PAGES
8446
8447 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8448+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8449
8450 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8451 {
8452@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8453 pud_set(pud, (unsigned long)pmd);
8454 }
8455
8456+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8457+{
8458+ pud_populate(mm, pud, pmd);
8459+}
8460+
8461 #define pmd_populate(mm, pmd, pte_page) \
8462 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8463 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8464@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8465 #endif
8466
8467 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8468+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8469
8470 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8471 pte_t *pte)
8472diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8473index 9835ac4..900430f 100644
8474--- a/arch/powerpc/include/asm/pgtable.h
8475+++ b/arch/powerpc/include/asm/pgtable.h
8476@@ -2,6 +2,7 @@
8477 #define _ASM_POWERPC_PGTABLE_H
8478 #ifdef __KERNEL__
8479
8480+#include <linux/const.h>
8481 #ifndef __ASSEMBLY__
8482 #include <linux/mmdebug.h>
8483 #include <linux/mmzone.h>
8484diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8485index 62cfb0c..50c6402 100644
8486--- a/arch/powerpc/include/asm/pte-hash32.h
8487+++ b/arch/powerpc/include/asm/pte-hash32.h
8488@@ -20,6 +20,7 @@
8489 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8490 #define _PAGE_USER 0x004 /* usermode access allowed */
8491 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8492+#define _PAGE_EXEC _PAGE_GUARDED
8493 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8494 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8495 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8496diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8497index af56b5c..f86f3f6 100644
8498--- a/arch/powerpc/include/asm/reg.h
8499+++ b/arch/powerpc/include/asm/reg.h
8500@@ -253,6 +253,7 @@
8501 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8502 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8503 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8504+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8505 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8506 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8507 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8508diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8509index d607df5..08dc9ae 100644
8510--- a/arch/powerpc/include/asm/smp.h
8511+++ b/arch/powerpc/include/asm/smp.h
8512@@ -51,7 +51,7 @@ struct smp_ops_t {
8513 int (*cpu_disable)(void);
8514 void (*cpu_die)(unsigned int nr);
8515 int (*cpu_bootable)(unsigned int nr);
8516-};
8517+} __no_const;
8518
8519 extern void smp_send_debugger_break(void);
8520 extern void start_secondary_resume(void);
8521diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8522index 4dbe072..b803275 100644
8523--- a/arch/powerpc/include/asm/spinlock.h
8524+++ b/arch/powerpc/include/asm/spinlock.h
8525@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8526 __asm__ __volatile__(
8527 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8528 __DO_SIGN_EXTEND
8529-" addic. %0,%0,1\n\
8530- ble- 2f\n"
8531+
8532+#ifdef CONFIG_PAX_REFCOUNT
8533+" mcrxr cr0\n"
8534+" addico. %0,%0,1\n"
8535+" bf 4*cr0+so, 3f\n"
8536+"2:.long " "0x00c00b00""\n"
8537+#else
8538+" addic. %0,%0,1\n"
8539+#endif
8540+
8541+"3:\n"
8542+ "ble- 4f\n"
8543 PPC405_ERR77(0,%1)
8544 " stwcx. %0,0,%1\n\
8545 bne- 1b\n"
8546 PPC_ACQUIRE_BARRIER
8547-"2:" : "=&r" (tmp)
8548+"4:"
8549+
8550+#ifdef CONFIG_PAX_REFCOUNT
8551+ _ASM_EXTABLE(2b,4b)
8552+#endif
8553+
8554+ : "=&r" (tmp)
8555 : "r" (&rw->lock)
8556 : "cr0", "xer", "memory");
8557
8558@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8559 __asm__ __volatile__(
8560 "# read_unlock\n\t"
8561 PPC_RELEASE_BARRIER
8562-"1: lwarx %0,0,%1\n\
8563- addic %0,%0,-1\n"
8564+"1: lwarx %0,0,%1\n"
8565+
8566+#ifdef CONFIG_PAX_REFCOUNT
8567+" mcrxr cr0\n"
8568+" addico. %0,%0,-1\n"
8569+" bf 4*cr0+so, 3f\n"
8570+"2:.long " "0x00c00b00""\n"
8571+#else
8572+" addic. %0,%0,-1\n"
8573+#endif
8574+
8575+"3:\n"
8576 PPC405_ERR77(0,%1)
8577 " stwcx. %0,0,%1\n\
8578 bne- 1b"
8579+
8580+#ifdef CONFIG_PAX_REFCOUNT
8581+"\n4:\n"
8582+ _ASM_EXTABLE(2b, 4b)
8583+#endif
8584+
8585 : "=&r"(tmp)
8586 : "r"(&rw->lock)
8587 : "cr0", "xer", "memory");
8588diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8589index 7248979..80b75de 100644
8590--- a/arch/powerpc/include/asm/thread_info.h
8591+++ b/arch/powerpc/include/asm/thread_info.h
8592@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
8593 #if defined(CONFIG_PPC64)
8594 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8595 #endif
8596+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8597+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8598
8599 /* as above, but as bit values */
8600 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8601@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
8602 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8603 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8604 #define _TIF_NOHZ (1<<TIF_NOHZ)
8605+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8606 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8607 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8608- _TIF_NOHZ)
8609+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8610
8611 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8612 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
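The comment in this hunk refers to a real encoding limit: PowerPC's andi. takes a 16-bit unsigned immediate, so any flag mask tested that way must sit in the low 16 bits. A compile-time check of that property (illustrative, not part of the patch):

    #define TIF_GRSEC_SETXID  6
    #define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)

    /* andi. rA,rS,UI encodes UI in 16 bits, so the mask must be < 2^16 */
    _Static_assert(_TIF_GRSEC_SETXID < (1 << 16),
                   "TIF mask must fit andi.'s 16-bit immediate");
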
8613diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8614index a0c071d..49cdc7f 100644
8615--- a/arch/powerpc/include/asm/uaccess.h
8616+++ b/arch/powerpc/include/asm/uaccess.h
8617@@ -58,6 +58,7 @@
8618
8619 #endif
8620
8621+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8622 #define access_ok(type, addr, size) \
8623 (__chk_user_ptr(addr), \
8624 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8625@@ -318,52 +319,6 @@ do { \
8626 extern unsigned long __copy_tofrom_user(void __user *to,
8627 const void __user *from, unsigned long size);
8628
8629-#ifndef __powerpc64__
8630-
8631-static inline unsigned long copy_from_user(void *to,
8632- const void __user *from, unsigned long n)
8633-{
8634- unsigned long over;
8635-
8636- if (access_ok(VERIFY_READ, from, n))
8637- return __copy_tofrom_user((__force void __user *)to, from, n);
8638- if ((unsigned long)from < TASK_SIZE) {
8639- over = (unsigned long)from + n - TASK_SIZE;
8640- return __copy_tofrom_user((__force void __user *)to, from,
8641- n - over) + over;
8642- }
8643- return n;
8644-}
8645-
8646-static inline unsigned long copy_to_user(void __user *to,
8647- const void *from, unsigned long n)
8648-{
8649- unsigned long over;
8650-
8651- if (access_ok(VERIFY_WRITE, to, n))
8652- return __copy_tofrom_user(to, (__force void __user *)from, n);
8653- if ((unsigned long)to < TASK_SIZE) {
8654- over = (unsigned long)to + n - TASK_SIZE;
8655- return __copy_tofrom_user(to, (__force void __user *)from,
8656- n - over) + over;
8657- }
8658- return n;
8659-}
8660-
8661-#else /* __powerpc64__ */
8662-
8663-#define __copy_in_user(to, from, size) \
8664- __copy_tofrom_user((to), (from), (size))
8665-
8666-extern unsigned long copy_from_user(void *to, const void __user *from,
8667- unsigned long n);
8668-extern unsigned long copy_to_user(void __user *to, const void *from,
8669- unsigned long n);
8670-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8671- unsigned long n);
8672-
8673-#endif /* __powerpc64__ */
8674-
8675 static inline unsigned long __copy_from_user_inatomic(void *to,
8676 const void __user *from, unsigned long n)
8677 {
8678@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8679 if (ret == 0)
8680 return 0;
8681 }
8682+
8683+ if (!__builtin_constant_p(n))
8684+ check_object_size(to, n, false);
8685+
8686 return __copy_tofrom_user((__force void __user *)to, from, n);
8687 }
8688
8689@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8690 if (ret == 0)
8691 return 0;
8692 }
8693+
8694+ if (!__builtin_constant_p(n))
8695+ check_object_size(from, n, true);
8696+
8697 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8698 }
8699
8700@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8701 return __copy_to_user_inatomic(to, from, size);
8702 }
8703
8704+#ifndef __powerpc64__
8705+
8706+static inline unsigned long __must_check copy_from_user(void *to,
8707+ const void __user *from, unsigned long n)
8708+{
8709+ unsigned long over;
8710+
8711+ if ((long)n < 0)
8712+ return n;
8713+
8714+ if (access_ok(VERIFY_READ, from, n)) {
8715+ if (!__builtin_constant_p(n))
8716+ check_object_size(to, n, false);
8717+ return __copy_tofrom_user((__force void __user *)to, from, n);
8718+ }
8719+ if ((unsigned long)from < TASK_SIZE) {
8720+ over = (unsigned long)from + n - TASK_SIZE;
8721+ if (!__builtin_constant_p(n - over))
8722+ check_object_size(to, n - over, false);
8723+ return __copy_tofrom_user((__force void __user *)to, from,
8724+ n - over) + over;
8725+ }
8726+ return n;
8727+}
8728+
8729+static inline unsigned long __must_check copy_to_user(void __user *to,
8730+ const void *from, unsigned long n)
8731+{
8732+ unsigned long over;
8733+
8734+ if ((long)n < 0)
8735+ return n;
8736+
8737+ if (access_ok(VERIFY_WRITE, to, n)) {
8738+ if (!__builtin_constant_p(n))
8739+ check_object_size(from, n, true);
8740+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8741+ }
8742+ if ((unsigned long)to < TASK_SIZE) {
8743+ over = (unsigned long)to + n - TASK_SIZE;
8744+ if (!__builtin_constant_p(n))
8745+ check_object_size(from, n - over, true);
8746+ return __copy_tofrom_user(to, (__force void __user *)from,
8747+ n - over) + over;
8748+ }
8749+ return n;
8750+}
8751+
8752+#else /* __powerpc64__ */
8753+
8754+#define __copy_in_user(to, from, size) \
8755+ __copy_tofrom_user((to), (from), (size))
8756+
8757+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8758+{
8759+ if ((long)n < 0 || n > INT_MAX)
8760+ return n;
8761+
8762+ if (!__builtin_constant_p(n))
8763+ check_object_size(to, n, false);
8764+
8765+ if (likely(access_ok(VERIFY_READ, from, n)))
8766+ n = __copy_from_user(to, from, n);
8767+ else
8768+ memset(to, 0, n);
8769+ return n;
8770+}
8771+
8772+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8773+{
8774+ if ((long)n < 0 || n > INT_MAX)
8775+ return n;
8776+
8777+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8778+ if (!__builtin_constant_p(n))
8779+ check_object_size(from, n, true);
8780+ n = __copy_to_user(to, from, n);
8781+ }
8782+ return n;
8783+}
8784+
8785+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8786+ unsigned long n);
8787+
8788+#endif /* __powerpc64__ */
8789+
8790 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8791
8792 static inline unsigned long clear_user(void __user *addr, unsigned long size)
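The reworked copy helpers above layer three checks: a sign test rejecting lengths that look like negative values cast to unsigned, a check_object_size() bounds check when the length is not a compile-time constant, and (on the 64-bit path) zeroing of the destination when the source fails access_ok(). A freestanding sketch of that shape — the access and bounds helpers here are stubs, not kernel API:

    #include <string.h>

    static int  range_ok(const void *p, unsigned long n) { (void)p; (void)n; return 1; } /* stand-in for access_ok() */
    static void bounds_check(const void *p, unsigned long n) { (void)p; (void)n; }       /* stand-in for check_object_size() */

    static unsigned long copy_from_user_sketch(void *to, const void *from,
                                               unsigned long n)
    {
        if ((long)n < 0)           /* negative length cast to unsigned: refuse */
            return n;

        if (!range_ok(from, n)) {
            memset(to, 0, n);      /* never leave kernel memory uninitialized */
            return n;
        }
        if (!__builtin_constant_p(n))
            bounds_check(to, n);   /* PAX_USERCOPY-style object bounds check */
        memcpy(to, from, n);
        return 0;                  /* convention: bytes NOT copied */
    }
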
8793diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8794index 502cf69..53936a1 100644
8795--- a/arch/powerpc/kernel/Makefile
8796+++ b/arch/powerpc/kernel/Makefile
8797@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8798 CFLAGS_btext.o += -fPIC
8799 endif
8800
8801+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8802+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8803+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8804+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8805+
8806 ifdef CONFIG_FUNCTION_TRACER
8807 # Do not trace early boot code
8808 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8809@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8810 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8811 endif
8812
8813+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8814+
8815 obj-y := cputable.o ptrace.o syscalls.o \
8816 irq.o align.o signal_32.o pmc.o vdso.o \
8817 process.o systbl.o idle.o \
8818diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8819index 3e68d1c..72a5ee6 100644
8820--- a/arch/powerpc/kernel/exceptions-64e.S
8821+++ b/arch/powerpc/kernel/exceptions-64e.S
8822@@ -1010,6 +1010,7 @@ storage_fault_common:
8823 std r14,_DAR(r1)
8824 std r15,_DSISR(r1)
8825 addi r3,r1,STACK_FRAME_OVERHEAD
8826+ bl save_nvgprs
8827 mr r4,r14
8828 mr r5,r15
8829 ld r14,PACA_EXGEN+EX_R14(r13)
8830@@ -1018,8 +1019,7 @@ storage_fault_common:
8831 cmpdi r3,0
8832 bne- 1f
8833 b ret_from_except_lite
8834-1: bl save_nvgprs
8835- mr r5,r3
8836+1: mr r5,r3
8837 addi r3,r1,STACK_FRAME_OVERHEAD
8838 ld r4,_DAR(r1)
8839 bl bad_page_fault
8840diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8841index 9519e6b..13f6c38 100644
8842--- a/arch/powerpc/kernel/exceptions-64s.S
8843+++ b/arch/powerpc/kernel/exceptions-64s.S
8844@@ -1599,10 +1599,10 @@ handle_page_fault:
8845 11: ld r4,_DAR(r1)
8846 ld r5,_DSISR(r1)
8847 addi r3,r1,STACK_FRAME_OVERHEAD
8848+ bl save_nvgprs
8849 bl do_page_fault
8850 cmpdi r3,0
8851 beq+ 12f
8852- bl save_nvgprs
8853 mr r5,r3
8854 addi r3,r1,STACK_FRAME_OVERHEAD
8855 lwz r4,_DAR(r1)
8856diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8857index 4509603..cdb491f 100644
8858--- a/arch/powerpc/kernel/irq.c
8859+++ b/arch/powerpc/kernel/irq.c
8860@@ -460,6 +460,8 @@ void migrate_irqs(void)
8861 }
8862 #endif
8863
8864+extern void gr_handle_kernel_exploit(void);
8865+
8866 static inline void check_stack_overflow(void)
8867 {
8868 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8869@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8870 pr_err("do_IRQ: stack overflow: %ld\n",
8871 sp - sizeof(struct thread_info));
8872 dump_stack();
8873+ gr_handle_kernel_exploit();
8874 }
8875 #endif
8876 }
8877diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8878index c94d2e0..992a9ce 100644
8879--- a/arch/powerpc/kernel/module_32.c
8880+++ b/arch/powerpc/kernel/module_32.c
8881@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8882 me->arch.core_plt_section = i;
8883 }
8884 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8885- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8886+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8887 return -ENOEXEC;
8888 }
8889
8890@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8891
8892 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8893 /* Init, or core PLT? */
8894- if (location >= mod->module_core
8895- && location < mod->module_core + mod->core_size)
8896+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8897+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8898 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8899- else
8900+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8901+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8902 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8903+ else {
8904+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8905+ return ~0UL;
8906+ }
8907
8908 /* Find this entry, or if that fails, the next avail. entry */
8909 while (entry->jump[0]) {
8910@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8911 }
8912 #ifdef CONFIG_DYNAMIC_FTRACE
8913 module->arch.tramp =
8914- do_plt_call(module->module_core,
8915+ do_plt_call(module->module_core_rx,
8916 (unsigned long)ftrace_caller,
8917 sechdrs, module);
8918 #endif
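do_plt_call() above now distinguishes four ranges because, under PaX's KERNEXEC module layout, a module image is split into read-execute and read-write halves, each with core and init variants. The membership test reduces to interval checks like this sketch (the struct fields mirror the patched struct module but are local stand-ins):

    #include <stdbool.h>

    struct mod_layout {
        const char *core_rx; unsigned long core_size_rx;
        const char *core_rw; unsigned long core_size_rw;
    };

    static bool in_core(const struct mod_layout *m, const char *loc)
    {
        return (loc >= m->core_rx && loc < m->core_rx + m->core_size_rx) ||
               (loc >= m->core_rw && loc < m->core_rw + m->core_size_rw);
    }
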
8919diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8920index b4cc7be..1fe8bb3 100644
8921--- a/arch/powerpc/kernel/process.c
8922+++ b/arch/powerpc/kernel/process.c
8923@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8924 * Lookup NIP late so we have the best chance of getting the
8925 * above info out without failing
8926 */
8927- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8928- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8929+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8930+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8931 #endif
8932 show_stack(current, (unsigned long *) regs->gpr[1]);
8933 if (!user_mode(regs))
8934@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8935 newsp = stack[0];
8936 ip = stack[STACK_FRAME_LR_SAVE];
8937 if (!firstframe || ip != lr) {
8938- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8939+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8940 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8941 if ((ip == rth) && curr_frame >= 0) {
8942- printk(" (%pS)",
8943+ printk(" (%pA)",
8944 (void *)current->ret_stack[curr_frame].ret);
8945 curr_frame--;
8946 }
8947@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8948 struct pt_regs *regs = (struct pt_regs *)
8949 (sp + STACK_FRAME_OVERHEAD);
8950 lr = regs->link;
8951- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8952+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8953 regs->trap, (void *)regs->nip, (void *)lr);
8954 firstframe = 1;
8955 }
8956@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8957 mtspr(SPRN_CTRLT, ctrl);
8958 }
8959 #endif /* CONFIG_PPC64 */
8960-
8961-unsigned long arch_align_stack(unsigned long sp)
8962-{
8963- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8964- sp -= get_random_int() & ~PAGE_MASK;
8965- return sp & ~0xf;
8966-}
8967-
8968-static inline unsigned long brk_rnd(void)
8969-{
8970- unsigned long rnd = 0;
8971-
8972- /* 8MB for 32bit, 1GB for 64bit */
8973- if (is_32bit_task())
8974- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8975- else
8976- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8977-
8978- return rnd << PAGE_SHIFT;
8979-}
8980-
8981-unsigned long arch_randomize_brk(struct mm_struct *mm)
8982-{
8983- unsigned long base = mm->brk;
8984- unsigned long ret;
8985-
8986-#ifdef CONFIG_PPC_STD_MMU_64
8987- /*
8988- * If we are using 1TB segments and we are allowed to randomise
8989- * the heap, we can put it above 1TB so it is backed by a 1TB
8990- * segment. Otherwise the heap will be in the bottom 1TB
8991- * which always uses 256MB segments and this may result in a
8992- * performance penalty.
8993- */
8994- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8995- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8996-#endif
8997-
8998- ret = PAGE_ALIGN(base + brk_rnd());
8999-
9000- if (ret < mm->brk)
9001- return mm->brk;
9002-
9003- return ret;
9004-}
9005-
9006diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9007index f21897b..28c0428 100644
9008--- a/arch/powerpc/kernel/ptrace.c
9009+++ b/arch/powerpc/kernel/ptrace.c
9010@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9011 return ret;
9012 }
9013
9014+#ifdef CONFIG_GRKERNSEC_SETXID
9015+extern void gr_delayed_cred_worker(void);
9016+#endif
9017+
9018 /*
9019 * We must return the syscall number to actually look up in the table.
9020 * This can be -1L to skip running any syscall at all.
9021@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9022
9023 secure_computing_strict(regs->gpr[0]);
9024
9025+#ifdef CONFIG_GRKERNSEC_SETXID
9026+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9027+ gr_delayed_cred_worker();
9028+#endif
9029+
9030 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9031 tracehook_report_syscall_entry(regs))
9032 /*
9033@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9034 {
9035 int step;
9036
9037+#ifdef CONFIG_GRKERNSEC_SETXID
9038+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9039+ gr_delayed_cred_worker();
9040+#endif
9041+
9042 audit_syscall_exit(regs);
9043
9044 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
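The TIF_GRSEC_SETXID hooks above implement grsecurity's deferred credential update: another thread marks this task, and the mark is consumed at the next syscall entry or exit, where swapping credentials is safe. A minimal sketch of that test-and-clear rendezvous (names are stand-ins for the thread flag and worker):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool setxid_pending;          /* stand-in for the TIF bit */

    static void apply_delayed_creds(void) { }   /* stand-in for gr_delayed_cred_worker() */

    static void syscall_boundary_hook(void)
    {
        /* atomic test-and-clear, like test_and_clear_thread_flag() */
        if (atomic_exchange(&setxid_pending, false))
            apply_delayed_creds();
    }
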
9045diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9046index d3a831a..3a33123 100644
9047--- a/arch/powerpc/kernel/signal_32.c
9048+++ b/arch/powerpc/kernel/signal_32.c
9049@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9050 /* Save user registers on the stack */
9051 frame = &rt_sf->uc.uc_mcontext;
9052 addr = frame;
9053- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9054+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9055 sigret = 0;
9056 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9057 } else {
9058diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9059index c7c24d2..1bf7039 100644
9060--- a/arch/powerpc/kernel/signal_64.c
9061+++ b/arch/powerpc/kernel/signal_64.c
9062@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9063 current->thread.fp_state.fpscr = 0;
9064
9065 /* Set up to return from userspace. */
9066- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9067+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9068 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9069 } else {
9070 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9071diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9072index 19e4744..28a8d7b 100644
9073--- a/arch/powerpc/kernel/traps.c
9074+++ b/arch/powerpc/kernel/traps.c
9075@@ -36,6 +36,7 @@
9076 #include <linux/debugfs.h>
9077 #include <linux/ratelimit.h>
9078 #include <linux/context_tracking.h>
9079+#include <linux/uaccess.h>
9080
9081 #include <asm/emulated_ops.h>
9082 #include <asm/pgtable.h>
9083@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9084 return flags;
9085 }
9086
9087+extern void gr_handle_kernel_exploit(void);
9088+
9089 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9090 int signr)
9091 {
9092@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9093 panic("Fatal exception in interrupt");
9094 if (panic_on_oops)
9095 panic("Fatal exception");
9096+
9097+ gr_handle_kernel_exploit();
9098+
9099 do_exit(signr);
9100 }
9101
9102@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9103 enum ctx_state prev_state = exception_enter();
9104 unsigned int reason = get_reason(regs);
9105
9106+#ifdef CONFIG_PAX_REFCOUNT
9107+ unsigned int bkpt;
9108+ const struct exception_table_entry *entry;
9109+
9110+ if (reason & REASON_ILLEGAL) {
9111+ /* Check if PaX bad instruction */
9112+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9113+ current->thread.trap_nr = 0;
9114+ pax_report_refcount_overflow(regs);
9115+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9116+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9117+ regs->nip = entry->fixup;
9118+ return;
9119+ }
9120+ /* fixup_exception() could not handle */
9121+ goto bail;
9122+ }
9123+ }
9124+#endif
9125+
9126 /* We can now get here via a FP Unavailable exception if the core
9127 * has no FPU, in that case the reason flags will be 0 */
9128
9129diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9130index 305eb0d..accc5b40 100644
9131--- a/arch/powerpc/kernel/vdso.c
9132+++ b/arch/powerpc/kernel/vdso.c
9133@@ -34,6 +34,7 @@
9134 #include <asm/vdso.h>
9135 #include <asm/vdso_datapage.h>
9136 #include <asm/setup.h>
9137+#include <asm/mman.h>
9138
9139 #undef DEBUG
9140
9141@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9142 vdso_base = VDSO32_MBASE;
9143 #endif
9144
9145- current->mm->context.vdso_base = 0;
9146+ current->mm->context.vdso_base = ~0UL;
9147
9148 /* vDSO has a problem and was disabled, just don't "enable" it for the
9149 * process
9150@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9151 vdso_base = get_unmapped_area(NULL, vdso_base,
9152 (vdso_pages << PAGE_SHIFT) +
9153 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9154- 0, 0);
9155+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9156 if (IS_ERR_VALUE(vdso_base)) {
9157 rc = vdso_base;
9158 goto fail_mmapsem;
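The vdso_base change above swaps the "no vdso" sentinel from 0 to ~0UL, presumably because under mmap randomization a zero base is no longer safely impossible, while an all-ones address can never be a page-aligned mapping; the signal_32.c/signal_64.c hunks earlier test the same sentinel. Sketch (macro name illustrative):

    #define VDSO_UNMAPPED (~0UL)

    static int vdso_usable(unsigned long vdso_base)
    {
        /* matches the patched signal-frame checks:
         * vdso_base != ~0UL rather than vdso_base != 0 */
        return vdso_base != VDSO_UNMAPPED;
    }
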
9159diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9160index 27c0fac..6ec4a32 100644
9161--- a/arch/powerpc/kvm/powerpc.c
9162+++ b/arch/powerpc/kvm/powerpc.c
9163@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9164 }
9165 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9166
9167-int kvm_arch_init(void *opaque)
9168+int kvm_arch_init(const void *opaque)
9169 {
9170 return 0;
9171 }
9172diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9173index 5eea6f3..5d10396 100644
9174--- a/arch/powerpc/lib/usercopy_64.c
9175+++ b/arch/powerpc/lib/usercopy_64.c
9176@@ -9,22 +9,6 @@
9177 #include <linux/module.h>
9178 #include <asm/uaccess.h>
9179
9180-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9181-{
9182- if (likely(access_ok(VERIFY_READ, from, n)))
9183- n = __copy_from_user(to, from, n);
9184- else
9185- memset(to, 0, n);
9186- return n;
9187-}
9188-
9189-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9190-{
9191- if (likely(access_ok(VERIFY_WRITE, to, n)))
9192- n = __copy_to_user(to, from, n);
9193- return n;
9194-}
9195-
9196 unsigned long copy_in_user(void __user *to, const void __user *from,
9197 unsigned long n)
9198 {
9199@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9200 return n;
9201 }
9202
9203-EXPORT_SYMBOL(copy_from_user);
9204-EXPORT_SYMBOL(copy_to_user);
9205 EXPORT_SYMBOL(copy_in_user);
9206
9207diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9208index b396868..3eb6b9f 100644
9209--- a/arch/powerpc/mm/fault.c
9210+++ b/arch/powerpc/mm/fault.c
9211@@ -33,6 +33,10 @@
9212 #include <linux/ratelimit.h>
9213 #include <linux/context_tracking.h>
9214 #include <linux/hugetlb.h>
9215+#include <linux/slab.h>
9216+#include <linux/pagemap.h>
9217+#include <linux/compiler.h>
9218+#include <linux/unistd.h>
9219
9220 #include <asm/firmware.h>
9221 #include <asm/page.h>
9222@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9223 }
9224 #endif
9225
9226+#ifdef CONFIG_PAX_PAGEEXEC
9227+/*
9228+ * PaX: decide what to do with offenders (regs->nip = fault address)
9229+ *
9230+ * returns 1 when task should be killed
9231+ */
9232+static int pax_handle_fetch_fault(struct pt_regs *regs)
9233+{
9234+ return 1;
9235+}
9236+
9237+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9238+{
9239+ unsigned long i;
9240+
9241+ printk(KERN_ERR "PAX: bytes at PC: ");
9242+ for (i = 0; i < 5; i++) {
9243+ unsigned int c;
9244+ if (get_user(c, (unsigned int __user *)pc+i))
9245+ printk(KERN_CONT "???????? ");
9246+ else
9247+ printk(KERN_CONT "%08x ", c);
9248+ }
9249+ printk("\n");
9250+}
9251+#endif
9252+
9253 /*
9254 * Check whether the instruction at regs->nip is a store using
9255 * an update addressing form which will update r1.
9256@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9257 * indicate errors in DSISR but can validly be set in SRR1.
9258 */
9259 if (trap == 0x400)
9260- error_code &= 0x48200000;
9261+ error_code &= 0x58200000;
9262 else
9263 is_write = error_code & DSISR_ISSTORE;
9264 #else
9265@@ -383,12 +414,16 @@ good_area:
9266 * "undefined". Of those that can be set, this is the only
9267 * one which seems bad.
9268 */
9269- if (error_code & 0x10000000)
9270+ if (error_code & DSISR_GUARDED)
9271 /* Guarded storage error. */
9272 goto bad_area;
9273 #endif /* CONFIG_8xx */
9274
9275 if (is_exec) {
9276+#ifdef CONFIG_PPC_STD_MMU
9277+ if (error_code & DSISR_GUARDED)
9278+ goto bad_area;
9279+#endif
9280 /*
9281 * Allow execution from readable areas if the MMU does not
9282 * provide separate controls over reading and executing.
9283@@ -483,6 +518,23 @@ bad_area:
9284 bad_area_nosemaphore:
9285 /* User mode accesses cause a SIGSEGV */
9286 if (user_mode(regs)) {
9287+
9288+#ifdef CONFIG_PAX_PAGEEXEC
9289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9290+#ifdef CONFIG_PPC_STD_MMU
9291+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9292+#else
9293+ if (is_exec && regs->nip == address) {
9294+#endif
9295+ switch (pax_handle_fetch_fault(regs)) {
9296+ }
9297+
9298+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9299+ do_group_exit(SIGKILL);
9300+ }
9301+ }
9302+#endif
9303+
9304 _exception(SIGSEGV, regs, code, address);
9305 goto bail;
9306 }
9307diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9308index cb8bdbe..cde4bc7 100644
9309--- a/arch/powerpc/mm/mmap.c
9310+++ b/arch/powerpc/mm/mmap.c
9311@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9312 return sysctl_legacy_va_layout;
9313 }
9314
9315-static unsigned long mmap_rnd(void)
9316+static unsigned long mmap_rnd(struct mm_struct *mm)
9317 {
9318 unsigned long rnd = 0;
9319
9320+#ifdef CONFIG_PAX_RANDMMAP
9321+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9322+#endif
9323+
9324 if (current->flags & PF_RANDOMIZE) {
9325 /* 8MB for 32bit, 1GB for 64bit */
9326 if (is_32bit_task())
9327@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9328 return rnd << PAGE_SHIFT;
9329 }
9330
9331-static inline unsigned long mmap_base(void)
9332+static inline unsigned long mmap_base(struct mm_struct *mm)
9333 {
9334 unsigned long gap = rlimit(RLIMIT_STACK);
9335
9336@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9337 else if (gap > MAX_GAP)
9338 gap = MAX_GAP;
9339
9340- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9341+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9342 }
9343
9344 /*
9345@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9346 */
9347 if (mmap_is_legacy()) {
9348 mm->mmap_base = TASK_UNMAPPED_BASE;
9349+
9350+#ifdef CONFIG_PAX_RANDMMAP
9351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9352+ mm->mmap_base += mm->delta_mmap;
9353+#endif
9354+
9355 mm->get_unmapped_area = arch_get_unmapped_area;
9356 } else {
9357- mm->mmap_base = mmap_base();
9358+ mm->mmap_base = mmap_base(mm);
9359+
9360+#ifdef CONFIG_PAX_RANDMMAP
9361+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9362+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9363+#endif
9364+
9365 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9366 }
9367 }
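arch_pick_mmap_layout() above biases the base in opposite directions: legacy bottom-up layouts add delta_mmap, top-down layouts subtract delta_mmap + delta_stack so the gap below the stack is randomized too. Condensed, with the patch's mm fields taken as plain parameters:

    static unsigned long biased_mmap_base(unsigned long base, int topdown,
                                          unsigned long delta_mmap,
                                          unsigned long delta_stack)
    {
        return topdown ? base - (delta_mmap + delta_stack)
                       : base + delta_mmap;
    }
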
9368diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9369index 0f432a7..abfe841 100644
9370--- a/arch/powerpc/mm/slice.c
9371+++ b/arch/powerpc/mm/slice.c
9372@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9373 if ((mm->task_size - len) < addr)
9374 return 0;
9375 vma = find_vma(mm, addr);
9376- return (!vma || (addr + len) <= vma->vm_start);
9377+ return check_heap_stack_gap(vma, addr, len, 0);
9378 }
9379
9380 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9381@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9382 info.align_offset = 0;
9383
9384 addr = TASK_UNMAPPED_BASE;
9385+
9386+#ifdef CONFIG_PAX_RANDMMAP
9387+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9388+ addr += mm->delta_mmap;
9389+#endif
9390+
9391 while (addr < TASK_SIZE) {
9392 info.low_limit = addr;
9393 if (!slice_scan_available(addr, available, 1, &addr))
9394@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9395 if (fixed && addr > (mm->task_size - len))
9396 return -ENOMEM;
9397
9398+#ifdef CONFIG_PAX_RANDMMAP
9399+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9400+ addr = 0;
9401+#endif
9402+
9403 /* If hint, make sure it matches our alignment restrictions */
9404 if (!fixed && addr) {
9405 addr = _ALIGN_UP(addr, 1ul << pshift);
9406diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9407index d966bbe..372124a 100644
9408--- a/arch/powerpc/platforms/cell/spufs/file.c
9409+++ b/arch/powerpc/platforms/cell/spufs/file.c
9410@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9411 return VM_FAULT_NOPAGE;
9412 }
9413
9414-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9415+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9416 unsigned long address,
9417- void *buf, int len, int write)
9418+ void *buf, size_t len, int write)
9419 {
9420 struct spu_context *ctx = vma->vm_file->private_data;
9421 unsigned long offset = address - vma->vm_start;
9422diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9423index fa934fe..c296056 100644
9424--- a/arch/s390/include/asm/atomic.h
9425+++ b/arch/s390/include/asm/atomic.h
9426@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9427 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9428 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9429
9430+#define atomic64_read_unchecked(v) atomic64_read(v)
9431+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9432+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9433+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9434+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9435+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9436+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9437+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9438+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9439+
9440 #endif /* __ARCH_S390_ATOMIC__ */
9441diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9442index 8d72471..5322500 100644
9443--- a/arch/s390/include/asm/barrier.h
9444+++ b/arch/s390/include/asm/barrier.h
9445@@ -42,7 +42,7 @@
9446 do { \
9447 compiletime_assert_atomic_type(*p); \
9448 barrier(); \
9449- ACCESS_ONCE(*p) = (v); \
9450+ ACCESS_ONCE_RW(*p) = (v); \
9451 } while (0)
9452
9453 #define smp_load_acquire(p) \
9454diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9455index 4d7ccac..d03d0ad 100644
9456--- a/arch/s390/include/asm/cache.h
9457+++ b/arch/s390/include/asm/cache.h
9458@@ -9,8 +9,10 @@
9459 #ifndef __ARCH_S390_CACHE_H
9460 #define __ARCH_S390_CACHE_H
9461
9462-#define L1_CACHE_BYTES 256
9463+#include <linux/const.h>
9464+
9465 #define L1_CACHE_SHIFT 8
9466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9467 #define NET_SKB_PAD 32
9468
9469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9470diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9471index c9c875d..b4b0e4c 100644
9472--- a/arch/s390/include/asm/elf.h
9473+++ b/arch/s390/include/asm/elf.h
9474@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9475 the loader. We need to make sure that it is out of the way of the program
9476 that it will "exec", and that there is sufficient room for the brk. */
9477
9478-extern unsigned long randomize_et_dyn(void);
9479-#define ELF_ET_DYN_BASE randomize_et_dyn()
9480+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9481+
9482+#ifdef CONFIG_PAX_ASLR
9483+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9484+
9485+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9486+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9487+#endif
9488
9489 /* This yields a mask that user programs can use to figure out what
9490 instruction set this CPU supports. */
9491@@ -225,9 +231,6 @@ struct linux_binprm;
9492 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9493 int arch_setup_additional_pages(struct linux_binprm *, int);
9494
9495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9496-#define arch_randomize_brk arch_randomize_brk
9497-
9498 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9499
9500 #endif
9501diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9502index c4a93d6..4d2a9b4 100644
9503--- a/arch/s390/include/asm/exec.h
9504+++ b/arch/s390/include/asm/exec.h
9505@@ -7,6 +7,6 @@
9506 #ifndef __ASM_EXEC_H
9507 #define __ASM_EXEC_H
9508
9509-extern unsigned long arch_align_stack(unsigned long sp);
9510+#define arch_align_stack(x) ((x) & ~0xfUL)
9511
9512 #endif /* __ASM_EXEC_H */
9513diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9514index cd4c68e..6764641 100644
9515--- a/arch/s390/include/asm/uaccess.h
9516+++ b/arch/s390/include/asm/uaccess.h
9517@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9518 __range_ok((unsigned long)(addr), (size)); \
9519 })
9520
9521+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9522 #define access_ok(type, addr, size) __access_ok(addr, size)
9523
9524 /*
9525@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9526 copy_to_user(void __user *to, const void *from, unsigned long n)
9527 {
9528 might_fault();
9529+
9530+ if ((long)n < 0)
9531+ return n;
9532+
9533 return __copy_to_user(to, from, n);
9534 }
9535
9536@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9537 static inline unsigned long __must_check
9538 copy_from_user(void *to, const void __user *from, unsigned long n)
9539 {
9540- unsigned int sz = __compiletime_object_size(to);
9541+ size_t sz = __compiletime_object_size(to);
9542
9543 might_fault();
9544- if (unlikely(sz != -1 && sz < n)) {
9545+
9546+ if ((long)n < 0)
9547+ return n;
9548+
9549+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9550 copy_from_user_overflow();
9551 return n;
9552 }
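The s390 hunk above widens sz from unsigned int to size_t: __compiletime_object_size() is built on __builtin_object_size(), which reports "unknown" as (size_t)-1, and comparing a 32-bit-truncated sentinel against a 64-bit n can misfire for very large copies. A userspace demonstration (the builtin only resolves with optimization enabled):

    #include <stdio.h>

    int main(void)
    {
        char buf[16];
        size_t sz = __builtin_object_size(buf, 0); /* 16 here, (size_t)-1 if unknown */

        if (sz != (size_t)-1 && sz < 32)
            puts("a 32-byte copy into buf would overflow"); /* analogue of copy_from_user_overflow() */
        return 0;
    }
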
9553diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9554index 2ca9586..55682a9 100644
9555--- a/arch/s390/kernel/module.c
9556+++ b/arch/s390/kernel/module.c
9557@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9558
9559 /* Increase core size by size of got & plt and set start
9560 offsets for got and plt. */
9561- me->core_size = ALIGN(me->core_size, 4);
9562- me->arch.got_offset = me->core_size;
9563- me->core_size += me->arch.got_size;
9564- me->arch.plt_offset = me->core_size;
9565- me->core_size += me->arch.plt_size;
9566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9567+ me->arch.got_offset = me->core_size_rw;
9568+ me->core_size_rw += me->arch.got_size;
9569+ me->arch.plt_offset = me->core_size_rx;
9570+ me->core_size_rx += me->arch.plt_size;
9571 return 0;
9572 }
9573
9574@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9575 if (info->got_initialized == 0) {
9576 Elf_Addr *gotent;
9577
9578- gotent = me->module_core + me->arch.got_offset +
9579+ gotent = me->module_core_rw + me->arch.got_offset +
9580 info->got_offset;
9581 *gotent = val;
9582 info->got_initialized = 1;
9583@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9584 rc = apply_rela_bits(loc, val, 0, 64, 0);
9585 else if (r_type == R_390_GOTENT ||
9586 r_type == R_390_GOTPLTENT) {
9587- val += (Elf_Addr) me->module_core - loc;
9588+ val += (Elf_Addr) me->module_core_rw - loc;
9589 rc = apply_rela_bits(loc, val, 1, 32, 1);
9590 }
9591 break;
9592@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9594 if (info->plt_initialized == 0) {
9595 unsigned int *ip;
9596- ip = me->module_core + me->arch.plt_offset +
9597+ ip = me->module_core_rx + me->arch.plt_offset +
9598 info->plt_offset;
9599 #ifndef CONFIG_64BIT
9600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9601@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9602 val - loc + 0xffffUL < 0x1ffffeUL) ||
9603 (r_type == R_390_PLT32DBL &&
9604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9605- val = (Elf_Addr) me->module_core +
9606+ val = (Elf_Addr) me->module_core_rx +
9607 me->arch.plt_offset +
9608 info->plt_offset;
9609 val += rela->r_addend - loc;
9610@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9613 val = val + rela->r_addend -
9614- ((Elf_Addr) me->module_core + me->arch.got_offset);
9615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9616 if (r_type == R_390_GOTOFF16)
9617 rc = apply_rela_bits(loc, val, 0, 16, 0);
9618 else if (r_type == R_390_GOTOFF32)
9619@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9620 break;
9621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9625 rela->r_addend - loc;
9626 if (r_type == R_390_GOTPC)
9627 rc = apply_rela_bits(loc, val, 1, 32, 0);
9628diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9629index 13fc097..84d375f 100644
9630--- a/arch/s390/kernel/process.c
9631+++ b/arch/s390/kernel/process.c
9632@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
9633 }
9634 return 0;
9635 }
9636-
9637-unsigned long arch_align_stack(unsigned long sp)
9638-{
9639- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9640- sp -= get_random_int() & ~PAGE_MASK;
9641- return sp & ~0xf;
9642-}
9643-
9644-static inline unsigned long brk_rnd(void)
9645-{
9646- /* 8MB for 32bit, 1GB for 64bit */
9647- if (is_32bit_task())
9648- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9649- else
9650- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9651-}
9652-
9653-unsigned long arch_randomize_brk(struct mm_struct *mm)
9654-{
9655- unsigned long ret;
9656-
9657- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9658- return (ret > mm->brk) ? ret : mm->brk;
9659-}
9660diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9661index 179a2c2..371e85c 100644
9662--- a/arch/s390/mm/mmap.c
9663+++ b/arch/s390/mm/mmap.c
9664@@ -204,9 +204,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9665 */
9666 if (mmap_is_legacy()) {
9667 mm->mmap_base = mmap_base_legacy();
9668+
9669+#ifdef CONFIG_PAX_RANDMMAP
9670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9671+ mm->mmap_base += mm->delta_mmap;
9672+#endif
9673+
9674 mm->get_unmapped_area = arch_get_unmapped_area;
9675 } else {
9676 mm->mmap_base = mmap_base();
9677+
9678+#ifdef CONFIG_PAX_RANDMMAP
9679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9681+#endif
9682+
9683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9684 }
9685 }
9686@@ -279,9 +291,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9687 */
9688 if (mmap_is_legacy()) {
9689 mm->mmap_base = mmap_base_legacy();
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base += mm->delta_mmap;
9694+#endif
9695+
9696 mm->get_unmapped_area = s390_get_unmapped_area;
9697 } else {
9698 mm->mmap_base = mmap_base();
9699+
9700+#ifdef CONFIG_PAX_RANDMMAP
9701+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9702+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9703+#endif
9704+
9705 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9706 }
9707 }
9708diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9709index ae3d59f..f65f075 100644
9710--- a/arch/score/include/asm/cache.h
9711+++ b/arch/score/include/asm/cache.h
9712@@ -1,7 +1,9 @@
9713 #ifndef _ASM_SCORE_CACHE_H
9714 #define _ASM_SCORE_CACHE_H
9715
9716+#include <linux/const.h>
9717+
9718 #define L1_CACHE_SHIFT 4
9719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9721
9722 #endif /* _ASM_SCORE_CACHE_H */
9723diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9724index f9f3cd5..58ff438 100644
9725--- a/arch/score/include/asm/exec.h
9726+++ b/arch/score/include/asm/exec.h
9727@@ -1,6 +1,6 @@
9728 #ifndef _ASM_SCORE_EXEC_H
9729 #define _ASM_SCORE_EXEC_H
9730
9731-extern unsigned long arch_align_stack(unsigned long sp);
9732+#define arch_align_stack(x) (x)
9733
9734 #endif /* _ASM_SCORE_EXEC_H */
9735diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9736index a1519ad3..e8ac1ff 100644
9737--- a/arch/score/kernel/process.c
9738+++ b/arch/score/kernel/process.c
9739@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9740
9741 return task_pt_regs(task)->cp0_epc;
9742 }
9743-
9744-unsigned long arch_align_stack(unsigned long sp)
9745-{
9746- return sp;
9747-}
9748diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9749index ef9e555..331bd29 100644
9750--- a/arch/sh/include/asm/cache.h
9751+++ b/arch/sh/include/asm/cache.h
9752@@ -9,10 +9,11 @@
9753 #define __ASM_SH_CACHE_H
9754 #ifdef __KERNEL__
9755
9756+#include <linux/const.h>
9757 #include <linux/init.h>
9758 #include <cpu/cache.h>
9759
9760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9761+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9762
9763 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9764
9765diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9766index 6777177..cb5e44f 100644
9767--- a/arch/sh/mm/mmap.c
9768+++ b/arch/sh/mm/mmap.c
9769@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9770 struct mm_struct *mm = current->mm;
9771 struct vm_area_struct *vma;
9772 int do_colour_align;
9773+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9774 struct vm_unmapped_area_info info;
9775
9776 if (flags & MAP_FIXED) {
9777@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9778 if (filp || (flags & MAP_SHARED))
9779 do_colour_align = 1;
9780
9781+#ifdef CONFIG_PAX_RANDMMAP
9782+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9783+#endif
9784+
9785 if (addr) {
9786 if (do_colour_align)
9787 addr = COLOUR_ALIGN(addr, pgoff);
9788@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9789 addr = PAGE_ALIGN(addr);
9790
9791 vma = find_vma(mm, addr);
9792- if (TASK_SIZE - len >= addr &&
9793- (!vma || addr + len <= vma->vm_start))
9794+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9795 return addr;
9796 }
9797
9798 info.flags = 0;
9799 info.length = len;
9800- info.low_limit = TASK_UNMAPPED_BASE;
9801+ info.low_limit = mm->mmap_base;
9802 info.high_limit = TASK_SIZE;
9803 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9804 info.align_offset = pgoff << PAGE_SHIFT;
9805@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9806 struct mm_struct *mm = current->mm;
9807 unsigned long addr = addr0;
9808 int do_colour_align;
9809+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9810 struct vm_unmapped_area_info info;
9811
9812 if (flags & MAP_FIXED) {
9813@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9814 if (filp || (flags & MAP_SHARED))
9815 do_colour_align = 1;
9816
9817+#ifdef CONFIG_PAX_RANDMMAP
9818+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9819+#endif
9820+
9821 /* requesting a specific address */
9822 if (addr) {
9823 if (do_colour_align)
9824@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9825 addr = PAGE_ALIGN(addr);
9826
9827 vma = find_vma(mm, addr);
9828- if (TASK_SIZE - len >= addr &&
9829- (!vma || addr + len <= vma->vm_start))
9830+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9831 return addr;
9832 }
9833
9834@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9835 VM_BUG_ON(addr != -ENOMEM);
9836 info.flags = 0;
9837 info.low_limit = TASK_UNMAPPED_BASE;
9838+
9839+#ifdef CONFIG_PAX_RANDMMAP
9840+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9841+ info.low_limit += mm->delta_mmap;
9842+#endif
9843+
9844 info.high_limit = TASK_SIZE;
9845 addr = vm_unmapped_area(&info);
9846 }
9847diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9848index 4082749..fd97781 100644
9849--- a/arch/sparc/include/asm/atomic_64.h
9850+++ b/arch/sparc/include/asm/atomic_64.h
9851@@ -15,18 +15,38 @@
9852 #define ATOMIC64_INIT(i) { (i) }
9853
9854 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9855+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9856+{
9857+ return ACCESS_ONCE(v->counter);
9858+}
9859 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9860+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9861+{
9862+ return ACCESS_ONCE(v->counter);
9863+}
9864
9865 #define atomic_set(v, i) (((v)->counter) = i)
9866+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9867+{
9868+ v->counter = i;
9869+}
9870 #define atomic64_set(v, i) (((v)->counter) = i)
9871+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9872+{
9873+ v->counter = i;
9874+}
9875
9876-#define ATOMIC_OP(op) \
9877-void atomic_##op(int, atomic_t *); \
9878-void atomic64_##op(long, atomic64_t *);
9879+#define __ATOMIC_OP(op, suffix) \
9880+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9881+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9882
9883-#define ATOMIC_OP_RETURN(op) \
9884-int atomic_##op##_return(int, atomic_t *); \
9885-long atomic64_##op##_return(long, atomic64_t *);
9886+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9887+
9888+#define __ATOMIC_OP_RETURN(op, suffix) \
9889+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9890+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9891+
9892+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9893
9894 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9895
9896@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9897
9898 #undef ATOMIC_OPS
9899 #undef ATOMIC_OP_RETURN
9900+#undef __ATOMIC_OP_RETURN
9901 #undef ATOMIC_OP
9902+#undef __ATOMIC_OP
9903
9904 #define atomic_dec_return(v) atomic_sub_return(1, v)
9905 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9906
9907 #define atomic_inc_return(v) atomic_add_return(1, v)
9908+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9909+{
9910+ return atomic_add_return_unchecked(1, v);
9911+}
9912 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9913+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9914+{
9915+ return atomic64_add_return_unchecked(1, v);
9916+}
9917
9918 /*
9919 * atomic_inc_and_test - increment and test
9920@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9921 * other cases.
9922 */
9923 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9924+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9925+{
9926+ return atomic_inc_return_unchecked(v) == 0;
9927+}
9928 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9929
9930 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9931@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9932 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9933
9934 #define atomic_inc(v) atomic_add(1, v)
9935+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9936+{
9937+ atomic_add_unchecked(1, v);
9938+}
9939 #define atomic64_inc(v) atomic64_add(1, v)
9940+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9941+{
9942+ atomic64_add_unchecked(1, v);
9943+}
9944
9945 #define atomic_dec(v) atomic_sub(1, v)
9946+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9947+{
9948+ atomic_sub_unchecked(1, v);
9949+}
9950 #define atomic64_dec(v) atomic64_sub(1, v)
9951+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9952+{
9953+ atomic64_sub_unchecked(1, v);
9954+}
9955
9956 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9957 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9958
9959 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9960+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9961+{
9962+ return cmpxchg(&v->counter, old, new);
9963+}
9964 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9966+{
9967+ return xchg(&v->counter, new);
9968+}
9969
9970 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9971 {
9972- int c, old;
9973+ int c, old, new;
9974 c = atomic_read(v);
9975 for (;;) {
9976- if (unlikely(c == (u)))
9977+ if (unlikely(c == u))
9978 break;
9979- old = atomic_cmpxchg((v), c, c + (a));
9980+
9981+ asm volatile("addcc %2, %0, %0\n"
9982+
9983+#ifdef CONFIG_PAX_REFCOUNT
9984+ "tvs %%icc, 6\n"
9985+#endif
9986+
9987+ : "=r" (new)
9988+ : "0" (c), "ir" (a)
9989+ : "cc");
9990+
9991+ old = atomic_cmpxchg(v, c, new);
9992 if (likely(old == c))
9993 break;
9994 c = old;
9995@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9996 #define atomic64_cmpxchg(v, o, n) \
9997 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9998 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9999+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10000+{
10001+ return xchg(&v->counter, new);
10002+}
10003
10004 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10005 {
10006- long c, old;
10007+ long c, old, new;
10008 c = atomic64_read(v);
10009 for (;;) {
10010- if (unlikely(c == (u)))
10011+ if (unlikely(c == u))
10012 break;
10013- old = atomic64_cmpxchg((v), c, c + (a));
10014+
10015+ asm volatile("addcc %2, %0, %0\n"
10016+
10017+#ifdef CONFIG_PAX_REFCOUNT
10018+ "tvs %%xcc, 6\n"
10019+#endif
10020+
10021+ : "=r" (new)
10022+ : "0" (c), "ir" (a)
10023+ : "cc");
10024+
10025+ old = atomic64_cmpxchg(v, c, new);
10026 if (likely(old == c))
10027 break;
10028 c = old;
10029 }
10030- return c != (u);
10031+ return c != u;
10032 }
10033
10034 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
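
The pattern above repeats for every operation: the stock name keeps its checked behaviour on atomic_t, while a _unchecked twin on atomic_unchecked_t opts out of PAX_REFCOUNT's overflow trap for counters that are allowed to wrap, such as statistics. A portable C model of the split, with __builtin_add_overflow standing in for the addcc/tvs pair and the cas retry loop reduced to a plain store:

#include <stdlib.h>

typedef struct { volatile int counter; } atomic_model_t;
typedef struct { volatile int counter; } atomic_unchecked_model_t;

void atomic_add_model(int i, atomic_model_t *v)
{
	int old = v->counter, sum;

	if (__builtin_add_overflow(old, i, &sum))
		abort();	/* models the "tvs %icc, 6" overflow trap */
	v->counter = sum;
}

void atomic_add_unchecked_model(int i, atomic_unchecked_model_t *v)
{
	v->counter += i;	/* wrap-around is legitimate here */
}
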
10035diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10036index 7664894..45a974b 100644
10037--- a/arch/sparc/include/asm/barrier_64.h
10038+++ b/arch/sparc/include/asm/barrier_64.h
10039@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10040 do { \
10041 compiletime_assert_atomic_type(*p); \
10042 barrier(); \
10043- ACCESS_ONCE(*p) = (v); \
10044+ ACCESS_ONCE_RW(*p) = (v); \
10045 } while (0)
10046
10047 #define smp_load_acquire(p) \
10048diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10049index 5bb6991..5c2132e 100644
10050--- a/arch/sparc/include/asm/cache.h
10051+++ b/arch/sparc/include/asm/cache.h
10052@@ -7,10 +7,12 @@
10053 #ifndef _SPARC_CACHE_H
10054 #define _SPARC_CACHE_H
10055
10056+#include <linux/const.h>
10057+
10058 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10059
10060 #define L1_CACHE_SHIFT 5
10061-#define L1_CACHE_BYTES 32
10062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10063
10064 #ifdef CONFIG_SPARC32
10065 #define SMP_CACHE_BYTES_SHIFT 5
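
Routing L1_CACHE_BYTES through _AC() keeps the constant usable from both C and assembly: C code sees a typed 1UL so expressions promote to unsigned long, while a .S file including the header sees a bare 1. A standalone re-creation using the same two-way definition as include/uapi/linux/const.h:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees a bare 1 */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C sees 1UL */
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	printf("%lu\n", L1_CACHE_BYTES);	/* prints 32, same value as before */
	return 0;
}
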
10066diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10067index a24e41f..47677ff 100644
10068--- a/arch/sparc/include/asm/elf_32.h
10069+++ b/arch/sparc/include/asm/elf_32.h
10070@@ -114,6 +114,13 @@ typedef struct {
10071
10072 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10073
10074+#ifdef CONFIG_PAX_ASLR
10075+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10076+
10077+#define PAX_DELTA_MMAP_LEN 16
10078+#define PAX_DELTA_STACK_LEN 16
10079+#endif
10080+
10081 /* This yields a mask that user programs can use to figure out what
10082 instruction set this cpu supports. This can NOT be done in userspace
10083 on Sparc. */
10084diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10085index 370ca1e..d4f4a98 100644
10086--- a/arch/sparc/include/asm/elf_64.h
10087+++ b/arch/sparc/include/asm/elf_64.h
10088@@ -189,6 +189,13 @@ typedef struct {
10089 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10090 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10091
10092+#ifdef CONFIG_PAX_ASLR
10093+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10094+
10095+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10096+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10097+#endif
10098+
10099 extern unsigned long sparc64_elf_hwcap;
10100 #define ELF_HWCAP sparc64_elf_hwcap
10101
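
For a sense of scale, assuming the usual PaX formula delta_mmap = (random & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT and sparc64's 8 KiB pages (PAGE_SHIFT = 13): a 64-bit task draws 28 bits of mmap randomization, a slide of up to 2^(28+13) bytes = 2 TiB, while a 32-bit task draws 14 bits, up to 2^(14+13) bytes = 128 MiB. The stack deltas are one bit wider, doubling each of those ranges for delta_stack.
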
10102diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10103index a3890da..f6a408e 100644
10104--- a/arch/sparc/include/asm/pgalloc_32.h
10105+++ b/arch/sparc/include/asm/pgalloc_32.h
10106@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10107 }
10108
10109 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10110+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10111
10112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10113 unsigned long address)
10114diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10115index 5e31871..13469c6 100644
10116--- a/arch/sparc/include/asm/pgalloc_64.h
10117+++ b/arch/sparc/include/asm/pgalloc_64.h
10118@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10119 }
10120
10121 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10122+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10123
10124 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10125 {
10126@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10127 }
10128
10129 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10130+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10131
10132 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10133 {
10134diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10135index 59ba6f6..4518128 100644
10136--- a/arch/sparc/include/asm/pgtable.h
10137+++ b/arch/sparc/include/asm/pgtable.h
10138@@ -5,4 +5,8 @@
10139 #else
10140 #include <asm/pgtable_32.h>
10141 #endif
10142+
10143+#define ktla_ktva(addr) (addr)
10144+#define ktva_ktla(addr) (addr)
10145+
10146 #endif
10147diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10148index f06b36a..bca3189 100644
10149--- a/arch/sparc/include/asm/pgtable_32.h
10150+++ b/arch/sparc/include/asm/pgtable_32.h
10151@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10152 #define PAGE_SHARED SRMMU_PAGE_SHARED
10153 #define PAGE_COPY SRMMU_PAGE_COPY
10154 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10155+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10156+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10157+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10158 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10159
10160 /* Top-level page directory - dummy used by init-mm.
10161@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10162
10163 /* xwr */
10164 #define __P000 PAGE_NONE
10165-#define __P001 PAGE_READONLY
10166-#define __P010 PAGE_COPY
10167-#define __P011 PAGE_COPY
10168+#define __P001 PAGE_READONLY_NOEXEC
10169+#define __P010 PAGE_COPY_NOEXEC
10170+#define __P011 PAGE_COPY_NOEXEC
10171 #define __P100 PAGE_READONLY
10172 #define __P101 PAGE_READONLY
10173 #define __P110 PAGE_COPY
10174 #define __P111 PAGE_COPY
10175
10176 #define __S000 PAGE_NONE
10177-#define __S001 PAGE_READONLY
10178-#define __S010 PAGE_SHARED
10179-#define __S011 PAGE_SHARED
10180+#define __S001 PAGE_READONLY_NOEXEC
10181+#define __S010 PAGE_SHARED_NOEXEC
10182+#define __S011 PAGE_SHARED_NOEXEC
10183 #define __S100 PAGE_READONLY
10184 #define __S101 PAGE_READONLY
10185 #define __S110 PAGE_SHARED
10186diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10187index ae51a11..eadfd03 100644
10188--- a/arch/sparc/include/asm/pgtsrmmu.h
10189+++ b/arch/sparc/include/asm/pgtsrmmu.h
10190@@ -111,6 +111,11 @@
10191 SRMMU_EXEC | SRMMU_REF)
10192 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10193 SRMMU_EXEC | SRMMU_REF)
10194+
10195+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10196+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10197+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10198+
10199 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10200 SRMMU_DIRTY | SRMMU_REF)
10201
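
The digits in the __Pxwr/__Sxwr names above encode (exec, write, read), matching the low vm_flags bits that mm code uses to look up the pgprot: vm_get_page_prot() is essentially protection_map[vm_flags & (read|write|exec|shared)] with VM_READ = 0x1, VM_WRITE = 0x2, VM_EXEC = 0x4. After the rewrite, a private mmap(PROT_READ) resolves through index 1 (__P001) to PAGE_READONLY_NOEXEC, while PROT_READ|PROT_EXEC resolves through index 5 (__P101) and keeps SRMMU_EXEC; only mappings that ask for execute retain it, which is what makes PAGEEXEC enforceable on SRMMU.
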
10202diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10203index 29d64b1..4272fe8 100644
10204--- a/arch/sparc/include/asm/setup.h
10205+++ b/arch/sparc/include/asm/setup.h
10206@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10207 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10208
10209 /* init_64.c */
10210-extern atomic_t dcpage_flushes;
10211-extern atomic_t dcpage_flushes_xcall;
10212+extern atomic_unchecked_t dcpage_flushes;
10213+extern atomic_unchecked_t dcpage_flushes_xcall;
10214
10215 extern int sysctl_tsb_ratio;
10216 #endif
10217diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10218index 9689176..63c18ea 100644
10219--- a/arch/sparc/include/asm/spinlock_64.h
10220+++ b/arch/sparc/include/asm/spinlock_64.h
10221@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10222
10223 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10224
10225-static void inline arch_read_lock(arch_rwlock_t *lock)
10226+static inline void arch_read_lock(arch_rwlock_t *lock)
10227 {
10228 unsigned long tmp1, tmp2;
10229
10230 __asm__ __volatile__ (
10231 "1: ldsw [%2], %0\n"
10232 " brlz,pn %0, 2f\n"
10233-"4: add %0, 1, %1\n"
10234+"4: addcc %0, 1, %1\n"
10235+
10236+#ifdef CONFIG_PAX_REFCOUNT
10237+" tvs %%icc, 6\n"
10238+#endif
10239+
10240 " cas [%2], %0, %1\n"
10241 " cmp %0, %1\n"
10242 " bne,pn %%icc, 1b\n"
10243@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10244 " .previous"
10245 : "=&r" (tmp1), "=&r" (tmp2)
10246 : "r" (lock)
10247- : "memory");
10248+ : "memory", "cc");
10249 }
10250
10251-static int inline arch_read_trylock(arch_rwlock_t *lock)
10252+static inline int arch_read_trylock(arch_rwlock_t *lock)
10253 {
10254 int tmp1, tmp2;
10255
10256@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10257 "1: ldsw [%2], %0\n"
10258 " brlz,a,pn %0, 2f\n"
10259 " mov 0, %0\n"
10260-" add %0, 1, %1\n"
10261+" addcc %0, 1, %1\n"
10262+
10263+#ifdef CONFIG_PAX_REFCOUNT
10264+" tvs %%icc, 6\n"
10265+#endif
10266+
10267 " cas [%2], %0, %1\n"
10268 " cmp %0, %1\n"
10269 " bne,pn %%icc, 1b\n"
10270@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10271 return tmp1;
10272 }
10273
10274-static void inline arch_read_unlock(arch_rwlock_t *lock)
10275+static inline void arch_read_unlock(arch_rwlock_t *lock)
10276 {
10277 unsigned long tmp1, tmp2;
10278
10279 __asm__ __volatile__(
10280 "1: lduw [%2], %0\n"
10281-" sub %0, 1, %1\n"
10282+" subcc %0, 1, %1\n"
10283+
10284+#ifdef CONFIG_PAX_REFCOUNT
10285+" tvs %%icc, 6\n"
10286+#endif
10287+
10288 " cas [%2], %0, %1\n"
10289 " cmp %0, %1\n"
10290 " bne,pn %%xcc, 1b\n"
10291@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10292 : "memory");
10293 }
10294
10295-static void inline arch_write_lock(arch_rwlock_t *lock)
10296+static inline void arch_write_lock(arch_rwlock_t *lock)
10297 {
10298 unsigned long mask, tmp1, tmp2;
10299
10300@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10301 : "memory");
10302 }
10303
10304-static void inline arch_write_unlock(arch_rwlock_t *lock)
10305+static inline void arch_write_unlock(arch_rwlock_t *lock)
10306 {
10307 __asm__ __volatile__(
10308 " stw %%g0, [%0]"
10309@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10310 : "memory");
10311 }
10312
10313-static int inline arch_write_trylock(arch_rwlock_t *lock)
10314+static inline int arch_write_trylock(arch_rwlock_t *lock)
10315 {
10316 unsigned long mask, tmp1, tmp2, result;
10317
10318diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10319index fd7bd0a..2e2fa7a 100644
10320--- a/arch/sparc/include/asm/thread_info_32.h
10321+++ b/arch/sparc/include/asm/thread_info_32.h
10322@@ -47,6 +47,7 @@ struct thread_info {
10323 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10324 unsigned long rwbuf_stkptrs[NSWINS];
10325 unsigned long w_saved;
10326+ unsigned long lowest_stack;
10327 };
10328
10329 /*
10330diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10331index ff45516..73001ab 100644
10332--- a/arch/sparc/include/asm/thread_info_64.h
10333+++ b/arch/sparc/include/asm/thread_info_64.h
10334@@ -61,6 +61,8 @@ struct thread_info {
10335 struct pt_regs *kern_una_regs;
10336 unsigned int kern_una_insn;
10337
10338+ unsigned long lowest_stack;
10339+
10340 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10341 __attribute__ ((aligned(64)));
10342 };
10343@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10344 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10345 /* flag bit 4 is available */
10346 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10347-/* flag bit 6 is available */
10348+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10349 #define TIF_32BIT 7 /* 32-bit binary */
10350 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10351 #define TIF_SECCOMP 9 /* secure computing */
10352 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10353 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10354+
10355 /* NOTE: Thread flags >= 12 should be ones we have no interest
10356 * in using in assembly, else we can't use the mask as
10357 * an immediate value in instructions such as andcc.
10358@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10359 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10360 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10361 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10362+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10363
10364 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10365 _TIF_DO_NOTIFY_RESUME_MASK | \
10366 _TIF_NEED_RESCHED)
10367 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10368
10369+#define _TIF_WORK_SYSCALL \
10370+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10371+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10372+
10373 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10374
10375 /*
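
A quick check that the new flag respects the "flags >= 12" warning above, since _TIF_WORK_SYSCALL becomes an andcc immediate in syscalls.S: with sparc64's TIF_SYSCALL_TRACE = 0 and the bits defined here (GRSEC_SETXID = 6, NOHZ = 8, SECCOMP = 9, AUDIT = 10, TRACEPOINT = 11), the mask works out to 0x001 | 0x040 | 0x100 | 0x200 | 0x400 | 0x800 = 0xf41 (3905), inside the signed 13-bit immediate range (max 4095) that andcc can encode, so all six conditions remain testable in one instruction.
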
10376diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10377index bd56c28..4b63d83 100644
10378--- a/arch/sparc/include/asm/uaccess.h
10379+++ b/arch/sparc/include/asm/uaccess.h
10380@@ -1,5 +1,6 @@
10381 #ifndef ___ASM_SPARC_UACCESS_H
10382 #define ___ASM_SPARC_UACCESS_H
10383+
10384 #if defined(__sparc__) && defined(__arch64__)
10385 #include <asm/uaccess_64.h>
10386 #else
10387diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10388index 64ee103..388aef0 100644
10389--- a/arch/sparc/include/asm/uaccess_32.h
10390+++ b/arch/sparc/include/asm/uaccess_32.h
10391@@ -47,6 +47,7 @@
10392 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10393 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10394 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10395+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10396 #define access_ok(type, addr, size) \
10397 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10398
10399@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10400
10401 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10402 {
10403- if (n && __access_ok((unsigned long) to, n))
10404+ if ((long)n < 0)
10405+ return n;
10406+
10407+ if (n && __access_ok((unsigned long) to, n)) {
10408+ if (!__builtin_constant_p(n))
10409+ check_object_size(from, n, true);
10410 return __copy_user(to, (__force void __user *) from, n);
10411- else
10412+ } else
10413 return n;
10414 }
10415
10416 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10417 {
10418+ if ((long)n < 0)
10419+ return n;
10420+
10421+ if (!__builtin_constant_p(n))
10422+ check_object_size(from, n, true);
10423+
10424 return __copy_user(to, (__force void __user *) from, n);
10425 }
10426
10427 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) from, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) from, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(to, n, false);
10436 return __copy_user((__force void __user *) to, from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447 return __copy_user((__force void __user *) to, from, n);
10448 }
10449
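
The new (long)n < 0 guard at the top of each wrapper catches callers that compute a length in signed arithmetic and go negative: converted to the unsigned parameter, such a value becomes an enormous copy. Returning n means "n bytes uncopied", so the caller sees a clean total failure instead. A small userspace demonstration of the bug class being absorbed (sizes assume LP64):

#include <stdio.h>

int main(void)
{
	int have = 16, want = 64;
	unsigned long n = have - want;	/* -48 wraps to 0xffffffffffffffd0 */

	if ((long)n < 0)
		printf("refused: n = %lu would be a huge copy\n", n);
	return 0;
}
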
10450diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10451index a35194b..47dabc0d 100644
10452--- a/arch/sparc/include/asm/uaccess_64.h
10453+++ b/arch/sparc/include/asm/uaccess_64.h
10454@@ -10,6 +10,7 @@
10455 #include <linux/compiler.h>
10456 #include <linux/string.h>
10457 #include <linux/thread_info.h>
10458+#include <linux/kernel.h>
10459 #include <asm/asi.h>
10460 #include <asm/spitfire.h>
10461 #include <asm-generic/uaccess-unaligned.h>
10462@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10463 return 1;
10464 }
10465
10466+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10467+{
10468+ return 1;
10469+}
10470+
10471 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10472 {
10473 return 1;
10474@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10475 static inline unsigned long __must_check
10476 copy_from_user(void *to, const void __user *from, unsigned long size)
10477 {
10478- unsigned long ret = ___copy_from_user(to, from, size);
10479+ unsigned long ret;
10480
10481+ if ((long)size < 0 || size > INT_MAX)
10482+ return size;
10483+
10484+ if (!__builtin_constant_p(size))
10485+ check_object_size(to, size, false);
10486+
10487+ ret = ___copy_from_user(to, from, size);
10488 if (unlikely(ret))
10489 ret = copy_from_user_fixup(to, from, size);
10490
10491@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10492 static inline unsigned long __must_check
10493 copy_to_user(void __user *to, const void *from, unsigned long size)
10494 {
10495- unsigned long ret = ___copy_to_user(to, from, size);
10496+ unsigned long ret;
10497
10498+ if ((long)size < 0 || size > INT_MAX)
10499+ return size;
10500+
10501+ if (!__builtin_constant_p(size))
10502+ check_object_size(from, size, true);
10503+
10504+ ret = ___copy_to_user(to, from, size);
10505 if (unlikely(ret))
10506 ret = copy_to_user_fixup(to, from, size);
10507 return ret;
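
Two details of the 64-bit wrappers deserve a note. The length is additionally capped at INT_MAX, so a corrupted size fails immediately rather than reaching the fault-fixup path, and check_object_size(), the object-bounds walk, only runs when the compiler cannot prove the length constant. That works because these wrappers are static inline: after inlining, a literal size folds __builtin_constant_p to 1 and the branch disappears. A standalone illustration of the folding (behaviour at -O2; unoptimized builds conservatively evaluate the builtin to 0 and keep the check):

#include <stdio.h>

static inline void copy_model(unsigned long size)
{
	if (!__builtin_constant_p(size))
		printf("runtime object-size check for %lu bytes\n", size);
	/* with a literal argument at -O2 the branch folds away, as in
	 * the kernel's static inline copy_*_user() wrappers */
}

int main(void)
{
	copy_model(sizeof(long));	/* constant: check elided at -O2 */
	return 0;
}
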
10508diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10509index 7cf9c6e..6206648 100644
10510--- a/arch/sparc/kernel/Makefile
10511+++ b/arch/sparc/kernel/Makefile
10512@@ -4,7 +4,7 @@
10513 #
10514
10515 asflags-y := -ansi
10516-ccflags-y := -Werror
10517+#ccflags-y := -Werror
10518
10519 extra-y := head_$(BITS).o
10520
10521diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10522index 50e7b62..79fae35 100644
10523--- a/arch/sparc/kernel/process_32.c
10524+++ b/arch/sparc/kernel/process_32.c
10525@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10526
10527 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10528 r->psr, r->pc, r->npc, r->y, print_tainted());
10529- printk("PC: <%pS>\n", (void *) r->pc);
10530+ printk("PC: <%pA>\n", (void *) r->pc);
10531 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10532 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10533 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10534 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10535 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10536 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10537- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10538+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10539
10540 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10541 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10542@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10543 rw = (struct reg_window32 *) fp;
10544 pc = rw->ins[7];
10545 printk("[%08lx : ", pc);
10546- printk("%pS ] ", (void *) pc);
10547+ printk("%pA ] ", (void *) pc);
10548 fp = rw->ins[6];
10549 } while (++count < 16);
10550 printk("\n");
10551diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10552index 46a5964..a35c62c 100644
10553--- a/arch/sparc/kernel/process_64.c
10554+++ b/arch/sparc/kernel/process_64.c
10555@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10556 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10557 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10558 if (regs->tstate & TSTATE_PRIV)
10559- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10560+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10561 }
10562
10563 void show_regs(struct pt_regs *regs)
10564@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10565
10566 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10567 regs->tpc, regs->tnpc, regs->y, print_tainted());
10568- printk("TPC: <%pS>\n", (void *) regs->tpc);
10569+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10570 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10571 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10572 regs->u_regs[3]);
10573@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10574 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10575 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10576 regs->u_regs[15]);
10577- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10578+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10579 show_regwindow(regs);
10580 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10581 }
10582@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10583 ((tp && tp->task) ? tp->task->pid : -1));
10584
10585 if (gp->tstate & TSTATE_PRIV) {
10586- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10587+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10588 (void *) gp->tpc,
10589 (void *) gp->o7,
10590 (void *) gp->i7,
10591diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10592index 79cc0d1..ec62734 100644
10593--- a/arch/sparc/kernel/prom_common.c
10594+++ b/arch/sparc/kernel/prom_common.c
10595@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10596
10597 unsigned int prom_early_allocated __initdata;
10598
10599-static struct of_pdt_ops prom_sparc_ops __initdata = {
10600+static struct of_pdt_ops prom_sparc_ops __initconst = {
10601 .nextprop = prom_common_nextprop,
10602 .getproplen = prom_getproplen,
10603 .getproperty = prom_getproperty,
10604diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10605index 9ddc492..27a5619 100644
10606--- a/arch/sparc/kernel/ptrace_64.c
10607+++ b/arch/sparc/kernel/ptrace_64.c
10608@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10609 return ret;
10610 }
10611
10612+#ifdef CONFIG_GRKERNSEC_SETXID
10613+extern void gr_delayed_cred_worker(void);
10614+#endif
10615+
10616 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10617 {
10618 int ret = 0;
10619@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10620 if (test_thread_flag(TIF_NOHZ))
10621 user_exit();
10622
10623+#ifdef CONFIG_GRKERNSEC_SETXID
10624+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10625+ gr_delayed_cred_worker();
10626+#endif
10627+
10628 if (test_thread_flag(TIF_SYSCALL_TRACE))
10629 ret = tracehook_report_syscall_entry(regs);
10630
10631@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10632 if (test_thread_flag(TIF_NOHZ))
10633 user_exit();
10634
10635+#ifdef CONFIG_GRKERNSEC_SETXID
10636+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10637+ gr_delayed_cred_worker();
10638+#endif
10639+
10640 audit_syscall_exit(regs);
10641
10642 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10643diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10644index 61139d9..c1a5f28 100644
10645--- a/arch/sparc/kernel/smp_64.c
10646+++ b/arch/sparc/kernel/smp_64.c
10647@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10648 return;
10649
10650 #ifdef CONFIG_DEBUG_DCFLUSH
10651- atomic_inc(&dcpage_flushes);
10652+ atomic_inc_unchecked(&dcpage_flushes);
10653 #endif
10654
10655 this_cpu = get_cpu();
10656@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10657 xcall_deliver(data0, __pa(pg_addr),
10658 (u64) pg_addr, cpumask_of(cpu));
10659 #ifdef CONFIG_DEBUG_DCFLUSH
10660- atomic_inc(&dcpage_flushes_xcall);
10661+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10662 #endif
10663 }
10664 }
10665@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10666 preempt_disable();
10667
10668 #ifdef CONFIG_DEBUG_DCFLUSH
10669- atomic_inc(&dcpage_flushes);
10670+ atomic_inc_unchecked(&dcpage_flushes);
10671 #endif
10672 data0 = 0;
10673 pg_addr = page_address(page);
10674@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10675 xcall_deliver(data0, __pa(pg_addr),
10676 (u64) pg_addr, cpu_online_mask);
10677 #ifdef CONFIG_DEBUG_DCFLUSH
10678- atomic_inc(&dcpage_flushes_xcall);
10679+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10680 #endif
10681 }
10682 __local_flush_dcache_page(page);
10683diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10684index 646988d..b88905f 100644
10685--- a/arch/sparc/kernel/sys_sparc_32.c
10686+++ b/arch/sparc/kernel/sys_sparc_32.c
10687@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10688 if (len > TASK_SIZE - PAGE_SIZE)
10689 return -ENOMEM;
10690 if (!addr)
10691- addr = TASK_UNMAPPED_BASE;
10692+ addr = current->mm->mmap_base;
10693
10694 info.flags = 0;
10695 info.length = len;
10696diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10697index 30e7ddb..266a3b0 100644
10698--- a/arch/sparc/kernel/sys_sparc_64.c
10699+++ b/arch/sparc/kernel/sys_sparc_64.c
10700@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10701 struct vm_area_struct * vma;
10702 unsigned long task_size = TASK_SIZE;
10703 int do_color_align;
10704+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10705 struct vm_unmapped_area_info info;
10706
10707 if (flags & MAP_FIXED) {
10708 /* We do not accept a shared mapping if it would violate
10709 * cache aliasing constraints.
10710 */
10711- if ((flags & MAP_SHARED) &&
10712+ if ((filp || (flags & MAP_SHARED)) &&
10713 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10714 return -EINVAL;
10715 return addr;
10716@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10717 if (filp || (flags & MAP_SHARED))
10718 do_color_align = 1;
10719
10720+#ifdef CONFIG_PAX_RANDMMAP
10721+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10722+#endif
10723+
10724 if (addr) {
10725 if (do_color_align)
10726 addr = COLOR_ALIGN(addr, pgoff);
10727@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10728 addr = PAGE_ALIGN(addr);
10729
10730 vma = find_vma(mm, addr);
10731- if (task_size - len >= addr &&
10732- (!vma || addr + len <= vma->vm_start))
10733+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10734 return addr;
10735 }
10736
10737 info.flags = 0;
10738 info.length = len;
10739- info.low_limit = TASK_UNMAPPED_BASE;
10740+ info.low_limit = mm->mmap_base;
10741 info.high_limit = min(task_size, VA_EXCLUDE_START);
10742 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10743 info.align_offset = pgoff << PAGE_SHIFT;
10744+ info.threadstack_offset = offset;
10745 addr = vm_unmapped_area(&info);
10746
10747 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10748 VM_BUG_ON(addr != -ENOMEM);
10749 info.low_limit = VA_EXCLUDE_END;
10750+
10751+#ifdef CONFIG_PAX_RANDMMAP
10752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10753+ info.low_limit += mm->delta_mmap;
10754+#endif
10755+
10756 info.high_limit = task_size;
10757 addr = vm_unmapped_area(&info);
10758 }
10759@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10760 unsigned long task_size = STACK_TOP32;
10761 unsigned long addr = addr0;
10762 int do_color_align;
10763+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10764 struct vm_unmapped_area_info info;
10765
10766 /* This should only ever run for 32-bit processes. */
10767@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10768 /* We do not accept a shared mapping if it would violate
10769 * cache aliasing constraints.
10770 */
10771- if ((flags & MAP_SHARED) &&
10772+ if ((filp || (flags & MAP_SHARED)) &&
10773 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10774 return -EINVAL;
10775 return addr;
10776@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10777 if (filp || (flags & MAP_SHARED))
10778 do_color_align = 1;
10779
10780+#ifdef CONFIG_PAX_RANDMMAP
10781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10782+#endif
10783+
10784 /* requesting a specific address */
10785 if (addr) {
10786 if (do_color_align)
10787@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10788 addr = PAGE_ALIGN(addr);
10789
10790 vma = find_vma(mm, addr);
10791- if (task_size - len >= addr &&
10792- (!vma || addr + len <= vma->vm_start))
10793+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10794 return addr;
10795 }
10796
10797@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10798 info.high_limit = mm->mmap_base;
10799 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10800 info.align_offset = pgoff << PAGE_SHIFT;
10801+ info.threadstack_offset = offset;
10802 addr = vm_unmapped_area(&info);
10803
10804 /*
10805@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10806 VM_BUG_ON(addr != -ENOMEM);
10807 info.flags = 0;
10808 info.low_limit = TASK_UNMAPPED_BASE;
10809+
10810+#ifdef CONFIG_PAX_RANDMMAP
10811+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10812+ info.low_limit += mm->delta_mmap;
10813+#endif
10814+
10815 info.high_limit = STACK_TOP32;
10816 addr = vm_unmapped_area(&info);
10817 }
10818@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10819 EXPORT_SYMBOL(get_fb_unmapped_area);
10820
10821 /* Essentially the same as PowerPC. */
10822-static unsigned long mmap_rnd(void)
10823+static unsigned long mmap_rnd(struct mm_struct *mm)
10824 {
10825 unsigned long rnd = 0UL;
10826
10827+#ifdef CONFIG_PAX_RANDMMAP
10828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10829+#endif
10830+
10831 if (current->flags & PF_RANDOMIZE) {
10832 unsigned long val = get_random_int();
10833 if (test_thread_flag(TIF_32BIT))
10834@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10835
10836 void arch_pick_mmap_layout(struct mm_struct *mm)
10837 {
10838- unsigned long random_factor = mmap_rnd();
10839+ unsigned long random_factor = mmap_rnd(mm);
10840 unsigned long gap;
10841
10842 /*
10843@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10844 gap == RLIM_INFINITY ||
10845 sysctl_legacy_va_layout) {
10846 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10847+
10848+#ifdef CONFIG_PAX_RANDMMAP
10849+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10850+ mm->mmap_base += mm->delta_mmap;
10851+#endif
10852+
10853 mm->get_unmapped_area = arch_get_unmapped_area;
10854 } else {
10855 /* We know it's 32-bit */
10856@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10857 gap = (task_size / 6 * 5);
10858
10859 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10860+
10861+#ifdef CONFIG_PAX_RANDMMAP
10862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10863+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10864+#endif
10865+
10866 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10867 }
10868 }
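
The recurring (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1) test in this file is SPARC's D-cache alias rule: a mapping's virtual colour must match its file offset so that aliases of one page land in the same cache set. The patch widens the MAP_FIXED rejection from shared mappings to anything file-backed, making it consistent with the do_color_align condition a few lines below, which already colours for filp || (flags & MAP_SHARED). The predicate, pulled out as a sketch:

#include <stdbool.h>

/* true when addr has the cache colour that pgoff demands;
 * shmlba is the alias period (SHMLBA), a power of two */
bool colour_ok(unsigned long addr, unsigned long pgoff,
	       unsigned long page_shift, unsigned long shmlba)
{
	return ((addr - (pgoff << page_shift)) & (shmlba - 1)) == 0;
}
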
10869diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10870index bb00089..e0ea580 100644
10871--- a/arch/sparc/kernel/syscalls.S
10872+++ b/arch/sparc/kernel/syscalls.S
10873@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10874 #endif
10875 .align 32
10876 1: ldx [%g6 + TI_FLAGS], %l5
10877- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10878+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10879 be,pt %icc, rtrap
10880 nop
10881 call syscall_trace_leave
10882@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10883
10884 srl %i3, 0, %o3 ! IEU0
10885 srl %i2, 0, %o2 ! IEU0 Group
10886- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10887+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10888 bne,pn %icc, linux_syscall_trace32 ! CTI
10889 mov %i0, %l5 ! IEU1
10890 5: call %l7 ! CTI Group brk forced
10891@@ -218,7 +218,7 @@ linux_sparc_syscall:
10892
10893 mov %i3, %o3 ! IEU1
10894 mov %i4, %o4 ! IEU0 Group
10895- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10896+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10897 bne,pn %icc, linux_syscall_trace ! CTI Group
10898 mov %i0, %l5 ! IEU0
10899 2: call %l7 ! CTI Group brk forced
10900@@ -233,7 +233,7 @@ ret_sys_call:
10901
10902 cmp %o0, -ERESTART_RESTARTBLOCK
10903 bgeu,pn %xcc, 1f
10904- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10905+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10906 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10907
10908 2:
10909diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10910index 6fd386c5..6907d81 100644
10911--- a/arch/sparc/kernel/traps_32.c
10912+++ b/arch/sparc/kernel/traps_32.c
10913@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10914 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10915 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10916
10917+extern void gr_handle_kernel_exploit(void);
10918+
10919 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10920 {
10921 static int die_counter;
10922@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10923 count++ < 30 &&
10924 (((unsigned long) rw) >= PAGE_OFFSET) &&
10925 !(((unsigned long) rw) & 0x7)) {
10926- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10927+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10928 (void *) rw->ins[7]);
10929 rw = (struct reg_window32 *)rw->ins[6];
10930 }
10931 }
10932 printk("Instruction DUMP:");
10933 instruction_dump ((unsigned long *) regs->pc);
10934- if(regs->psr & PSR_PS)
10935+ if(regs->psr & PSR_PS) {
10936+ gr_handle_kernel_exploit();
10937 do_exit(SIGKILL);
10938+ }
10939 do_exit(SIGSEGV);
10940 }
10941
10942diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10943index 0e69974..0c15a6e 100644
10944--- a/arch/sparc/kernel/traps_64.c
10945+++ b/arch/sparc/kernel/traps_64.c
10946@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10947 i + 1,
10948 p->trapstack[i].tstate, p->trapstack[i].tpc,
10949 p->trapstack[i].tnpc, p->trapstack[i].tt);
10950- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10951+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10952 }
10953 }
10954
10955@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10956
10957 lvl -= 0x100;
10958 if (regs->tstate & TSTATE_PRIV) {
10959+
10960+#ifdef CONFIG_PAX_REFCOUNT
10961+ if (lvl == 6)
10962+ pax_report_refcount_overflow(regs);
10963+#endif
10964+
10965 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10966 die_if_kernel(buffer, regs);
10967 }
10968@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10969 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10970 {
10971 char buffer[32];
10972-
10973+
10974 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10975 0, lvl, SIGTRAP) == NOTIFY_STOP)
10976 return;
10977
10978+#ifdef CONFIG_PAX_REFCOUNT
10979+ if (lvl == 6)
10980+ pax_report_refcount_overflow(regs);
10981+#endif
10982+
10983 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10984
10985 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10986@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10987 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10988 printk("%s" "ERROR(%d): ",
10989 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10990- printk("TPC<%pS>\n", (void *) regs->tpc);
10991+ printk("TPC<%pA>\n", (void *) regs->tpc);
10992 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10993 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10994 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10995@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10996 smp_processor_id(),
10997 (type & 0x1) ? 'I' : 'D',
10998 regs->tpc);
10999- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11000+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11001 panic("Irrecoverable Cheetah+ parity error.");
11002 }
11003
11004@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11005 smp_processor_id(),
11006 (type & 0x1) ? 'I' : 'D',
11007 regs->tpc);
11008- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11009+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11010 }
11011
11012 struct sun4v_error_entry {
11013@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11014 /*0x38*/u64 reserved_5;
11015 };
11016
11017-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11018-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11019+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11020+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11021
11022 static const char *sun4v_err_type_to_str(u8 type)
11023 {
11024@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11025 }
11026
11027 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11028- int cpu, const char *pfx, atomic_t *ocnt)
11029+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11030 {
11031 u64 *raw_ptr = (u64 *) ent;
11032 u32 attrs;
11033@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11034
11035 show_regs(regs);
11036
11037- if ((cnt = atomic_read(ocnt)) != 0) {
11038- atomic_set(ocnt, 0);
11039+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11040+ atomic_set_unchecked(ocnt, 0);
11041 wmb();
11042 printk("%s: Queue overflowed %d times.\n",
11043 pfx, cnt);
11044@@ -2048,7 +2059,7 @@ out:
11045 */
11046 void sun4v_resum_overflow(struct pt_regs *regs)
11047 {
11048- atomic_inc(&sun4v_resum_oflow_cnt);
11049+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11050 }
11051
11052 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11053@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11054 /* XXX Actually even this can make not that much sense. Perhaps
11055 * XXX we should just pull the plug and panic directly from here?
11056 */
11057- atomic_inc(&sun4v_nonresum_oflow_cnt);
11058+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11059 }
11060
11061 static void sun4v_tlb_error(struct pt_regs *regs)
11062@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11063
11064 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11065 regs->tpc, tl);
11066- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11067+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11068 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11069- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11070+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11071 (void *) regs->u_regs[UREG_I7]);
11072 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11073 "pte[%lx] error[%lx]\n",
11074@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11075
11076 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11077 regs->tpc, tl);
11078- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11079+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11080 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11081- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11082+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11083 (void *) regs->u_regs[UREG_I7]);
11084 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11085 "pte[%lx] error[%lx]\n",
11086@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11087 fp = (unsigned long)sf->fp + STACK_BIAS;
11088 }
11089
11090- printk(" [%016lx] %pS\n", pc, (void *) pc);
11091+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11092 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11093 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11094 int index = tsk->curr_ret_stack;
11095 if (tsk->ret_stack && index >= graph) {
11096 pc = tsk->ret_stack[index - graph].ret;
11097- printk(" [%016lx] %pS\n", pc, (void *) pc);
11098+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11099 graph++;
11100 }
11101 }
11102@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11103 return (struct reg_window *) (fp + STACK_BIAS);
11104 }
11105
11106+extern void gr_handle_kernel_exploit(void);
11107+
11108 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11109 {
11110 static int die_counter;
11111@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11112 while (rw &&
11113 count++ < 30 &&
11114 kstack_valid(tp, (unsigned long) rw)) {
11115- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11116+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11117 (void *) rw->ins[7]);
11118
11119 rw = kernel_stack_up(rw);
11120@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11121 }
11122 if (panic_on_oops)
11123 panic("Fatal exception");
11124- if (regs->tstate & TSTATE_PRIV)
11125+ if (regs->tstate & TSTATE_PRIV) {
11126+ gr_handle_kernel_exploit();
11127 do_exit(SIGKILL);
11128+ }
11129 do_exit(SIGSEGV);
11130 }
11131 EXPORT_SYMBOL(die_if_kernel);
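
This is where the refcount plumbing meets the trap handlers: the checked atomics execute "tvs %icc, 6" (or %xcc), which fires software trap 6 only when the preceding addcc/subcc set the overflow flag. SPARC delivers software trap n as trap type 0x100 + n, bad_trap() subtracts the 0x100, and the new lvl == 6 tests above are what turn the event into pax_report_refcount_overflow() rather than a generic "bad sw trap" die. A toy model of the dispatch:

#include <stdio.h>

static void bad_trap_model(long lvl)
{
	lvl -= 0x100;	/* as in the real bad_trap() */
	if (lvl == 6)
		printf("refcount overflow -> pax_report_refcount_overflow()\n");
	else
		printf("Kernel bad sw trap %lx\n", lvl);
}

int main(void)
{
	bad_trap_model(0x106);	/* what "tvs %icc, 6" produces on overflow */
	return 0;
}
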
11132diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11133index 62098a8..547ab2c 100644
11134--- a/arch/sparc/kernel/unaligned_64.c
11135+++ b/arch/sparc/kernel/unaligned_64.c
11136@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11137 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11138
11139 if (__ratelimit(&ratelimit)) {
11140- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11141+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11142 regs->tpc, (void *) regs->tpc);
11143 }
11144 }
11145diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11146index 3269b02..64f5231 100644
11147--- a/arch/sparc/lib/Makefile
11148+++ b/arch/sparc/lib/Makefile
11149@@ -2,7 +2,7 @@
11150 #
11151
11152 asflags-y := -ansi -DST_DIV0=0x02
11153-ccflags-y := -Werror
11154+#ccflags-y := -Werror
11155
11156 lib-$(CONFIG_SPARC32) += ashrdi3.o
11157 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11158diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11159index 05dac43..76f8ed4 100644
11160--- a/arch/sparc/lib/atomic_64.S
11161+++ b/arch/sparc/lib/atomic_64.S
11162@@ -15,11 +15,22 @@
11163 * a value and does the barriers.
11164 */
11165
11166-#define ATOMIC_OP(op) \
11167-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11168+#ifdef CONFIG_PAX_REFCOUNT
11169+#define __REFCOUNT_OP(op) op##cc
11170+#define __OVERFLOW_IOP tvs %icc, 6;
11171+#define __OVERFLOW_XOP tvs %xcc, 6;
11172+#else
11173+#define __REFCOUNT_OP(op) op
11174+#define __OVERFLOW_IOP
11175+#define __OVERFLOW_XOP
11176+#endif
11177+
11178+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11179+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11180 BACKOFF_SETUP(%o2); \
11181 1: lduw [%o1], %g1; \
11182- op %g1, %o0, %g7; \
11183+ asm_op %g1, %o0, %g7; \
11184+ post_op \
11185 cas [%o1], %g1, %g7; \
11186 cmp %g1, %g7; \
11187 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11188@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11189 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11190 ENDPROC(atomic_##op); \
11191
11192-#define ATOMIC_OP_RETURN(op) \
11193-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11194+#define ATOMIC_OP(op) __ATOMIC_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
11195+	__ATOMIC_OP(op, _unchecked, op, )
11196+
11197+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11198+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11199 BACKOFF_SETUP(%o2); \
11200 1: lduw [%o1], %g1; \
11201- op %g1, %o0, %g7; \
11202+ asm_op %g1, %o0, %g7; \
11203+ post_op \
11204 cas [%o1], %g1, %g7; \
11205 cmp %g1, %g7; \
11206 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11207@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11209 ENDPROC(atomic_##op##_return);
11210
11211+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
11212+	__ATOMIC_OP_RETURN(op, _unchecked, op, )
11213+
11214 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11215
11216 ATOMIC_OPS(add)
11217@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11218
11219 #undef ATOMIC_OPS
11220 #undef ATOMIC_OP_RETURN
11221+#undef __ATOMIC_OP_RETURN
11222 #undef ATOMIC_OP
11223+#undef __ATOMIC_OP
11224
11225-#define ATOMIC64_OP(op) \
11226-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11227+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11228+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11229 BACKOFF_SETUP(%o2); \
11230 1: ldx [%o1], %g1; \
11231- op %g1, %o0, %g7; \
11232+ asm_op %g1, %o0, %g7; \
11233+ post_op \
11234 casx [%o1], %g1, %g7; \
11235 cmp %g1, %g7; \
11236 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11237@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11238 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11239 ENDPROC(atomic64_##op); \
11240
11241-#define ATOMIC64_OP_RETURN(op) \
11242-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11243+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
11244+	__ATOMIC64_OP(op, _unchecked, op, )
11245+
11246+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11247+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11248 BACKOFF_SETUP(%o2); \
11249 1: ldx [%o1], %g1; \
11250- op %g1, %o0, %g7; \
11251+ asm_op %g1, %o0, %g7; \
11252+ post_op \
11253 casx [%o1], %g1, %g7; \
11254 cmp %g1, %g7; \
11255 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11256@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11258 ENDPROC(atomic64_##op##_return);
11259
11260+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
11261+	__ATOMIC64_OP_RETURN(op, _unchecked, op, )
11262+
11263 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11264
11265 ATOMIC64_OPS(add)
11266@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11267
11268 #undef ATOMIC64_OPS
11269 #undef ATOMIC64_OP_RETURN
11270+#undef __ATOMIC64_OP_RETURN
11271 #undef ATOMIC64_OP
11272+#undef __ATOMIC64_OP
11273+#undef __OVERFLOW_XOP
11274+#undef __OVERFLOW_IOP
11275+#undef __REFCOUNT_OP
11276
11277 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11278 BACKOFF_SETUP(%o2)
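
Spelled out, each op now stamps two entry points. With PAX_REFCOUNT enabled, ATOMIC_OP(add) expands to roughly the following (backoff spin and the ENDPROC/return tails elided); the checked entry traps on signed overflow, the _unchecked one is the pre-patch wrapping code:

ENTRY(atomic_add)			/* operates on atomic_t */
1:	lduw	[%o1], %g1
	addcc	%g1, %o0, %g7		! sets the %icc overflow flag
	tvs	%icc, 6			! software trap 6 if it overflowed
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 nop

ENTRY(atomic_add_unchecked)		/* operates on atomic_unchecked_t */
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 nop
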
11279diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11280index 1d649a9..fbc5bfc 100644
11281--- a/arch/sparc/lib/ksyms.c
11282+++ b/arch/sparc/lib/ksyms.c
11283@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11284 /* Atomic counter implementation. */
11285 #define ATOMIC_OP(op) \
11286 EXPORT_SYMBOL(atomic_##op); \
11287-EXPORT_SYMBOL(atomic64_##op);
11288+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11289+EXPORT_SYMBOL(atomic64_##op); \
11290+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11291
11292 #define ATOMIC_OP_RETURN(op) \
11293 EXPORT_SYMBOL(atomic_##op##_return); \
11294@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11295 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11296
11297 ATOMIC_OPS(add)
11298+EXPORT_SYMBOL(atomic_add_return_unchecked);
11299+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11300 ATOMIC_OPS(sub)
11301
11302 #undef ATOMIC_OPS
11303diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11304index 30c3ecc..736f015 100644
11305--- a/arch/sparc/mm/Makefile
11306+++ b/arch/sparc/mm/Makefile
11307@@ -2,7 +2,7 @@
11308 #
11309
11310 asflags-y := -ansi
11311-ccflags-y := -Werror
11312+#ccflags-y := -Werror
11313
11314 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11315 obj-y += fault_$(BITS).o
11316diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11317index 70d8171..274c6c0 100644
11318--- a/arch/sparc/mm/fault_32.c
11319+++ b/arch/sparc/mm/fault_32.c
11320@@ -21,6 +21,9 @@
11321 #include <linux/perf_event.h>
11322 #include <linux/interrupt.h>
11323 #include <linux/kdebug.h>
11324+#include <linux/slab.h>
11325+#include <linux/pagemap.h>
11326+#include <linux/compiler.h>
11327
11328 #include <asm/page.h>
11329 #include <asm/pgtable.h>
11330@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11331 return safe_compute_effective_address(regs, insn);
11332 }
11333
11334+#ifdef CONFIG_PAX_PAGEEXEC
11335+#ifdef CONFIG_PAX_DLRESOLVE
11336+static void pax_emuplt_close(struct vm_area_struct *vma)
11337+{
11338+ vma->vm_mm->call_dl_resolve = 0UL;
11339+}
11340+
11341+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11342+{
11343+ unsigned int *kaddr;
11344+
11345+ vmf->page = alloc_page(GFP_HIGHUSER);
11346+ if (!vmf->page)
11347+ return VM_FAULT_OOM;
11348+
11349+ kaddr = kmap(vmf->page);
11350+ memset(kaddr, 0, PAGE_SIZE);
11351+ kaddr[0] = 0x9DE3BFA8U; /* save */
11352+ flush_dcache_page(vmf->page);
11353+ kunmap(vmf->page);
11354+ return VM_FAULT_MAJOR;
11355+}
11356+
11357+static const struct vm_operations_struct pax_vm_ops = {
11358+ .close = pax_emuplt_close,
11359+ .fault = pax_emuplt_fault
11360+};
11361+
11362+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11363+{
11364+ int ret;
11365+
11366+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11367+ vma->vm_mm = current->mm;
11368+ vma->vm_start = addr;
11369+ vma->vm_end = addr + PAGE_SIZE;
11370+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11371+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11372+ vma->vm_ops = &pax_vm_ops;
11373+
11374+ ret = insert_vm_struct(current->mm, vma);
11375+ if (ret)
11376+ return ret;
11377+
11378+ ++current->mm->total_vm;
11379+ return 0;
11380+}
11381+#endif
11382+
11383+/*
11384+ * PaX: decide what to do with offenders (regs->pc = fault address)
11385+ *
11386+ * returns 1 when task should be killed
11387+ * 2 when patched PLT trampoline was detected
11388+ * 3 when unpatched PLT trampoline was detected
11389+ */
11390+static int pax_handle_fetch_fault(struct pt_regs *regs)
11391+{
11392+
11393+#ifdef CONFIG_PAX_EMUPLT
11394+ int err;
11395+
11396+ do { /* PaX: patched PLT emulation #1 */
11397+ unsigned int sethi1, sethi2, jmpl;
11398+
11399+ err = get_user(sethi1, (unsigned int *)regs->pc);
11400+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11401+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11402+
11403+ if (err)
11404+ break;
11405+
11406+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11407+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11408+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11409+ {
11410+ unsigned int addr;
11411+
11412+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11413+ addr = regs->u_regs[UREG_G1];
11414+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11415+ regs->pc = addr;
11416+ regs->npc = addr+4;
11417+ return 2;
11418+ }
11419+ } while (0);
11420+
11421+ do { /* PaX: patched PLT emulation #2 */
11422+ unsigned int ba;
11423+
11424+ err = get_user(ba, (unsigned int *)regs->pc);
11425+
11426+ if (err)
11427+ break;
11428+
11429+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11430+ unsigned int addr;
11431+
11432+ if ((ba & 0xFFC00000U) == 0x30800000U)
11433+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11434+ else
11435+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11436+ regs->pc = addr;
11437+ regs->npc = addr+4;
11438+ return 2;
11439+ }
11440+ } while (0);
11441+
11442+ do { /* PaX: patched PLT emulation #3 */
11443+ unsigned int sethi, bajmpl, nop;
11444+
11445+ err = get_user(sethi, (unsigned int *)regs->pc);
11446+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11447+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11448+
11449+ if (err)
11450+ break;
11451+
11452+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11453+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11454+ nop == 0x01000000U)
11455+ {
11456+ unsigned int addr;
11457+
11458+ addr = (sethi & 0x003FFFFFU) << 10;
11459+ regs->u_regs[UREG_G1] = addr;
11460+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11461+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11462+ else
11463+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11464+ regs->pc = addr;
11465+ regs->npc = addr+4;
11466+ return 2;
11467+ }
11468+ } while (0);
11469+
11470+ do { /* PaX: unpatched PLT emulation step 1 */
11471+ unsigned int sethi, ba, nop;
11472+
11473+ err = get_user(sethi, (unsigned int *)regs->pc);
11474+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11475+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11476+
11477+ if (err)
11478+ break;
11479+
11480+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11481+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11482+ nop == 0x01000000U)
11483+ {
11484+ unsigned int addr, save, call;
11485+
11486+ if ((ba & 0xFFC00000U) == 0x30800000U)
11487+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11488+ else
11489+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11490+
11491+ err = get_user(save, (unsigned int *)addr);
11492+ err |= get_user(call, (unsigned int *)(addr+4));
11493+ err |= get_user(nop, (unsigned int *)(addr+8));
11494+ if (err)
11495+ break;
11496+
11497+#ifdef CONFIG_PAX_DLRESOLVE
11498+ if (save == 0x9DE3BFA8U &&
11499+ (call & 0xC0000000U) == 0x40000000U &&
11500+ nop == 0x01000000U)
11501+ {
11502+ struct vm_area_struct *vma;
11503+ unsigned long call_dl_resolve;
11504+
11505+ down_read(&current->mm->mmap_sem);
11506+ call_dl_resolve = current->mm->call_dl_resolve;
11507+ up_read(&current->mm->mmap_sem);
11508+ if (likely(call_dl_resolve))
11509+ goto emulate;
11510+
11511+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11512+
11513+ down_write(&current->mm->mmap_sem);
11514+ if (current->mm->call_dl_resolve) {
11515+ call_dl_resolve = current->mm->call_dl_resolve;
11516+ up_write(&current->mm->mmap_sem);
11517+ if (vma)
11518+ kmem_cache_free(vm_area_cachep, vma);
11519+ goto emulate;
11520+ }
11521+
11522+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11523+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11524+ up_write(&current->mm->mmap_sem);
11525+ if (vma)
11526+ kmem_cache_free(vm_area_cachep, vma);
11527+ return 1;
11528+ }
11529+
11530+ if (pax_insert_vma(vma, call_dl_resolve)) {
11531+ up_write(&current->mm->mmap_sem);
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ return 1;
11534+ }
11535+
11536+ current->mm->call_dl_resolve = call_dl_resolve;
11537+ up_write(&current->mm->mmap_sem);
11538+
11539+emulate:
11540+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11541+ regs->pc = call_dl_resolve;
11542+ regs->npc = addr+4;
11543+ return 3;
11544+ }
11545+#endif
11546+
11547+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11548+ if ((save & 0xFFC00000U) == 0x05000000U &&
11549+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11550+ nop == 0x01000000U)
11551+ {
11552+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11553+ regs->u_regs[UREG_G2] = addr + 4;
11554+ addr = (save & 0x003FFFFFU) << 10;
11555+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11556+ regs->pc = addr;
11557+ regs->npc = addr+4;
11558+ return 3;
11559+ }
11560+ }
11561+ } while (0);
11562+
11563+ do { /* PaX: unpatched PLT emulation step 2 */
11564+ unsigned int save, call, nop;
11565+
11566+ err = get_user(save, (unsigned int *)(regs->pc-4));
11567+ err |= get_user(call, (unsigned int *)regs->pc);
11568+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11569+ if (err)
11570+ break;
11571+
11572+ if (save == 0x9DE3BFA8U &&
11573+ (call & 0xC0000000U) == 0x40000000U &&
11574+ nop == 0x01000000U)
11575+ {
11576+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11577+
11578+ regs->u_regs[UREG_RETPC] = regs->pc;
11579+ regs->pc = dl_resolve;
11580+ regs->npc = dl_resolve+4;
11581+ return 3;
11582+ }
11583+ } while (0);
11584+#endif
11585+
11586+ return 1;
11587+}
11588+
11589+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11590+{
11591+ unsigned long i;
11592+
11593+ printk(KERN_ERR "PAX: bytes at PC: ");
11594+ for (i = 0; i < 8; i++) {
11595+ unsigned int c;
11596+ if (get_user(c, (unsigned int *)pc+i))
11597+ printk(KERN_CONT "???????? ");
11598+ else
11599+ printk(KERN_CONT "%08x ", c);
11600+ }
11601+ printk("\n");
11602+}
11603+#endif
11604+
11605 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11606 int text_fault)
11607 {
11608@@ -226,6 +500,24 @@ good_area:
11609 if (!(vma->vm_flags & VM_WRITE))
11610 goto bad_area;
11611 } else {
11612+
11613+#ifdef CONFIG_PAX_PAGEEXEC
11614+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11615+ up_read(&mm->mmap_sem);
11616+ switch (pax_handle_fetch_fault(regs)) {
11617+
11618+#ifdef CONFIG_PAX_EMUPLT
11619+ case 2:
11620+ case 3:
11621+ return;
11622+#endif
11623+
11624+ }
11625+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11626+ do_group_exit(SIGKILL);
11627+ }
11628+#endif
11629+
11630 /* Allow reads even for write-only mappings */
11631 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11632 goto bad_area;
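
The displacement arithmetic used throughout the 32-bit PLT emulation above -- expressions like (((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) -- is ordinary two's-complement sign extension of a branch immediate, written so it can operate on the raw instruction word. A minimal userspace sketch (not part of the patch; the reference version assumes the usual arithmetic right shift on signed values) verifying the identity over the whole 22-bit field:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sext22_pax(uint32_t insn)
{
	/* set the high bits, flip the sign bit, add it back (mod 2^32) */
	return ((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U;
}

static uint32_t sext22_ref(uint32_t insn)
{
	/* conventional: shift the field to the top, arithmetic-shift down */
	return (uint32_t)((int32_t)(insn << 10) >> 10);
}

int main(void)
{
	uint32_t d;

	for (d = 0; d < (1U << 22); d++)
		assert(sext22_pax(d) == sext22_ref(d));

	/* displacement -1, shifted left by 2 as in the emulation: -4 */
	printf("%08x\n", sext22_pax(0x003FFFFFU) << 2);
	return 0;
}

The same pattern with different masks handles the 19-bit branch displacements (0xFFF80000) and the 13-bit simm13 fields (0xFFFFE000) in the neighbouring hunks.
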
11633diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11634index 4798232..f76e3aa 100644
11635--- a/arch/sparc/mm/fault_64.c
11636+++ b/arch/sparc/mm/fault_64.c
11637@@ -22,6 +22,9 @@
11638 #include <linux/kdebug.h>
11639 #include <linux/percpu.h>
11640 #include <linux/context_tracking.h>
11641+#include <linux/slab.h>
11642+#include <linux/pagemap.h>
11643+#include <linux/compiler.h>
11644
11645 #include <asm/page.h>
11646 #include <asm/pgtable.h>
11647@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11648 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11649 regs->tpc);
11650 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11651- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11652+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11653 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11654 dump_stack();
11655 unhandled_fault(regs->tpc, current, regs);
11656@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11657 show_regs(regs);
11658 }
11659
11660+#ifdef CONFIG_PAX_PAGEEXEC
11661+#ifdef CONFIG_PAX_DLRESOLVE
11662+static void pax_emuplt_close(struct vm_area_struct *vma)
11663+{
11664+ vma->vm_mm->call_dl_resolve = 0UL;
11665+}
11666+
11667+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11668+{
11669+ unsigned int *kaddr;
11670+
11671+ vmf->page = alloc_page(GFP_HIGHUSER);
11672+ if (!vmf->page)
11673+ return VM_FAULT_OOM;
11674+
11675+ kaddr = kmap(vmf->page);
11676+ memset(kaddr, 0, PAGE_SIZE);
11677+ kaddr[0] = 0x9DE3BFA8U; /* save */
11678+ flush_dcache_page(vmf->page);
11679+ kunmap(vmf->page);
11680+ return VM_FAULT_MAJOR;
11681+}
11682+
11683+static const struct vm_operations_struct pax_vm_ops = {
11684+ .close = pax_emuplt_close,
11685+ .fault = pax_emuplt_fault
11686+};
11687+
11688+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11689+{
11690+ int ret;
11691+
11692+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11693+ vma->vm_mm = current->mm;
11694+ vma->vm_start = addr;
11695+ vma->vm_end = addr + PAGE_SIZE;
11696+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11697+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11698+ vma->vm_ops = &pax_vm_ops;
11699+
11700+ ret = insert_vm_struct(current->mm, vma);
11701+ if (ret)
11702+ return ret;
11703+
11704+ ++current->mm->total_vm;
11705+ return 0;
11706+}
11707+#endif
11708+
11709+/*
11710+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11711+ *
11712+ * returns 1 when task should be killed
11713+ * 2 when patched PLT trampoline was detected
11714+ * 3 when unpatched PLT trampoline was detected
11715+ */
11716+static int pax_handle_fetch_fault(struct pt_regs *regs)
11717+{
11718+
11719+#ifdef CONFIG_PAX_EMUPLT
11720+ int err;
11721+
11722+ do { /* PaX: patched PLT emulation #1 */
11723+ unsigned int sethi1, sethi2, jmpl;
11724+
11725+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11726+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11727+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11728+
11729+ if (err)
11730+ break;
11731+
11732+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11733+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11734+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11735+ {
11736+ unsigned long addr;
11737+
11738+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11739+ addr = regs->u_regs[UREG_G1];
11740+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11741+
11742+ if (test_thread_flag(TIF_32BIT))
11743+ addr &= 0xFFFFFFFFUL;
11744+
11745+ regs->tpc = addr;
11746+ regs->tnpc = addr+4;
11747+ return 2;
11748+ }
11749+ } while (0);
11750+
11751+ do { /* PaX: patched PLT emulation #2 */
11752+ unsigned int ba;
11753+
11754+ err = get_user(ba, (unsigned int *)regs->tpc);
11755+
11756+ if (err)
11757+ break;
11758+
11759+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11760+ unsigned long addr;
11761+
11762+ if ((ba & 0xFFC00000U) == 0x30800000U)
11763+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11764+ else
11765+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11766+
11767+ if (test_thread_flag(TIF_32BIT))
11768+ addr &= 0xFFFFFFFFUL;
11769+
11770+ regs->tpc = addr;
11771+ regs->tnpc = addr+4;
11772+ return 2;
11773+ }
11774+ } while (0);
11775+
11776+ do { /* PaX: patched PLT emulation #3 */
11777+ unsigned int sethi, bajmpl, nop;
11778+
11779+ err = get_user(sethi, (unsigned int *)regs->tpc);
11780+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11781+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11782+
11783+ if (err)
11784+ break;
11785+
11786+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11787+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11788+ nop == 0x01000000U)
11789+ {
11790+ unsigned long addr;
11791+
11792+ addr = (sethi & 0x003FFFFFU) << 10;
11793+ regs->u_regs[UREG_G1] = addr;
11794+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11795+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11796+ else
11797+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11798+
11799+ if (test_thread_flag(TIF_32BIT))
11800+ addr &= 0xFFFFFFFFUL;
11801+
11802+ regs->tpc = addr;
11803+ regs->tnpc = addr+4;
11804+ return 2;
11805+ }
11806+ } while (0);
11807+
11808+ do { /* PaX: patched PLT emulation #4 */
11809+ unsigned int sethi, mov1, call, mov2;
11810+
11811+ err = get_user(sethi, (unsigned int *)regs->tpc);
11812+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11814+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11815+
11816+ if (err)
11817+ break;
11818+
11819+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11820+ mov1 == 0x8210000FU &&
11821+ (call & 0xC0000000U) == 0x40000000U &&
11822+ mov2 == 0x9E100001U)
11823+ {
11824+ unsigned long addr;
11825+
11826+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11827+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11828+
11829+ if (test_thread_flag(TIF_32BIT))
11830+ addr &= 0xFFFFFFFFUL;
11831+
11832+ regs->tpc = addr;
11833+ regs->tnpc = addr+4;
11834+ return 2;
11835+ }
11836+ } while (0);
11837+
11838+ do { /* PaX: patched PLT emulation #5 */
11839+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11840+
11841+ err = get_user(sethi, (unsigned int *)regs->tpc);
11842+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11843+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11844+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11845+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11846+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11847+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11848+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11849+
11850+ if (err)
11851+ break;
11852+
11853+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11854+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11855+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11856+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11857+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11858+ sllx == 0x83287020U &&
11859+ jmpl == 0x81C04005U &&
11860+ nop == 0x01000000U)
11861+ {
11862+ unsigned long addr;
11863+
11864+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11865+ regs->u_regs[UREG_G1] <<= 32;
11866+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11867+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11868+ regs->tpc = addr;
11869+ regs->tnpc = addr+4;
11870+ return 2;
11871+ }
11872+ } while (0);
11873+
11874+ do { /* PaX: patched PLT emulation #6 */
11875+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11876+
11877+ err = get_user(sethi, (unsigned int *)regs->tpc);
11878+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11879+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11880+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11881+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11882+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11883+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11884+
11885+ if (err)
11886+ break;
11887+
11888+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11889+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11890+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11891+ sllx == 0x83287020U &&
11892+ (or & 0xFFFFE000U) == 0x8A116000U &&
11893+ jmpl == 0x81C04005U &&
11894+ nop == 0x01000000U)
11895+ {
11896+ unsigned long addr;
11897+
11898+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11899+ regs->u_regs[UREG_G1] <<= 32;
11900+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11901+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11902+ regs->tpc = addr;
11903+ regs->tnpc = addr+4;
11904+ return 2;
11905+ }
11906+ } while (0);
11907+
11908+ do { /* PaX: unpatched PLT emulation step 1 */
11909+ unsigned int sethi, ba, nop;
11910+
11911+ err = get_user(sethi, (unsigned int *)regs->tpc);
11912+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11913+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11914+
11915+ if (err)
11916+ break;
11917+
11918+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11919+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11920+ nop == 0x01000000U)
11921+ {
11922+ unsigned long addr;
11923+ unsigned int save, call;
11924+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11925+
11926+ if ((ba & 0xFFC00000U) == 0x30800000U)
11927+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11928+ else
11929+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11930+
11931+ if (test_thread_flag(TIF_32BIT))
11932+ addr &= 0xFFFFFFFFUL;
11933+
11934+ err = get_user(save, (unsigned int *)addr);
11935+ err |= get_user(call, (unsigned int *)(addr+4));
11936+ err |= get_user(nop, (unsigned int *)(addr+8));
11937+ if (err)
11938+ break;
11939+
11940+#ifdef CONFIG_PAX_DLRESOLVE
11941+ if (save == 0x9DE3BFA8U &&
11942+ (call & 0xC0000000U) == 0x40000000U &&
11943+ nop == 0x01000000U)
11944+ {
11945+ struct vm_area_struct *vma;
11946+ unsigned long call_dl_resolve;
11947+
11948+ down_read(&current->mm->mmap_sem);
11949+ call_dl_resolve = current->mm->call_dl_resolve;
11950+ up_read(&current->mm->mmap_sem);
11951+ if (likely(call_dl_resolve))
11952+ goto emulate;
11953+
11954+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11955+
11956+ down_write(&current->mm->mmap_sem);
11957+ if (current->mm->call_dl_resolve) {
11958+ call_dl_resolve = current->mm->call_dl_resolve;
11959+ up_write(&current->mm->mmap_sem);
11960+ if (vma)
11961+ kmem_cache_free(vm_area_cachep, vma);
11962+ goto emulate;
11963+ }
11964+
11965+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11966+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11967+ up_write(&current->mm->mmap_sem);
11968+ if (vma)
11969+ kmem_cache_free(vm_area_cachep, vma);
11970+ return 1;
11971+ }
11972+
11973+ if (pax_insert_vma(vma, call_dl_resolve)) {
11974+ up_write(&current->mm->mmap_sem);
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ return 1;
11977+ }
11978+
11979+ current->mm->call_dl_resolve = call_dl_resolve;
11980+ up_write(&current->mm->mmap_sem);
11981+
11982+emulate:
11983+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11984+ regs->tpc = call_dl_resolve;
11985+ regs->tnpc = addr+4;
11986+ return 3;
11987+ }
11988+#endif
11989+
11990+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11991+ if ((save & 0xFFC00000U) == 0x05000000U &&
11992+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11993+ nop == 0x01000000U)
11994+ {
11995+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11996+ regs->u_regs[UREG_G2] = addr + 4;
11997+ addr = (save & 0x003FFFFFU) << 10;
11998+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11999+
12000+ if (test_thread_flag(TIF_32BIT))
12001+ addr &= 0xFFFFFFFFUL;
12002+
12003+ regs->tpc = addr;
12004+ regs->tnpc = addr+4;
12005+ return 3;
12006+ }
12007+
12008+ /* PaX: 64-bit PLT stub */
12009+ err = get_user(sethi1, (unsigned int *)addr);
12010+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12011+ err |= get_user(or1, (unsigned int *)(addr+8));
12012+ err |= get_user(or2, (unsigned int *)(addr+12));
12013+ err |= get_user(sllx, (unsigned int *)(addr+16));
12014+ err |= get_user(add, (unsigned int *)(addr+20));
12015+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12016+ err |= get_user(nop, (unsigned int *)(addr+28));
12017+ if (err)
12018+ break;
12019+
12020+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12021+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12022+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12023+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12024+ sllx == 0x89293020U &&
12025+ add == 0x8A010005U &&
12026+ jmpl == 0x89C14000U &&
12027+ nop == 0x01000000U)
12028+ {
12029+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12030+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12031+ regs->u_regs[UREG_G4] <<= 32;
12032+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12033+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12034+ regs->u_regs[UREG_G4] = addr + 24;
12035+ addr = regs->u_regs[UREG_G5];
12036+ regs->tpc = addr;
12037+ regs->tnpc = addr+4;
12038+ return 3;
12039+ }
12040+ }
12041+ } while (0);
12042+
12043+#ifdef CONFIG_PAX_DLRESOLVE
12044+ do { /* PaX: unpatched PLT emulation step 2 */
12045+ unsigned int save, call, nop;
12046+
12047+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12048+ err |= get_user(call, (unsigned int *)regs->tpc);
12049+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12050+ if (err)
12051+ break;
12052+
12053+ if (save == 0x9DE3BFA8U &&
12054+ (call & 0xC0000000U) == 0x40000000U &&
12055+ nop == 0x01000000U)
12056+ {
12057+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12058+
12059+ if (test_thread_flag(TIF_32BIT))
12060+ dl_resolve &= 0xFFFFFFFFUL;
12061+
12062+ regs->u_regs[UREG_RETPC] = regs->tpc;
12063+ regs->tpc = dl_resolve;
12064+ regs->tnpc = dl_resolve+4;
12065+ return 3;
12066+ }
12067+ } while (0);
12068+#endif
12069+
12070+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12071+ unsigned int sethi, ba, nop;
12072+
12073+ err = get_user(sethi, (unsigned int *)regs->tpc);
12074+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12075+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12076+
12077+ if (err)
12078+ break;
12079+
12080+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12081+ (ba & 0xFFF00000U) == 0x30600000U &&
12082+ nop == 0x01000000U)
12083+ {
12084+ unsigned long addr;
12085+
12086+ addr = (sethi & 0x003FFFFFU) << 10;
12087+ regs->u_regs[UREG_G1] = addr;
12088+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12089+
12090+ if (test_thread_flag(TIF_32BIT))
12091+ addr &= 0xFFFFFFFFUL;
12092+
12093+ regs->tpc = addr;
12094+ regs->tnpc = addr+4;
12095+ return 2;
12096+ }
12097+ } while (0);
12098+
12099+#endif
12100+
12101+ return 1;
12102+}
12103+
12104+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12105+{
12106+ unsigned long i;
12107+
12108+ printk(KERN_ERR "PAX: bytes at PC: ");
12109+ for (i = 0; i < 8; i++) {
12110+ unsigned int c;
12111+ if (get_user(c, (unsigned int *)pc+i))
12112+ printk(KERN_CONT "???????? ");
12113+ else
12114+ printk(KERN_CONT "%08x ", c);
12115+ }
12116+ printk("\n");
12117+}
12118+#endif
12119+
12120 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12121 {
12122 enum ctx_state prev_state = exception_enter();
12123@@ -353,6 +816,29 @@ retry:
12124 if (!vma)
12125 goto bad_area;
12126
12127+#ifdef CONFIG_PAX_PAGEEXEC
12128+ /* PaX: detect ITLB misses on non-exec pages */
12129+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12130+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12131+ {
12132+ if (address != regs->tpc)
12133+ goto good_area;
12134+
12135+ up_read(&mm->mmap_sem);
12136+ switch (pax_handle_fetch_fault(regs)) {
12137+
12138+#ifdef CONFIG_PAX_EMUPLT
12139+ case 2:
12140+ case 3:
12141+ return;
12142+#endif
12143+
12144+ }
12145+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12146+ do_group_exit(SIGKILL);
12147+ }
12148+#endif
12149+
12150 /* Pure DTLB misses do not tell us whether the fault causing
12151 * load/store/atomic was a write or not, it only says that there
12152 * was no match. So in such a case we (carefully) read the
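
Patched PLT emulations #5 and #6 in the 64-bit handler above recognize the classic sethi/or/sllx constant-building sequence and reassemble the 64-bit target from the instruction immediates: %g1 carries the high half shifted up by 32, %g5 the low half, and their sum is the branch target; TIF_32BIT tasks get the result truncated to 32 bits. A small standalone C check of that reassembly, using an arbitrary example address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t target = 0xfff8001234567890ULL;   /* arbitrary example */

	uint32_t hh = (uint32_t)(target >> 42);            /* sethi %hh: bits 63..42 */
	uint32_t hm = (uint32_t)(target >> 32) & 0x3FF;    /* or %hm:    bits 41..32 */
	uint32_t lm = (uint32_t)(target >> 10) & 0x3FFFFF; /* sethi %lm: bits 31..10 */
	uint32_t lo = (uint32_t)target & 0x3FF;            /* or %lo:    bits  9..0  */

	uint64_t g1 = (((uint64_t)hh << 10) | hm) << 32;   /* after sllx %g1, 32 */
	uint64_t g5 = ((uint64_t)lm << 10) | lo;
	uint64_t addr = g1 + g5;

	assert(addr == target);

	/* for a TIF_32BIT task the emulation truncates to 32 bits: */
	printf("%016llx -> %08llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)(addr & 0xFFFFFFFFULL));
	return 0;
}
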
12153diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12154index 4242eab..9ae6360 100644
12155--- a/arch/sparc/mm/hugetlbpage.c
12156+++ b/arch/sparc/mm/hugetlbpage.c
12157@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12158 unsigned long addr,
12159 unsigned long len,
12160 unsigned long pgoff,
12161- unsigned long flags)
12162+ unsigned long flags,
12163+ unsigned long offset)
12164 {
12165+ struct mm_struct *mm = current->mm;
12166 unsigned long task_size = TASK_SIZE;
12167 struct vm_unmapped_area_info info;
12168
12169@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12170
12171 info.flags = 0;
12172 info.length = len;
12173- info.low_limit = TASK_UNMAPPED_BASE;
12174+ info.low_limit = mm->mmap_base;
12175 info.high_limit = min(task_size, VA_EXCLUDE_START);
12176 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12177 info.align_offset = 0;
12178+ info.threadstack_offset = offset;
12179 addr = vm_unmapped_area(&info);
12180
12181 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12182 VM_BUG_ON(addr != -ENOMEM);
12183 info.low_limit = VA_EXCLUDE_END;
12184+
12185+#ifdef CONFIG_PAX_RANDMMAP
12186+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12187+ info.low_limit += mm->delta_mmap;
12188+#endif
12189+
12190 info.high_limit = task_size;
12191 addr = vm_unmapped_area(&info);
12192 }
12193@@ -55,7 +64,8 @@ static unsigned long
12194 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12195 const unsigned long len,
12196 const unsigned long pgoff,
12197- const unsigned long flags)
12198+ const unsigned long flags,
12199+ const unsigned long offset)
12200 {
12201 struct mm_struct *mm = current->mm;
12202 unsigned long addr = addr0;
12203@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12204 info.high_limit = mm->mmap_base;
12205 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12206 info.align_offset = 0;
12207+ info.threadstack_offset = offset;
12208 addr = vm_unmapped_area(&info);
12209
12210 /*
12211@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12212 VM_BUG_ON(addr != -ENOMEM);
12213 info.flags = 0;
12214 info.low_limit = TASK_UNMAPPED_BASE;
12215+
12216+#ifdef CONFIG_PAX_RANDMMAP
12217+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12218+ info.low_limit += mm->delta_mmap;
12219+#endif
12220+
12221 info.high_limit = STACK_TOP32;
12222 addr = vm_unmapped_area(&info);
12223 }
12224@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12225 struct mm_struct *mm = current->mm;
12226 struct vm_area_struct *vma;
12227 unsigned long task_size = TASK_SIZE;
12228+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12229
12230 if (test_thread_flag(TIF_32BIT))
12231 task_size = STACK_TOP32;
12232@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12233 return addr;
12234 }
12235
12236+#ifdef CONFIG_PAX_RANDMMAP
12237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12238+#endif
12239+
12240 if (addr) {
12241 addr = ALIGN(addr, HPAGE_SIZE);
12242 vma = find_vma(mm, addr);
12243- if (task_size - len >= addr &&
12244- (!vma || addr + len <= vma->vm_start))
12245+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12246 return addr;
12247 }
12248 if (mm->get_unmapped_area == arch_get_unmapped_area)
12249 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12250- pgoff, flags);
12251+ pgoff, flags, offset);
12252 else
12253 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12254- pgoff, flags);
12255+ pgoff, flags, offset);
12256 }
12257
12258 pte_t *huge_pte_alloc(struct mm_struct *mm,
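
The hugetlb hunk above swaps the open-coded "!vma || addr + len <= vma->vm_start" test for check_heap_stack_gap() and threads a per-thread randomized offset (gr_rand_threadstack_offset) down into the unmapped-area search. The helper itself is defined elsewhere in the patch; purely as a hedged sketch of its likely shape -- the real grsecurity implementation may differ -- it is the old overlap test plus a required free gap below the next mapping:

#include <stdbool.h>

struct vma_stub { unsigned long vm_start; };

/* hypothetical stand-in for check_heap_stack_gap(); shape assumed */
static bool check_gap_sketch(const struct vma_stub *vma,
			     unsigned long addr, unsigned long len,
			     unsigned long offset)
{
	if (!vma)
		return true;                 /* nothing mapped above us */
	if (addr + len > vma->vm_start)
		return false;                /* request would overlap */
	/* additionally demand a randomized free gap below the next vma */
	return vma->vm_start - (addr + len) >= offset;
}

int main(void)
{
	struct vma_stub stack = { .vm_start = 0x70000000UL };

	/* 4 KiB at 0x60000000 with a 64 KiB required gap: fits */
	return !check_gap_sketch(&stack, 0x60000000UL, 4096, 0x10000UL);
}
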
12259diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12260index 4ca0d6b..e89bca1 100644
12261--- a/arch/sparc/mm/init_64.c
12262+++ b/arch/sparc/mm/init_64.c
12263@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12264 int num_kernel_image_mappings;
12265
12266 #ifdef CONFIG_DEBUG_DCFLUSH
12267-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12268+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12269 #ifdef CONFIG_SMP
12270-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12271+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12272 #endif
12273 #endif
12274
12275@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12276 {
12277 BUG_ON(tlb_type == hypervisor);
12278 #ifdef CONFIG_DEBUG_DCFLUSH
12279- atomic_inc(&dcpage_flushes);
12280+ atomic_inc_unchecked(&dcpage_flushes);
12281 #endif
12282
12283 #ifdef DCACHE_ALIASING_POSSIBLE
12284@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12285
12286 #ifdef CONFIG_DEBUG_DCFLUSH
12287 seq_printf(m, "DCPageFlushes\t: %d\n",
12288- atomic_read(&dcpage_flushes));
12289+ atomic_read_unchecked(&dcpage_flushes));
12290 #ifdef CONFIG_SMP
12291 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12292- atomic_read(&dcpage_flushes_xcall));
12293+ atomic_read_unchecked(&dcpage_flushes_xcall));
12294 #endif /* CONFIG_SMP */
12295 #endif /* CONFIG_DEBUG_DCFLUSH */
12296 }
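
The dcpage_flushes counters above become atomic_unchecked_t because, under PaX's REFCOUNT hardening, plain atomic_inc() refuses to overflow -- the right behavior for reference counts, but wrong for statistics that may legitimately wrap. A plain-C sketch of the distinction (the kernel versions are inline-asm atomics; __builtin_add_overflow stands in for the overflow trap):

#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
	int n;
	if (__builtin_add_overflow(*v, 1, &n))
		return -1;      /* the kernel would trap/saturate here */
	*v = n;
	return 0;
}

static void unchecked_inc(unsigned int *v)
{
	++*v;                   /* wrapping is harmless for a statistic */
}

int main(void)
{
	int refs = INT_MAX;
	unsigned int stat = 0xFFFFFFFFU;

	printf("checked: %d\n", checked_inc(&refs));    /* -1: refused */
	unchecked_inc(&stat);
	printf("unchecked: %u\n", stat);                /* wrapped to 0 */
	return 0;
}
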
12297diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12298index 7cca418..53fc030 100644
12299--- a/arch/tile/Kconfig
12300+++ b/arch/tile/Kconfig
12301@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12302
12303 config KEXEC
12304 bool "kexec system call"
12305+ depends on !GRKERNSEC_KMEM
12306 ---help---
12307 kexec is a system call that implements the ability to shutdown your
12308 current kernel, and to start another kernel. It is like a reboot
12309diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12310index 7b11c5f..755a026 100644
12311--- a/arch/tile/include/asm/atomic_64.h
12312+++ b/arch/tile/include/asm/atomic_64.h
12313@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12314
12315 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12316
12317+#define atomic64_read_unchecked(v) atomic64_read(v)
12318+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12319+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12320+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12321+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12322+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12323+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12324+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12325+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12326+
12327 /* Define this to indicate that cmpxchg is an efficient operation. */
12328 #define __HAVE_ARCH_CMPXCHG
12329
12330diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12331index 6160761..00cac88 100644
12332--- a/arch/tile/include/asm/cache.h
12333+++ b/arch/tile/include/asm/cache.h
12334@@ -15,11 +15,12 @@
12335 #ifndef _ASM_TILE_CACHE_H
12336 #define _ASM_TILE_CACHE_H
12337
12338+#include <linux/const.h>
12339 #include <arch/chip.h>
12340
12341 /* bytes per L1 data cache line */
12342 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12345
12346 /* bytes per L2 cache line */
12347 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
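
The cache.h hunks here and elsewhere in the patch rewrite (1 << L1_CACHE_SHIFT) as (_AC(1,UL) << L1_CACHE_SHIFT). _AC() comes from include/uapi/linux/const.h: it pastes the UL suffix onto the constant in C, but drops it when the header is pulled into assembly, where a suffix would be a syntax error. Reproduced as a standalone demonstration:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* the constant is now unsigned long, so mask arithmetic on
	 * addresses stays in the unsigned domain instead of passing
	 * through a signed int intermediate
	 */
	unsigned long addr = 0x81000123UL;
	printf("constant is %zu bytes wide; aligned: %lx\n",
	       sizeof(L1_CACHE_BYTES), addr & ~(L1_CACHE_BYTES - 1));
	return 0;
}
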
12348diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12349index f41cb53..31d3ab4 100644
12350--- a/arch/tile/include/asm/uaccess.h
12351+++ b/arch/tile/include/asm/uaccess.h
12352@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12353 const void __user *from,
12354 unsigned long n)
12355 {
12356- int sz = __compiletime_object_size(to);
12357+ size_t sz = __compiletime_object_size(to);
12358
12359- if (likely(sz == -1 || sz >= n))
12360+ if (likely(sz == (size_t)-1 || sz >= n))
12361 n = _copy_from_user(to, from, n);
12362 else
12363 copy_from_user_overflow();
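
The copy_from_user() fix above widens sz from int to size_t and makes the "unknown size" sentinel an explicit (size_t)-1. Mixing a signed int with an unsigned length in comparisons silently converts the int to unsigned, which is exactly the class of surprise the change removes; a short illustration:

#include <stdio.h>

int main(void)
{
	int sz_int = -1;            /* "size unknown" sentinel, old style */
	size_t sz = (size_t)-1;     /* same sentinel, one unsigned type  */
	unsigned long n = 16;

	/* -1 converts to ULONG_MAX before the comparison: */
	printf("int -1 >= 16ul     : %d\n", sz_int >= n);       /* 1 (!) */
	printf("sz == (size_t)-1   : %d\n", sz == (size_t)-1);  /* 1     */
	return 0;
}
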
12364diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12365index 8416240..a012fb7 100644
12366--- a/arch/tile/mm/hugetlbpage.c
12367+++ b/arch/tile/mm/hugetlbpage.c
12368@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12369 info.high_limit = TASK_SIZE;
12370 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12371 info.align_offset = 0;
12372+ info.threadstack_offset = 0;
12373 return vm_unmapped_area(&info);
12374 }
12375
12376@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12377 info.high_limit = current->mm->mmap_base;
12378 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12379 info.align_offset = 0;
12380+ info.threadstack_offset = 0;
12381 addr = vm_unmapped_area(&info);
12382
12383 /*
12384diff --git a/arch/um/Makefile b/arch/um/Makefile
12385index e4b1a96..16162f8 100644
12386--- a/arch/um/Makefile
12387+++ b/arch/um/Makefile
12388@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12389 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12390 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12391
12392+ifdef CONSTIFY_PLUGIN
12393+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12394+endif
12395+
12396 #This will adjust *FLAGS accordingly to the platform.
12397 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12398
12399diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12400index 19e1bdd..3665b77 100644
12401--- a/arch/um/include/asm/cache.h
12402+++ b/arch/um/include/asm/cache.h
12403@@ -1,6 +1,7 @@
12404 #ifndef __UM_CACHE_H
12405 #define __UM_CACHE_H
12406
12407+#include <linux/const.h>
12408
12409 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12410 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12411@@ -12,6 +13,6 @@
12412 # define L1_CACHE_SHIFT 5
12413 #endif
12414
12415-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12416+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12417
12418 #endif
12419diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12420index 2e0a6b1..a64d0f5 100644
12421--- a/arch/um/include/asm/kmap_types.h
12422+++ b/arch/um/include/asm/kmap_types.h
12423@@ -8,6 +8,6 @@
12424
12425 /* No more #include "asm/arch/kmap_types.h" ! */
12426
12427-#define KM_TYPE_NR 14
12428+#define KM_TYPE_NR 15
12429
12430 #endif
12431diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12432index 71c5d13..4c7b9f1 100644
12433--- a/arch/um/include/asm/page.h
12434+++ b/arch/um/include/asm/page.h
12435@@ -14,6 +14,9 @@
12436 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12437 #define PAGE_MASK (~(PAGE_SIZE-1))
12438
12439+#define ktla_ktva(addr) (addr)
12440+#define ktva_ktla(addr) (addr)
12441+
12442 #ifndef __ASSEMBLY__
12443
12444 struct page;
12445diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12446index 2b4274e..754fe06 100644
12447--- a/arch/um/include/asm/pgtable-3level.h
12448+++ b/arch/um/include/asm/pgtable-3level.h
12449@@ -58,6 +58,7 @@
12450 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12451 #define pud_populate(mm, pud, pmd) \
12452 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12453+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12454
12455 #ifdef CONFIG_64BIT
12456 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12457diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12458index f17bca8..48adb87 100644
12459--- a/arch/um/kernel/process.c
12460+++ b/arch/um/kernel/process.c
12461@@ -356,22 +356,6 @@ int singlestepping(void * t)
12462 return 2;
12463 }
12464
12465-/*
12466- * Only x86 and x86_64 have an arch_align_stack().
12467- * All other arches have "#define arch_align_stack(x) (x)"
12468- * in their asm/exec.h
12469- * As this is included in UML from asm-um/system-generic.h,
12470- * we can use it to behave as the subarch does.
12471- */
12472-#ifndef arch_align_stack
12473-unsigned long arch_align_stack(unsigned long sp)
12474-{
12475- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12476- sp -= get_random_int() % 8192;
12477- return sp & ~0xf;
12478-}
12479-#endif
12480-
12481 unsigned long get_wchan(struct task_struct *p)
12482 {
12483 unsigned long stack_page, sp, ip;
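
The UML hunk deletes the local arch_align_stack() fallback, presumably so UML picks up the subarch's (x86's) definition instead of carrying a duplicate. For reference, a userspace sketch of what the removed fallback did -- subtract a random sub-8KiB amount and 16-byte-align -- with rand() standing in for the kernel's get_random_int() and the personality/randomize_va_space guard omitted:

#include <stdio.h>
#include <stdlib.h>

static unsigned long align_stack_sketch(unsigned long sp)
{
	sp -= (unsigned long)rand() % 8192;  /* randomize within 8 KiB */
	return sp & ~0xfUL;                  /* then 16-byte align     */
}

int main(void)
{
	unsigned long sp = 0x7ffff000UL;
	printf("%lx -> %lx\n", sp, align_stack_sketch(sp));
	return 0;
}
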
12484diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12485index ad8f795..2c7eec6 100644
12486--- a/arch/unicore32/include/asm/cache.h
12487+++ b/arch/unicore32/include/asm/cache.h
12488@@ -12,8 +12,10 @@
12489 #ifndef __UNICORE_CACHE_H__
12490 #define __UNICORE_CACHE_H__
12491
12492-#define L1_CACHE_SHIFT (5)
12493-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12494+#include <linux/const.h>
12495+
12496+#define L1_CACHE_SHIFT 5
12497+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12498
12499 /*
12500 * Memory returned by kmalloc() may be used for DMA, so we must make
12501diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12502index b7d31ca..9481ec5 100644
12503--- a/arch/x86/Kconfig
12504+++ b/arch/x86/Kconfig
12505@@ -132,7 +132,7 @@ config X86
12506 select RTC_LIB
12507 select HAVE_DEBUG_STACKOVERFLOW
12508 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12509- select HAVE_CC_STACKPROTECTOR
12510+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12511 select GENERIC_CPU_AUTOPROBE
12512 select HAVE_ARCH_AUDITSYSCALL
12513 select ARCH_SUPPORTS_ATOMIC_RMW
12514@@ -266,7 +266,7 @@ config X86_HT
12515
12516 config X86_32_LAZY_GS
12517 def_bool y
12518- depends on X86_32 && !CC_STACKPROTECTOR
12519+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12520
12521 config ARCH_HWEIGHT_CFLAGS
12522 string
12523@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
12524
12525 menuconfig HYPERVISOR_GUEST
12526 bool "Linux guest support"
12527+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12528 ---help---
12529 Say Y here to enable options for running Linux under various hyper-
12530 visors. This option enables basic hypervisor detection and platform
12531@@ -1013,6 +1014,7 @@ config VM86
12532
12533 config X86_16BIT
12534 bool "Enable support for 16-bit segments" if EXPERT
12535+ depends on !GRKERNSEC
12536 default y
12537 ---help---
12538 This option is required by programs like Wine to run 16-bit
12539@@ -1186,6 +1188,7 @@ choice
12540
12541 config NOHIGHMEM
12542 bool "off"
12543+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12544 ---help---
12545 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12546 However, the address space of 32-bit x86 processors is only 4
12547@@ -1222,6 +1225,7 @@ config NOHIGHMEM
12548
12549 config HIGHMEM4G
12550 bool "4GB"
12551+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12552 ---help---
12553 Select this if you have a 32-bit processor and between 1 and 4
12554 gigabytes of physical RAM.
12555@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
12556 hex
12557 default 0xB0000000 if VMSPLIT_3G_OPT
12558 default 0x80000000 if VMSPLIT_2G
12559- default 0x78000000 if VMSPLIT_2G_OPT
12560+ default 0x70000000 if VMSPLIT_2G_OPT
12561 default 0x40000000 if VMSPLIT_1G
12562 default 0xC0000000
12563 depends on X86_32
12564@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
12565
12566 config KEXEC
12567 bool "kexec system call"
12568+ depends on !GRKERNSEC_KMEM
12569 ---help---
12570 kexec is a system call that implements the ability to shutdown your
12571 current kernel, and to start another kernel. It is like a reboot
12572@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
12573
12574 config PHYSICAL_ALIGN
12575 hex "Alignment value to which kernel should be aligned"
12576- default "0x200000"
12577+ default "0x1000000"
12578+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12579+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12580 range 0x2000 0x1000000 if X86_32
12581 range 0x200000 0x1000000 if X86_64
12582 ---help---
12583@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
12584 def_bool n
12585 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12586 depends on X86_32 || IA32_EMULATION
12587+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12588 ---help---
12589 Certain buggy versions of glibc will crash if they are
12590 presented with a 32-bit vDSO that is not mapped at the address
12591diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12592index 6983314..54ad7e8 100644
12593--- a/arch/x86/Kconfig.cpu
12594+++ b/arch/x86/Kconfig.cpu
12595@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12596
12597 config X86_F00F_BUG
12598 def_bool y
12599- depends on M586MMX || M586TSC || M586 || M486
12600+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12601
12602 config X86_INVD_BUG
12603 def_bool y
12604@@ -327,7 +327,7 @@ config X86_INVD_BUG
12605
12606 config X86_ALIGNMENT_16
12607 def_bool y
12608- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12609+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12610
12611 config X86_INTEL_USERCOPY
12612 def_bool y
12613@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12614 # generates cmov.
12615 config X86_CMOV
12616 def_bool y
12617- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12618+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12619
12620 config X86_MINIMUM_CPU_FAMILY
12621 int
12622diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12623index 20028da..88d5946 100644
12624--- a/arch/x86/Kconfig.debug
12625+++ b/arch/x86/Kconfig.debug
12626@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12627 config DEBUG_RODATA
12628 bool "Write protect kernel read-only data structures"
12629 default y
12630- depends on DEBUG_KERNEL
12631+ depends on DEBUG_KERNEL && BROKEN
12632 ---help---
12633 Mark the kernel read-only data as write-protected in the pagetables,
12634 in order to catch accidental (and incorrect) writes to such const
12635@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12636
12637 config DEBUG_SET_MODULE_RONX
12638 bool "Set loadable kernel module data as NX and text as RO"
12639- depends on MODULES
12640+ depends on MODULES && BROKEN
12641 ---help---
12642 This option helps catch unintended modifications to loadable
12643 kernel module's text and read-only data. It also prevents execution
12644diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12645index 5ba2d9c..41e5bb6 100644
12646--- a/arch/x86/Makefile
12647+++ b/arch/x86/Makefile
12648@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12649 # CPU-specific tuning. Anything which can be shared with UML should go here.
12650 include $(srctree)/arch/x86/Makefile_32.cpu
12651 KBUILD_CFLAGS += $(cflags-y)
12652-
12653- # temporary until string.h is fixed
12654- KBUILD_CFLAGS += -ffreestanding
12655 else
12656 BITS := 64
12657 UTS_MACHINE := x86_64
12658@@ -107,6 +104,9 @@ else
12659 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12660 endif
12661
12662+# temporary until string.h is fixed
12663+KBUILD_CFLAGS += -ffreestanding
12664+
12665 # Make sure compiler does not have buggy stack-protector support.
12666 ifdef CONFIG_CC_STACKPROTECTOR
12667 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12668@@ -181,6 +181,7 @@ archheaders:
12669 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12670
12671 archprepare:
12672+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12673 ifeq ($(CONFIG_KEXEC_FILE),y)
12674 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12675 endif
12676@@ -264,3 +265,9 @@ define archhelp
12677 echo ' FDARGS="..." arguments for the booted kernel'
12678 echo ' FDINITRD=file initrd for the booted kernel'
12679 endef
12680+
12681+define OLD_LD
12682+
12683+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12684+*** Please upgrade your binutils to 2.18 or newer
12685+endef
12686diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12687index 57bbf2f..b100fce 100644
12688--- a/arch/x86/boot/Makefile
12689+++ b/arch/x86/boot/Makefile
12690@@ -58,6 +58,9 @@ clean-files += cpustr.h
12691 # ---------------------------------------------------------------------------
12692
12693 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12694+ifdef CONSTIFY_PLUGIN
12695+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12696+endif
12697 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12698 GCOV_PROFILE := n
12699
12700diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12701index 878e4b9..20537ab 100644
12702--- a/arch/x86/boot/bitops.h
12703+++ b/arch/x86/boot/bitops.h
12704@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12705 u8 v;
12706 const u32 *p = (const u32 *)addr;
12707
12708- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12709+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12710 return v;
12711 }
12712
12713@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12714
12715 static inline void set_bit(int nr, void *addr)
12716 {
12717- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12718+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12719 }
12720
12721 #endif /* BOOT_BITOPS_H */
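
The boot-code hunks above (and the later cpucheck.c ones) add volatile to asm statements. Without it, GCC may merge two identical asms or drop one whose result appears unused; volatile forces each instance to be emitted where written. An illustration outside the patch, using rdtsc (x86), where the two reads must both really happen because the counter changes between them:

#include <stdio.h>

static unsigned long long rdtsc_once(void)
{
	unsigned int lo, hi;
	/* volatile: every call must emit its own rdtsc */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	unsigned long long a = rdtsc_once();
	unsigned long long b = rdtsc_once();
	printf("delta = %llu cycles\n", b - a);  /* normally nonzero */
	return 0;
}
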
12722diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12723index bd49ec6..94c7f58 100644
12724--- a/arch/x86/boot/boot.h
12725+++ b/arch/x86/boot/boot.h
12726@@ -84,7 +84,7 @@ static inline void io_delay(void)
12727 static inline u16 ds(void)
12728 {
12729 u16 seg;
12730- asm("movw %%ds,%0" : "=rm" (seg));
12731+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12732 return seg;
12733 }
12734
12735diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12736index 0a291cd..9686efc 100644
12737--- a/arch/x86/boot/compressed/Makefile
12738+++ b/arch/x86/boot/compressed/Makefile
12739@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12740 KBUILD_CFLAGS += -mno-mmx -mno-sse
12741 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12742 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12743+ifdef CONSTIFY_PLUGIN
12744+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12745+endif
12746
12747 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12748 GCOV_PROFILE := n
12749diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12750index a53440e..c3dbf1e 100644
12751--- a/arch/x86/boot/compressed/efi_stub_32.S
12752+++ b/arch/x86/boot/compressed/efi_stub_32.S
12753@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12754 * parameter 2, ..., param n. To make things easy, we save the return
12755 * address of efi_call_phys in a global variable.
12756 */
12757- popl %ecx
12758- movl %ecx, saved_return_addr(%edx)
12759- /* get the function pointer into ECX*/
12760- popl %ecx
12761- movl %ecx, efi_rt_function_ptr(%edx)
12762+ popl saved_return_addr(%edx)
12763+ popl efi_rt_function_ptr(%edx)
12764
12765 /*
12766 * 3. Call the physical function.
12767 */
12768- call *%ecx
12769+ call *efi_rt_function_ptr(%edx)
12770
12771 /*
12772 * 4. Balance the stack. And because EAX contain the return value,
12773@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12774 1: popl %edx
12775 subl $1b, %edx
12776
12777- movl efi_rt_function_ptr(%edx), %ecx
12778- pushl %ecx
12779+ pushl efi_rt_function_ptr(%edx)
12780
12781 /*
12782 * 10. Push the saved return address onto the stack and return.
12783 */
12784- movl saved_return_addr(%edx), %ecx
12785- pushl %ecx
12786- ret
12787+ jmpl *saved_return_addr(%edx)
12788 ENDPROC(efi_call_phys)
12789 .previous
12790
12791diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12792index 630384a..278e788 100644
12793--- a/arch/x86/boot/compressed/efi_thunk_64.S
12794+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12795@@ -189,8 +189,8 @@ efi_gdt64:
12796 .long 0 /* Filled out by user */
12797 .word 0
12798 .quad 0x0000000000000000 /* NULL descriptor */
12799- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12800- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12801+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12802+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12803 .quad 0x0080890000000000 /* TS descriptor */
12804 .quad 0x0000000000000000 /* TS continued */
12805 efi_gdt64_end:
12806diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12807index 1d7fbbc..36ecd58 100644
12808--- a/arch/x86/boot/compressed/head_32.S
12809+++ b/arch/x86/boot/compressed/head_32.S
12810@@ -140,10 +140,10 @@ preferred_addr:
12811 addl %eax, %ebx
12812 notl %eax
12813 andl %eax, %ebx
12814- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12815+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12816 jge 1f
12817 #endif
12818- movl $LOAD_PHYSICAL_ADDR, %ebx
12819+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12820 1:
12821
12822 /* Target address to relocate to for decompression */
12823diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12824index 6b1766c..ad465c9 100644
12825--- a/arch/x86/boot/compressed/head_64.S
12826+++ b/arch/x86/boot/compressed/head_64.S
12827@@ -94,10 +94,10 @@ ENTRY(startup_32)
12828 addl %eax, %ebx
12829 notl %eax
12830 andl %eax, %ebx
12831- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12832+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12833 jge 1f
12834 #endif
12835- movl $LOAD_PHYSICAL_ADDR, %ebx
12836+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12837 1:
12838
12839 /* Target address to relocate to for decompression */
12840@@ -322,10 +322,10 @@ preferred_addr:
12841 addq %rax, %rbp
12842 notq %rax
12843 andq %rax, %rbp
12844- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12845+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12846 jge 1f
12847 #endif
12848- movq $LOAD_PHYSICAL_ADDR, %rbp
12849+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12850 1:
12851
12852 /* Target address to relocate to for decompression */
12853@@ -434,8 +434,8 @@ gdt:
12854 .long gdt
12855 .word 0
12856 .quad 0x0000000000000000 /* NULL descriptor */
12857- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12858- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12859+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12860+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12861 .quad 0x0080890000000000 /* TS descriptor */
12862 .quad 0x0000000000000000 /* TS continued */
12863 gdt_end:
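
Both GDT hunks (efi_thunk_64.S above and head_64.S here) change the descriptor type byte from 0x9a/0x92 to 0x9b/0x93, i.e. they pre-set the segment "accessed" bit. The CPU otherwise sets that bit itself on the first load of the selector -- a hardware write into the GDT, which matters once the GDT lives on read-only memory as under KERNEXEC. A quick decode of the two __KERNEL_CS quadwords:

#include <stdint.h>
#include <stdio.h>

static void decode(const char *name, uint64_t d)
{
	unsigned type = (unsigned)(d >> 40) & 0xF; /* descriptor type nibble */
	printf("%s: type=%X accessed=%u\n", name, type, type & 1);
}

int main(void)
{
	decode("old __KERNEL_CS", 0x00af9a000000ffffULL); /* accessed=0 */
	decode("new __KERNEL_CS", 0x00af9b000000ffffULL); /* accessed=1 */
	return 0;
}
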
12864diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12865index a950864..c710239 100644
12866--- a/arch/x86/boot/compressed/misc.c
12867+++ b/arch/x86/boot/compressed/misc.c
12868@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12869 * Calculate the delta between where vmlinux was linked to load
12870 * and where it was actually loaded.
12871 */
12872- delta = min_addr - LOAD_PHYSICAL_ADDR;
12873+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12874 if (!delta) {
12875 debug_putstr("No relocation needed... ");
12876 return;
12877@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12878 Elf32_Ehdr ehdr;
12879 Elf32_Phdr *phdrs, *phdr;
12880 #endif
12881- void *dest;
12882+ void *dest, *prev;
12883 int i;
12884
12885 memcpy(&ehdr, output, sizeof(ehdr));
12886@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12887 case PT_LOAD:
12888 #ifdef CONFIG_RELOCATABLE
12889 dest = output;
12890- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12891+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12892 #else
12893 dest = (void *)(phdr->p_paddr);
12894 #endif
12895 memcpy(dest,
12896 output + phdr->p_offset,
12897 phdr->p_filesz);
12898+ if (i)
12899+ memset(prev, 0xff, dest - prev);
12900+ prev = dest + phdr->p_filesz;
12901 break;
12902 default: /* Ignore other PT_* */ break;
12903 }
12904@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12905 error("Destination address too large");
12906 #endif
12907 #ifndef CONFIG_RELOCATABLE
12908- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12909+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12910 error("Wrong destination address");
12911 #endif
12912
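
The parse_elf() change above tracks the end of the previous PT_LOAD segment and fills the gap up to the next one with 0xff bytes, so inter-segment padding holds a known poison value rather than stale decompressor output. A self-contained sketch of the same loop over two fake segments:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t image[64];
	uint8_t *prev = NULL, *dest;
	const struct { size_t off, len; } phdr[] = { {0, 10}, {32, 8} };
	size_t i;

	memset(image, 0xAA, sizeof(image));      /* "stale" contents */

	for (i = 0; i < 2; i++) {
		dest = image + phdr[i].off;
		memset(dest, 0x90, phdr[i].len); /* the segment copy */
		if (i)                           /* poison gap behind us */
			memset(prev, 0xff, (size_t)(dest - prev));
		prev = dest + phdr[i].len;
	}
	printf("byte in gap: %02x\n", (unsigned)image[20]); /* ff, not aa */
	return 0;
}
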
12913diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12914index 1fd7d57..0f7d096 100644
12915--- a/arch/x86/boot/cpucheck.c
12916+++ b/arch/x86/boot/cpucheck.c
12917@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12918 u32 ecx = MSR_K7_HWCR;
12919 u32 eax, edx;
12920
12921- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12922+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12923 eax &= ~(1 << 15);
12924- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12925+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12926
12927 get_cpuflags(); /* Make sure it really did something */
12928 err = check_cpuflags();
12929@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12930 u32 ecx = MSR_VIA_FCR;
12931 u32 eax, edx;
12932
12933- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12934+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12935 eax |= (1<<1)|(1<<7);
12936- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12937+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12938
12939 set_bit(X86_FEATURE_CX8, cpu.flags);
12940 err = check_cpuflags();
12941@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12942 u32 eax, edx;
12943 u32 level = 1;
12944
12945- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12946- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12947- asm("cpuid"
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12950+ asm volatile("cpuid"
12951 : "+a" (level), "=d" (cpu.flags[0])
12952 : : "ecx", "ebx");
12953- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12954+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12955
12956 err = check_cpuflags();
12957 } else if (err == 0x01 &&
12958diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12959index 16ef025..91e033b 100644
12960--- a/arch/x86/boot/header.S
12961+++ b/arch/x86/boot/header.S
12962@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12963 # single linked list of
12964 # struct setup_data
12965
12966-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12967+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12968
12969 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12970+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12971+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12972+#else
12973 #define VO_INIT_SIZE (VO__end - VO__text)
12974+#endif
12975 #if ZO_INIT_SIZE > VO_INIT_SIZE
12976 #define INIT_SIZE ZO_INIT_SIZE
12977 #else
12978diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12979index db75d07..8e6d0af 100644
12980--- a/arch/x86/boot/memory.c
12981+++ b/arch/x86/boot/memory.c
12982@@ -19,7 +19,7 @@
12983
12984 static int detect_memory_e820(void)
12985 {
12986- int count = 0;
12987+ unsigned int count = 0;
12988 struct biosregs ireg, oreg;
12989 struct e820entry *desc = boot_params.e820_map;
12990 static struct e820entry buf; /* static so it is zeroed */
12991diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12992index ba3e100..6501b8f 100644
12993--- a/arch/x86/boot/video-vesa.c
12994+++ b/arch/x86/boot/video-vesa.c
12995@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
12996
12997 boot_params.screen_info.vesapm_seg = oreg.es;
12998 boot_params.screen_info.vesapm_off = oreg.di;
12999+ boot_params.screen_info.vesapm_size = oreg.cx;
13000 }
13001
13002 /*
13003diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13004index 43eda28..5ab5fdb 100644
13005--- a/arch/x86/boot/video.c
13006+++ b/arch/x86/boot/video.c
13007@@ -96,7 +96,7 @@ static void store_mode_params(void)
13008 static unsigned int get_entry(void)
13009 {
13010 char entry_buf[4];
13011- int i, len = 0;
13012+ unsigned int i, len = 0;
13013 int key;
13014 unsigned int v;
13015
13016diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13017index 9105655..41779c1 100644
13018--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13019+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13020@@ -8,6 +8,8 @@
13021 * including this sentence is retained in full.
13022 */
13023
13024+#include <asm/alternative-asm.h>
13025+
13026 .extern crypto_ft_tab
13027 .extern crypto_it_tab
13028 .extern crypto_fl_tab
13029@@ -70,6 +72,8 @@
13030 je B192; \
13031 leaq 32(r9),r9;
13032
13033+#define ret pax_force_retaddr; ret
13034+
13035 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13036 movq r1,r2; \
13037 movq r3,r4; \
13038diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13039index 6bd2c6c..368c93e 100644
13040--- a/arch/x86/crypto/aesni-intel_asm.S
13041+++ b/arch/x86/crypto/aesni-intel_asm.S
13042@@ -31,6 +31,7 @@
13043
13044 #include <linux/linkage.h>
13045 #include <asm/inst.h>
13046+#include <asm/alternative-asm.h>
13047
13048 /*
13049 * The following macros are used to move an (un)aligned 16 byte value to/from
13050@@ -217,7 +218,7 @@ enc: .octa 0x2
13051 * num_initial_blocks = b mod 4
13052 * encrypt the initial num_initial_blocks blocks and apply ghash on
13053 * the ciphertext
13054-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13055+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13056 * are clobbered
13057 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13058 */
13059@@ -227,8 +228,8 @@ enc: .octa 0x2
13060 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13061 MOVADQ SHUF_MASK(%rip), %xmm14
13062 mov arg7, %r10 # %r10 = AAD
13063- mov arg8, %r12 # %r12 = aadLen
13064- mov %r12, %r11
13065+ mov arg8, %r15 # %r15 = aadLen
13066+ mov %r15, %r11
13067 pxor %xmm\i, %xmm\i
13068
13069 _get_AAD_loop\num_initial_blocks\operation:
13070@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
13071 psrldq $4, %xmm\i
13072 pxor \TMP1, %xmm\i
13073 add $4, %r10
13074- sub $4, %r12
13075+ sub $4, %r15
13076 jne _get_AAD_loop\num_initial_blocks\operation
13077
13078 cmp $16, %r11
13079 je _get_AAD_loop2_done\num_initial_blocks\operation
13080
13081- mov $16, %r12
13082+ mov $16, %r15
13083 _get_AAD_loop2\num_initial_blocks\operation:
13084 psrldq $4, %xmm\i
13085- sub $4, %r12
13086- cmp %r11, %r12
13087+ sub $4, %r15
13088+ cmp %r11, %r15
13089 jne _get_AAD_loop2\num_initial_blocks\operation
13090
13091 _get_AAD_loop2_done\num_initial_blocks\operation:
13092@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13093 * num_initial_blocks = b mod 4
13094 * encrypt the initial num_initial_blocks blocks and apply ghash on
13095 * the ciphertext
13096-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13097+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13098 * are clobbered
13099 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13100 */
13101@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13102 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13103 MOVADQ SHUF_MASK(%rip), %xmm14
13104 mov arg7, %r10 # %r10 = AAD
13105- mov arg8, %r12 # %r12 = aadLen
13106- mov %r12, %r11
13107+ mov arg8, %r15 # %r15 = aadLen
13108+ mov %r15, %r11
13109 pxor %xmm\i, %xmm\i
13110 _get_AAD_loop\num_initial_blocks\operation:
13111 movd (%r10), \TMP1
13112@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13113 psrldq $4, %xmm\i
13114 pxor \TMP1, %xmm\i
13115 add $4, %r10
13116- sub $4, %r12
13117+ sub $4, %r15
13118 jne _get_AAD_loop\num_initial_blocks\operation
13119 cmp $16, %r11
13120 je _get_AAD_loop2_done\num_initial_blocks\operation
13121- mov $16, %r12
13122+ mov $16, %r15
13123 _get_AAD_loop2\num_initial_blocks\operation:
13124 psrldq $4, %xmm\i
13125- sub $4, %r12
13126- cmp %r11, %r12
13127+ sub $4, %r15
13128+ cmp %r11, %r15
13129 jne _get_AAD_loop2\num_initial_blocks\operation
13130 _get_AAD_loop2_done\num_initial_blocks\operation:
13131 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
13132@@ -1280,7 +1281,7 @@ _esb_loop_\@:
13133 *
13134 *****************************************************************************/
13135 ENTRY(aesni_gcm_dec)
13136- push %r12
13137+ push %r15
13138 push %r13
13139 push %r14
13140 mov %rsp, %r14
13141@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
13142 */
13143 sub $VARIABLE_OFFSET, %rsp
13144 and $~63, %rsp # align rsp to 64 bytes
13145- mov %arg6, %r12
13146- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13147+ mov %arg6, %r15
13148+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13149 movdqa SHUF_MASK(%rip), %xmm2
13150 PSHUFB_XMM %xmm2, %xmm13
13151
13152@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
13153 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13154 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13155 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13156- mov %r13, %r12
13157- and $(3<<4), %r12
13158+ mov %r13, %r15
13159+ and $(3<<4), %r15
13160 jz _initial_num_blocks_is_0_decrypt
13161- cmp $(2<<4), %r12
13162+ cmp $(2<<4), %r15
13163 jb _initial_num_blocks_is_1_decrypt
13164 je _initial_num_blocks_is_2_decrypt
13165 _initial_num_blocks_is_3_decrypt:
13166@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
13167 sub $16, %r11
13168 add %r13, %r11
13169 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13170- lea SHIFT_MASK+16(%rip), %r12
13171- sub %r13, %r12
13172+ lea SHIFT_MASK+16(%rip), %r15
13173+ sub %r13, %r15
13174 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13175 # (%r13 is the number of bytes in plaintext mod 16)
13176- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13177+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13178 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13179
13180 movdqa %xmm1, %xmm2
13181 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13182- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13183+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13184 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13185 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13186 pand %xmm1, %xmm2
13187@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
13188 sub $1, %r13
13189 jne _less_than_8_bytes_left_decrypt
13190 _multiple_of_16_bytes_decrypt:
13191- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13192- shl $3, %r12 # convert into number of bits
13193- movd %r12d, %xmm15 # len(A) in %xmm15
13194+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13195+ shl $3, %r15 # convert into number of bits
13196+ movd %r15d, %xmm15 # len(A) in %xmm15
13197 shl $3, %arg4 # len(C) in bits (*128)
13198 MOVQ_R64_XMM %arg4, %xmm1
13199 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13200@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
13201 mov %r14, %rsp
13202 pop %r14
13203 pop %r13
13204- pop %r12
13205+ pop %r15
13206+ pax_force_retaddr
13207 ret
13208 ENDPROC(aesni_gcm_dec)
13209
13210@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
13211 * poly = x^128 + x^127 + x^126 + x^121 + 1
13212 ***************************************************************************/
13213 ENTRY(aesni_gcm_enc)
13214- push %r12
13215+ push %r15
13216 push %r13
13217 push %r14
13218 mov %rsp, %r14
13219@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
13220 #
13221 sub $VARIABLE_OFFSET, %rsp
13222 and $~63, %rsp
13223- mov %arg6, %r12
13224- movdqu (%r12), %xmm13
13225+ mov %arg6, %r15
13226+ movdqu (%r15), %xmm13
13227 movdqa SHUF_MASK(%rip), %xmm2
13228 PSHUFB_XMM %xmm2, %xmm13
13229
13230@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
13231 movdqa %xmm13, HashKey(%rsp)
13232 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13233 and $-16, %r13
13234- mov %r13, %r12
13235+ mov %r13, %r15
13236
13237 # Encrypt first few blocks
13238
13239- and $(3<<4), %r12
13240+ and $(3<<4), %r15
13241 jz _initial_num_blocks_is_0_encrypt
13242- cmp $(2<<4), %r12
13243+ cmp $(2<<4), %r15
13244 jb _initial_num_blocks_is_1_encrypt
13245 je _initial_num_blocks_is_2_encrypt
13246 _initial_num_blocks_is_3_encrypt:
13247@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
13248 sub $16, %r11
13249 add %r13, %r11
13250 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13251- lea SHIFT_MASK+16(%rip), %r12
13252- sub %r13, %r12
13253+ lea SHIFT_MASK+16(%rip), %r15
13254+ sub %r13, %r15
13255 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13256 # (%r13 is the number of bytes in plaintext mod 16)
13257- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13258+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13259 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13260 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13261- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13262+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13263 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13264 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13265 movdqa SHUF_MASK(%rip), %xmm10
13266@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
13267 sub $1, %r13
13268 jne _less_than_8_bytes_left_encrypt
13269 _multiple_of_16_bytes_encrypt:
13270- mov arg8, %r12 # %r12 = addLen (number of bytes)
13271- shl $3, %r12
13272- movd %r12d, %xmm15 # len(A) in %xmm15
13273+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13274+ shl $3, %r15
13275+ movd %r15d, %xmm15 # len(A) in %xmm15
13276 shl $3, %arg4 # len(C) in bits (*128)
13277 MOVQ_R64_XMM %arg4, %xmm1
13278 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13279@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
13280 mov %r14, %rsp
13281 pop %r14
13282 pop %r13
13283- pop %r12
13284+ pop %r15
13285+ pax_force_retaddr
13286 ret
13287 ENDPROC(aesni_gcm_enc)
13288
13289@@ -1733,6 +1736,7 @@ _key_expansion_256a:
13290 pxor %xmm1, %xmm0
13291 movaps %xmm0, (TKEYP)
13292 add $0x10, TKEYP
13293+ pax_force_retaddr
13294 ret
13295 ENDPROC(_key_expansion_128)
13296 ENDPROC(_key_expansion_256a)
13297@@ -1759,6 +1763,7 @@ _key_expansion_192a:
13298 shufps $0b01001110, %xmm2, %xmm1
13299 movaps %xmm1, 0x10(TKEYP)
13300 add $0x20, TKEYP
13301+ pax_force_retaddr
13302 ret
13303 ENDPROC(_key_expansion_192a)
13304
13305@@ -1779,6 +1784,7 @@ _key_expansion_192b:
13306
13307 movaps %xmm0, (TKEYP)
13308 add $0x10, TKEYP
13309+ pax_force_retaddr
13310 ret
13311 ENDPROC(_key_expansion_192b)
13312
13313@@ -1792,6 +1798,7 @@ _key_expansion_256b:
13314 pxor %xmm1, %xmm2
13315 movaps %xmm2, (TKEYP)
13316 add $0x10, TKEYP
13317+ pax_force_retaddr
13318 ret
13319 ENDPROC(_key_expansion_256b)
13320
13321@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
13322 #ifndef __x86_64__
13323 popl KEYP
13324 #endif
13325+ pax_force_retaddr
13326 ret
13327 ENDPROC(aesni_set_key)
13328
13329@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
13330 popl KLEN
13331 popl KEYP
13332 #endif
13333+ pax_force_retaddr
13334 ret
13335 ENDPROC(aesni_enc)
13336
13337@@ -1985,6 +1994,7 @@ _aesni_enc1:
13338 AESENC KEY STATE
13339 movaps 0x70(TKEYP), KEY
13340 AESENCLAST KEY STATE
13341+ pax_force_retaddr
13342 ret
13343 ENDPROC(_aesni_enc1)
13344
13345@@ -2094,6 +2104,7 @@ _aesni_enc4:
13346 AESENCLAST KEY STATE2
13347 AESENCLAST KEY STATE3
13348 AESENCLAST KEY STATE4
13349+ pax_force_retaddr
13350 ret
13351 ENDPROC(_aesni_enc4)
13352
13353@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
13354 popl KLEN
13355 popl KEYP
13356 #endif
13357+ pax_force_retaddr
13358 ret
13359 ENDPROC(aesni_dec)
13360
13361@@ -2175,6 +2187,7 @@ _aesni_dec1:
13362 AESDEC KEY STATE
13363 movaps 0x70(TKEYP), KEY
13364 AESDECLAST KEY STATE
13365+ pax_force_retaddr
13366 ret
13367 ENDPROC(_aesni_dec1)
13368
13369@@ -2284,6 +2297,7 @@ _aesni_dec4:
13370 AESDECLAST KEY STATE2
13371 AESDECLAST KEY STATE3
13372 AESDECLAST KEY STATE4
13373+ pax_force_retaddr
13374 ret
13375 ENDPROC(_aesni_dec4)
13376
13377@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
13378 popl KEYP
13379 popl LEN
13380 #endif
13381+ pax_force_retaddr
13382 ret
13383 ENDPROC(aesni_ecb_enc)
13384
13385@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
13386 popl KEYP
13387 popl LEN
13388 #endif
13389+ pax_force_retaddr
13390 ret
13391 ENDPROC(aesni_ecb_dec)
13392
13393@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
13394 popl LEN
13395 popl IVP
13396 #endif
13397+ pax_force_retaddr
13398 ret
13399 ENDPROC(aesni_cbc_enc)
13400
13401@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
13402 popl LEN
13403 popl IVP
13404 #endif
13405+ pax_force_retaddr
13406 ret
13407 ENDPROC(aesni_cbc_dec)
13408
13409@@ -2561,6 +2579,7 @@ _aesni_inc_init:
13410 mov $1, TCTR_LOW
13411 MOVQ_R64_XMM TCTR_LOW INC
13412 MOVQ_R64_XMM CTR TCTR_LOW
13413+ pax_force_retaddr
13414 ret
13415 ENDPROC(_aesni_inc_init)
13416
13417@@ -2590,6 +2609,7 @@ _aesni_inc:
13418 .Linc_low:
13419 movaps CTR, IV
13420 PSHUFB_XMM BSWAP_MASK IV
13421+ pax_force_retaddr
13422 ret
13423 ENDPROC(_aesni_inc)
13424
13425@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
13426 .Lctr_enc_ret:
13427 movups IV, (IVP)
13428 .Lctr_enc_just_ret:
13429+ pax_force_retaddr
13430 ret
13431 ENDPROC(aesni_ctr_enc)
13432
13433@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
13434 pxor INC, STATE4
13435 movdqu STATE4, 0x70(OUTP)
13436
13437+ pax_force_retaddr
13438 ret
13439 ENDPROC(aesni_xts_crypt8)
13440
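
The wholesale %r12 -> %r15 renaming in this file (and the matching %r12 -> %r14 swaps in the cast5, cast6, sha1 and twofish files below) is not cosmetic: under CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR (see the alternative-asm.h hunk later in this patch), %r12 is permanently reserved as the mask register that pax_force_retaddr ORs into saved return addresses, so hand-written assembly must stop using it as scratch. Any other callee-saved register the routine does not already use will do, which is why different files pick %r15 or %r14. A sketch of the constraint, assuming the OR method:

	movabs $0x8000000000000000,%r12	/* pax_set_fptr_mask: loaded once on kernel entry */
	...
	/* clobbering %r12 in between would corrupt every later check */
	orq %r12,0(%rsp)		/* pax_force_retaddr before the ret */
	ret
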
13441diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13442index 246c670..466e2d6 100644
13443--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13444+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13445@@ -21,6 +21,7 @@
13446 */
13447
13448 #include <linux/linkage.h>
13449+#include <asm/alternative-asm.h>
13450
13451 .file "blowfish-x86_64-asm.S"
13452 .text
13453@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13454 jnz .L__enc_xor;
13455
13456 write_block();
13457+ pax_force_retaddr
13458 ret;
13459 .L__enc_xor:
13460 xor_block();
13461+ pax_force_retaddr
13462 ret;
13463 ENDPROC(__blowfish_enc_blk)
13464
13465@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13466
13467 movq %r11, %rbp;
13468
13469+ pax_force_retaddr
13470 ret;
13471 ENDPROC(blowfish_dec_blk)
13472
13473@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13474
13475 popq %rbx;
13476 popq %rbp;
13477+ pax_force_retaddr
13478 ret;
13479
13480 .L__enc_xor4:
13481@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13482
13483 popq %rbx;
13484 popq %rbp;
13485+ pax_force_retaddr
13486 ret;
13487 ENDPROC(__blowfish_enc_blk_4way)
13488
13489@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13490 popq %rbx;
13491 popq %rbp;
13492
13493+ pax_force_retaddr
13494 ret;
13495 ENDPROC(blowfish_dec_blk_4way)
13496diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13497index ce71f92..1dce7ec 100644
13498--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13499+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13500@@ -16,6 +16,7 @@
13501 */
13502
13503 #include <linux/linkage.h>
13504+#include <asm/alternative-asm.h>
13505
13506 #define CAMELLIA_TABLE_BYTE_LEN 272
13507
13508@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13509 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13510 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13511 %rcx, (%r9));
13512+ pax_force_retaddr
13513 ret;
13514 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13515
13516@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13517 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13518 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13519 %rax, (%r9));
13520+ pax_force_retaddr
13521 ret;
13522 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13523
13524@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13525 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13526 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13527
13528+ pax_force_retaddr
13529 ret;
13530
13531 .align 8
13532@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13533 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13534 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13535
13536+ pax_force_retaddr
13537 ret;
13538
13539 .align 8
13540@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13541 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13542 %xmm8, %rsi);
13543
13544+ pax_force_retaddr
13545 ret;
13546 ENDPROC(camellia_ecb_enc_16way)
13547
13548@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13549 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13550 %xmm8, %rsi);
13551
13552+ pax_force_retaddr
13553 ret;
13554 ENDPROC(camellia_ecb_dec_16way)
13555
13556@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13557 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13558 %xmm8, %rsi);
13559
13560+ pax_force_retaddr
13561 ret;
13562 ENDPROC(camellia_cbc_dec_16way)
13563
13564@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13565 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13566 %xmm8, %rsi);
13567
13568+ pax_force_retaddr
13569 ret;
13570 ENDPROC(camellia_ctr_16way)
13571
13572@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13573 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13574 %xmm8, %rsi);
13575
13576+ pax_force_retaddr
13577 ret;
13578 ENDPROC(camellia_xts_crypt_16way)
13579
13580diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13581index 0e0b886..5a3123c 100644
13582--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13583+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13584@@ -11,6 +11,7 @@
13585 */
13586
13587 #include <linux/linkage.h>
13588+#include <asm/alternative-asm.h>
13589
13590 #define CAMELLIA_TABLE_BYTE_LEN 272
13591
13592@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13593 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13594 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13595 %rcx, (%r9));
13596+ pax_force_retaddr
13597 ret;
13598 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13599
13600@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13601 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13602 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13603 %rax, (%r9));
13604+ pax_force_retaddr
13605 ret;
13606 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13607
13608@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13609 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13610 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13611
13612+ pax_force_retaddr
13613 ret;
13614
13615 .align 8
13616@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13617 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13618 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13619
13620+ pax_force_retaddr
13621 ret;
13622
13623 .align 8
13624@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13625
13626 vzeroupper;
13627
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(camellia_ecb_enc_32way)
13631
13632@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13633
13634 vzeroupper;
13635
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(camellia_ecb_dec_32way)
13639
13640@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13641
13642 vzeroupper;
13643
13644+ pax_force_retaddr
13645 ret;
13646 ENDPROC(camellia_cbc_dec_32way)
13647
13648@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13649
13650 vzeroupper;
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(camellia_ctr_32way)
13655
13656@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13657
13658 vzeroupper;
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_xts_crypt_32way)
13663
13664diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13665index 310319c..db3d7b5 100644
13666--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13667+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13668@@ -21,6 +21,7 @@
13669 */
13670
13671 #include <linux/linkage.h>
13672+#include <asm/alternative-asm.h>
13673
13674 .file "camellia-x86_64-asm_64.S"
13675 .text
13676@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13677 enc_outunpack(mov, RT1);
13678
13679 movq RRBP, %rbp;
13680+ pax_force_retaddr
13681 ret;
13682
13683 .L__enc_xor:
13684 enc_outunpack(xor, RT1);
13685
13686 movq RRBP, %rbp;
13687+ pax_force_retaddr
13688 ret;
13689 ENDPROC(__camellia_enc_blk)
13690
13691@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13692 dec_outunpack();
13693
13694 movq RRBP, %rbp;
13695+ pax_force_retaddr
13696 ret;
13697 ENDPROC(camellia_dec_blk)
13698
13699@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13700
13701 movq RRBP, %rbp;
13702 popq %rbx;
13703+ pax_force_retaddr
13704 ret;
13705
13706 .L__enc2_xor:
13707@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13708
13709 movq RRBP, %rbp;
13710 popq %rbx;
13711+ pax_force_retaddr
13712 ret;
13713 ENDPROC(__camellia_enc_blk_2way)
13714
13715@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13716
13717 movq RRBP, %rbp;
13718 movq RXOR, %rbx;
13719+ pax_force_retaddr
13720 ret;
13721 ENDPROC(camellia_dec_blk_2way)
13722diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13723index c35fd5d..2d8c7db 100644
13724--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13725+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13726@@ -24,6 +24,7 @@
13727 */
13728
13729 #include <linux/linkage.h>
13730+#include <asm/alternative-asm.h>
13731
13732 .file "cast5-avx-x86_64-asm_64.S"
13733
13734@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13735 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13736 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13737
13738+ pax_force_retaddr
13739 ret;
13740 ENDPROC(__cast5_enc_blk16)
13741
13742@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13743 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13744 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13745
13746+ pax_force_retaddr
13747 ret;
13748
13749 .L__skip_dec:
13750@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13751 vmovdqu RR4, (6*4*4)(%r11);
13752 vmovdqu RL4, (7*4*4)(%r11);
13753
13754+ pax_force_retaddr
13755 ret;
13756 ENDPROC(cast5_ecb_enc_16way)
13757
13758@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13759 vmovdqu RR4, (6*4*4)(%r11);
13760 vmovdqu RL4, (7*4*4)(%r11);
13761
13762+ pax_force_retaddr
13763 ret;
13764 ENDPROC(cast5_ecb_dec_16way)
13765
13766@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13767 * %rdx: src
13768 */
13769
13770- pushq %r12;
13771+ pushq %r14;
13772
13773 movq %rsi, %r11;
13774- movq %rdx, %r12;
13775+ movq %rdx, %r14;
13776
13777 vmovdqu (0*16)(%rdx), RL1;
13778 vmovdqu (1*16)(%rdx), RR1;
13779@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13780 call __cast5_dec_blk16;
13781
13782 /* xor with src */
13783- vmovq (%r12), RX;
13784+ vmovq (%r14), RX;
13785 vpshufd $0x4f, RX, RX;
13786 vpxor RX, RR1, RR1;
13787- vpxor 0*16+8(%r12), RL1, RL1;
13788- vpxor 1*16+8(%r12), RR2, RR2;
13789- vpxor 2*16+8(%r12), RL2, RL2;
13790- vpxor 3*16+8(%r12), RR3, RR3;
13791- vpxor 4*16+8(%r12), RL3, RL3;
13792- vpxor 5*16+8(%r12), RR4, RR4;
13793- vpxor 6*16+8(%r12), RL4, RL4;
13794+ vpxor 0*16+8(%r14), RL1, RL1;
13795+ vpxor 1*16+8(%r14), RR2, RR2;
13796+ vpxor 2*16+8(%r14), RL2, RL2;
13797+ vpxor 3*16+8(%r14), RR3, RR3;
13798+ vpxor 4*16+8(%r14), RL3, RL3;
13799+ vpxor 5*16+8(%r14), RR4, RR4;
13800+ vpxor 6*16+8(%r14), RL4, RL4;
13801
13802 vmovdqu RR1, (0*16)(%r11);
13803 vmovdqu RL1, (1*16)(%r11);
13804@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13805 vmovdqu RR4, (6*16)(%r11);
13806 vmovdqu RL4, (7*16)(%r11);
13807
13808- popq %r12;
13809+ popq %r14;
13810
13811+ pax_force_retaddr
13812 ret;
13813 ENDPROC(cast5_cbc_dec_16way)
13814
13815@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13816 * %rcx: iv (big endian, 64bit)
13817 */
13818
13819- pushq %r12;
13820+ pushq %r14;
13821
13822 movq %rsi, %r11;
13823- movq %rdx, %r12;
13824+ movq %rdx, %r14;
13825
13826 vpcmpeqd RTMP, RTMP, RTMP;
13827 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13828@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13829 call __cast5_enc_blk16;
13830
13831 /* dst = src ^ iv */
13832- vpxor (0*16)(%r12), RR1, RR1;
13833- vpxor (1*16)(%r12), RL1, RL1;
13834- vpxor (2*16)(%r12), RR2, RR2;
13835- vpxor (3*16)(%r12), RL2, RL2;
13836- vpxor (4*16)(%r12), RR3, RR3;
13837- vpxor (5*16)(%r12), RL3, RL3;
13838- vpxor (6*16)(%r12), RR4, RR4;
13839- vpxor (7*16)(%r12), RL4, RL4;
13840+ vpxor (0*16)(%r14), RR1, RR1;
13841+ vpxor (1*16)(%r14), RL1, RL1;
13842+ vpxor (2*16)(%r14), RR2, RR2;
13843+ vpxor (3*16)(%r14), RL2, RL2;
13844+ vpxor (4*16)(%r14), RR3, RR3;
13845+ vpxor (5*16)(%r14), RL3, RL3;
13846+ vpxor (6*16)(%r14), RR4, RR4;
13847+ vpxor (7*16)(%r14), RL4, RL4;
13848 vmovdqu RR1, (0*16)(%r11);
13849 vmovdqu RL1, (1*16)(%r11);
13850 vmovdqu RR2, (2*16)(%r11);
13851@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13852 vmovdqu RR4, (6*16)(%r11);
13853 vmovdqu RL4, (7*16)(%r11);
13854
13855- popq %r12;
13856+ popq %r14;
13857
13858+ pax_force_retaddr
13859 ret;
13860 ENDPROC(cast5_ctr_16way)
13861diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13862index e3531f8..e123f35 100644
13863--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13864+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13865@@ -24,6 +24,7 @@
13866 */
13867
13868 #include <linux/linkage.h>
13869+#include <asm/alternative-asm.h>
13870 #include "glue_helper-asm-avx.S"
13871
13872 .file "cast6-avx-x86_64-asm_64.S"
13873@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13874 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13875 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13876
13877+ pax_force_retaddr
13878 ret;
13879 ENDPROC(__cast6_enc_blk8)
13880
13881@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13882 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13883 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13884
13885+ pax_force_retaddr
13886 ret;
13887 ENDPROC(__cast6_dec_blk8)
13888
13889@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13890
13891 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13892
13893+ pax_force_retaddr
13894 ret;
13895 ENDPROC(cast6_ecb_enc_8way)
13896
13897@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13898
13899 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13900
13901+ pax_force_retaddr
13902 ret;
13903 ENDPROC(cast6_ecb_dec_8way)
13904
13905@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13906 * %rdx: src
13907 */
13908
13909- pushq %r12;
13910+ pushq %r14;
13911
13912 movq %rsi, %r11;
13913- movq %rdx, %r12;
13914+ movq %rdx, %r14;
13915
13916 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13917
13918 call __cast6_dec_blk8;
13919
13920- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13921+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13922
13923- popq %r12;
13924+ popq %r14;
13925
13926+ pax_force_retaddr
13927 ret;
13928 ENDPROC(cast6_cbc_dec_8way)
13929
13930@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13931 * %rcx: iv (little endian, 128bit)
13932 */
13933
13934- pushq %r12;
13935+ pushq %r14;
13936
13937 movq %rsi, %r11;
13938- movq %rdx, %r12;
13939+ movq %rdx, %r14;
13940
13941 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13942 RD2, RX, RKR, RKM);
13943
13944 call __cast6_enc_blk8;
13945
13946- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13947+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13948
13949- popq %r12;
13950+ popq %r14;
13951
13952+ pax_force_retaddr
13953 ret;
13954 ENDPROC(cast6_ctr_8way)
13955
13956@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13957 /* dst <= regs xor IVs(in dst) */
13958 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13959
13960+ pax_force_retaddr
13961 ret;
13962 ENDPROC(cast6_xts_enc_8way)
13963
13964@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13965 /* dst <= regs xor IVs(in dst) */
13966 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13967
13968+ pax_force_retaddr
13969 ret;
13970 ENDPROC(cast6_xts_dec_8way)
13971diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13972index 26d49eb..c0a8c84 100644
13973--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13974+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13975@@ -45,6 +45,7 @@
13976
13977 #include <asm/inst.h>
13978 #include <linux/linkage.h>
13979+#include <asm/alternative-asm.h>
13980
13981 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13982
13983@@ -309,6 +310,7 @@ do_return:
13984 popq %rsi
13985 popq %rdi
13986 popq %rbx
13987+ pax_force_retaddr
13988 ret
13989
13990 ################################################################
13991diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13992index 5d1e007..098cb4f 100644
13993--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13994+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13995@@ -18,6 +18,7 @@
13996
13997 #include <linux/linkage.h>
13998 #include <asm/inst.h>
13999+#include <asm/alternative-asm.h>
14000
14001 .data
14002
14003@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14004 psrlq $1, T2
14005 pxor T2, T1
14006 pxor T1, DATA
14007+ pax_force_retaddr
14008 ret
14009 ENDPROC(__clmul_gf128mul_ble)
14010
14011@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14012 call __clmul_gf128mul_ble
14013 PSHUFB_XMM BSWAP DATA
14014 movups DATA, (%rdi)
14015+ pax_force_retaddr
14016 ret
14017 ENDPROC(clmul_ghash_mul)
14018
14019@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14020 PSHUFB_XMM BSWAP DATA
14021 movups DATA, (%rdi)
14022 .Lupdate_just_ret:
14023+ pax_force_retaddr
14024 ret
14025 ENDPROC(clmul_ghash_update)
14026diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14027index 9279e0b..c4b3d2c 100644
14028--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14029+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14030@@ -1,4 +1,5 @@
14031 #include <linux/linkage.h>
14032+#include <asm/alternative-asm.h>
14033
14034 # enter salsa20_encrypt_bytes
14035 ENTRY(salsa20_encrypt_bytes)
14036@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14037 add %r11,%rsp
14038 mov %rdi,%rax
14039 mov %rsi,%rdx
14040+ pax_force_retaddr
14041 ret
14042 # bytesatleast65:
14043 ._bytesatleast65:
14044@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14045 add %r11,%rsp
14046 mov %rdi,%rax
14047 mov %rsi,%rdx
14048+ pax_force_retaddr
14049 ret
14050 ENDPROC(salsa20_keysetup)
14051
14052@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14053 add %r11,%rsp
14054 mov %rdi,%rax
14055 mov %rsi,%rdx
14056+ pax_force_retaddr
14057 ret
14058 ENDPROC(salsa20_ivsetup)
14059diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14060index 2f202f4..d9164d6 100644
14061--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14062+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14063@@ -24,6 +24,7 @@
14064 */
14065
14066 #include <linux/linkage.h>
14067+#include <asm/alternative-asm.h>
14068 #include "glue_helper-asm-avx.S"
14069
14070 .file "serpent-avx-x86_64-asm_64.S"
14071@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14072 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14073 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14074
14075+ pax_force_retaddr
14076 ret;
14077 ENDPROC(__serpent_enc_blk8_avx)
14078
14079@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14080 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14081 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14082
14083+ pax_force_retaddr
14084 ret;
14085 ENDPROC(__serpent_dec_blk8_avx)
14086
14087@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14088
14089 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14090
14091+ pax_force_retaddr
14092 ret;
14093 ENDPROC(serpent_ecb_enc_8way_avx)
14094
14095@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14096
14097 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14098
14099+ pax_force_retaddr
14100 ret;
14101 ENDPROC(serpent_ecb_dec_8way_avx)
14102
14103@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14104
14105 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14106
14107+ pax_force_retaddr
14108 ret;
14109 ENDPROC(serpent_cbc_dec_8way_avx)
14110
14111@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14112
14113 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14114
14115+ pax_force_retaddr
14116 ret;
14117 ENDPROC(serpent_ctr_8way_avx)
14118
14119@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14120 /* dst <= regs xor IVs(in dst) */
14121 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14122
14123+ pax_force_retaddr
14124 ret;
14125 ENDPROC(serpent_xts_enc_8way_avx)
14126
14127@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14128 /* dst <= regs xor IVs(in dst) */
14129 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14130
14131+ pax_force_retaddr
14132 ret;
14133 ENDPROC(serpent_xts_dec_8way_avx)
14134diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14135index b222085..abd483c 100644
14136--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14137+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14138@@ -15,6 +15,7 @@
14139 */
14140
14141 #include <linux/linkage.h>
14142+#include <asm/alternative-asm.h>
14143 #include "glue_helper-asm-avx2.S"
14144
14145 .file "serpent-avx2-asm_64.S"
14146@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14147 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14148 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14149
14150+ pax_force_retaddr
14151 ret;
14152 ENDPROC(__serpent_enc_blk16)
14153
14154@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14155 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14156 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14157
14158+ pax_force_retaddr
14159 ret;
14160 ENDPROC(__serpent_dec_blk16)
14161
14162@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14163
14164 vzeroupper;
14165
14166+ pax_force_retaddr
14167 ret;
14168 ENDPROC(serpent_ecb_enc_16way)
14169
14170@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14171
14172 vzeroupper;
14173
14174+ pax_force_retaddr
14175 ret;
14176 ENDPROC(serpent_ecb_dec_16way)
14177
14178@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14179
14180 vzeroupper;
14181
14182+ pax_force_retaddr
14183 ret;
14184 ENDPROC(serpent_cbc_dec_16way)
14185
14186@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14187
14188 vzeroupper;
14189
14190+ pax_force_retaddr
14191 ret;
14192 ENDPROC(serpent_ctr_16way)
14193
14194@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14195
14196 vzeroupper;
14197
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(serpent_xts_enc_16way)
14201
14202@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14203
14204 vzeroupper;
14205
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(serpent_xts_dec_16way)
14209diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14210index acc066c..1559cc4 100644
14211--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14212+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14213@@ -25,6 +25,7 @@
14214 */
14215
14216 #include <linux/linkage.h>
14217+#include <asm/alternative-asm.h>
14218
14219 .file "serpent-sse2-x86_64-asm_64.S"
14220 .text
14221@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14222 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14223 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14224
14225+ pax_force_retaddr
14226 ret;
14227
14228 .L__enc_xor8:
14229 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14230 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14231
14232+ pax_force_retaddr
14233 ret;
14234 ENDPROC(__serpent_enc_blk_8way)
14235
14236@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14237 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14238 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14239
14240+ pax_force_retaddr
14241 ret;
14242 ENDPROC(serpent_dec_blk_8way)
14243diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14244index a410950..9dfe7ad 100644
14245--- a/arch/x86/crypto/sha1_ssse3_asm.S
14246+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14247@@ -29,6 +29,7 @@
14248 */
14249
14250 #include <linux/linkage.h>
14251+#include <asm/alternative-asm.h>
14252
14253 #define CTX %rdi // arg1
14254 #define BUF %rsi // arg2
14255@@ -75,9 +76,9 @@
14256
14257 push %rbx
14258 push %rbp
14259- push %r12
14260+ push %r14
14261
14262- mov %rsp, %r12
14263+ mov %rsp, %r14
14264 sub $64, %rsp # allocate workspace
14265 and $~15, %rsp # align stack
14266
14267@@ -99,11 +100,12 @@
14268 xor %rax, %rax
14269 rep stosq
14270
14271- mov %r12, %rsp # deallocate workspace
14272+ mov %r14, %rsp # deallocate workspace
14273
14274- pop %r12
14275+ pop %r14
14276 pop %rbp
14277 pop %rbx
14278+ pax_force_retaddr
14279 ret
14280
14281 ENDPROC(\name)
14282diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14283index 642f156..51a513c 100644
14284--- a/arch/x86/crypto/sha256-avx-asm.S
14285+++ b/arch/x86/crypto/sha256-avx-asm.S
14286@@ -49,6 +49,7 @@
14287
14288 #ifdef CONFIG_AS_AVX
14289 #include <linux/linkage.h>
14290+#include <asm/alternative-asm.h>
14291
14292 ## assume buffers not aligned
14293 #define VMOVDQ vmovdqu
14294@@ -460,6 +461,7 @@ done_hash:
14295 popq %r13
14296 popq %rbp
14297 popq %rbx
14298+ pax_force_retaddr
14299 ret
14300 ENDPROC(sha256_transform_avx)
14301
14302diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14303index 9e86944..3795e6a 100644
14304--- a/arch/x86/crypto/sha256-avx2-asm.S
14305+++ b/arch/x86/crypto/sha256-avx2-asm.S
14306@@ -50,6 +50,7 @@
14307
14308 #ifdef CONFIG_AS_AVX2
14309 #include <linux/linkage.h>
14310+#include <asm/alternative-asm.h>
14311
14312 ## assume buffers not aligned
14313 #define VMOVDQ vmovdqu
14314@@ -720,6 +721,7 @@ done_hash:
14315 popq %r12
14316 popq %rbp
14317 popq %rbx
14318+ pax_force_retaddr
14319 ret
14320 ENDPROC(sha256_transform_rorx)
14321
14322diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14323index f833b74..8c62a9e 100644
14324--- a/arch/x86/crypto/sha256-ssse3-asm.S
14325+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14326@@ -47,6 +47,7 @@
14327 ########################################################################
14328
14329 #include <linux/linkage.h>
14330+#include <asm/alternative-asm.h>
14331
14332 ## assume buffers not aligned
14333 #define MOVDQ movdqu
14334@@ -471,6 +472,7 @@ done_hash:
14335 popq %rbp
14336 popq %rbx
14337
14338+ pax_force_retaddr
14339 ret
14340 ENDPROC(sha256_transform_ssse3)
14341
14342diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14343index 974dde9..a823ff9 100644
14344--- a/arch/x86/crypto/sha512-avx-asm.S
14345+++ b/arch/x86/crypto/sha512-avx-asm.S
14346@@ -49,6 +49,7 @@
14347
14348 #ifdef CONFIG_AS_AVX
14349 #include <linux/linkage.h>
14350+#include <asm/alternative-asm.h>
14351
14352 .text
14353
14354@@ -364,6 +365,7 @@ updateblock:
14355 mov frame_RSPSAVE(%rsp), %rsp
14356
14357 nowork:
14358+ pax_force_retaddr
14359 ret
14360 ENDPROC(sha512_transform_avx)
14361
14362diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14363index 568b961..ed20c37 100644
14364--- a/arch/x86/crypto/sha512-avx2-asm.S
14365+++ b/arch/x86/crypto/sha512-avx2-asm.S
14366@@ -51,6 +51,7 @@
14367
14368 #ifdef CONFIG_AS_AVX2
14369 #include <linux/linkage.h>
14370+#include <asm/alternative-asm.h>
14371
14372 .text
14373
14374@@ -678,6 +679,7 @@ done_hash:
14375
14376 # Restore Stack Pointer
14377 mov frame_RSPSAVE(%rsp), %rsp
14378+ pax_force_retaddr
14379 ret
14380 ENDPROC(sha512_transform_rorx)
14381
14382diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14383index fb56855..6edd768 100644
14384--- a/arch/x86/crypto/sha512-ssse3-asm.S
14385+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14386@@ -48,6 +48,7 @@
14387 ########################################################################
14388
14389 #include <linux/linkage.h>
14390+#include <asm/alternative-asm.h>
14391
14392 .text
14393
14394@@ -363,6 +364,7 @@ updateblock:
14395 mov frame_RSPSAVE(%rsp), %rsp
14396
14397 nowork:
14398+ pax_force_retaddr
14399 ret
14400 ENDPROC(sha512_transform_ssse3)
14401
14402diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14403index 0505813..b067311 100644
14404--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14405+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14406@@ -24,6 +24,7 @@
14407 */
14408
14409 #include <linux/linkage.h>
14410+#include <asm/alternative-asm.h>
14411 #include "glue_helper-asm-avx.S"
14412
14413 .file "twofish-avx-x86_64-asm_64.S"
14414@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14415 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14416 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14417
14418+ pax_force_retaddr
14419 ret;
14420 ENDPROC(__twofish_enc_blk8)
14421
14422@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14423 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14424 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14425
14426+ pax_force_retaddr
14427 ret;
14428 ENDPROC(__twofish_dec_blk8)
14429
14430@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14431
14432 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14433
14434+ pax_force_retaddr
14435 ret;
14436 ENDPROC(twofish_ecb_enc_8way)
14437
14438@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14439
14440 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14441
14442+ pax_force_retaddr
14443 ret;
14444 ENDPROC(twofish_ecb_dec_8way)
14445
14446@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14447 * %rdx: src
14448 */
14449
14450- pushq %r12;
14451+ pushq %r14;
14452
14453 movq %rsi, %r11;
14454- movq %rdx, %r12;
14455+ movq %rdx, %r14;
14456
14457 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14458
14459 call __twofish_dec_blk8;
14460
14461- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14462+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14463
14464- popq %r12;
14465+ popq %r14;
14466
14467+ pax_force_retaddr
14468 ret;
14469 ENDPROC(twofish_cbc_dec_8way)
14470
14471@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14472 * %rcx: iv (little endian, 128bit)
14473 */
14474
14475- pushq %r12;
14476+ pushq %r14;
14477
14478 movq %rsi, %r11;
14479- movq %rdx, %r12;
14480+ movq %rdx, %r14;
14481
14482 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14483 RD2, RX0, RX1, RY0);
14484
14485 call __twofish_enc_blk8;
14486
14487- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14488+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14489
14490- popq %r12;
14491+ popq %r14;
14492
14493+ pax_force_retaddr
14494 ret;
14495 ENDPROC(twofish_ctr_8way)
14496
14497@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14498 /* dst <= regs xor IVs(in dst) */
14499 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500
14501+ pax_force_retaddr
14502 ret;
14503 ENDPROC(twofish_xts_enc_8way)
14504
14505@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14506 /* dst <= regs xor IVs(in dst) */
14507 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14508
14509+ pax_force_retaddr
14510 ret;
14511 ENDPROC(twofish_xts_dec_8way)
14512diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14513index 1c3b7ce..02f578d 100644
14514--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14515+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14516@@ -21,6 +21,7 @@
14517 */
14518
14519 #include <linux/linkage.h>
14520+#include <asm/alternative-asm.h>
14521
14522 .file "twofish-x86_64-asm-3way.S"
14523 .text
14524@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14525 popq %r13;
14526 popq %r14;
14527 popq %r15;
14528+ pax_force_retaddr
14529 ret;
14530
14531 .L__enc_xor3:
14532@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14533 popq %r13;
14534 popq %r14;
14535 popq %r15;
14536+ pax_force_retaddr
14537 ret;
14538 ENDPROC(__twofish_enc_blk_3way)
14539
14540@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14541 popq %r13;
14542 popq %r14;
14543 popq %r15;
14544+ pax_force_retaddr
14545 ret;
14546 ENDPROC(twofish_dec_blk_3way)
14547diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14548index a039d21..524b8b2 100644
14549--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14550+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14551@@ -22,6 +22,7 @@
14552
14553 #include <linux/linkage.h>
14554 #include <asm/asm-offsets.h>
14555+#include <asm/alternative-asm.h>
14556
14557 #define a_offset 0
14558 #define b_offset 4
14559@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14560
14561 popq R1
14562 movq $1,%rax
14563+ pax_force_retaddr
14564 ret
14565 ENDPROC(twofish_enc_blk)
14566
14567@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14568
14569 popq R1
14570 movq $1,%rax
14571+ pax_force_retaddr
14572 ret
14573 ENDPROC(twofish_dec_blk)
14574diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14575index ae6aad1..719d6d9 100644
14576--- a/arch/x86/ia32/ia32_aout.c
14577+++ b/arch/x86/ia32/ia32_aout.c
14578@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14579 unsigned long dump_start, dump_size;
14580 struct user32 dump;
14581
14582+ memset(&dump, 0, sizeof(dump));
14583+
14584 fs = get_fs();
14585 set_fs(KERNEL_DS);
14586 has_dumped = 1;
14587diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14588index d0165c9..0d5639b 100644
14589--- a/arch/x86/ia32/ia32_signal.c
14590+++ b/arch/x86/ia32/ia32_signal.c
14591@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14592 if (__get_user(set.sig[0], &frame->sc.oldmask)
14593 || (_COMPAT_NSIG_WORDS > 1
14594 && __copy_from_user((((char *) &set.sig) + 4),
14595- &frame->extramask,
14596+ frame->extramask,
14597 sizeof(frame->extramask))))
14598 goto badframe;
14599
14600@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14601 sp -= frame_size;
14602 /* Align the stack pointer according to the i386 ABI,
14603 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14604- sp = ((sp + 4) & -16ul) - 4;
14605+ sp = ((sp - 12) & -16ul) - 4;
14606 return (void __user *) sp;
14607 }
14608
14609@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14610 } else {
14611 /* Return stub is in 32bit vsyscall page */
14612 if (current->mm->context.vdso)
14613- restorer = current->mm->context.vdso +
14614- selected_vdso32->sym___kernel_sigreturn;
14615+ restorer = (void __force_user *)(current->mm->context.vdso +
14616+ selected_vdso32->sym___kernel_sigreturn);
14617 else
14618- restorer = &frame->retcode;
14619+ restorer = frame->retcode;
14620 }
14621
14622 put_user_try {
14623@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14624 * These are actually not used anymore, but left because some
14625 * gdb versions depend on them as a marker.
14626 */
14627- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14628+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14629 } put_user_catch(err);
14630
14631 if (err)
14632@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14633 0xb8,
14634 __NR_ia32_rt_sigreturn,
14635 0x80cd,
14636- 0,
14637+ 0
14638 };
14639
14640 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14641@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14642
14643 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14644 restorer = ksig->ka.sa.sa_restorer;
14645+ else if (current->mm->context.vdso)
14646+ /* Return stub is in 32bit vsyscall page */
14647+ restorer = (void __force_user *)(current->mm->context.vdso +
14648+ selected_vdso32->sym___kernel_rt_sigreturn);
14649 else
14650- restorer = current->mm->context.vdso +
14651- selected_vdso32->sym___kernel_rt_sigreturn;
14652+ restorer = frame->retcode;
14653 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14654
14655 /*
14656 * Not actually used anymore, but left because some gdb
14657 * versions need it.
14658 */
14659- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14660+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14661 } put_user_catch(err);
14662
14663 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
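
Both forms of the sigframe alignment expression above satisfy the i386 ABI requirement ((sp + 4) & 15) == 0, since each produces an address of the form 16k - 4; what changes is the rounding direction. The original ((sp + 4) & -16ul) - 4 can round back up to sp itself when sp + 4 is already 16-byte aligned, while the patched ((sp - 12) & -16ul) - 4 always lands strictly below sp, so the frame can never alias the current stack pointer. A worked example with sp = 0xffc (sp + 4 aligned):

	old: ((0xffc + 4) & ~15) - 4 = 0x1000 - 4 = 0xffc	(equal to sp)
	new: ((0xffc - 12) & ~15) - 4 = 0xff0 - 4 = 0xfec	(16 bytes lower)
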
14664diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14665index 156ebca..9591cf0 100644
14666--- a/arch/x86/ia32/ia32entry.S
14667+++ b/arch/x86/ia32/ia32entry.S
14668@@ -15,8 +15,10 @@
14669 #include <asm/irqflags.h>
14670 #include <asm/asm.h>
14671 #include <asm/smap.h>
14672+#include <asm/pgtable.h>
14673 #include <linux/linkage.h>
14674 #include <linux/err.h>
14675+#include <asm/alternative-asm.h>
14676
14677 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14678 #include <linux/elf-em.h>
14679@@ -62,12 +64,12 @@
14680 */
14681 .macro LOAD_ARGS32 offset, _r9=0
14682 .if \_r9
14683- movl \offset+16(%rsp),%r9d
14684+ movl \offset+R9(%rsp),%r9d
14685 .endif
14686- movl \offset+40(%rsp),%ecx
14687- movl \offset+48(%rsp),%edx
14688- movl \offset+56(%rsp),%esi
14689- movl \offset+64(%rsp),%edi
14690+ movl \offset+RCX(%rsp),%ecx
14691+ movl \offset+RDX(%rsp),%edx
14692+ movl \offset+RSI(%rsp),%esi
14693+ movl \offset+RDI(%rsp),%edi
14694 movl %eax,%eax /* zero extension */
14695 .endm
14696
14697@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14698 ENDPROC(native_irq_enable_sysexit)
14699 #endif
14700
14701+ .macro pax_enter_kernel_user
14702+ pax_set_fptr_mask
14703+#ifdef CONFIG_PAX_MEMORY_UDEREF
14704+ call pax_enter_kernel_user
14705+#endif
14706+ .endm
14707+
14708+ .macro pax_exit_kernel_user
14709+#ifdef CONFIG_PAX_MEMORY_UDEREF
14710+ call pax_exit_kernel_user
14711+#endif
14712+#ifdef CONFIG_PAX_RANDKSTACK
14713+ pushq %rax
14714+ pushq %r11
14715+ call pax_randomize_kstack
14716+ popq %r11
14717+ popq %rax
14718+#endif
14719+ .endm
14720+
14721+ .macro pax_erase_kstack
14722+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14723+ call pax_erase_kstack
14724+#endif
14725+ .endm
14726+
14727 /*
14728 * 32bit SYSENTER instruction entry.
14729 *
14730@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14731 CFI_REGISTER rsp,rbp
14732 SWAPGS_UNSAFE_STACK
14733 movq PER_CPU_VAR(kernel_stack), %rsp
14734- addq $(KERNEL_STACK_OFFSET),%rsp
14735- /*
14736- * No need to follow this irqs on/off section: the syscall
14737- * disabled irqs, here we enable it straight after entry:
14738- */
14739- ENABLE_INTERRUPTS(CLBR_NONE)
14740 movl %ebp,%ebp /* zero extension */
14741 pushq_cfi $__USER32_DS
14742 /*CFI_REL_OFFSET ss,0*/
14743@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14744 CFI_REL_OFFSET rsp,0
14745 pushfq_cfi
14746 /*CFI_REL_OFFSET rflags,0*/
14747- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14748- CFI_REGISTER rip,r10
14749+ orl $X86_EFLAGS_IF,(%rsp)
14750+ GET_THREAD_INFO(%r11)
14751+ movl TI_sysenter_return(%r11), %r11d
14752+ CFI_REGISTER rip,r11
14753 pushq_cfi $__USER32_CS
14754 /*CFI_REL_OFFSET cs,0*/
14755 movl %eax, %eax
14756- pushq_cfi %r10
14757+ pushq_cfi %r11
14758 CFI_REL_OFFSET rip,0
14759 pushq_cfi %rax
14760 cld
14761 SAVE_ARGS 0,1,0
14762+ pax_enter_kernel_user
14763+
14764+#ifdef CONFIG_PAX_RANDKSTACK
14765+ pax_erase_kstack
14766+#endif
14767+
14768+ /*
14769+ * No need to follow this irqs on/off section: the syscall
14770+ * disabled irqs, here we enable it straight after entry:
14771+ */
14772+ ENABLE_INTERRUPTS(CLBR_NONE)
14773 /* no need to do an access_ok check here because rbp has been
14774 32bit zero extended */
14775+
14776+#ifdef CONFIG_PAX_MEMORY_UDEREF
14777+ addq pax_user_shadow_base,%rbp
14778+ ASM_PAX_OPEN_USERLAND
14779+#endif
14780+
14781 ASM_STAC
14782 1: movl (%rbp),%ebp
14783 _ASM_EXTABLE(1b,ia32_badarg)
14784 ASM_CLAC
14785
14786+#ifdef CONFIG_PAX_MEMORY_UDEREF
14787+ ASM_PAX_CLOSE_USERLAND
14788+#endif
14789+
14790 /*
14791 * Sysenter doesn't filter flags, so we need to clear NT
14792 * ourselves. To save a few cycles, we can check whether
14793@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14794 jnz sysenter_fix_flags
14795 sysenter_flags_fixed:
14796
14797- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14798- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14799+ GET_THREAD_INFO(%r11)
14800+ orl $TS_COMPAT,TI_status(%r11)
14801+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14802 CFI_REMEMBER_STATE
14803 jnz sysenter_tracesys
14804 cmpq $(IA32_NR_syscalls-1),%rax
14805@@ -172,14 +218,17 @@ sysenter_do_call:
14806 sysenter_dispatch:
14807 call *ia32_sys_call_table(,%rax,8)
14808 movq %rax,RAX-ARGOFFSET(%rsp)
14809+ GET_THREAD_INFO(%r11)
14810 DISABLE_INTERRUPTS(CLBR_NONE)
14811 TRACE_IRQS_OFF
14812- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14813+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14814 jnz sysexit_audit
14815 sysexit_from_sys_call:
14816- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14817+ pax_exit_kernel_user
14818+ pax_erase_kstack
14819+ andl $~TS_COMPAT,TI_status(%r11)
14820 /* clear IF, that popfq doesn't enable interrupts early */
14821- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
14822+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
14823 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
14824 CFI_REGISTER rip,rdx
14825 RESTORE_ARGS 0,24,0,0,0,0
14826@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14827 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14828 movl %eax,%edi /* 1st arg: syscall number */
14829 call __audit_syscall_entry
14830+
14831+ pax_erase_kstack
14832+
14833 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14834 cmpq $(IA32_NR_syscalls-1),%rax
14835 ja ia32_badsys
14836@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14837 .endm
14838
14839 .macro auditsys_exit exit
14840- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14841+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14842 jnz ia32_ret_from_sys_call
14843 TRACE_IRQS_ON
14844 ENABLE_INTERRUPTS(CLBR_NONE)
14845@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14846 1: setbe %al /* 1 if error, 0 if not */
14847 movzbl %al,%edi /* zero-extend that into %edi */
14848 call __audit_syscall_exit
14849+ GET_THREAD_INFO(%r11)
14850 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14851 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14852 DISABLE_INTERRUPTS(CLBR_NONE)
14853 TRACE_IRQS_OFF
14854- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14855+ testl %edi,TI_flags(%r11)
14856 jz \exit
14857 CLEAR_RREGS -ARGOFFSET
14858 jmp int_with_check
14859@@ -253,7 +306,7 @@ sysenter_fix_flags:
14860
14861 sysenter_tracesys:
14862 #ifdef CONFIG_AUDITSYSCALL
14863- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14864+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14865 jz sysenter_auditsys
14866 #endif
14867 SAVE_REST
14868@@ -265,6 +318,9 @@ sysenter_tracesys:
14869 RESTORE_REST
14870 cmpq $(IA32_NR_syscalls-1),%rax
14871 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14872+
14873+ pax_erase_kstack
14874+
14875 jmp sysenter_do_call
14876 CFI_ENDPROC
14877 ENDPROC(ia32_sysenter_target)
14878@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14879 ENTRY(ia32_cstar_target)
14880 CFI_STARTPROC32 simple
14881 CFI_SIGNAL_FRAME
14882- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14883+ CFI_DEF_CFA rsp,0
14884 CFI_REGISTER rip,rcx
14885 /*CFI_REGISTER rflags,r11*/
14886 SWAPGS_UNSAFE_STACK
14887 movl %esp,%r8d
14888 CFI_REGISTER rsp,r8
14889 movq PER_CPU_VAR(kernel_stack),%rsp
14890+ SAVE_ARGS 8*6,0,0
14891+ pax_enter_kernel_user
14892+
14893+#ifdef CONFIG_PAX_RANDKSTACK
14894+ pax_erase_kstack
14895+#endif
14896+
14897 /*
14898 * No need to follow this irqs on/off section: the syscall
14899 * disabled irqs and here we enable it straight after entry:
14900 */
14901 ENABLE_INTERRUPTS(CLBR_NONE)
14902- SAVE_ARGS 8,0,0
14903 movl %eax,%eax /* zero extension */
14904 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14905 movq %rcx,RIP-ARGOFFSET(%rsp)
14906@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14907 /* no need to do an access_ok check here because r8 has been
14908 32bit zero extended */
14909 /* hardware stack frame is complete now */
14910+
14911+#ifdef CONFIG_PAX_MEMORY_UDEREF
14912+ ASM_PAX_OPEN_USERLAND
14913+ movq pax_user_shadow_base,%r8
14914+ addq RSP-ARGOFFSET(%rsp),%r8
14915+#endif
14916+
14917 ASM_STAC
14918 1: movl (%r8),%r9d
14919 _ASM_EXTABLE(1b,ia32_badarg)
14920 ASM_CLAC
14921- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14922- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14923+
14924+#ifdef CONFIG_PAX_MEMORY_UDEREF
14925+ ASM_PAX_CLOSE_USERLAND
14926+#endif
14927+
14928+ GET_THREAD_INFO(%r11)
14929+ orl $TS_COMPAT,TI_status(%r11)
14930+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14931 CFI_REMEMBER_STATE
14932 jnz cstar_tracesys
14933 cmpq $IA32_NR_syscalls-1,%rax
14934@@ -335,13 +410,16 @@ cstar_do_call:
14935 cstar_dispatch:
14936 call *ia32_sys_call_table(,%rax,8)
14937 movq %rax,RAX-ARGOFFSET(%rsp)
14938+ GET_THREAD_INFO(%r11)
14939 DISABLE_INTERRUPTS(CLBR_NONE)
14940 TRACE_IRQS_OFF
14941- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14942+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14943 jnz sysretl_audit
14944 sysretl_from_sys_call:
14945- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14946- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14947+ pax_exit_kernel_user
14948+ pax_erase_kstack
14949+ andl $~TS_COMPAT,TI_status(%r11)
14950+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14951 movl RIP-ARGOFFSET(%rsp),%ecx
14952 CFI_REGISTER rip,rcx
14953 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14954@@ -368,7 +446,7 @@ sysretl_audit:
14955
14956 cstar_tracesys:
14957 #ifdef CONFIG_AUDITSYSCALL
14958- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14959+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14960 jz cstar_auditsys
14961 #endif
14962 xchgl %r9d,%ebp
14963@@ -382,11 +460,19 @@ cstar_tracesys:
14964 xchgl %ebp,%r9d
14965 cmpq $(IA32_NR_syscalls-1),%rax
14966 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14967+
14968+ pax_erase_kstack
14969+
14970 jmp cstar_do_call
14971 END(ia32_cstar_target)
14972
14973 ia32_badarg:
14974 ASM_CLAC
14975+
14976+#ifdef CONFIG_PAX_MEMORY_UDEREF
14977+ ASM_PAX_CLOSE_USERLAND
14978+#endif
14979+
14980 movq $-EFAULT,%rax
14981 jmp ia32_sysret
14982 CFI_ENDPROC
14983@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14984 CFI_REL_OFFSET rip,RIP-RIP
14985 PARAVIRT_ADJUST_EXCEPTION_FRAME
14986 SWAPGS
14987- /*
14988- * No need to follow this irqs on/off section: the syscall
14989- * disabled irqs and here we enable it straight after entry:
14990- */
14991- ENABLE_INTERRUPTS(CLBR_NONE)
14992 movl %eax,%eax
14993 pushq_cfi %rax
14994 cld
14995 /* note the registers are not zero extended to the sf.
14996 this could be a problem. */
14997 SAVE_ARGS 0,1,0
14998- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14999- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15000+ pax_enter_kernel_user
15001+
15002+#ifdef CONFIG_PAX_RANDKSTACK
15003+ pax_erase_kstack
15004+#endif
15005+
15006+ /*
15007+ * No need to follow this irqs on/off section: the syscall
15008+ * disabled irqs and here we enable it straight after entry:
15009+ */
15010+ ENABLE_INTERRUPTS(CLBR_NONE)
15011+ GET_THREAD_INFO(%r11)
15012+ orl $TS_COMPAT,TI_status(%r11)
15013+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15014 jnz ia32_tracesys
15015 cmpq $(IA32_NR_syscalls-1),%rax
15016 ja ia32_badsys
15017@@ -458,6 +551,9 @@ ia32_tracesys:
15018 RESTORE_REST
15019 cmpq $(IA32_NR_syscalls-1),%rax
15020 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15021+
15022+ pax_erase_kstack
15023+
15024 jmp ia32_do_call
15025 END(ia32_syscall)
15026
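
The extra instructions on the cstar path deal with the sixth syscall argument, which lives on the 32-bit user stack and is read through %r8. Under UDEREF the kernel does not keep userland mapped at its native addresses, so the pointer is first rebased into the kernel's shadow mapping of userland before the movl (%r8),%r9d load. A minimal C model of that rebase, assuming only that pax_user_shadow_base holds the per-boot shadow offset named in the patch:

/* sketch: rebase a userland pointer into the UDEREF shadow mapping
 * before the kernel dereferences it (amd64) */
static unsigned long uderef_rebase(unsigned long user_ptr)
{
        extern unsigned long pax_user_shadow_base;      /* set up at boot */

        /* mirrors "movq pax_user_shadow_base,%r8; addq RSP-ARGOFFSET(%rsp),%r8" */
        return pax_user_shadow_base + user_ptr;
}
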
15027diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15028index 8e0ceec..af13504 100644
15029--- a/arch/x86/ia32/sys_ia32.c
15030+++ b/arch/x86/ia32/sys_ia32.c
15031@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15032 */
15033 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15034 {
15035- typeof(ubuf->st_uid) uid = 0;
15036- typeof(ubuf->st_gid) gid = 0;
15037+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15038+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15039 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15040 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15041 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
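
The rewritten initializers in cp_stat64 use the typeof-through-a-null-pointer idiom: typeof(((struct stat64 *)0)->st_uid) names the member's type without going through the __user-qualified ubuf, so the address-space qualifier cannot leak into the type of a plain kernel local under the checker plugins. The null pointer is never dereferenced because typeof is an unevaluated context. A stand-alone illustration with a hypothetical struct:

#include <stdio.h>

struct example { unsigned short st_uid; };

int main(void)
{
        /* the member's type, taken without any object of the struct in scope */
        typeof(((struct example *)0)->st_uid) uid = 0;

        printf("%zu\n", sizeof(uid));   /* 2 on common ABIs: uid is unsigned short */
        return 0;
}
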
15042diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15043index 372231c..51b537d 100644
15044--- a/arch/x86/include/asm/alternative-asm.h
15045+++ b/arch/x86/include/asm/alternative-asm.h
15046@@ -18,6 +18,45 @@
15047 .endm
15048 #endif
15049
15050+#ifdef KERNEXEC_PLUGIN
15051+ .macro pax_force_retaddr_bts rip=0
15052+ btsq $63,\rip(%rsp)
15053+ .endm
15054+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15055+ .macro pax_force_retaddr rip=0, reload=0
15056+ btsq $63,\rip(%rsp)
15057+ .endm
15058+ .macro pax_force_fptr ptr
15059+ btsq $63,\ptr
15060+ .endm
15061+ .macro pax_set_fptr_mask
15062+ .endm
15063+#endif
15064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15065+ .macro pax_force_retaddr rip=0, reload=0
15066+ .if \reload
15067+ pax_set_fptr_mask
15068+ .endif
15069+ orq %r12,\rip(%rsp)
15070+ .endm
15071+ .macro pax_force_fptr ptr
15072+ orq %r12,\ptr
15073+ .endm
15074+ .macro pax_set_fptr_mask
15075+ movabs $0x8000000000000000,%r12
15076+ .endm
15077+#endif
15078+#else
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .endm
15081+ .macro pax_force_fptr ptr
15082+ .endm
15083+ .macro pax_force_retaddr_bts rip=0
15084+ .endm
15085+ .macro pax_set_fptr_mask
15086+ .endm
15087+#endif
15088+
15089 .macro altinstruction_entry orig alt feature orig_len alt_len
15090 .long \orig - .
15091 .long \alt - .
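
Both KERNEXEC plugin methods enforce the same invariant: kernel return addresses and function pointers must have bit 63 set. Kernel text already lives at such addresses, so tagging is a no-op for legitimate values, while an attacker-supplied userland address becomes non-canonical and faults on the first ret or indirect call. The BTS method sets the bit in place with btsq; the OR method keeps the mask resident in %r12 (reloaded by pax_set_fptr_mask) so the common case is a cheaper orq. The invariant as a C sketch, a model of the idea rather than the plugin's output:

#define KERNEXEC_MASK   (1UL << 63)

/* legitimate kernel pointers are unchanged; a smuggled userland
 * pointer becomes non-canonical and faults when used */
static inline unsigned long kernexec_tag(unsigned long ip)
{
        return ip | KERNEXEC_MASK;
}
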
15092diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15093index 473bdbe..b1e3377 100644
15094--- a/arch/x86/include/asm/alternative.h
15095+++ b/arch/x86/include/asm/alternative.h
15096@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15097 ".pushsection .discard,\"aw\",@progbits\n" \
15098 DISCARD_ENTRY(1) \
15099 ".popsection\n" \
15100- ".pushsection .altinstr_replacement, \"ax\"\n" \
15101+ ".pushsection .altinstr_replacement, \"a\"\n" \
15102 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15103 ".popsection"
15104
15105@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15106 DISCARD_ENTRY(1) \
15107 DISCARD_ENTRY(2) \
15108 ".popsection\n" \
15109- ".pushsection .altinstr_replacement, \"ax\"\n" \
15110+ ".pushsection .altinstr_replacement, \"a\"\n" \
15111 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15112 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15113 ".popsection"
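
Dropping the x flag from .altinstr_replacement is safe because the section is never executed in place: apply_alternatives() only copies the replacement bytes over the original instructions in .text at patch time, which matters under KERNEXEC where every executable mapping is locked down. A simplified model of the apply step; the struct layout here is an assumption in the style of this kernel generation, and only boot_cpu_has() and memcpy() are real kernel interfaces:

/* sketch of apply_alternatives(), reduced to one entry */
struct alt_instr_model {
        s32 instr_offset;       /* relative pointer into .text */
        s32 repl_offset;        /* relative pointer into .altinstr_replacement */
        u16 cpuid;              /* feature bit selecting the replacement */
        u8  instrlen, replacementlen;
};

static void apply_one(struct alt_instr_model *a)
{
        u8 *instr = (u8 *)&a->instr_offset + a->instr_offset;
        u8 *repl  = (u8 *)&a->repl_offset + a->repl_offset;

        if (boot_cpu_has(a->cpuid))
                memcpy(instr, repl, a->replacementlen); /* copied, never jumped to in place */
}
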
15114diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15115index efc3b22..85c4f3a 100644
15116--- a/arch/x86/include/asm/apic.h
15117+++ b/arch/x86/include/asm/apic.h
15118@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15119
15120 #ifdef CONFIG_X86_LOCAL_APIC
15121
15122-extern unsigned int apic_verbosity;
15123+extern int apic_verbosity;
15124 extern int local_apic_timer_c2_ok;
15125
15126 extern int disable_apic;
15127diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15128index 20370c6..a2eb9b0 100644
15129--- a/arch/x86/include/asm/apm.h
15130+++ b/arch/x86/include/asm/apm.h
15131@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15132 __asm__ __volatile__(APM_DO_ZERO_SEGS
15133 "pushl %%edi\n\t"
15134 "pushl %%ebp\n\t"
15135- "lcall *%%cs:apm_bios_entry\n\t"
15136+ "lcall *%%ss:apm_bios_entry\n\t"
15137 "setc %%al\n\t"
15138 "popl %%ebp\n\t"
15139 "popl %%edi\n\t"
15140@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15141 __asm__ __volatile__(APM_DO_ZERO_SEGS
15142 "pushl %%edi\n\t"
15143 "pushl %%ebp\n\t"
15144- "lcall *%%cs:apm_bios_entry\n\t"
15145+ "lcall *%%ss:apm_bios_entry\n\t"
15146 "setc %%bl\n\t"
15147 "popl %%ebp\n\t"
15148 "popl %%edi\n\t"
15149diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15150index 5e5cd12..51cdc93 100644
15151--- a/arch/x86/include/asm/atomic.h
15152+++ b/arch/x86/include/asm/atomic.h
15153@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15154 }
15155
15156 /**
15157+ * atomic_read_unchecked - read atomic variable
15158+ * @v: pointer of type atomic_unchecked_t
15159+ *
15160+ * Atomically reads the value of @v.
15161+ */
15162+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15163+{
15164+ return ACCESS_ONCE((v)->counter);
15165+}
15166+
15167+/**
15168 * atomic_set - set atomic variable
15169 * @v: pointer of type atomic_t
15170 * @i: required value
15171@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15172 }
15173
15174 /**
15175+ * atomic_set_unchecked - set atomic variable
15176+ * @v: pointer of type atomic_unchecked_t
15177+ * @i: required value
15178+ *
15179+ * Atomically sets the value of @v to @i.
15180+ */
15181+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15182+{
15183+ v->counter = i;
15184+}
15185+
15186+/**
15187 * atomic_add - add integer to atomic variable
15188 * @i: integer value to add
15189 * @v: pointer of type atomic_t
15190@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15191 */
15192 static inline void atomic_add(int i, atomic_t *v)
15193 {
15194- asm volatile(LOCK_PREFIX "addl %1,%0"
15195+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15196+
15197+#ifdef CONFIG_PAX_REFCOUNT
15198+ "jno 0f\n"
15199+ LOCK_PREFIX "subl %1,%0\n"
15200+ "int $4\n0:\n"
15201+ _ASM_EXTABLE(0b, 0b)
15202+#endif
15203+
15204+ : "+m" (v->counter)
15205+ : "ir" (i));
15206+}
15207+
15208+/**
15209+ * atomic_add_unchecked - add integer to atomic variable
15210+ * @i: integer value to add
15211+ * @v: pointer of type atomic_unchecked_t
15212+ *
15213+ * Atomically adds @i to @v.
15214+ */
15215+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15216+{
15217+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15218 : "+m" (v->counter)
15219 : "ir" (i));
15220 }
15221@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15222 */
15223 static inline void atomic_sub(int i, atomic_t *v)
15224 {
15225- asm volatile(LOCK_PREFIX "subl %1,%0"
15226+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15227+
15228+#ifdef CONFIG_PAX_REFCOUNT
15229+ "jno 0f\n"
15230+ LOCK_PREFIX "addl %1,%0\n"
15231+ "int $4\n0:\n"
15232+ _ASM_EXTABLE(0b, 0b)
15233+#endif
15234+
15235+ : "+m" (v->counter)
15236+ : "ir" (i));
15237+}
15238+
15239+/**
15240+ * atomic_sub_unchecked - subtract integer from atomic variable
15241+ * @i: integer value to subtract
15242+ * @v: pointer of type atomic_unchecked_t
15243+ *
15244+ * Atomically subtracts @i from @v.
15245+ */
15246+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15247+{
15248+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15249 : "+m" (v->counter)
15250 : "ir" (i));
15251 }
15252@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15253 */
15254 static inline int atomic_sub_and_test(int i, atomic_t *v)
15255 {
15256- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15257+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15258 }
15259
15260 /**
15261@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15262 */
15263 static inline void atomic_inc(atomic_t *v)
15264 {
15265- asm volatile(LOCK_PREFIX "incl %0"
15266+ asm volatile(LOCK_PREFIX "incl %0\n"
15267+
15268+#ifdef CONFIG_PAX_REFCOUNT
15269+ "jno 0f\n"
15270+ LOCK_PREFIX "decl %0\n"
15271+ "int $4\n0:\n"
15272+ _ASM_EXTABLE(0b, 0b)
15273+#endif
15274+
15275+ : "+m" (v->counter));
15276+}
15277+
15278+/**
15279+ * atomic_inc_unchecked - increment atomic variable
15280+ * @v: pointer of type atomic_unchecked_t
15281+ *
15282+ * Atomically increments @v by 1.
15283+ */
15284+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15285+{
15286+ asm volatile(LOCK_PREFIX "incl %0\n"
15287 : "+m" (v->counter));
15288 }
15289
15290@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15291 */
15292 static inline void atomic_dec(atomic_t *v)
15293 {
15294- asm volatile(LOCK_PREFIX "decl %0"
15295+ asm volatile(LOCK_PREFIX "decl %0\n"
15296+
15297+#ifdef CONFIG_PAX_REFCOUNT
15298+ "jno 0f\n"
15299+ LOCK_PREFIX "incl %0\n"
15300+ "int $4\n0:\n"
15301+ _ASM_EXTABLE(0b, 0b)
15302+#endif
15303+
15304+ : "+m" (v->counter));
15305+}
15306+
15307+/**
15308+ * atomic_dec_unchecked - decrement atomic variable
15309+ * @v: pointer of type atomic_unchecked_t
15310+ *
15311+ * Atomically decrements @v by 1.
15312+ */
15313+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15314+{
15315+ asm volatile(LOCK_PREFIX "decl %0\n"
15316 : "+m" (v->counter));
15317 }
15318
15319@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15320 */
15321 static inline int atomic_dec_and_test(atomic_t *v)
15322 {
15323- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15324+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15325 }
15326
15327 /**
15328@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15329 */
15330 static inline int atomic_inc_and_test(atomic_t *v)
15331 {
15332- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15333+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15334+}
15335+
15336+/**
15337+ * atomic_inc_and_test_unchecked - increment and test
15338+ * @v: pointer of type atomic_unchecked_t
15339+ *
15340+ * Atomically increments @v by 1
15341+ * and returns true if the result is zero, or false for all
15342+ * other cases.
15343+ */
15344+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15345+{
15346+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347 }
15348
15349 /**
15350@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15351 */
15352 static inline int atomic_add_negative(int i, atomic_t *v)
15353 {
15354- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15355+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15356 }
15357
15358 /**
15359@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15360 *
15361 * Atomically adds @i to @v and returns @i + @v
15362 */
15363-static inline int atomic_add_return(int i, atomic_t *v)
15364+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15365+{
15366+ return i + xadd_check_overflow(&v->counter, i);
15367+}
15368+
15369+/**
15370+ * atomic_add_return_unchecked - add integer and return
15371+ * @i: integer value to add
15372+ * @v: pointer of type atomic_unchecked_t
15373+ *
15374+ * Atomically adds @i to @v and returns @i + @v
15375+ */
15376+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15377 {
15378 return i + xadd(&v->counter, i);
15379 }
15380@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15381 *
15382 * Atomically subtracts @i from @v and returns @v - @i
15383 */
15384-static inline int atomic_sub_return(int i, atomic_t *v)
15385+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15386 {
15387 return atomic_add_return(-i, v);
15388 }
15389
15390 #define atomic_inc_return(v) (atomic_add_return(1, v))
15391+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15392+{
15393+ return atomic_add_return_unchecked(1, v);
15394+}
15395 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15396
15397-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15398+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15399+{
15400+ return cmpxchg(&v->counter, old, new);
15401+}
15402+
15403+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15404 {
15405 return cmpxchg(&v->counter, old, new);
15406 }
15407@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15408 return xchg(&v->counter, new);
15409 }
15410
15411+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15412+{
15413+ return xchg(&v->counter, new);
15414+}
15415+
15416 /**
15417 * __atomic_add_unless - add unless the number is already a given value
15418 * @v: pointer of type atomic_t
15419@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15420 */
15421 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15422 {
15423- int c, old;
15424+ int c, old, new;
15425 c = atomic_read(v);
15426 for (;;) {
15427- if (unlikely(c == (u)))
15428+ if (unlikely(c == u))
15429 break;
15430- old = atomic_cmpxchg((v), c, c + (a));
15431+
15432+ asm volatile("addl %2,%0\n"
15433+
15434+#ifdef CONFIG_PAX_REFCOUNT
15435+ "jno 0f\n"
15436+ "subl %2,%0\n"
15437+ "int $4\n0:\n"
15438+ _ASM_EXTABLE(0b, 0b)
15439+#endif
15440+
15441+ : "=r" (new)
15442+ : "0" (c), "ir" (a));
15443+
15444+ old = atomic_cmpxchg(v, c, new);
15445 if (likely(old == c))
15446 break;
15447 c = old;
15448@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15449 }
15450
15451 /**
15452+ * atomic_inc_not_zero_hint - increment if not null
15453+ * @v: pointer of type atomic_t
15454+ * @hint: probable value of the atomic before the increment
15455+ *
15456+ * This version of atomic_inc_not_zero() gives a hint of the probable
15457+ * value of the atomic. This helps the processor avoid reading the memory
15458+ * before doing the atomic read/modify/write cycle, lowering the
15459+ * number of bus transactions on some arches.
15460+ *
15461+ * Returns: 0 if increment was not done, 1 otherwise.
15462+ */
15463+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15464+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15465+{
15466+ int val, c = hint, new;
15467+
15468+ /* sanity test, should be removed by compiler if hint is a constant */
15469+ if (!hint)
15470+ return __atomic_add_unless(v, 1, 0);
15471+
15472+ do {
15473+ asm volatile("incl %0\n"
15474+
15475+#ifdef CONFIG_PAX_REFCOUNT
15476+ "jno 0f\n"
15477+ "decl %0\n"
15478+ "int $4\n0:\n"
15479+ _ASM_EXTABLE(0b, 0b)
15480+#endif
15481+
15482+ : "=r" (new)
15483+ : "0" (c));
15484+
15485+ val = atomic_cmpxchg(v, c, new);
15486+ if (val == c)
15487+ return 1;
15488+ c = val;
15489+ } while (c);
15490+
15491+ return 0;
15492+}
15493+
15494+/**
15495 * atomic_inc_short - increment of a short integer
15496 * @v: pointer to type int
15497 *
15498@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15499 }
15500
15501 /* These are x86-specific, used by some header files */
15502-#define atomic_clear_mask(mask, addr) \
15503- asm volatile(LOCK_PREFIX "andl %0,%1" \
15504- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15505+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15506+{
15507+ asm volatile(LOCK_PREFIX "andl %1,%0"
15508+ : "+m" (v->counter)
15509+ : "r" (~(mask))
15510+ : "memory");
15511+}
15512
15513-#define atomic_set_mask(mask, addr) \
15514- asm volatile(LOCK_PREFIX "orl %0,%1" \
15515- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15516- : "memory")
15517+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15518+{
15519+ asm volatile(LOCK_PREFIX "andl %1,%0"
15520+ : "+m" (v->counter)
15521+ : "r" (~(mask))
15522+ : "memory");
15523+}
15524+
15525+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15526+{
15527+ asm volatile(LOCK_PREFIX "orl %1,%0"
15528+ : "+m" (v->counter)
15529+ : "r" (mask)
15530+ : "memory");
15531+}
15532+
15533+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15534+{
15535+ asm volatile(LOCK_PREFIX "orl %1,%0"
15536+ : "+m" (v->counter)
15537+ : "r" (mask)
15538+ : "memory");
15539+}
15540
15541 #ifdef CONFIG_X86_32
15542 # include <asm/atomic64_32.h>
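
Every PAX_REFCOUNT site above follows the same three-step pattern: perform the locked operation, jno over the recovery when no signed overflow occurred, otherwise undo the operation and execute int $4 so the kernel's overflow handler can report and contain the offender; the _ASM_EXTABLE(0b, 0b) entry makes the trapping instruction resumable. A user-space C model of the semantics, assuming sizeof(long long) > sizeof(int); the kernel does this atomically in asm and raises #OF instead of printing:

#include <limits.h>
#include <stdio.h>

/* model of the checked add: on signed overflow the counter keeps
 * its pre-add value, matching the asm's undo-then-trap sequence */
static int checked_add(int *counter, int i)
{
        long long sum = (long long)*counter + i;        /* widen so overflow is visible */

        if (sum > INT_MAX || sum < INT_MIN) {
                fprintf(stderr, "refcount overflow on %p\n", (void *)counter);
                return -1;
        }
        *counter = (int)sum;
        return 0;
}
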
15543diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15544index b154de7..bf18a5a 100644
15545--- a/arch/x86/include/asm/atomic64_32.h
15546+++ b/arch/x86/include/asm/atomic64_32.h
15547@@ -12,6 +12,14 @@ typedef struct {
15548 u64 __aligned(8) counter;
15549 } atomic64_t;
15550
15551+#ifdef CONFIG_PAX_REFCOUNT
15552+typedef struct {
15553+ u64 __aligned(8) counter;
15554+} atomic64_unchecked_t;
15555+#else
15556+typedef atomic64_t atomic64_unchecked_t;
15557+#endif
15558+
15559 #define ATOMIC64_INIT(val) { (val) }
15560
15561 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15562@@ -37,21 +45,31 @@ typedef struct {
15563 ATOMIC64_DECL_ONE(sym##_386)
15564
15565 ATOMIC64_DECL_ONE(add_386);
15566+ATOMIC64_DECL_ONE(add_unchecked_386);
15567 ATOMIC64_DECL_ONE(sub_386);
15568+ATOMIC64_DECL_ONE(sub_unchecked_386);
15569 ATOMIC64_DECL_ONE(inc_386);
15570+ATOMIC64_DECL_ONE(inc_unchecked_386);
15571 ATOMIC64_DECL_ONE(dec_386);
15572+ATOMIC64_DECL_ONE(dec_unchecked_386);
15573 #endif
15574
15575 #define alternative_atomic64(f, out, in...) \
15576 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15577
15578 ATOMIC64_DECL(read);
15579+ATOMIC64_DECL(read_unchecked);
15580 ATOMIC64_DECL(set);
15581+ATOMIC64_DECL(set_unchecked);
15582 ATOMIC64_DECL(xchg);
15583 ATOMIC64_DECL(add_return);
15584+ATOMIC64_DECL(add_return_unchecked);
15585 ATOMIC64_DECL(sub_return);
15586+ATOMIC64_DECL(sub_return_unchecked);
15587 ATOMIC64_DECL(inc_return);
15588+ATOMIC64_DECL(inc_return_unchecked);
15589 ATOMIC64_DECL(dec_return);
15590+ATOMIC64_DECL(dec_return_unchecked);
15591 ATOMIC64_DECL(dec_if_positive);
15592 ATOMIC64_DECL(inc_not_zero);
15593 ATOMIC64_DECL(add_unless);
15594@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15595 }
15596
15597 /**
15598+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15599+ * @v: pointer to type atomic64_unchecked_t
15600+ * @o: expected value
15601+ * @n: new value
15602+ *
15603+ * Atomically sets @v to @n if it was equal to @o and returns
15604+ * the old value.
15605+ */
15606+
15607+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15608+{
15609+ return cmpxchg64(&v->counter, o, n);
15610+}
15611+
15612+/**
15613 * atomic64_xchg - xchg atomic64 variable
15614 * @v: pointer to type atomic64_t
15615 * @n: value to assign
15616@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15617 }
15618
15619 /**
15620+ * atomic64_set_unchecked - set atomic64 variable
15621+ * @v: pointer to type atomic64_unchecked_t
15622+ * @i: value to assign
15623+ *
15624+ * Atomically sets the value of @v to @i.
15625+ */
15626+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15627+{
15628+ unsigned high = (unsigned)(i >> 32);
15629+ unsigned low = (unsigned)i;
15630+ alternative_atomic64(set, /* no output */,
15631+ "S" (v), "b" (low), "c" (high)
15632+ : "eax", "edx", "memory");
15633+}
15634+
15635+/**
15636 * atomic64_read - read atomic64 variable
15637 * @v: pointer to type atomic64_t
15638 *
15639@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15640 }
15641
15642 /**
15643+ * atomic64_read_unchecked - read atomic64 variable
15644+ * @v: pointer to type atomic64_unchecked_t
15645+ *
15646+ * Atomically reads the value of @v and returns it.
15647+ */
15648+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15649+{
15650+ long long r;
15651+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15652+ return r;
15653+}
15654+
15655+/**
15656 * atomic64_add_return - add and return
15657 * @i: integer value to add
15658 * @v: pointer to type atomic64_t
15659@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15660 return i;
15661 }
15662
15663+/**
15664+ * atomic64_add_return_unchecked - add and return
15665+ * @i: integer value to add
15666+ * @v: pointer to type atomic64_unchecked_t
15667+ *
15668+ * Atomically adds @i to @v and returns @i + *@v
15669+ */
15670+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15671+{
15672+ alternative_atomic64(add_return_unchecked,
15673+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15674+ ASM_NO_INPUT_CLOBBER("memory"));
15675+ return i;
15676+}
15677+
15678 /*
15679 * Other variants with different arithmetic operators:
15680 */
15681@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15682 return a;
15683 }
15684
15685+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15686+{
15687+ long long a;
15688+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15689+ "S" (v) : "memory", "ecx");
15690+ return a;
15691+}
15692+
15693 static inline long long atomic64_dec_return(atomic64_t *v)
15694 {
15695 long long a;
15696@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15697 }
15698
15699 /**
15700+ * atomic64_add_unchecked - add integer to atomic64 variable
15701+ * @i: integer value to add
15702+ * @v: pointer to type atomic64_unchecked_t
15703+ *
15704+ * Atomically adds @i to @v.
15705+ */
15706+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15707+{
15708+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15709+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15710+ ASM_NO_INPUT_CLOBBER("memory"));
15711+ return i;
15712+}
15713+
15714+/**
15715 * atomic64_sub - subtract the atomic64 variable
15716 * @i: integer value to subtract
15717 * @v: pointer to type atomic64_t
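
The _unchecked API exists for the minority of counters where wraparound is intended or harmless (statistics, cookies, sequence numbers) and must not trip the refcount trap; everything else keeps the checked type. Usage is symmetrical with the plain API; a hedged sketch with invented names:

/* a pure statistics counter: wraparound is acceptable, so the
 * unchecked type opts it out of PAX_REFCOUNT instrumentation */
static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(unsigned int len)
{
        atomic64_add_unchecked(len, &rx_bytes);
}
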
15718diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15719index f8d273e..02f39f3 100644
15720--- a/arch/x86/include/asm/atomic64_64.h
15721+++ b/arch/x86/include/asm/atomic64_64.h
15722@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15723 }
15724
15725 /**
15726+ * atomic64_read_unchecked - read atomic64 variable
15727+ * @v: pointer of type atomic64_unchecked_t
15728+ *
15729+ * Atomically reads the value of @v.
15730+ * Doesn't imply a read memory barrier.
15731+ */
15732+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15733+{
15734+ return ACCESS_ONCE((v)->counter);
15735+}
15736+
15737+/**
15738 * atomic64_set - set atomic64 variable
15739 * @v: pointer to type atomic64_t
15740 * @i: required value
15741@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15742 }
15743
15744 /**
15745+ * atomic64_set_unchecked - set atomic64 variable
15746+ * @v: pointer to type atomic64_unchecked_t
15747+ * @i: required value
15748+ *
15749+ * Atomically sets the value of @v to @i.
15750+ */
15751+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15752+{
15753+ v->counter = i;
15754+}
15755+
15756+/**
15757 * atomic64_add - add integer to atomic64 variable
15758 * @i: integer value to add
15759 * @v: pointer to type atomic64_t
15760@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15761 */
15762 static inline void atomic64_add(long i, atomic64_t *v)
15763 {
15764+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15765+
15766+#ifdef CONFIG_PAX_REFCOUNT
15767+ "jno 0f\n"
15768+ LOCK_PREFIX "subq %1,%0\n"
15769+ "int $4\n0:\n"
15770+ _ASM_EXTABLE(0b, 0b)
15771+#endif
15772+
15773+ : "=m" (v->counter)
15774+ : "er" (i), "m" (v->counter));
15775+}
15776+
15777+/**
15778+ * atomic64_add_unchecked - add integer to atomic64 variable
15779+ * @i: integer value to add
15780+ * @v: pointer to type atomic64_unchecked_t
15781+ *
15782+ * Atomically adds @i to @v.
15783+ */
15784+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15785+{
15786 asm volatile(LOCK_PREFIX "addq %1,%0"
15787 : "=m" (v->counter)
15788 : "er" (i), "m" (v->counter));
15789@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15790 */
15791 static inline void atomic64_sub(long i, atomic64_t *v)
15792 {
15793- asm volatile(LOCK_PREFIX "subq %1,%0"
15794+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15795+
15796+#ifdef CONFIG_PAX_REFCOUNT
15797+ "jno 0f\n"
15798+ LOCK_PREFIX "addq %1,%0\n"
15799+ "int $4\n0:\n"
15800+ _ASM_EXTABLE(0b, 0b)
15801+#endif
15802+
15803+ : "=m" (v->counter)
15804+ : "er" (i), "m" (v->counter));
15805+}
15806+
15807+/**
15808+ * atomic64_sub_unchecked - subtract the atomic64 variable
15809+ * @i: integer value to subtract
15810+ * @v: pointer to type atomic64_unchecked_t
15811+ *
15812+ * Atomically subtracts @i from @v.
15813+ */
15814+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15815+{
15816+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15817 : "=m" (v->counter)
15818 : "er" (i), "m" (v->counter));
15819 }
15820@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15821 */
15822 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15823 {
15824- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15825+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15826 }
15827
15828 /**
15829@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15830 */
15831 static inline void atomic64_inc(atomic64_t *v)
15832 {
15833+ asm volatile(LOCK_PREFIX "incq %0\n"
15834+
15835+#ifdef CONFIG_PAX_REFCOUNT
15836+ "jno 0f\n"
15837+ LOCK_PREFIX "decq %0\n"
15838+ "int $4\n0:\n"
15839+ _ASM_EXTABLE(0b, 0b)
15840+#endif
15841+
15842+ : "=m" (v->counter)
15843+ : "m" (v->counter));
15844+}
15845+
15846+/**
15847+ * atomic64_inc_unchecked - increment atomic64 variable
15848+ * @v: pointer to type atomic64_unchecked_t
15849+ *
15850+ * Atomically increments @v by 1.
15851+ */
15852+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15853+{
15854 asm volatile(LOCK_PREFIX "incq %0"
15855 : "=m" (v->counter)
15856 : "m" (v->counter));
15857@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15858 */
15859 static inline void atomic64_dec(atomic64_t *v)
15860 {
15861- asm volatile(LOCK_PREFIX "decq %0"
15862+ asm volatile(LOCK_PREFIX "decq %0\n"
15863+
15864+#ifdef CONFIG_PAX_REFCOUNT
15865+ "jno 0f\n"
15866+ LOCK_PREFIX "incq %0\n"
15867+ "int $4\n0:\n"
15868+ _ASM_EXTABLE(0b, 0b)
15869+#endif
15870+
15871+ : "=m" (v->counter)
15872+ : "m" (v->counter));
15873+}
15874+
15875+/**
15876+ * atomic64_dec_unchecked - decrement atomic64 variable
15877+ * @v: pointer to type atomic64_t
15878+ *
15879+ * Atomically decrements @v by 1.
15880+ */
15881+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15882+{
15883+ asm volatile(LOCK_PREFIX "decq %0\n"
15884 : "=m" (v->counter)
15885 : "m" (v->counter));
15886 }
15887@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15888 */
15889 static inline int atomic64_dec_and_test(atomic64_t *v)
15890 {
15891- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15892+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15893 }
15894
15895 /**
15896@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15897 */
15898 static inline int atomic64_inc_and_test(atomic64_t *v)
15899 {
15900- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15901+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15902 }
15903
15904 /**
15905@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15906 */
15907 static inline int atomic64_add_negative(long i, atomic64_t *v)
15908 {
15909- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15910+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15911 }
15912
15913 /**
15914@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15915 */
15916 static inline long atomic64_add_return(long i, atomic64_t *v)
15917 {
15918+ return i + xadd_check_overflow(&v->counter, i);
15919+}
15920+
15921+/**
15922+ * atomic64_add_return_unchecked - add and return
15923+ * @i: integer value to add
15924+ * @v: pointer to type atomic64_unchecked_t
15925+ *
15926+ * Atomically adds @i to @v and returns @i + @v
15927+ */
15928+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15929+{
15930 return i + xadd(&v->counter, i);
15931 }
15932
15933@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15934 }
15935
15936 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15937+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15938+{
15939+ return atomic64_add_return_unchecked(1, v);
15940+}
15941 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15942
15943 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15944@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15945 return cmpxchg(&v->counter, old, new);
15946 }
15947
15948+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15949+{
15950+ return cmpxchg(&v->counter, old, new);
15951+}
15952+
15953 static inline long atomic64_xchg(atomic64_t *v, long new)
15954 {
15955 return xchg(&v->counter, new);
15956@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15957 */
15958 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15959 {
15960- long c, old;
15961+ long c, old, new;
15962 c = atomic64_read(v);
15963 for (;;) {
15964- if (unlikely(c == (u)))
15965+ if (unlikely(c == u))
15966 break;
15967- old = atomic64_cmpxchg((v), c, c + (a));
15968+
15969+ asm volatile("add %2,%0\n"
15970+
15971+#ifdef CONFIG_PAX_REFCOUNT
15972+ "jno 0f\n"
15973+ "sub %2,%0\n"
15974+ "int $4\n0:\n"
15975+ _ASM_EXTABLE(0b, 0b)
15976+#endif
15977+
15978+ : "=r" (new)
15979+ : "0" (c), "ir" (a));
15980+
15981+ old = atomic64_cmpxchg(v, c, new);
15982 if (likely(old == c))
15983 break;
15984 c = old;
15985 }
15986- return c != (u);
15987+ return c != u;
15988 }
15989
15990 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
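
atomic64_add_unless keeps upstream's cmpxchg retry loop but computes the candidate value through the checked add, so an overflowing @a is caught even on this slow path. The control flow of the loop as a portable model using GCC's __atomic builtins (shape only; the kernel's version embeds the jno/int $4 check inline):

#include <stdbool.h>

/* returns true iff *v was changed, i.e. its old value was not u */
static bool model_add_unless(long *v, long a, long u)
{
        long c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
                if (c == u)
                        return false;
                /* weak CAS: on failure, c is refreshed with the current value */
                if (__atomic_compare_exchange_n(v, &c, c + a, true,
                                                __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                        return true;
        }
}
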
15991diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
15992index 2ab1eb3..1e8cc5d 100644
15993--- a/arch/x86/include/asm/barrier.h
15994+++ b/arch/x86/include/asm/barrier.h
15995@@ -57,7 +57,7 @@
15996 do { \
15997 compiletime_assert_atomic_type(*p); \
15998 smp_mb(); \
15999- ACCESS_ONCE(*p) = (v); \
16000+ ACCESS_ONCE_RW(*p) = (v); \
16001 } while (0)
16002
16003 #define smp_load_acquire(p) \
16004@@ -74,7 +74,7 @@ do { \
16005 do { \
16006 compiletime_assert_atomic_type(*p); \
16007 barrier(); \
16008- ACCESS_ONCE(*p) = (v); \
16009+ ACCESS_ONCE_RW(*p) = (v); \
16010 } while (0)
16011
16012 #define smp_load_acquire(p) \
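
ACCESS_ONCE_RW exists because grsecurity re-defines ACCESS_ONCE with a const-qualified cast, letting the constify plugin treat plain uses as read-only; store sites then fail to compile until they are switched to the _RW spelling, as done here for the release stores. The pair is assumed to follow this convention (a sketch, not a quote from the patch):

/* reads: the const qualifier keeps writes from type-checking */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))

/* writes: the same volatile access without the const */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
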
16013diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16014index cfe3b95..d01b118 100644
16015--- a/arch/x86/include/asm/bitops.h
16016+++ b/arch/x86/include/asm/bitops.h
16017@@ -50,7 +50,7 @@
16018 * a mask operation on a byte.
16019 */
16020 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16021-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16022+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16023 #define CONST_MASK(nr) (1 << ((nr) & 7))
16024
16025 /**
16026@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16027 */
16028 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16029 {
16030- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16031+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16032 }
16033
16034 /**
16035@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16036 */
16037 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16038 {
16039- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16040+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16041 }
16042
16043 /**
16044@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16045 */
16046 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16047 {
16048- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16049+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16050 }
16051
16052 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16053@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16054 *
16055 * Undefined if no bit exists, so code should check against 0 first.
16056 */
16057-static inline unsigned long __ffs(unsigned long word)
16058+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16059 {
16060 asm("rep; bsf %1,%0"
16061 : "=r" (word)
16062@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16063 *
16064 * Undefined if no zero exists, so code should check against ~0UL first.
16065 */
16066-static inline unsigned long ffz(unsigned long word)
16067+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16068 {
16069 asm("rep; bsf %1,%0"
16070 : "=r" (word)
16071@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16072 *
16073 * Undefined if no set bit exists, so code should check against 0 first.
16074 */
16075-static inline unsigned long __fls(unsigned long word)
16076+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16077 {
16078 asm("bsr %1,%0"
16079 : "=r" (word)
16080@@ -434,7 +434,7 @@ static inline int ffs(int x)
16081 * set bit if value is nonzero. The last (most significant) bit is
16082 * at position 32.
16083 */
16084-static inline int fls(int x)
16085+static inline int __intentional_overflow(-1) fls(int x)
16086 {
16087 int r;
16088
16089@@ -476,7 +476,7 @@ static inline int fls(int x)
16090 * at position 64.
16091 */
16092 #ifdef CONFIG_X86_64
16093-static __always_inline int fls64(__u64 x)
16094+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16095 {
16096 int bitpos = -1;
16097 /*
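
__intentional_overflow(-1) is a marker consumed by the size_overflow gcc plugin: the argument -1 designates the return value as deliberately allowed to wrap, so callers of these bit-scanning helpers are not instrumented. When the plugin is not built the marker has to expand to nothing; plausible wiring, hedged since the real definition lives in the compiler headers elsewhere in this patch:

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif
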
16098diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16099index 4fa687a..60f2d39 100644
16100--- a/arch/x86/include/asm/boot.h
16101+++ b/arch/x86/include/asm/boot.h
16102@@ -6,10 +6,15 @@
16103 #include <uapi/asm/boot.h>
16104
16105 /* Physical address where kernel should be loaded. */
16106-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16107+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16108 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16109 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16110
16111+#ifndef __ASSEMBLY__
16112+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16113+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16114+#endif
16115+
16116 /* Minimum kernel alignment, as a power of two */
16117 #ifdef CONFIG_X86_64
16118 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16119diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16120index 48f99f1..d78ebf9 100644
16121--- a/arch/x86/include/asm/cache.h
16122+++ b/arch/x86/include/asm/cache.h
16123@@ -5,12 +5,13 @@
16124
16125 /* L1 cache line size */
16126 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16127-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16128+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16129
16130 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16131+#define __read_only __attribute__((__section__(".data..read_only")))
16132
16133 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16134-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16135+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16136
16137 #ifdef CONFIG_X86_VSMP
16138 #ifdef CONFIG_SMP
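
Objects annotated __read_only land in .data..read_only, which KERNEXEC maps read-only once boot is complete; the rare legitimate writes are bracketed with pax_open_kernel()/pax_close_kernel(), the same pattern the desc.h changes below apply to GDT/IDT updates. Typical shape (a sketch; the open/close mechanism, e.g. toggling CR0.WP, is an implementation detail assumed here):

static int sysctl_enabled __read_only = 1;      /* lives in .data..read_only */

static void set_enabled(int val)
{
        pax_open_kernel();      /* temporarily permit kernel writes to RO data */
        sysctl_enabled = val;
        pax_close_kernel();
}
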
16139diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16140index 1f1297b..72b8439 100644
16141--- a/arch/x86/include/asm/calling.h
16142+++ b/arch/x86/include/asm/calling.h
16143@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16144 #define RSP 152
16145 #define SS 160
16146
16147-#define ARGOFFSET R11
16148+#define ARGOFFSET R15
16149
16150 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16151- subq $9*8+\addskip, %rsp
16152- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16153- movq_cfi rdi, 8*8
16154- movq_cfi rsi, 7*8
16155- movq_cfi rdx, 6*8
16156+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16157+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16158+ movq_cfi rdi, RDI
16159+ movq_cfi rsi, RSI
16160+ movq_cfi rdx, RDX
16161
16162 .if \save_rcx
16163- movq_cfi rcx, 5*8
16164+ movq_cfi rcx, RCX
16165 .endif
16166
16167 .if \rax_enosys
16168- movq $-ENOSYS, 4*8(%rsp)
16169+ movq $-ENOSYS, RAX(%rsp)
16170 .else
16171- movq_cfi rax, 4*8
16172+ movq_cfi rax, RAX
16173 .endif
16174
16175 .if \save_r891011
16176- movq_cfi r8, 3*8
16177- movq_cfi r9, 2*8
16178- movq_cfi r10, 1*8
16179- movq_cfi r11, 0*8
16180+ movq_cfi r8, R8
16181+ movq_cfi r9, R9
16182+ movq_cfi r10, R10
16183+ movq_cfi r11, R11
16184 .endif
16185
16186+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16187+ movq_cfi r12, R12
16188+#endif
16189+
16190 .endm
16191
16192-#define ARG_SKIP (9*8)
16193+#define ARG_SKIP ORIG_RAX
16194
16195 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16196 rstor_r8910=1, rstor_rdx=1
16197+
16198+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16199+ movq_cfi_restore R12, r12
16200+#endif
16201+
16202 .if \rstor_r11
16203- movq_cfi_restore 0*8, r11
16204+ movq_cfi_restore R11, r11
16205 .endif
16206
16207 .if \rstor_r8910
16208- movq_cfi_restore 1*8, r10
16209- movq_cfi_restore 2*8, r9
16210- movq_cfi_restore 3*8, r8
16211+ movq_cfi_restore R10, r10
16212+ movq_cfi_restore R9, r9
16213+ movq_cfi_restore R8, r8
16214 .endif
16215
16216 .if \rstor_rax
16217- movq_cfi_restore 4*8, rax
16218+ movq_cfi_restore RAX, rax
16219 .endif
16220
16221 .if \rstor_rcx
16222- movq_cfi_restore 5*8, rcx
16223+ movq_cfi_restore RCX, rcx
16224 .endif
16225
16226 .if \rstor_rdx
16227- movq_cfi_restore 6*8, rdx
16228+ movq_cfi_restore RDX, rdx
16229 .endif
16230
16231- movq_cfi_restore 7*8, rsi
16232- movq_cfi_restore 8*8, rdi
16233+ movq_cfi_restore RSI, rsi
16234+ movq_cfi_restore RDI, rdi
16235
16236- .if ARG_SKIP+\addskip > 0
16237- addq $ARG_SKIP+\addskip, %rsp
16238- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16239+ .if ORIG_RAX+\addskip > 0
16240+ addq $ORIG_RAX+\addskip, %rsp
16241+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16242 .endif
16243 .endm
16244
16245- .macro LOAD_ARGS offset, skiprax=0
16246- movq \offset(%rsp), %r11
16247- movq \offset+8(%rsp), %r10
16248- movq \offset+16(%rsp), %r9
16249- movq \offset+24(%rsp), %r8
16250- movq \offset+40(%rsp), %rcx
16251- movq \offset+48(%rsp), %rdx
16252- movq \offset+56(%rsp), %rsi
16253- movq \offset+64(%rsp), %rdi
16254+ .macro LOAD_ARGS skiprax=0
16255+ movq R11(%rsp), %r11
16256+ movq R10(%rsp), %r10
16257+ movq R9(%rsp), %r9
16258+ movq R8(%rsp), %r8
16259+ movq RCX(%rsp), %rcx
16260+ movq RDX(%rsp), %rdx
16261+ movq RSI(%rsp), %rsi
16262+ movq RDI(%rsp), %rdi
16263 .if \skiprax
16264 .else
16265- movq \offset+72(%rsp), %rax
16266+ movq ORIG_RAX(%rsp), %rax
16267 .endif
16268 .endm
16269
16270-#define REST_SKIP (6*8)
16271-
16272 .macro SAVE_REST
16273- subq $REST_SKIP, %rsp
16274- CFI_ADJUST_CFA_OFFSET REST_SKIP
16275- movq_cfi rbx, 5*8
16276- movq_cfi rbp, 4*8
16277- movq_cfi r12, 3*8
16278- movq_cfi r13, 2*8
16279- movq_cfi r14, 1*8
16280- movq_cfi r15, 0*8
16281+ movq_cfi rbx, RBX
16282+ movq_cfi rbp, RBP
16283+
16284+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16285+ movq_cfi r12, R12
16286+#endif
16287+
16288+ movq_cfi r13, R13
16289+ movq_cfi r14, R14
16290+ movq_cfi r15, R15
16291 .endm
16292
16293 .macro RESTORE_REST
16294- movq_cfi_restore 0*8, r15
16295- movq_cfi_restore 1*8, r14
16296- movq_cfi_restore 2*8, r13
16297- movq_cfi_restore 3*8, r12
16298- movq_cfi_restore 4*8, rbp
16299- movq_cfi_restore 5*8, rbx
16300- addq $REST_SKIP, %rsp
16301- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16302+ movq_cfi_restore R15, r15
16303+ movq_cfi_restore R14, r14
16304+ movq_cfi_restore R13, r13
16305+
16306+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16307+ movq_cfi_restore R12, r12
16308+#endif
16309+
16310+ movq_cfi_restore RBP, rbp
16311+ movq_cfi_restore RBX, rbx
16312 .endm
16313
16314 .macro SAVE_ALL
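
The symbolic offsets the macros now use (RDI through R11, ORIG_RAX, RIP) are byte offsets into struct pt_regs, so SAVE_ARGS and SAVE_REST populate one canonical frame instead of two ad-hoc layouts, and the R12 slot can be claimed by the KERNEXEC OR method without shifting anything else. For orientation, the layout those constants describe, consistent with the RSP 152 / SS 160 context above:

/* x86_64 struct pt_regs of this era, offsets in bytes */
struct pt_regs_layout {
        unsigned long r15, r14, r13, r12, rbp, rbx;     /*   0 ..  40 */
        unsigned long r11, r10, r9, r8;                 /*  48 ..  72 */
        unsigned long rax, rcx, rdx, rsi, rdi;          /*  80 .. 112 */
        unsigned long orig_rax;                         /* 120 */
        unsigned long rip, cs, eflags, rsp, ss;         /* 128 .. 160 */
};
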
16315diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16316index f50de69..2b0a458 100644
16317--- a/arch/x86/include/asm/checksum_32.h
16318+++ b/arch/x86/include/asm/checksum_32.h
16319@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16320 int len, __wsum sum,
16321 int *src_err_ptr, int *dst_err_ptr);
16322
16323+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16324+ int len, __wsum sum,
16325+ int *src_err_ptr, int *dst_err_ptr);
16326+
16327+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16328+ int len, __wsum sum,
16329+ int *src_err_ptr, int *dst_err_ptr);
16330+
16331 /*
16332 * Note: when you get a NULL pointer exception here this means someone
16333 * passed in an incorrect kernel address to one of these functions.
16334@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16335
16336 might_sleep();
16337 stac();
16338- ret = csum_partial_copy_generic((__force void *)src, dst,
16339+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16340 len, sum, err_ptr, NULL);
16341 clac();
16342
16343@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16344 might_sleep();
16345 if (access_ok(VERIFY_WRITE, dst, len)) {
16346 stac();
16347- ret = csum_partial_copy_generic(src, (__force void *)dst,
16348+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16349 len, sum, NULL, err_ptr);
16350 clac();
16351 return ret;
16352diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16353index 99c105d7..2f667ac 100644
16354--- a/arch/x86/include/asm/cmpxchg.h
16355+++ b/arch/x86/include/asm/cmpxchg.h
16356@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16357 __compiletime_error("Bad argument size for cmpxchg");
16358 extern void __xadd_wrong_size(void)
16359 __compiletime_error("Bad argument size for xadd");
16360+extern void __xadd_check_overflow_wrong_size(void)
16361+ __compiletime_error("Bad argument size for xadd_check_overflow");
16362 extern void __add_wrong_size(void)
16363 __compiletime_error("Bad argument size for add");
16364+extern void __add_check_overflow_wrong_size(void)
16365+ __compiletime_error("Bad argument size for add_check_overflow");
16366
16367 /*
16368 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16369@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16370 __ret; \
16371 })
16372
16373+#ifdef CONFIG_PAX_REFCOUNT
16374+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16375+ ({ \
16376+ __typeof__ (*(ptr)) __ret = (arg); \
16377+ switch (sizeof(*(ptr))) { \
16378+ case __X86_CASE_L: \
16379+ asm volatile (lock #op "l %0, %1\n" \
16380+ "jno 0f\n" \
16381+ "mov %0,%1\n" \
16382+ "int $4\n0:\n" \
16383+ _ASM_EXTABLE(0b, 0b) \
16384+ : "+r" (__ret), "+m" (*(ptr)) \
16385+ : : "memory", "cc"); \
16386+ break; \
16387+ case __X86_CASE_Q: \
16388+ asm volatile (lock #op "q %q0, %1\n" \
16389+ "jno 0f\n" \
16390+ "mov %0,%1\n" \
16391+ "int $4\n0:\n" \
16392+ _ASM_EXTABLE(0b, 0b) \
16393+ : "+r" (__ret), "+m" (*(ptr)) \
16394+ : : "memory", "cc"); \
16395+ break; \
16396+ default: \
16397+ __ ## op ## _check_overflow_wrong_size(); \
16398+ } \
16399+ __ret; \
16400+ })
16401+#else
16402+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16403+#endif
16404+
16405 /*
16406 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16407 * Since this is generally used to protect other memory information, we
16408@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16409 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16410 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16411
16412+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16413+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16414+
16415 #define __add(ptr, inc, lock) \
16416 ({ \
16417 __typeof__ (*(ptr)) __ret = (inc); \
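
xadd_check_overflow is the fetch-and-add member of the same family: a locked xadd, then jno; on overflow the recovery mov %0,%1 writes the pre-add value (which xadd left in the register) back over the memory operand before int $4 fires. Its intended consumer style is visible in atomic_add_return earlier in this patch:

/* checked fetch-and-add: returns the post-add value, traps on
 * signed overflow instead of wrapping */
static inline int checked_add_return(int i, atomic_t *v)
{
        return i + xadd_check_overflow(&v->counter, i);
}
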
16418diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16419index 59c6c40..5e0b22c 100644
16420--- a/arch/x86/include/asm/compat.h
16421+++ b/arch/x86/include/asm/compat.h
16422@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16423 typedef u32 compat_uint_t;
16424 typedef u32 compat_ulong_t;
16425 typedef u64 __attribute__((aligned(4))) compat_u64;
16426-typedef u32 compat_uptr_t;
16427+typedef u32 __user compat_uptr_t;
16428
16429 struct compat_timespec {
16430 compat_time_t tv_sec;
16431diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16432index 90a5485..43b6211 100644
16433--- a/arch/x86/include/asm/cpufeature.h
16434+++ b/arch/x86/include/asm/cpufeature.h
16435@@ -213,7 +213,7 @@
16436 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16437 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16438 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16439-
16440+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16441
16442 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16443 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16444@@ -221,7 +221,7 @@
16445 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16446 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16447 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16448-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16449+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16450 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16451 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16452 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16453@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16454 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16455 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16456 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16457+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16458
16459 #if __GNUC__ >= 4
16460 extern void warn_pre_alternatives(void);
16461@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16462
16463 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16464 t_warn:
16465- warn_pre_alternatives();
16466+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16467+ warn_pre_alternatives();
16468 return false;
16469 #endif
16470
16471@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16472 ".section .discard,\"aw\",@progbits\n"
16473 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16474 ".previous\n"
16475- ".section .altinstr_replacement,\"ax\"\n"
16476+ ".section .altinstr_replacement,\"a\"\n"
16477 "3: movb $1,%0\n"
16478 "4:\n"
16479 ".previous\n"
16480@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16481 " .byte 2b - 1b\n" /* src len */
16482 " .byte 4f - 3f\n" /* repl len */
16483 ".previous\n"
16484- ".section .altinstr_replacement,\"ax\"\n"
16485+ ".section .altinstr_replacement,\"a\"\n"
16486 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16487 "4:\n"
16488 ".previous\n"
16489@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16490 ".section .discard,\"aw\",@progbits\n"
16491 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16492 ".previous\n"
16493- ".section .altinstr_replacement,\"ax\"\n"
16494+ ".section .altinstr_replacement,\"a\"\n"
16495 "3: movb $0,%0\n"
16496 "4:\n"
16497 ".previous\n"
16498@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16499 ".section .discard,\"aw\",@progbits\n"
16500 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16501 ".previous\n"
16502- ".section .altinstr_replacement,\"ax\"\n"
16503+ ".section .altinstr_replacement,\"a\"\n"
16504 "5: movb $1,%0\n"
16505 "6:\n"
16506 ".previous\n"
16507diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16508index a94b82e..59ecefa 100644
16509--- a/arch/x86/include/asm/desc.h
16510+++ b/arch/x86/include/asm/desc.h
16511@@ -4,6 +4,7 @@
16512 #include <asm/desc_defs.h>
16513 #include <asm/ldt.h>
16514 #include <asm/mmu.h>
16515+#include <asm/pgtable.h>
16516
16517 #include <linux/smp.h>
16518 #include <linux/percpu.h>
16519@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16520
16521 desc->type = (info->read_exec_only ^ 1) << 1;
16522 desc->type |= info->contents << 2;
16523+ desc->type |= info->seg_not_present ^ 1;
16524
16525 desc->s = 1;
16526 desc->dpl = 0x3;
16527@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16528 }
16529
16530 extern struct desc_ptr idt_descr;
16531-extern gate_desc idt_table[];
16532-extern struct desc_ptr debug_idt_descr;
16533-extern gate_desc debug_idt_table[];
16534-
16535-struct gdt_page {
16536- struct desc_struct gdt[GDT_ENTRIES];
16537-} __attribute__((aligned(PAGE_SIZE)));
16538-
16539-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16540+extern gate_desc idt_table[IDT_ENTRIES];
16541+extern const struct desc_ptr debug_idt_descr;
16542+extern gate_desc debug_idt_table[IDT_ENTRIES];
16543
16544+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16545 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16546 {
16547- return per_cpu(gdt_page, cpu).gdt;
16548+ return cpu_gdt_table[cpu];
16549 }
16550
16551 #ifdef CONFIG_X86_64
16552@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16553 unsigned long base, unsigned dpl, unsigned flags,
16554 unsigned short seg)
16555 {
16556- gate->a = (seg << 16) | (base & 0xffff);
16557- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16558+ gate->gate.offset_low = base;
16559+ gate->gate.seg = seg;
16560+ gate->gate.reserved = 0;
16561+ gate->gate.type = type;
16562+ gate->gate.s = 0;
16563+ gate->gate.dpl = dpl;
16564+ gate->gate.p = 1;
16565+ gate->gate.offset_high = base >> 16;
16566 }
16567
16568 #endif
16569@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16570
16571 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16572 {
16573+ pax_open_kernel();
16574 memcpy(&idt[entry], gate, sizeof(*gate));
16575+ pax_close_kernel();
16576 }
16577
16578 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16579 {
16580+ pax_open_kernel();
16581 memcpy(&ldt[entry], desc, 8);
16582+ pax_close_kernel();
16583 }
16584
16585 static inline void
16586@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16587 default: size = sizeof(*gdt); break;
16588 }
16589
16590+ pax_open_kernel();
16591 memcpy(&gdt[entry], desc, size);
16592+ pax_close_kernel();
16593 }
16594
16595 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16596@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16597
16598 static inline void native_load_tr_desc(void)
16599 {
16600+ pax_open_kernel();
16601 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16602+ pax_close_kernel();
16603 }
16604
16605 static inline void native_load_gdt(const struct desc_ptr *dtr)
16606@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16607 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16608 unsigned int i;
16609
16610+ pax_open_kernel();
16611 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16612 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16613+ pax_close_kernel();
16614 }
16615
16616 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16617@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16618 preempt_enable();
16619 }
16620
16621-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16622+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16623 {
16624 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16625 }
16626@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16627 }
16628
16629 #ifdef CONFIG_X86_64
16630-static inline void set_nmi_gate(int gate, void *addr)
16631+static inline void set_nmi_gate(int gate, const void *addr)
16632 {
16633 gate_desc s;
16634
16635@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16636 #endif
16637
16638 #ifdef CONFIG_TRACING
16639-extern struct desc_ptr trace_idt_descr;
16640-extern gate_desc trace_idt_table[];
16641+extern const struct desc_ptr trace_idt_descr;
16642+extern gate_desc trace_idt_table[IDT_ENTRIES];
16643 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16644 {
16645 write_idt_entry(trace_idt_table, entry, gate);
16646 }
16647
16648-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16649+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16650 unsigned dpl, unsigned ist, unsigned seg)
16651 {
16652 gate_desc s;
16653@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16654 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16655 #endif
16656
16657-static inline void _set_gate(int gate, unsigned type, void *addr,
16658+static inline void _set_gate(int gate, unsigned type, const void *addr,
16659 unsigned dpl, unsigned ist, unsigned seg)
16660 {
16661 gate_desc s;
16662@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16663 #define set_intr_gate(n, addr) \
16664 do { \
16665 BUG_ON((unsigned)n > 0xFF); \
16666- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16667+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16668 __KERNEL_CS); \
16669- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16670+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16671 0, 0, __KERNEL_CS); \
16672 } while (0)
16673
16674@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16675 /*
16676 * This routine sets up an interrupt gate at directory privilege level 3.
16677 */
16678-static inline void set_system_intr_gate(unsigned int n, void *addr)
16679+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16680 {
16681 BUG_ON((unsigned)n > 0xFF);
16682 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16683 }
16684
16685-static inline void set_system_trap_gate(unsigned int n, void *addr)
16686+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16687 {
16688 BUG_ON((unsigned)n > 0xFF);
16689 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16690 }
16691
16692-static inline void set_trap_gate(unsigned int n, void *addr)
16693+static inline void set_trap_gate(unsigned int n, const void *addr)
16694 {
16695 BUG_ON((unsigned)n > 0xFF);
16696 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16697@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16698 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16699 {
16700 BUG_ON((unsigned)n > 0xFF);
16701- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16702+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16703 }
16704
16705-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16706+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16707 {
16708 BUG_ON((unsigned)n > 0xFF);
16709 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16710 }
16711
16712-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16713+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16717@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16718 else
16719 load_idt((const struct desc_ptr *)&idt_descr);
16720 }
16721+
16722+#ifdef CONFIG_X86_32
16723+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16724+{
16725+ struct desc_struct d;
16726+
16727+ if (likely(limit))
16728+ limit = (limit - 1UL) >> PAGE_SHIFT;
16729+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16730+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16731+}
16732+#endif
16733+
16734 #endif /* _ASM_X86_DESC_H */
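The desc.h hunks above serve two PaX features: KERNEXEC makes the GDT read-only, so direct descriptor stores (the TLS copy loop, the write_gdt_entry() in the new set_user_cs()) must be bracketed by pax_open_kernel()/pax_close_kernel(), while SEGMEXEC/PAGEEXEC use set_user_cs() to re-pack the user code segment with a per-mm base and a page-granular limit. Below is a minimal user-space sketch of the descriptor packing that set_user_cs() relies on — the field layout follows the standard x86 segment descriptor format, with type 0xFB (present, DPL 3, code, read/execute, accessed), flags 0xC (4 KiB granularity, 32-bit), and an illustrative i386 TASK_SIZE of 0xc0000000; it is not kernel code.

#include <stdio.h>
#include <stdint.h>

struct desc { uint32_t a, b; };

/* modelled on the kernel's pack_descriptor() */
static void pack_descriptor(struct desc *d, uint32_t base, uint32_t limit,
                            uint8_t type, uint8_t flags)
{
    d->a = ((base & 0xffff) << 16) | (limit & 0xffff);
    d->b = (base & 0xff000000) | ((base & 0x00ff0000) >> 16) |
           (limit & 0x000f0000) | ((uint32_t)type << 8) |
           ((uint32_t)(flags & 0xf) << 20);
}

int main(void)
{
    /* set_user_cs() first converts the byte limit to 4 KiB pages */
    uint32_t limit_pages = (0xc0000000u - 1) >> 12;
    struct desc d;

    pack_descriptor(&d, 0, limit_pages, 0xFB, 0xC);
    printf("descriptor = %08x%08x\n", d.b, d.a);
    printf("DPL = %u, G = %u\n", (d.b >> 13) & 3, (d.b >> 23) & 1);
    return 0;
}

Running it shows DPL 3 and the granularity bit set — a flat user code segment whose limit can be shrunk per-mm, which is exactly what SEGMEXEC exploits to split the address space.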
16735diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16736index 278441f..b95a174 100644
16737--- a/arch/x86/include/asm/desc_defs.h
16738+++ b/arch/x86/include/asm/desc_defs.h
16739@@ -31,6 +31,12 @@ struct desc_struct {
16740 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16741 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16742 };
16743+ struct {
16744+ u16 offset_low;
16745+ u16 seg;
16746+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16747+ unsigned offset_high: 16;
16748+ } gate;
16749 };
16750 } __attribute__((packed));
16751
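The new anonymous struct gives desc_struct a typed view of gate descriptors — handler offset split across low and high words, segment selector, and the type/s/dpl/p control bits — so code can name the fields instead of shifting a/b by hand. A stand-alone illustration of how the bitfield view overlays the two 32-bit words (gcc/clang on little-endian assumed; bitfield layout is formally implementation-defined):

#include <stdio.h>
#include <stdint.h>

union desc {
    struct { uint32_t a, b; };
    struct {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
    } gate;
};

int main(void)
{
    union desc d = { .gate = {
        .offset_low  = 0x5678, .seg = 0x0010,  /* __KERNEL_CS-style selector */
        .type = 0xE, .p = 1,                   /* 32-bit interrupt gate, present */
        .offset_high = 0x1234,
    } };

    printf("a = %08x  b = %08x\n", d.a, d.b);  /* a = 00105678, b = 12348e00 */
    return 0;
}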
16752diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16753index ced283a..ffe04cc 100644
16754--- a/arch/x86/include/asm/div64.h
16755+++ b/arch/x86/include/asm/div64.h
16756@@ -39,7 +39,7 @@
16757 __mod; \
16758 })
16759
16760-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16761+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16762 {
16763 union {
16764 u64 v64;
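__intentional_overflow(-1) is an annotation consumed by the size_overflow gcc plugin: it marks div_u64_rem() as a function whose arithmetic may legitimately wrap, so the plugin does not instrument it. For reference, a user-space rendering of the 32-bit x86 div_u64_rem() the hunk annotates — the union trick plus a single divl, cheaper than a libgcc 64/32 divide; x86-only because of the inline asm, little-endian layout assumed:

#include <stdio.h>
#include <stdint.h>

static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
    union {
        uint64_t v64;
        uint32_t v32[2];   /* v32[1] is the high word (little-endian) */
    } d = { dividend };
    uint32_t upper = d.v32[1];

    d.v32[1] = 0;
    if (upper >= divisor) {
        d.v32[1] = upper / divisor;
        upper %= divisor;
    }
    /* edx:eax / divisor; 'upper' carries the running remainder */
    asm("divl %2" : "=a" (d.v32[0]), "=d" (*remainder)
                  : "rm" (divisor), "0" (d.v32[0]), "1" (upper));
    return d.v64;
}

int main(void)
{
    uint32_t rem;
    uint64_t q = div_u64_rem(1000000000000ULL, 7, &rem);

    printf("q = %llu, rem = %u\n", (unsigned long long)q, rem);  /* 142857142857, 1 */
    return 0;
}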
16765diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16766index ca3347a..1a5082a 100644
16767--- a/arch/x86/include/asm/elf.h
16768+++ b/arch/x86/include/asm/elf.h
16769@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16770
16771 #include <asm/vdso.h>
16772
16773-#ifdef CONFIG_X86_64
16774-extern unsigned int vdso64_enabled;
16775-#endif
16776 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16777 extern unsigned int vdso32_enabled;
16778 #endif
16779@@ -249,7 +246,25 @@ extern int force_personality32;
16780 the loader. We need to make sure that it is out of the way of the program
16781 that it will "exec", and that there is sufficient room for the brk. */
16782
16783+#ifdef CONFIG_PAX_SEGMEXEC
16784+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16785+#else
16786 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16787+#endif
16788+
16789+#ifdef CONFIG_PAX_ASLR
16790+#ifdef CONFIG_X86_32
16791+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16792+
16793+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16794+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16795+#else
16796+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16797+
16798+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16799+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16800+#endif
16801+#endif
16802
16803 /* This yields a mask that user programs can use to figure out what
16804 instruction set this CPU supports. This could be done in user space,
16805@@ -298,17 +313,13 @@ do { \
16806
16807 #define ARCH_DLINFO \
16808 do { \
16809- if (vdso64_enabled) \
16810- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16811- (unsigned long __force)current->mm->context.vdso); \
16812+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16813 } while (0)
16814
16815 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16816 #define ARCH_DLINFO_X32 \
16817 do { \
16818- if (vdso64_enabled) \
16819- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16820- (unsigned long __force)current->mm->context.vdso); \
16821+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16822 } while (0)
16823
16824 #define AT_SYSINFO 32
16825@@ -323,10 +334,10 @@ else \
16826
16827 #endif /* !CONFIG_X86_32 */
16828
16829-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16830+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16831
16832 #define VDSO_ENTRY \
16833- ((unsigned long)current->mm->context.vdso + \
16834+ (current->mm->context.vdso + \
16835 selected_vdso32->sym___kernel_vsyscall)
16836
16837 struct linux_binprm;
16838@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16839 int uses_interp);
16840 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16841
16842-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16843-#define arch_randomize_brk arch_randomize_brk
16844-
16845 /*
16846 * True on X86_32 or when emulating IA32 on X86_64
16847 */
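PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN above are the number of random bits PaX applies to the mmap and stack bases, at page granularity: 16 bits on i386 (15 under SEGMEXEC, where the address space is halved), and TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 bits for native 64-bit tasks. A quick arithmetic check of what that buys, assuming PAGE_SHIFT = 12 and the usual TASK_SIZE_MAX_SHIFT of 47:

#include <stdio.h>

int main(void)
{
    const unsigned page_shift = 12;
    const unsigned i386_bits  = 16;                   /* 15 under SEGMEXEC */
    const unsigned amd64_bits = 47 - page_shift - 3;  /* = 32 */

    printf("i386:  %u bits -> %llu MiB of mmap jitter\n", i386_bits,
           (1ULL << (i386_bits + page_shift)) >> 20);
    printf("amd64: %u bits -> %llu TiB of mmap jitter\n", amd64_bits,
           (1ULL << (amd64_bits + page_shift)) >> 40);
    return 0;
}

That is 256 MiB of jitter on i386 and 16 TiB on amd64 — far beyond what a blind brute force can scan without tripping fault-rate limits.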
16848diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16849index 77a99ac..39ff7f5 100644
16850--- a/arch/x86/include/asm/emergency-restart.h
16851+++ b/arch/x86/include/asm/emergency-restart.h
16852@@ -1,6 +1,6 @@
16853 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16854 #define _ASM_X86_EMERGENCY_RESTART_H
16855
16856-extern void machine_emergency_restart(void);
16857+extern void machine_emergency_restart(void) __noreturn;
16858
16859 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
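Marking machine_emergency_restart() __noreturn documents that the call never comes back, which lets the compiler drop unreachable epilogues and silences missing-return warnings in callers. The effect in miniature:

#include <stdlib.h>

__attribute__((noreturn)) static void die(int code)
{
    exit(code);
}

static int pick(int x)
{
    if (x > 0)
        return x;
    die(1);    /* no "return" needed after this call: the compiler knows */
}

int main(void) { return pick(2) - 2; }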
16860diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16861index 1c7eefe..d0e4702 100644
16862--- a/arch/x86/include/asm/floppy.h
16863+++ b/arch/x86/include/asm/floppy.h
16864@@ -229,18 +229,18 @@ static struct fd_routine_l {
16865 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16866 } fd_routine[] = {
16867 {
16868- request_dma,
16869- free_dma,
16870- get_dma_residue,
16871- dma_mem_alloc,
16872- hard_dma_setup
16873+ ._request_dma = request_dma,
16874+ ._free_dma = free_dma,
16875+ ._get_dma_residue = get_dma_residue,
16876+ ._dma_mem_alloc = dma_mem_alloc,
16877+ ._dma_setup = hard_dma_setup
16878 },
16879 {
16880- vdma_request_dma,
16881- vdma_nop,
16882- vdma_get_dma_residue,
16883- vdma_mem_alloc,
16884- vdma_dma_setup
16885+ ._request_dma = vdma_request_dma,
16886+ ._free_dma = vdma_nop,
16887+ ._get_dma_residue = vdma_get_dma_residue,
16888+ ._dma_mem_alloc = vdma_mem_alloc,
16889+ ._dma_setup = vdma_dma_setup
16890 }
16891 };
16892
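The floppy hunk converts positional initializers to C99 designated initializers. Positional initializers silently misbind if the struct's field order ever changes — which is precisely what grsecurity's structure-layout randomization does — whereas designated ones bind by name. A minimal sketch:

#include <stdio.h>

struct ops {
    int  (*open)(const char *);
    void (*close)(int);
};

static int  my_open(const char *p) { (void)p; return 42; }
static void my_close(int fd)       { (void)fd; }

static const struct ops fops = {
    .open  = my_open,    /* still correct if 'close' is declared first */
    .close = my_close,
};

int main(void)
{
    printf("%d\n", fops.open("/dev/fd0"));
    return 0;
}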
16893diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16894index 72ba21a..79f3f66 100644
16895--- a/arch/x86/include/asm/fpu-internal.h
16896+++ b/arch/x86/include/asm/fpu-internal.h
16897@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16898 #define user_insn(insn, output, input...) \
16899 ({ \
16900 int err; \
16901+ pax_open_userland(); \
16902 asm volatile(ASM_STAC "\n" \
16903- "1:" #insn "\n\t" \
16904+ "1:" \
16905+ __copyuser_seg \
16906+ #insn "\n\t" \
16907 "2: " ASM_CLAC "\n" \
16908 ".section .fixup,\"ax\"\n" \
16909 "3: movl $-1,%[err]\n" \
16910@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16911 _ASM_EXTABLE(1b, 3b) \
16912 : [err] "=r" (err), output \
16913 : "0"(0), input); \
16914+ pax_close_userland(); \
16915 err; \
16916 })
16917
16918@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16919 "fnclex\n\t"
16920 "emms\n\t"
16921 "fildl %P[addr]" /* set F?P to defined value */
16922- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16923+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16924 }
16925
16926 return fpu_restore_checking(&tsk->thread.fpu);
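user_insn() is a GNU statement expression: everything between ({ and }) runs in order and the block evaluates to its last expression, err. The hunk threads pax_open_userland()/pax_close_userland() around the faulting instruction so UDEREF opens the userland segment only for the instruction's duration, and __copyuser_seg retargets the access through that segment. A stripped-down model of the control flow — no asm, no segment switching, the pax_* calls represented by comments:

#include <stdio.h>

#define user_insn_model(expr)                             \
({                                                        \
    int err = 0;                                          \
    /* pax_open_userland() would go here */               \
    if ((expr) < 0)   /* stands in for the fixup path */  \
        err = -1;                                         \
    /* pax_close_userland() would go here */              \
    err;                                                  \
})

int main(void)
{
    printf("%d %d\n", user_insn_model(1), user_insn_model(-5));  /* 0 -1 */
    return 0;
}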
16927diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16928index b4c1f54..e290c08 100644
16929--- a/arch/x86/include/asm/futex.h
16930+++ b/arch/x86/include/asm/futex.h
16931@@ -12,6 +12,7 @@
16932 #include <asm/smap.h>
16933
16934 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16935+ typecheck(u32 __user *, uaddr); \
16936 asm volatile("\t" ASM_STAC "\n" \
16937 "1:\t" insn "\n" \
16938 "2:\t" ASM_CLAC "\n" \
16939@@ -20,15 +21,16 @@
16940 "\tjmp\t2b\n" \
16941 "\t.previous\n" \
16942 _ASM_EXTABLE(1b, 3b) \
16943- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16944+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16945 : "i" (-EFAULT), "0" (oparg), "1" (0))
16946
16947 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16948+ typecheck(u32 __user *, uaddr); \
16949 asm volatile("\t" ASM_STAC "\n" \
16950 "1:\tmovl %2, %0\n" \
16951 "\tmovl\t%0, %3\n" \
16952 "\t" insn "\n" \
16953- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16954+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16955 "\tjnz\t1b\n" \
16956 "3:\t" ASM_CLAC "\n" \
16957 "\t.section .fixup,\"ax\"\n" \
16958@@ -38,7 +40,7 @@
16959 _ASM_EXTABLE(1b, 4b) \
16960 _ASM_EXTABLE(2b, 4b) \
16961 : "=&a" (oldval), "=&r" (ret), \
16962- "+m" (*uaddr), "=&r" (tem) \
16963+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16964 : "r" (oparg), "i" (-EFAULT), "1" (0))
16965
16966 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16967@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16968
16969 pagefault_disable();
16970
16971+ pax_open_userland();
16972 switch (op) {
16973 case FUTEX_OP_SET:
16974- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16975+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16976 break;
16977 case FUTEX_OP_ADD:
16978- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16979+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16980 uaddr, oparg);
16981 break;
16982 case FUTEX_OP_OR:
16983@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16984 default:
16985 ret = -ENOSYS;
16986 }
16987+ pax_close_userland();
16988
16989 pagefault_enable();
16990
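Two separate hardenings in the futex hunks: typecheck(u32 __user *, uaddr) makes the macros reject anything but a genuine user pointer at compile time, and ____m() plus __copyuser_seg steer the memory operand through the userland segment under UDEREF. The typecheck() trick is plain C and worth knowing — this is linux/typecheck.h almost verbatim:

#include <stdint.h>
#include <stdio.h>

/* comparing addresses of two objects of the supposedly-same type forces a
 * pointer-type-mismatch warning when the caller passes the wrong type,
 * at zero runtime cost */
#define typecheck(type, x)             \
({                                      \
    type __dummy;                       \
    __typeof__(x) __dummy2;             \
    (void)(&__dummy == &__dummy2);      \
    1;                                  \
})

int main(void)
{
    uint32_t *p = 0;

    typecheck(uint32_t *, p);      /* fine */
    /* typecheck(uint64_t *, p);      would warn at compile time */
    printf("ok\n");
    return 0;
}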
16991diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16992index 9662290..49ca5e5 100644
16993--- a/arch/x86/include/asm/hw_irq.h
16994+++ b/arch/x86/include/asm/hw_irq.h
16995@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
16996 #endif /* CONFIG_X86_LOCAL_APIC */
16997
16998 /* Statistics */
16999-extern atomic_t irq_err_count;
17000-extern atomic_t irq_mis_count;
17001+extern atomic_unchecked_t irq_err_count;
17002+extern atomic_unchecked_t irq_mis_count;
17003
17004 /* EISA */
17005 extern void eisa_set_level_irq(unsigned int irq);
17006diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17007index ccffa53..3c90c87 100644
17008--- a/arch/x86/include/asm/i8259.h
17009+++ b/arch/x86/include/asm/i8259.h
17010@@ -62,7 +62,7 @@ struct legacy_pic {
17011 void (*init)(int auto_eoi);
17012 int (*irq_pending)(unsigned int irq);
17013 void (*make_irq)(unsigned int irq);
17014-};
17015+} __do_const;
17016
17017 extern struct legacy_pic *legacy_pic;
17018 extern struct legacy_pic null_legacy_pic;
17019diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17020index 34a5b93..27e40a6 100644
17021--- a/arch/x86/include/asm/io.h
17022+++ b/arch/x86/include/asm/io.h
17023@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17024 "m" (*(volatile type __force *)addr) barrier); }
17025
17026 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17027-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17028-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17029+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17030+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17031
17032 build_mmio_read(__readb, "b", unsigned char, "=q", )
17033-build_mmio_read(__readw, "w", unsigned short, "=r", )
17034-build_mmio_read(__readl, "l", unsigned int, "=r", )
17035+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17036+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17037
17038 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17039 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17040@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17041 * this function
17042 */
17043
17044-static inline phys_addr_t virt_to_phys(volatile void *address)
17045+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17046 {
17047 return __pa(address);
17048 }
17049@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17050 return ioremap_nocache(offset, size);
17051 }
17052
17053-extern void iounmap(volatile void __iomem *addr);
17054+extern void iounmap(const volatile void __iomem *addr);
17055
17056 extern void set_iounmap_nonlazy(void);
17057
17058@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17059
17060 #include <linux/vmalloc.h>
17061
17062+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17063+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17064+{
17065+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17066+}
17067+
17068+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17069+{
17070+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17071+}
17072+
17073 /*
17074 * Convert a virtual cached pointer to an uncached pointer
17075 */
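valid_phys_addr_range()/valid_mmap_phys_addr_range() cap /dev/mem accesses at the CPU's reported physical address width: the end of the requested range is rounded up to a page and its frame number compared against 1 << (x86_phys_bits - PAGE_SHIFT). Replaying the arithmetic in user space, with an assumed 36-bit physical address width:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_phys_addr_range(uint64_t addr, uint64_t count, unsigned phys_bits)
{
    uint64_t end_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;

    return end_pfn < (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
    printf("%d\n", valid_phys_addr_range(0xfffff000ULL, 4096, 36));  /* 1 */
    printf("%d\n", valid_phys_addr_range(1ULL << 36, 1, 36));        /* 0 */
    return 0;
}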
17076diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17077index 0a8b519..80e7d5b 100644
17078--- a/arch/x86/include/asm/irqflags.h
17079+++ b/arch/x86/include/asm/irqflags.h
17080@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17081 sti; \
17082 sysexit
17083
17084+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17085+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17086+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17087+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17088+
17089 #else
17090 #define INTERRUPT_RETURN iret
17091 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17092diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17093index 4421b5d..8543006 100644
17094--- a/arch/x86/include/asm/kprobes.h
17095+++ b/arch/x86/include/asm/kprobes.h
17096@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17097 #define RELATIVEJUMP_SIZE 5
17098 #define RELATIVECALL_OPCODE 0xe8
17099 #define RELATIVE_ADDR_SIZE 4
17100-#define MAX_STACK_SIZE 64
17101-#define MIN_STACK_SIZE(ADDR) \
17102- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17103- THREAD_SIZE - (unsigned long)(ADDR))) \
17104- ? (MAX_STACK_SIZE) \
17105- : (((unsigned long)current_thread_info()) + \
17106- THREAD_SIZE - (unsigned long)(ADDR)))
17107+#define MAX_STACK_SIZE 64UL
17108+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17109
17110 #define flush_insn_slot(p) do { } while (0)
17111
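The rewritten MIN_STACK_SIZE() replaces a hand-rolled ternary over current_thread_info() with a min() bounded by the top of the kernel stack, current->thread.sp0. The UL suffix on MAX_STACK_SIZE is load-bearing: the kernel's min() refuses to compare operands of different signedness, so a bare 64 (an int) against an unsigned long distance would not compile cleanly. A user-space approximation:

#include <stdio.h>

#define min(x, y)                              \
({                                              \
    __typeof__(x) _x = (x);                     \
    __typeof__(y) _y = (y);                     \
    (void)(&_x == &_y);  /* signedness check */ \
    _x < _y ? _x : _y;                          \
})

int main(void)
{
    unsigned long sp0 = 0x1000, addr = 0xfd0;

    /* min(64, sp0 - addr) would warn: int vs unsigned long */
    printf("%lu\n", min(64UL, sp0 - addr));     /* 48 */
    return 0;
}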
17112diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17113index 4ad6560..75c7bdd 100644
17114--- a/arch/x86/include/asm/local.h
17115+++ b/arch/x86/include/asm/local.h
17116@@ -10,33 +10,97 @@ typedef struct {
17117 atomic_long_t a;
17118 } local_t;
17119
17120+typedef struct {
17121+ atomic_long_unchecked_t a;
17122+} local_unchecked_t;
17123+
17124 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17125
17126 #define local_read(l) atomic_long_read(&(l)->a)
17127+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17128 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17129+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17130
17131 static inline void local_inc(local_t *l)
17132 {
17133- asm volatile(_ASM_INC "%0"
17134+ asm volatile(_ASM_INC "%0\n"
17135+
17136+#ifdef CONFIG_PAX_REFCOUNT
17137+ "jno 0f\n"
17138+ _ASM_DEC "%0\n"
17139+ "int $4\n0:\n"
17140+ _ASM_EXTABLE(0b, 0b)
17141+#endif
17142+
17143+ : "+m" (l->a.counter));
17144+}
17145+
17146+static inline void local_inc_unchecked(local_unchecked_t *l)
17147+{
17148+ asm volatile(_ASM_INC "%0\n"
17149 : "+m" (l->a.counter));
17150 }
17151
17152 static inline void local_dec(local_t *l)
17153 {
17154- asm volatile(_ASM_DEC "%0"
17155+ asm volatile(_ASM_DEC "%0\n"
17156+
17157+#ifdef CONFIG_PAX_REFCOUNT
17158+ "jno 0f\n"
17159+ _ASM_INC "%0\n"
17160+ "int $4\n0:\n"
17161+ _ASM_EXTABLE(0b, 0b)
17162+#endif
17163+
17164+ : "+m" (l->a.counter));
17165+}
17166+
17167+static inline void local_dec_unchecked(local_unchecked_t *l)
17168+{
17169+ asm volatile(_ASM_DEC "%0\n"
17170 : "+m" (l->a.counter));
17171 }
17172
17173 static inline void local_add(long i, local_t *l)
17174 {
17175- asm volatile(_ASM_ADD "%1,%0"
17176+ asm volatile(_ASM_ADD "%1,%0\n"
17177+
17178+#ifdef CONFIG_PAX_REFCOUNT
17179+ "jno 0f\n"
17180+ _ASM_SUB "%1,%0\n"
17181+ "int $4\n0:\n"
17182+ _ASM_EXTABLE(0b, 0b)
17183+#endif
17184+
17185+ : "+m" (l->a.counter)
17186+ : "ir" (i));
17187+}
17188+
17189+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17190+{
17191+ asm volatile(_ASM_ADD "%1,%0\n"
17192 : "+m" (l->a.counter)
17193 : "ir" (i));
17194 }
17195
17196 static inline void local_sub(long i, local_t *l)
17197 {
17198- asm volatile(_ASM_SUB "%1,%0"
17199+ asm volatile(_ASM_SUB "%1,%0\n"
17200+
17201+#ifdef CONFIG_PAX_REFCOUNT
17202+ "jno 0f\n"
17203+ _ASM_ADD "%1,%0\n"
17204+ "int $4\n0:\n"
17205+ _ASM_EXTABLE(0b, 0b)
17206+#endif
17207+
17208+ : "+m" (l->a.counter)
17209+ : "ir" (i));
17210+}
17211+
17212+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17213+{
17214+ asm volatile(_ASM_SUB "%1,%0\n"
17215 : "+m" (l->a.counter)
17216 : "ir" (i));
17217 }
17218@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17219 */
17220 static inline int local_sub_and_test(long i, local_t *l)
17221 {
17222- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17223+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17224 }
17225
17226 /**
17227@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17228 */
17229 static inline int local_dec_and_test(local_t *l)
17230 {
17231- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17232+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17233 }
17234
17235 /**
17236@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17237 */
17238 static inline int local_inc_and_test(local_t *l)
17239 {
17240- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17241+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17242 }
17243
17244 /**
17245@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17246 */
17247 static inline int local_add_negative(long i, local_t *l)
17248 {
17249- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17250+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17251 }
17252
17253 /**
17254@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17255 static inline long local_add_return(long i, local_t *l)
17256 {
17257 long __i = i;
17258+ asm volatile(_ASM_XADD "%0, %1\n"
17259+
17260+#ifdef CONFIG_PAX_REFCOUNT
17261+ "jno 0f\n"
17262+ _ASM_MOV "%0,%1\n"
17263+ "int $4\n0:\n"
17264+ _ASM_EXTABLE(0b, 0b)
17265+#endif
17266+
17267+ : "+r" (i), "+m" (l->a.counter)
17268+ : : "memory");
17269+ return i + __i;
17270+}
17271+
17272+/**
17273+ * local_add_return_unchecked - add and return
17274+ * @i: integer value to add
17275+ * @l: pointer to type local_unchecked_t
17276+ *
17277+ * Atomically adds @i to @l and returns @i + @l
17278+ */
17279+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17280+{
17281+ long __i = i;
17282 asm volatile(_ASM_XADD "%0, %1;"
17283 : "+r" (i), "+m" (l->a.counter)
17284 : : "memory");
17285@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17286
17287 #define local_cmpxchg(l, o, n) \
17288 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17289+#define local_cmpxchg_unchecked(l, o, n) \
17290+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17291 /* Always has a lock prefix */
17292 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17293
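This is the PAX_REFCOUNT pattern applied to local_t: every arithmetic op is followed by a jno that skips a recovery sequence, and on signed overflow the operation is undone and int $4 raises the overflow exception, whose handler kills the offending task. The *_unchecked variants keep ordinary wrapping semantics for counters that are allowed to wrap (statistics, sequence numbers). The same idea in portable C, with __builtin_add_overflow standing in for the jno/int $4 pair — a sketch of the policy, not the kernel mechanism:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { long counter; } local_t;

static void local_add(long i, local_t *l)
{
    long res;

    if (__builtin_add_overflow(l->counter, i, &res)) {
        /* kernel: undo the add, then int $4 -> overflow handler */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    l->counter = res;
}

int main(void)
{
    local_t l = { LONG_MAX - 1 };

    local_add(1, &l);             /* fine */
    printf("%ld\n", l.counter);
    local_add(1, &l);             /* aborts: would wrap to LONG_MIN */
    return 0;
}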
17294diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17295new file mode 100644
17296index 0000000..2bfd3ba
17297--- /dev/null
17298+++ b/arch/x86/include/asm/mman.h
17299@@ -0,0 +1,15 @@
17300+#ifndef _X86_MMAN_H
17301+#define _X86_MMAN_H
17302+
17303+#include <uapi/asm/mman.h>
17304+
17305+#ifdef __KERNEL__
17306+#ifndef __ASSEMBLY__
17307+#ifdef CONFIG_X86_32
17308+#define arch_mmap_check i386_mmap_check
17309+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17310+#endif
17311+#endif
17312+#endif
17313+
17314+#endif /* X86_MMAN_H */
17315diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17316index 09b9620..923aecd 100644
17317--- a/arch/x86/include/asm/mmu.h
17318+++ b/arch/x86/include/asm/mmu.h
17319@@ -9,7 +9,7 @@
17320 * we put the segment information here.
17321 */
17322 typedef struct {
17323- void *ldt;
17324+ struct desc_struct *ldt;
17325 int size;
17326
17327 #ifdef CONFIG_X86_64
17328@@ -18,7 +18,19 @@ typedef struct {
17329 #endif
17330
17331 struct mutex lock;
17332- void __user *vdso;
17333+ unsigned long vdso;
17334+
17335+#ifdef CONFIG_X86_32
17336+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17337+ unsigned long user_cs_base;
17338+ unsigned long user_cs_limit;
17339+
17340+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17341+ cpumask_t cpu_user_cs_mask;
17342+#endif
17343+
17344+#endif
17345+#endif
17346
17347 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17348 } mm_context_t;
17349diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17350index 883f6b93..6869d96 100644
17351--- a/arch/x86/include/asm/mmu_context.h
17352+++ b/arch/x86/include/asm/mmu_context.h
17353@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17354
17355 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17356 {
17357+
17358+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17359+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17360+ unsigned int i;
17361+ pgd_t *pgd;
17362+
17363+ pax_open_kernel();
17364+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17365+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17366+ set_pgd_batched(pgd+i, native_make_pgd(0));
17367+ pax_close_kernel();
17368+ }
17369+#endif
17370+
17371 #ifdef CONFIG_SMP
17372 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17373 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17374@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17375 struct task_struct *tsk)
17376 {
17377 unsigned cpu = smp_processor_id();
17378+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17379+ int tlbstate = TLBSTATE_OK;
17380+#endif
17381
17382 if (likely(prev != next)) {
17383 #ifdef CONFIG_SMP
17384+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17385+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17386+#endif
17387 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17388 this_cpu_write(cpu_tlbstate.active_mm, next);
17389 #endif
17390 cpumask_set_cpu(cpu, mm_cpumask(next));
17391
17392 /* Re-load page tables */
17393+#ifdef CONFIG_PAX_PER_CPU_PGD
17394+ pax_open_kernel();
17395+
17396+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17397+ if (static_cpu_has(X86_FEATURE_PCID))
17398+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17399+ else
17400+#endif
17401+
17402+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17403+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17404+ pax_close_kernel();
17405+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17406+
17407+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17408+ if (static_cpu_has(X86_FEATURE_PCID)) {
17409+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17410+ u64 descriptor[2];
17411+ descriptor[0] = PCID_USER;
17412+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17413+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17414+ descriptor[0] = PCID_KERNEL;
17415+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17416+ }
17417+ } else {
17418+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17419+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17420+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17421+ else
17422+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17423+ }
17424+ } else
17425+#endif
17426+
17427+ load_cr3(get_cpu_pgd(cpu, kernel));
17428+#else
17429 load_cr3(next->pgd);
17430+#endif
17431 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17432
17433 /* Stop flush ipis for the previous mm */
17434@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17435 */
17436 if (unlikely(prev->context.ldt != next->context.ldt))
17437 load_LDT_nolock(&next->context);
17438+
17439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17440+ if (!(__supported_pte_mask & _PAGE_NX)) {
17441+ smp_mb__before_atomic();
17442+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17443+ smp_mb__after_atomic();
17444+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17445+ }
17446+#endif
17447+
17448+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17449+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17450+ prev->context.user_cs_limit != next->context.user_cs_limit))
17451+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17452+#ifdef CONFIG_SMP
17453+ else if (unlikely(tlbstate != TLBSTATE_OK))
17454+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17455+#endif
17456+#endif
17457+
17458 }
17459+ else {
17460+
17461+#ifdef CONFIG_PAX_PER_CPU_PGD
17462+ pax_open_kernel();
17463+
17464+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17465+ if (static_cpu_has(X86_FEATURE_PCID))
17466+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17467+ else
17468+#endif
17469+
17470+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17471+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17472+ pax_close_kernel();
17473+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17474+
17475+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17476+ if (static_cpu_has(X86_FEATURE_PCID)) {
17477+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17478+ u64 descriptor[2];
17479+ descriptor[0] = PCID_USER;
17480+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17481+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17482+ descriptor[0] = PCID_KERNEL;
17483+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17484+ }
17485+ } else {
17486+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17487+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17488+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17489+ else
17490+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17491+ }
17492+ } else
17493+#endif
17494+
17495+ load_cr3(get_cpu_pgd(cpu, kernel));
17496+#endif
17497+
17498 #ifdef CONFIG_SMP
17499- else {
17500 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17501 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17502
17503@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17504 * tlb flush IPI delivery. We must reload CR3
17505 * to make sure to use no freed page tables.
17506 */
17507+
17508+#ifndef CONFIG_PAX_PER_CPU_PGD
17509 load_cr3(next->pgd);
17510 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17511+#endif
17512+
17513 load_mm_cr4(next);
17514 load_LDT_nolock(&next->context);
17515+
17516+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17517+ if (!(__supported_pte_mask & _PAGE_NX))
17518+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17519+#endif
17520+
17521+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17522+#ifdef CONFIG_PAX_PAGEEXEC
17523+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17524+#endif
17525+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17526+#endif
17527+
17528 }
17529+#endif
17530 }
17531-#endif
17532 }
17533
17534 #define activate_mm(prev, next) \
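The switch_mm() rework above is the heart of PAX_PER_CPU_PGD/UDEREF: CR3 always points at one of two per-CPU page directories (a kernel one and a user one), and on every switch the task's user-half PGD entries are cloned into them — and shadowed, with user permission stripped, into the kernel half. With PCID the code then invalidates only the affected contexts via INVPCID, or falls back to paired CR3 writes using PCID_NOFLUSH where a flush can be skipped. A toy model of the per-CPU PGD pair and the clone step, with illustrative constants (4-level paging; 256 user slots, matching the later USER_PGD_PTRS definition for a 47-bit TASK_SIZE_MAX):

#include <string.h>
#include <stdio.h>

#define NR_CPUS         4
#define PTRS_PER_PGD  512
#define USER_PGD_PTRS 256

typedef unsigned long pgd_t;
enum cpu_pgd_type { kernel = 0, user = 1 };

static pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];

static pgd_t *get_cpu_pgd(unsigned cpu, enum cpu_pgd_type type)
{
    return cpu_pgd[cpu][type];
}

static void clone_user_pgds(pgd_t *dst, const pgd_t *src)
{
    memcpy(dst, src, USER_PGD_PTRS * sizeof(pgd_t));
}

int main(void)
{
    pgd_t task_pgd[PTRS_PER_PGD] = { [0] = 0x1063 };

    /* what switch_mm() does on CPU 2 before loading CR3 */
    clone_user_pgds(get_cpu_pgd(2, kernel), task_pgd);
    printf("slot 0 on cpu 2: %#lx\n", get_cpu_pgd(2, kernel)[0]);
    return 0;
}

The payoff is that a task's own pgd is never in CR3, so even a leaked page-table pointer gives an attacker nothing directly mapped.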
17535diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17536index e3b7819..b257c64 100644
17537--- a/arch/x86/include/asm/module.h
17538+++ b/arch/x86/include/asm/module.h
17539@@ -5,6 +5,7 @@
17540
17541 #ifdef CONFIG_X86_64
17542 /* X86_64 does not define MODULE_PROC_FAMILY */
17543+#define MODULE_PROC_FAMILY ""
17544 #elif defined CONFIG_M486
17545 #define MODULE_PROC_FAMILY "486 "
17546 #elif defined CONFIG_M586
17547@@ -57,8 +58,20 @@
17548 #error unknown processor family
17549 #endif
17550
17551-#ifdef CONFIG_X86_32
17552-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17553+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17554+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17555+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17556+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17557+#else
17558+#define MODULE_PAX_KERNEXEC ""
17559 #endif
17560
17561+#ifdef CONFIG_PAX_MEMORY_UDEREF
17562+#define MODULE_PAX_UDEREF "UDEREF "
17563+#else
17564+#define MODULE_PAX_UDEREF ""
17565+#endif
17566+
17567+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17568+
17569 #endif /* _ASM_X86_MODULE_H */
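The extra vermagic tokens make module compatibility explicit: a module built without KERNEXEC/UDEREF produces a different vermagic string and is refused by a kernel built with them. MODULE_ARCH_VERMAGIC itself is nothing more than adjacent-string-literal pasting, which the module loader later compares byte for byte. An illustration (the KERNEXEC_OR value assumes the OR plugin method is configured):

#include <stdio.h>

#define MODULE_PROC_FAMILY  ""
#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
#define MODULE_PAX_UDEREF   "UDEREF "

#define MODULE_ARCH_VERMAGIC \
    MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
    printf("vermagic tail: \"%s\"\n", MODULE_ARCH_VERMAGIC);
    return 0;
}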
17570diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17571index 5f2fc44..106caa6 100644
17572--- a/arch/x86/include/asm/nmi.h
17573+++ b/arch/x86/include/asm/nmi.h
17574@@ -36,26 +36,35 @@ enum {
17575
17576 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17577
17578+struct nmiaction;
17579+
17580+struct nmiwork {
17581+ const struct nmiaction *action;
17582+ u64 max_duration;
17583+ struct irq_work irq_work;
17584+};
17585+
17586 struct nmiaction {
17587 struct list_head list;
17588 nmi_handler_t handler;
17589- u64 max_duration;
17590- struct irq_work irq_work;
17591 unsigned long flags;
17592 const char *name;
17593-};
17594+ struct nmiwork *work;
17595+} __do_const;
17596
17597 #define register_nmi_handler(t, fn, fg, n, init...) \
17598 ({ \
17599- static struct nmiaction init fn##_na = { \
17600+ static struct nmiwork fn##_nw; \
17601+ static const struct nmiaction init fn##_na = { \
17602 .handler = (fn), \
17603 .name = (n), \
17604 .flags = (fg), \
17605+ .work = &fn##_nw, \
17606 }; \
17607 __register_nmi_handler((t), &fn##_na); \
17608 })
17609
17610-int __register_nmi_handler(unsigned int, struct nmiaction *);
17611+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17612
17613 void unregister_nmi_handler(unsigned int, const char *);
17614
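Splitting struct nmiaction keeps the function pointer, flags, and name in a const (__do_const) object that can live in read-only memory, while the fields the NMI path actually updates — max_duration and the irq_work — move into a companion struct nmiwork reached through a pointer; register_nmi_handler() allocates both statically. The pattern in miniature:

#include <stdio.h>

struct work { unsigned long long max_duration; };

struct action {
    int         (*handler)(void);
    const char   *name;
    struct work  *work;     /* the only thing that ever changes */
};

static int my_handler(void) { return 1; }

static struct work my_work;
static const struct action my_action = {
    .handler = my_handler,
    .name    = "demo",
    .work    = &my_work,    /* const object, writable target */
};

int main(void)
{
    my_action.work->max_duration = 123;     /* fine */
    /* my_action.name = "x";                   would not compile */
    printf("%s: %llu\n", my_action.name, my_action.work->max_duration);
    return 0;
}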
17615diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17616index 802dde3..9183e68 100644
17617--- a/arch/x86/include/asm/page.h
17618+++ b/arch/x86/include/asm/page.h
17619@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17620 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17621
17622 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17623+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17624
17625 #define __boot_va(x) __va(x)
17626 #define __boot_pa(x) __pa(x)
17627@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17628 * virt_to_page(kaddr) returns a valid pointer if and only if
17629 * virt_addr_valid(kaddr) returns true.
17630 */
17631-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17632 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17633 extern bool __virt_addr_valid(unsigned long kaddr);
17634 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17635
17636+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17637+#define virt_to_page(kaddr) \
17638+ ({ \
17639+ const void *__kaddr = (const void *)(kaddr); \
17640+ BUG_ON(!virt_addr_valid(__kaddr)); \
17641+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17642+ })
17643+#else
17644+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17645+#endif
17646+
17647 #endif /* __ASSEMBLY__ */
17648
17649 #include <asm-generic/memory_model.h>
17650diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17651index b3bebf9..13ac22e 100644
17652--- a/arch/x86/include/asm/page_64.h
17653+++ b/arch/x86/include/asm/page_64.h
17654@@ -7,9 +7,9 @@
17655
17656 /* duplicated to the one in bootmem.h */
17657 extern unsigned long max_pfn;
17658-extern unsigned long phys_base;
17659+extern const unsigned long phys_base;
17660
17661-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17662+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17663 {
17664 unsigned long y = x - __START_KERNEL_map;
17665
17666@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17667 }
17668
17669 #ifdef CONFIG_DEBUG_VIRTUAL
17670-extern unsigned long __phys_addr(unsigned long);
17671-extern unsigned long __phys_addr_symbol(unsigned long);
17672+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17673+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17674 #else
17675 #define __phys_addr(x) __phys_addr_nodebug(x)
17676 #define __phys_addr_symbol(x) \
17677diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17678index 965c47d..ffe0af8 100644
17679--- a/arch/x86/include/asm/paravirt.h
17680+++ b/arch/x86/include/asm/paravirt.h
17681@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17682 return (pmd_t) { ret };
17683 }
17684
17685-static inline pmdval_t pmd_val(pmd_t pmd)
17686+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17687 {
17688 pmdval_t ret;
17689
17690@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17691 val);
17692 }
17693
17694+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17695+{
17696+ pgdval_t val = native_pgd_val(pgd);
17697+
17698+ if (sizeof(pgdval_t) > sizeof(long))
17699+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17700+ val, (u64)val >> 32);
17701+ else
17702+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17703+ val);
17704+}
17705+
17706 static inline void pgd_clear(pgd_t *pgdp)
17707 {
17708 set_pgd(pgdp, __pgd(0));
17709@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17710 pv_mmu_ops.set_fixmap(idx, phys, flags);
17711 }
17712
17713+#ifdef CONFIG_PAX_KERNEXEC
17714+static inline unsigned long pax_open_kernel(void)
17715+{
17716+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17717+}
17718+
17719+static inline unsigned long pax_close_kernel(void)
17720+{
17721+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17722+}
17723+#else
17724+static inline unsigned long pax_open_kernel(void) { return 0; }
17725+static inline unsigned long pax_close_kernel(void) { return 0; }
17726+#endif
17727+
17728 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17729
17730 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17731@@ -906,7 +933,7 @@ extern void default_banner(void);
17732
17733 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17734 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17735-#define PARA_INDIRECT(addr) *%cs:addr
17736+#define PARA_INDIRECT(addr) *%ss:addr
17737 #endif
17738
17739 #define INTERRUPT_RETURN \
17740@@ -981,6 +1008,21 @@ extern void default_banner(void);
17741 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17742 CLBR_NONE, \
17743 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17744+
17745+#define GET_CR0_INTO_RDI \
17746+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17747+ mov %rax,%rdi
17748+
17749+#define SET_RDI_INTO_CR0 \
17750+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17751+
17752+#define GET_CR3_INTO_RDI \
17753+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17754+ mov %rax,%rdi
17755+
17756+#define SET_RDI_INTO_CR3 \
17757+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17758+
17759 #endif /* CONFIG_X86_32 */
17760
17761 #endif /* __ASSEMBLY__ */
17762diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17763index 7549b8b..f0edfda 100644
17764--- a/arch/x86/include/asm/paravirt_types.h
17765+++ b/arch/x86/include/asm/paravirt_types.h
17766@@ -84,7 +84,7 @@ struct pv_init_ops {
17767 */
17768 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17769 unsigned long addr, unsigned len);
17770-};
17771+} __no_const __no_randomize_layout;
17772
17773
17774 struct pv_lazy_ops {
17775@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17776 void (*enter)(void);
17777 void (*leave)(void);
17778 void (*flush)(void);
17779-};
17780+} __no_randomize_layout;
17781
17782 struct pv_time_ops {
17783 unsigned long long (*sched_clock)(void);
17784 unsigned long long (*steal_clock)(int cpu);
17785 unsigned long (*get_tsc_khz)(void);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789 struct pv_cpu_ops {
17790 /* hooks for various privileged instructions */
17791@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17792
17793 void (*start_context_switch)(struct task_struct *prev);
17794 void (*end_context_switch)(struct task_struct *next);
17795-};
17796+} __no_const __no_randomize_layout;
17797
17798 struct pv_irq_ops {
17799 /*
17800@@ -215,7 +215,7 @@ struct pv_irq_ops {
17801 #ifdef CONFIG_X86_64
17802 void (*adjust_exception_frame)(void);
17803 #endif
17804-};
17805+} __no_randomize_layout;
17806
17807 struct pv_apic_ops {
17808 #ifdef CONFIG_X86_LOCAL_APIC
17809@@ -223,7 +223,7 @@ struct pv_apic_ops {
17810 unsigned long start_eip,
17811 unsigned long start_esp);
17812 #endif
17813-};
17814+} __no_const __no_randomize_layout;
17815
17816 struct pv_mmu_ops {
17817 unsigned long (*read_cr2)(void);
17818@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17819 struct paravirt_callee_save make_pud;
17820
17821 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17822+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17823 #endif /* PAGETABLE_LEVELS == 4 */
17824 #endif /* PAGETABLE_LEVELS >= 3 */
17825
17826@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17827 an mfn. We can tell which is which from the index. */
17828 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17829 phys_addr_t phys, pgprot_t flags);
17830-};
17831+
17832+#ifdef CONFIG_PAX_KERNEXEC
17833+ unsigned long (*pax_open_kernel)(void);
17834+ unsigned long (*pax_close_kernel)(void);
17835+#endif
17836+
17837+} __no_randomize_layout;
17838
17839 struct arch_spinlock;
17840 #ifdef CONFIG_SMP
17841@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17842 struct pv_lock_ops {
17843 struct paravirt_callee_save lock_spinning;
17844 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17845-};
17846+} __no_randomize_layout;
17847
17848 /* This contains all the paravirt structures: we get a convenient
17849 * number for each function using the offset which we use to indicate
17850- * what to patch. */
17851+ * what to patch.
17852+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17853+ */
17854+
17855 struct paravirt_patch_template {
17856 struct pv_init_ops pv_init_ops;
17857 struct pv_time_ops pv_time_ops;
17858@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17859 struct pv_apic_ops pv_apic_ops;
17860 struct pv_mmu_ops pv_mmu_ops;
17861 struct pv_lock_ops pv_lock_ops;
17862-};
17863+} __no_randomize_layout;
17864
17865 extern struct pv_info pv_info;
17866 extern struct pv_init_ops pv_init_ops;
17867diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17868index c4412e9..90e88c5 100644
17869--- a/arch/x86/include/asm/pgalloc.h
17870+++ b/arch/x86/include/asm/pgalloc.h
17871@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17872 pmd_t *pmd, pte_t *pte)
17873 {
17874 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17875+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17876+}
17877+
17878+static inline void pmd_populate_user(struct mm_struct *mm,
17879+ pmd_t *pmd, pte_t *pte)
17880+{
17881+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17882 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17883 }
17884
17885@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17886
17887 #ifdef CONFIG_X86_PAE
17888 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17889+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17890+{
17891+ pud_populate(mm, pudp, pmd);
17892+}
17893 #else /* !CONFIG_X86_PAE */
17894 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17895 {
17896 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17897 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17898 }
17899+
17900+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17901+{
17902+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17903+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17904+}
17905 #endif /* CONFIG_X86_PAE */
17906
17907 #if PAGETABLE_LEVELS > 3
17908@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17909 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17910 }
17911
17912+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17913+{
17914+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17915+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17916+}
17917+
17918 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17919 {
17920 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
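The pgalloc changes split the populate helpers so kernel page tables are linked in with _KERNPG_TABLE (no _PAGE_USER) while user tables keep _PAGE_TABLE; under UDEREF/KERNEXEC a kernel-only table must never be reachable with user privilege. The two flag sets differ by exactly one bit — values per pgtable_types.h:

#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
    printf("_KERNPG_TABLE = %#lx\n", _KERNPG_TABLE);  /* 0x63 */
    printf("_PAGE_TABLE   = %#lx\n", _PAGE_TABLE);    /* 0x67 */
    return 0;
}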
17921diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17922index fd74a11..35fd5af 100644
17923--- a/arch/x86/include/asm/pgtable-2level.h
17924+++ b/arch/x86/include/asm/pgtable-2level.h
17925@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17926
17927 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17928 {
17929+ pax_open_kernel();
17930 *pmdp = pmd;
17931+ pax_close_kernel();
17932 }
17933
17934 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17935diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17936index cdaa58c..e61122b 100644
17937--- a/arch/x86/include/asm/pgtable-3level.h
17938+++ b/arch/x86/include/asm/pgtable-3level.h
17939@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17940
17941 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17942 {
17943+ pax_open_kernel();
17944 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17945+ pax_close_kernel();
17946 }
17947
17948 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17949 {
17950+ pax_open_kernel();
17951 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17952+ pax_close_kernel();
17953 }
17954
17955 /*
17956diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17957index a0c35bf..7045c6a 100644
17958--- a/arch/x86/include/asm/pgtable.h
17959+++ b/arch/x86/include/asm/pgtable.h
17960@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17961
17962 #ifndef __PAGETABLE_PUD_FOLDED
17963 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17964+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17965 #define pgd_clear(pgd) native_pgd_clear(pgd)
17966 #endif
17967
17968@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17969
17970 #define arch_end_context_switch(prev) do {} while(0)
17971
17972+#define pax_open_kernel() native_pax_open_kernel()
17973+#define pax_close_kernel() native_pax_close_kernel()
17974 #endif /* CONFIG_PARAVIRT */
17975
17976+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17977+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17978+
17979+#ifdef CONFIG_PAX_KERNEXEC
17980+static inline unsigned long native_pax_open_kernel(void)
17981+{
17982+ unsigned long cr0;
17983+
17984+ preempt_disable();
17985+ barrier();
17986+ cr0 = read_cr0() ^ X86_CR0_WP;
17987+ BUG_ON(cr0 & X86_CR0_WP);
17988+ write_cr0(cr0);
17989+ barrier();
17990+ return cr0 ^ X86_CR0_WP;
17991+}
17992+
17993+static inline unsigned long native_pax_close_kernel(void)
17994+{
17995+ unsigned long cr0;
17996+
17997+ barrier();
17998+ cr0 = read_cr0() ^ X86_CR0_WP;
17999+ BUG_ON(!(cr0 & X86_CR0_WP));
18000+ write_cr0(cr0);
18001+ barrier();
18002+ preempt_enable_no_resched();
18003+ return cr0 ^ X86_CR0_WP;
18004+}
18005+#else
18006+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18007+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18008+#endif
18009+
18010 /*
18011 * The following only work if pte_present() is true.
18012 * Undefined behaviour if not..
18013 */
18014+static inline int pte_user(pte_t pte)
18015+{
18016+ return pte_val(pte) & _PAGE_USER;
18017+}
18018+
18019 static inline int pte_dirty(pte_t pte)
18020 {
18021 return pte_flags(pte) & _PAGE_DIRTY;
18022@@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18023 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18024 }
18025
18026+static inline unsigned long pgd_pfn(pgd_t pgd)
18027+{
18028+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18029+}
18030+
18031 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18032
18033 static inline int pmd_large(pmd_t pte)
18034@@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18035 return pte_clear_flags(pte, _PAGE_RW);
18036 }
18037
18038+static inline pte_t pte_mkread(pte_t pte)
18039+{
18040+ return __pte(pte_val(pte) | _PAGE_USER);
18041+}
18042+
18043 static inline pte_t pte_mkexec(pte_t pte)
18044 {
18045- return pte_clear_flags(pte, _PAGE_NX);
18046+#ifdef CONFIG_X86_PAE
18047+ if (__supported_pte_mask & _PAGE_NX)
18048+ return pte_clear_flags(pte, _PAGE_NX);
18049+ else
18050+#endif
18051+ return pte_set_flags(pte, _PAGE_USER);
18052+}
18053+
18054+static inline pte_t pte_exprotect(pte_t pte)
18055+{
18056+#ifdef CONFIG_X86_PAE
18057+ if (__supported_pte_mask & _PAGE_NX)
18058+ return pte_set_flags(pte, _PAGE_NX);
18059+ else
18060+#endif
18061+ return pte_clear_flags(pte, _PAGE_USER);
18062 }
18063
18064 static inline pte_t pte_mkdirty(pte_t pte)
18065@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18066 #endif
18067
18068 #ifndef __ASSEMBLY__
18069+
18070+#ifdef CONFIG_PAX_PER_CPU_PGD
18071+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18072+enum cpu_pgd_type {kernel = 0, user = 1};
18073+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18074+{
18075+ return cpu_pgd[cpu][type];
18076+}
18077+#endif
18078+
18079 #include <linux/mm_types.h>
18080 #include <linux/mmdebug.h>
18081 #include <linux/log2.h>
18082@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18083 * Currently stuck as a macro due to indirect forward reference to
18084 * linux/mmzone.h's __section_mem_map_addr() definition:
18085 */
18086-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18087+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18088
18089 /* Find an entry in the second-level page table.. */
18090 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18091@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18092 * Currently stuck as a macro due to indirect forward reference to
18093 * linux/mmzone.h's __section_mem_map_addr() definition:
18094 */
18095-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18096+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18097
18098 /* to find an entry in a page-table-directory. */
18099 static inline unsigned long pud_index(unsigned long address)
18100@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18101
18102 static inline int pgd_bad(pgd_t pgd)
18103 {
18104- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18105+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18106 }
18107
18108 static inline int pgd_none(pgd_t pgd)
18109@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18110 * pgd_offset() returns a (pgd_t *)
18111 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18112 */
18113-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18114+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18115+
18116+#ifdef CONFIG_PAX_PER_CPU_PGD
18117+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18118+#endif
18119+
18120 /*
18121 * a shortcut which implies the use of the kernel's pgd, instead
18122 * of a process's
18123@@ -660,6 +742,25 @@ static inline int pgd_none(pgd_t pgd)
18124 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18125 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18126
18127+#ifdef CONFIG_X86_32
18128+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18129+#else
18130+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18131+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18132+
18133+#ifdef CONFIG_PAX_MEMORY_UDEREF
18134+#ifdef __ASSEMBLY__
18135+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18136+#else
18137+extern unsigned long pax_user_shadow_base;
18138+extern pgdval_t clone_pgd_mask;
18139+#endif
18140+#else
18141+#define pax_user_shadow_base (0UL)
18142+#endif
18143+
18144+#endif
18145+
18146 #ifndef __ASSEMBLY__
18147
18148 extern int direct_gbpages;
18149@@ -826,11 +927,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18150 * dst and src can be on the same page, but the range must not overlap,
18151 * and must not cross a page boundary.
18152 */
18153-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18154+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18155 {
18156- memcpy(dst, src, count * sizeof(pgd_t));
18157+ pax_open_kernel();
18158+ while (count--)
18159+ *dst++ = *src++;
18160+ pax_close_kernel();
18161 }
18162
18163+#ifdef CONFIG_PAX_PER_CPU_PGD
18164+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18165+#endif
18166+
18167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18168+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18169+#else
18170+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18171+#endif
18172+
18173 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18174 static inline int page_level_shift(enum pg_level level)
18175 {
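native_pax_open_kernel()/native_pax_close_kernel() above implement the KERNEXEC write window: with preemption disabled, CR0.WP is XOR-toggled so the kernel may briefly write through read-only mappings, and the BUG_ON()s catch unbalanced or nested open/close pairs. A user-space model of the toggle discipline against a fake CR0 — the real code also disables preemption and inserts compiler barriers:

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long fake_cr0 = 0x80050033UL;   /* typical boot value, WP set */

static unsigned long pax_open_kernel(void)
{
    unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;

    assert(!(cr0 & X86_CR0_WP));   /* WP must have been set before */
    fake_cr0 = cr0;
    return cr0 ^ X86_CR0_WP;
}

static unsigned long pax_close_kernel(void)
{
    unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;

    assert(cr0 & X86_CR0_WP);      /* WP must have been clear before */
    fake_cr0 = cr0;
    return cr0 ^ X86_CR0_WP;
}

int main(void)
{
    pax_open_kernel();     /* WP off: patch read-only kernel data here */
    pax_close_kernel();    /* WP back on */
    printf("cr0 = %#lx\n", fake_cr0);
    return 0;
}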
18176diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18177index b6c0b40..3535d47 100644
18178--- a/arch/x86/include/asm/pgtable_32.h
18179+++ b/arch/x86/include/asm/pgtable_32.h
18180@@ -25,9 +25,6 @@
18181 struct mm_struct;
18182 struct vm_area_struct;
18183
18184-extern pgd_t swapper_pg_dir[1024];
18185-extern pgd_t initial_page_table[1024];
18186-
18187 static inline void pgtable_cache_init(void) { }
18188 static inline void check_pgt_cache(void) { }
18189 void paging_init(void);
18190@@ -45,6 +42,12 @@ void paging_init(void);
18191 # include <asm/pgtable-2level.h>
18192 #endif
18193
18194+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18195+extern pgd_t initial_page_table[PTRS_PER_PGD];
18196+#ifdef CONFIG_X86_PAE
18197+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18198+#endif
18199+
18200 #if defined(CONFIG_HIGHPTE)
18201 #define pte_offset_map(dir, address) \
18202 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18203@@ -59,12 +62,17 @@ void paging_init(void);
18204 /* Clear a kernel PTE and flush it from the TLB */
18205 #define kpte_clear_flush(ptep, vaddr) \
18206 do { \
18207+ pax_open_kernel(); \
18208 pte_clear(&init_mm, (vaddr), (ptep)); \
18209+ pax_close_kernel(); \
18210 __flush_tlb_one((vaddr)); \
18211 } while (0)
18212
18213 #endif /* !__ASSEMBLY__ */
18214
18215+#define HAVE_ARCH_UNMAPPED_AREA
18216+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18217+
18218 /*
18219 * kern_addr_valid() is (1) for FLATMEM and (0) for
18220 * SPARSEMEM and DISCONTIGMEM
18221diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18222index 9fb2f2b..b04b4bf 100644
18223--- a/arch/x86/include/asm/pgtable_32_types.h
18224+++ b/arch/x86/include/asm/pgtable_32_types.h
18225@@ -8,7 +8,7 @@
18226 */
18227 #ifdef CONFIG_X86_PAE
18228 # include <asm/pgtable-3level_types.h>
18229-# define PMD_SIZE (1UL << PMD_SHIFT)
18230+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18231 # define PMD_MASK (~(PMD_SIZE - 1))
18232 #else
18233 # include <asm/pgtable-2level_types.h>
18234@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18235 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18236 #endif
18237
18238+#ifdef CONFIG_PAX_KERNEXEC
18239+#ifndef __ASSEMBLY__
18240+extern unsigned char MODULES_EXEC_VADDR[];
18241+extern unsigned char MODULES_EXEC_END[];
18242+#endif
18243+#include <asm/boot.h>
18244+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18245+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18246+#else
18247+#define ktla_ktva(addr) (addr)
18248+#define ktva_ktla(addr) (addr)
18249+#endif
18250+
18251 #define MODULES_VADDR VMALLOC_START
18252 #define MODULES_END VMALLOC_END
18253 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
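ktla_ktva()/ktva_ktla() convert between the kernel-text linear alias and the kernel's virtual text address; they only do real work under KERNEXEC on i386, where kernel text is mapped at a separate virtual address, and are identity mappings elsewhere (see the 64-bit header below). A round-trip check of the arithmetic with assumed i386 constants (PAGE_OFFSET 0xc0000000, LOAD_PHYSICAL_ADDR 0x1000000):

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x01000000UL

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long text = 0x00100000UL;   /* hypothetical kernel-text address */

    assert(ktva_ktla(ktla_ktva(text)) == text);
    printf("ktva = %#lx\n", ktla_ktva(text));
    return 0;
}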
18254diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18255index 2ee7811..db41d8c 100644
18256--- a/arch/x86/include/asm/pgtable_64.h
18257+++ b/arch/x86/include/asm/pgtable_64.h
18258@@ -16,11 +16,16 @@
18259
18260 extern pud_t level3_kernel_pgt[512];
18261 extern pud_t level3_ident_pgt[512];
18262+extern pud_t level3_vmalloc_start_pgt[512];
18263+extern pud_t level3_vmalloc_end_pgt[512];
18264+extern pud_t level3_vmemmap_pgt[512];
18265+extern pud_t level2_vmemmap_pgt[512];
18266 extern pmd_t level2_kernel_pgt[512];
18267 extern pmd_t level2_fixmap_pgt[512];
18268-extern pmd_t level2_ident_pgt[512];
18269+extern pmd_t level2_ident_pgt[512*2];
18270 extern pte_t level1_fixmap_pgt[512];
18271-extern pgd_t init_level4_pgt[];
18272+extern pte_t level1_vsyscall_pgt[512];
18273+extern pgd_t init_level4_pgt[512];
18274
18275 #define swapper_pg_dir init_level4_pgt
18276
18277@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18278
18279 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18280 {
18281+ pax_open_kernel();
18282 *pmdp = pmd;
18283+ pax_close_kernel();
18284 }
18285
18286 static inline void native_pmd_clear(pmd_t *pmd)
18287@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18288
18289 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18290 {
18291+ pax_open_kernel();
18292 *pudp = pud;
18293+ pax_close_kernel();
18294 }
18295
18296 static inline void native_pud_clear(pud_t *pud)
18297@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18298
18299 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18300 {
18301+ pax_open_kernel();
18302+ *pgdp = pgd;
18303+ pax_close_kernel();
18304+}
18305+
18306+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18307+{
18308 *pgdp = pgd;
18309 }
18310
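native_set_pmd/pud/pgd() now open and close the kernel around every store, while the new native_set_pgd_batched() leaves that to the caller, so bulk page-table updates pay the write-protection round trip once instead of per entry. Hypothetical usage, assuming the open/close pair sketched earlier:

	static void copy_pgds(pgd_t *dst, const pgd_t *src, unsigned int n)
	{
		unsigned int i;

		pax_open_kernel();		/* one open for the whole batch */
		for (i = 0; i < n; i++)
			native_set_pgd_batched(dst + i, src[i]);
		pax_close_kernel();
	}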
18311diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18312index 602b602..acb53ed 100644
18313--- a/arch/x86/include/asm/pgtable_64_types.h
18314+++ b/arch/x86/include/asm/pgtable_64_types.h
18315@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18316 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18317 #define MODULES_END _AC(0xffffffffff000000, UL)
18318 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18319+#define MODULES_EXEC_VADDR MODULES_VADDR
18320+#define MODULES_EXEC_END MODULES_END
18321 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18322 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18323 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18324 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18325
18326+#define ktla_ktva(addr) (addr)
18327+#define ktva_ktla(addr) (addr)
18328+
18329 #define EARLY_DYNAMIC_PAGE_TABLES 64
18330
18331 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18332diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18333index 8c7c108..1c1b77f 100644
18334--- a/arch/x86/include/asm/pgtable_types.h
18335+++ b/arch/x86/include/asm/pgtable_types.h
18336@@ -85,8 +85,10 @@
18337
18338 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18339 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18340-#else
18341+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18342 #define _PAGE_NX (_AT(pteval_t, 0))
18343+#else
18344+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18345 #endif
18346
18347 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18348@@ -141,6 +143,9 @@ enum page_cache_mode {
18349 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18350 _PAGE_ACCESSED)
18351
18352+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18353+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18354+
18355 #define __PAGE_KERNEL_EXEC \
18356 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18357 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18358@@ -148,7 +153,7 @@ enum page_cache_mode {
18359 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18360 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18361 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18362-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18363+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18364 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18365 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18366 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18367@@ -194,7 +199,7 @@ enum page_cache_mode {
18368 #ifdef CONFIG_X86_64
18369 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18370 #else
18371-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18372+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18373 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18374 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18375 #endif
18376@@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18377 {
18378 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18379 }
18380+#endif
18381
18382+#if PAGETABLE_LEVELS == 3
18383+#include <asm-generic/pgtable-nopud.h>
18384+#endif
18385+
18386+#if PAGETABLE_LEVELS == 2
18387+#include <asm-generic/pgtable-nopmd.h>
18388+#endif
18389+
18390+#ifndef __ASSEMBLY__
18391 #if PAGETABLE_LEVELS > 3
18392 typedef struct { pudval_t pud; } pud_t;
18393
18394@@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18395 return pud.pud;
18396 }
18397 #else
18398-#include <asm-generic/pgtable-nopud.h>
18399-
18400 static inline pudval_t native_pud_val(pud_t pud)
18401 {
18402 return native_pgd_val(pud.pgd);
18403@@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18404 return pmd.pmd;
18405 }
18406 #else
18407-#include <asm-generic/pgtable-nopmd.h>
18408-
18409 static inline pmdval_t native_pmd_val(pmd_t pmd)
18410 {
18411 return native_pgd_val(pmd.pud.pgd);
18412@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18413
18414 extern pteval_t __supported_pte_mask;
18415 extern void set_nx(void);
18416-extern int nx_enabled;
18417
18418 #define pgprot_writecombine pgprot_writecombine
18419 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18420diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18421index 8f327184..368fb29 100644
18422--- a/arch/x86/include/asm/preempt.h
18423+++ b/arch/x86/include/asm/preempt.h
18424@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18425 */
18426 static __always_inline bool __preempt_count_dec_and_test(void)
18427 {
18428- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18429+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18430 }
18431
18432 /*
18433diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18434index ec1c935..5cc6023 100644
18435--- a/arch/x86/include/asm/processor.h
18436+++ b/arch/x86/include/asm/processor.h
18437@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18438 /* Index into per_cpu list: */
18439 u16 cpu_index;
18440 u32 microcode;
18441-};
18442+} __randomize_layout;
18443
18444 #define X86_VENDOR_INTEL 0
18445 #define X86_VENDOR_CYRIX 1
18446@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18447 : "memory");
18448 }
18449
18450+/* invpcid (%rdx),%rax */
18451+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18452+
18453+#define INVPCID_SINGLE_ADDRESS 0UL
18454+#define INVPCID_SINGLE_CONTEXT 1UL
18455+#define INVPCID_ALL_GLOBAL 2UL
18456+#define INVPCID_ALL_NONGLOBAL 3UL
18457+
18458+#define PCID_KERNEL 0UL
18459+#define PCID_USER 1UL
18460+#define PCID_NOFLUSH (1UL << 63)
18461+
18462 static inline void load_cr3(pgd_t *pgdir)
18463 {
18464- write_cr3(__pa(pgdir));
18465+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18466 }
18467
18468 #ifdef CONFIG_X86_32
18469@@ -282,7 +294,7 @@ struct tss_struct {
18470
18471 } ____cacheline_aligned;
18472
18473-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18474+extern struct tss_struct init_tss[NR_CPUS];
18475
18476 /*
18477 * Save the original ist values for checking stack pointers during debugging
18478@@ -479,6 +491,7 @@ struct thread_struct {
18479 unsigned short ds;
18480 unsigned short fsindex;
18481 unsigned short gsindex;
18482+ unsigned short ss;
18483 #endif
18484 #ifdef CONFIG_X86_32
18485 unsigned long ip;
18486@@ -805,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
18487 */
18488 #define TASK_SIZE PAGE_OFFSET
18489 #define TASK_SIZE_MAX TASK_SIZE
18490+
18491+#ifdef CONFIG_PAX_SEGMEXEC
18492+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18493+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18494+#else
18495 #define STACK_TOP TASK_SIZE
18496-#define STACK_TOP_MAX STACK_TOP
18497+#endif
18498+
18499+#define STACK_TOP_MAX TASK_SIZE
18500
18501 #define INIT_THREAD { \
18502- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18503+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18504 .vm86_info = NULL, \
18505 .sysenter_cs = __KERNEL_CS, \
18506 .io_bitmap_ptr = NULL, \
18507@@ -823,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
18508 */
18509 #define INIT_TSS { \
18510 .x86_tss = { \
18511- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18512+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18513 .ss0 = __KERNEL_DS, \
18514 .ss1 = __KERNEL_CS, \
18515 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18516@@ -834,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
18517 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18518
18519 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18520-#define KSTK_TOP(info) \
18521-({ \
18522- unsigned long *__ptr = (unsigned long *)(info); \
18523- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18524-})
18525+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18526
18527 /*
18528 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18529@@ -853,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18530 #define task_pt_regs(task) \
18531 ({ \
18532 struct pt_regs *__regs__; \
18533- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18534+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18535 __regs__ - 1; \
18536 })
18537
18538@@ -869,13 +885,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18539 * particular problem by preventing anything from being mapped
18540 * at the maximum canonical address.
18541 */
18542-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18543+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18544
18545 /* This decides where the kernel will search for a free chunk of vm
18546 * space during mmap's.
18547 */
18548 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18549- 0xc0000000 : 0xFFFFe000)
18550+ 0xc0000000 : 0xFFFFf000)
18551
18552 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18553 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18554@@ -886,11 +902,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18555 #define STACK_TOP_MAX TASK_SIZE_MAX
18556
18557 #define INIT_THREAD { \
18558- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18559+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18560 }
18561
18562 #define INIT_TSS { \
18563- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18564+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18565 }
18566
18567 /*
18568@@ -918,6 +934,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18569 */
18570 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18571
18572+#ifdef CONFIG_PAX_SEGMEXEC
18573+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18574+#endif
18575+
18576 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18577
18578 /* Get/set a process' ability to use the timestamp counter instruction */
18579@@ -962,7 +982,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18580 return 0;
18581 }
18582
18583-extern unsigned long arch_align_stack(unsigned long sp);
18584+#define arch_align_stack(x) ((x) & ~0xfUL)
18585 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18586
18587 void default_idle(void);
18588@@ -972,6 +992,6 @@ bool xen_set_default_idle(void);
18589 #define xen_set_default_idle 0
18590 #endif
18591
18592-void stop_this_cpu(void *dummy);
18593+void stop_this_cpu(void *dummy) __noreturn;
18594 void df_debug(struct pt_regs *regs, long error_code);
18595 #endif /* _ASM_X86_PROCESSOR_H */
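Two of the smaller processor.h changes are easy to verify in isolation. arch_align_stack() stops being an extern (mainline's version adds a random offset) and becomes a pure mask -- presumably because PaX applies stack randomization through its own machinery -- so the new definition simply 16-byte-aligns the value:

	#include <assert.h>

	#define arch_align_stack(x) ((x) & ~0xfUL)

	int main(void)
	{
		assert(arch_align_stack(0x7ffdc3a1fe6bUL) == 0x7ffdc3a1fe60UL);
		return 0;
	}

And task_pt_regs() now derives the saved registers from thread.sp0 directly -- pt_regs sits immediately below sp0 -- which is why the INIT_THREAD/INIT_TSS initializers above gain the -8/-16 adjustments reserving the scratch bytes this header's own comment describes.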
18596diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18597index 86fc2bb..bd5049a 100644
18598--- a/arch/x86/include/asm/ptrace.h
18599+++ b/arch/x86/include/asm/ptrace.h
18600@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18601 }
18602
18603 /*
18604- * user_mode_vm(regs) determines whether a register set came from user mode.
18605+ * user_mode(regs) determines whether a register set came from user mode.
18606 * This is true if V8086 mode was enabled OR if the register set was from
18607 * protected mode with RPL-3 CS value. This tricky test checks that with
18608 * one comparison. Many places in the kernel can bypass this full check
18609- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18610+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18611+ * be used.
18612 */
18613-static inline int user_mode(struct pt_regs *regs)
18614+static inline int user_mode_novm(struct pt_regs *regs)
18615 {
18616 #ifdef CONFIG_X86_32
18617 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18618 #else
18619- return !!(regs->cs & 3);
18620+ return !!(regs->cs & SEGMENT_RPL_MASK);
18621 #endif
18622 }
18623
18624-static inline int user_mode_vm(struct pt_regs *regs)
18625+static inline int user_mode(struct pt_regs *regs)
18626 {
18627 #ifdef CONFIG_X86_32
18628 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18629 USER_RPL;
18630 #else
18631- return user_mode(regs);
18632+ return user_mode_novm(regs);
18633 #endif
18634 }
18635
18636@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18637 #ifdef CONFIG_X86_64
18638 static inline bool user_64bit_mode(struct pt_regs *regs)
18639 {
18640+ unsigned long cs = regs->cs & 0xffff;
18641 #ifndef CONFIG_PARAVIRT
18642 /*
18643 * On non-paravirt systems, this is the only long mode CPL 3
18644 * selector. We do not allow long mode selectors in the LDT.
18645 */
18646- return regs->cs == __USER_CS;
18647+ return cs == __USER_CS;
18648 #else
18649 /* Headers are too twisted for this to go in paravirt.h. */
18650- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18651+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18652 #endif
18653 }
18654
18655@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18656 * Traps from the kernel do not save sp and ss.
18657 * Use the helper function to retrieve sp.
18658 */
18659- if (offset == offsetof(struct pt_regs, sp) &&
18660- regs->cs == __KERNEL_CS)
18661- return kernel_stack_pointer(regs);
18662+ if (offset == offsetof(struct pt_regs, sp)) {
18663+ unsigned long cs = regs->cs & 0xffff;
18664+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18665+ return kernel_stack_pointer(regs);
18666+ }
18667 #endif
18668 return *(unsigned long *)((unsigned long)regs + offset);
18669 }
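The ptrace.h hunk swaps the names so the safe predicate gets the short one: the old user_mode() (RPL-only test) becomes user_mode_novm(), and the former user_mode_vm() (which also catches v8086 register sets) is now plain user_mode(), with the comment updated to match. The cs accesses are additionally masked with 0xffff, since cs lives in a padded 64-bit pt_regs slot that the patch evidently does not trust to be zero-extended on every entry path. The full check as a user-space toy, with the standard x86 constants (values assumed, not taken from the patch):

	#include <stdio.h>

	#define SEGMENT_RPL_MASK 0x3UL
	#define USER_RPL         0x3UL
	#define X86_VM_MASK      0x20000UL	/* EFLAGS.VM, bit 17 */

	static int toy_user_mode(unsigned long cs, unsigned long flags)
	{
		return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
	}

	int main(void)
	{
		printf("%d\n", toy_user_mode(0x73, 0));	/* user selector: 1 */
		printf("%d\n", toy_user_mode(0x10, 0));	/* kernel CS:    0 */
		return 0;
	}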
18670diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18671index ae0e241..e80b10b 100644
18672--- a/arch/x86/include/asm/qrwlock.h
18673+++ b/arch/x86/include/asm/qrwlock.h
18674@@ -7,8 +7,8 @@
18675 #define queue_write_unlock queue_write_unlock
18676 static inline void queue_write_unlock(struct qrwlock *lock)
18677 {
18678- barrier();
18679- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18680+ barrier();
18681+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18682 }
18683 #endif
18684
18685diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18686index 9c6b890..5305f53 100644
18687--- a/arch/x86/include/asm/realmode.h
18688+++ b/arch/x86/include/asm/realmode.h
18689@@ -22,16 +22,14 @@ struct real_mode_header {
18690 #endif
18691 /* APM/BIOS reboot */
18692 u32 machine_real_restart_asm;
18693-#ifdef CONFIG_X86_64
18694 u32 machine_real_restart_seg;
18695-#endif
18696 };
18697
18698 /* This must match data at trampoline_32/64.S */
18699 struct trampoline_header {
18700 #ifdef CONFIG_X86_32
18701 u32 start;
18702- u16 gdt_pad;
18703+ u16 boot_cs;
18704 u16 gdt_limit;
18705 u32 gdt_base;
18706 #else
18707diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18708index a82c4f1..ac45053 100644
18709--- a/arch/x86/include/asm/reboot.h
18710+++ b/arch/x86/include/asm/reboot.h
18711@@ -6,13 +6,13 @@
18712 struct pt_regs;
18713
18714 struct machine_ops {
18715- void (*restart)(char *cmd);
18716- void (*halt)(void);
18717- void (*power_off)(void);
18718+ void (* __noreturn restart)(char *cmd);
18719+ void (* __noreturn halt)(void);
18720+ void (* __noreturn power_off)(void);
18721 void (*shutdown)(void);
18722 void (*crash_shutdown)(struct pt_regs *);
18723- void (*emergency_restart)(void);
18724-};
18725+ void (* __noreturn emergency_restart)(void);
18726+} __no_const;
18727
18728 extern struct machine_ops machine_ops;
18729
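The reboot.h changes pair two annotations that recur throughout the patch: __noreturn on the restart/halt/power_off/emergency_restart pointers lets the compiler treat calls through them as terminal, and __no_const opts machine_ops out of PaX's constify gcc plugin, which otherwise moves ops-style structures (all function pointers) into read-only data; machine_ops must stay writable because it is reassigned at runtime (e.g. by Xen). A shape sketch, with the attribute spellings assumed from the rest of the patch:

	#define __noreturn __attribute__((noreturn))
	#define __no_const	/* consumed by the constify plugin; no-op otherwise */

	struct example_ops {
		void (* __noreturn halt)(void);	/* callers may assume no return */
		void (*shutdown)(void);
	} __no_const;	/* left writable despite being an all-pointer struct */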
18730diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18731index 8f7866a..e442f20 100644
18732--- a/arch/x86/include/asm/rmwcc.h
18733+++ b/arch/x86/include/asm/rmwcc.h
18734@@ -3,7 +3,34 @@
18735
18736 #ifdef CC_HAVE_ASM_GOTO
18737
18738-#define __GEN_RMWcc(fullop, var, cc, ...) \
18739+#ifdef CONFIG_PAX_REFCOUNT
18740+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18741+do { \
18742+ asm_volatile_goto (fullop \
18743+ ";jno 0f\n" \
18744+ fullantiop \
18745+ ";int $4\n0:\n" \
18746+ _ASM_EXTABLE(0b, 0b) \
18747+ ";j" cc " %l[cc_label]" \
18748+ : : "m" (var), ## __VA_ARGS__ \
18749+ : "memory" : cc_label); \
18750+ return 0; \
18751+cc_label: \
18752+ return 1; \
18753+} while (0)
18754+#else
18755+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18756+do { \
18757+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18758+ : : "m" (var), ## __VA_ARGS__ \
18759+ : "memory" : cc_label); \
18760+ return 0; \
18761+cc_label: \
18762+ return 1; \
18763+} while (0)
18764+#endif
18765+
18766+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18767 do { \
18768 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18769 : : "m" (var), ## __VA_ARGS__ \
18770@@ -13,15 +40,46 @@ cc_label: \
18771 return 1; \
18772 } while (0)
18773
18774-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18775- __GEN_RMWcc(op " " arg0, var, cc)
18776+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18777+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18778
18779-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18780- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18781+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18782+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18783+
18784+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18785+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18786+
18787+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18788+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18789
18790 #else /* !CC_HAVE_ASM_GOTO */
18791
18792-#define __GEN_RMWcc(fullop, var, cc, ...) \
18793+#ifdef CONFIG_PAX_REFCOUNT
18794+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18795+do { \
18796+ char c; \
18797+ asm volatile (fullop \
18798+ ";jno 0f\n" \
18799+ fullantiop \
18800+ ";int $4\n0:\n" \
18801+ _ASM_EXTABLE(0b, 0b) \
18802+ "; set" cc " %1" \
18803+ : "+m" (var), "=qm" (c) \
18804+ : __VA_ARGS__ : "memory"); \
18805+ return c != 0; \
18806+} while (0)
18807+#else
18808+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18809+do { \
18810+ char c; \
18811+ asm volatile (fullop "; set" cc " %1" \
18812+ : "+m" (var), "=qm" (c) \
18813+ : __VA_ARGS__ : "memory"); \
18814+ return c != 0; \
18815+} while (0)
18816+#endif
18817+
18818+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18819 do { \
18820 char c; \
18821 asm volatile (fullop "; set" cc " %1" \
18822@@ -30,11 +88,17 @@ do { \
18823 return c != 0; \
18824 } while (0)
18825
18826-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18827- __GEN_RMWcc(op " " arg0, var, cc)
18828+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18829+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18830+
18831+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18832+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18833+
18834+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18835+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18836
18837-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18838- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18839+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18840+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18841
18842 #endif /* CC_HAVE_ASM_GOTO */
18843
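All the PAX_REFCOUNT asm in this region follows the one pattern visible in __GEN_RMWcc above: perform the operation, "jno" past the fixup when the OF flag is clear, otherwise apply the anti-operation to undo it and execute "int $4" to raise the overflow exception (#OF, vector 4) for PaX's handler to report; the _ASM_EXTABLE(0b, 0b) entry lets the trapping context resume afterwards. The *_unchecked variants keep the old semantics for counters that may wrap legitimately. The pattern in isolation, as a hypothetical user-space helper (where the int $4 surfaces as a fatal signal rather than the kernel's handler):

	#include <stdio.h>

	static inline void refcount_inc_checked(int *v)
	{
		asm volatile("incl %0\n\t"
			     "jno 0f\n\t"	/* no signed overflow: done */
			     "decl %0\n\t"	/* overflow: undo the increment */
			     "int $4\n"		/* ...then raise #OF */
			     "0:"
			     : "+m" (*v));
	}

	int main(void)
	{
		int refs = 41;

		refcount_inc_checked(&refs);
		printf("%d\n", refs);	/* 42; an INT_MAX counter would trap */
		return 0;
	}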
18844diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18845index cad82c9..2e5c5c1 100644
18846--- a/arch/x86/include/asm/rwsem.h
18847+++ b/arch/x86/include/asm/rwsem.h
18848@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18849 {
18850 asm volatile("# beginning down_read\n\t"
18851 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18852+
18853+#ifdef CONFIG_PAX_REFCOUNT
18854+ "jno 0f\n"
18855+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18856+ "int $4\n0:\n"
18857+ _ASM_EXTABLE(0b, 0b)
18858+#endif
18859+
18860 /* adds 0x00000001 */
18861 " jns 1f\n"
18862 " call call_rwsem_down_read_failed\n"
18863@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18864 "1:\n\t"
18865 " mov %1,%2\n\t"
18866 " add %3,%2\n\t"
18867+
18868+#ifdef CONFIG_PAX_REFCOUNT
18869+ "jno 0f\n"
18870+ "sub %3,%2\n"
18871+ "int $4\n0:\n"
18872+ _ASM_EXTABLE(0b, 0b)
18873+#endif
18874+
18875 " jle 2f\n\t"
18876 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18877 " jnz 1b\n\t"
18878@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18879 long tmp;
18880 asm volatile("# beginning down_write\n\t"
18881 LOCK_PREFIX " xadd %1,(%2)\n\t"
18882+
18883+#ifdef CONFIG_PAX_REFCOUNT
18884+ "jno 0f\n"
18885+ "mov %1,(%2)\n"
18886+ "int $4\n0:\n"
18887+ _ASM_EXTABLE(0b, 0b)
18888+#endif
18889+
18890 /* adds 0xffff0001, returns the old value */
18891 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18892 /* was the active mask 0 before? */
18893@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18894 long tmp;
18895 asm volatile("# beginning __up_read\n\t"
18896 LOCK_PREFIX " xadd %1,(%2)\n\t"
18897+
18898+#ifdef CONFIG_PAX_REFCOUNT
18899+ "jno 0f\n"
18900+ "mov %1,(%2)\n"
18901+ "int $4\n0:\n"
18902+ _ASM_EXTABLE(0b, 0b)
18903+#endif
18904+
18905 /* subtracts 1, returns the old value */
18906 " jns 1f\n\t"
18907 " call call_rwsem_wake\n" /* expects old value in %edx */
18908@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18909 long tmp;
18910 asm volatile("# beginning __up_write\n\t"
18911 LOCK_PREFIX " xadd %1,(%2)\n\t"
18912+
18913+#ifdef CONFIG_PAX_REFCOUNT
18914+ "jno 0f\n"
18915+ "mov %1,(%2)\n"
18916+ "int $4\n0:\n"
18917+ _ASM_EXTABLE(0b, 0b)
18918+#endif
18919+
18920 /* subtracts 0xffff0001, returns the old value */
18921 " jns 1f\n\t"
18922 " call call_rwsem_wake\n" /* expects old value in %edx */
18923@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18924 {
18925 asm volatile("# beginning __downgrade_write\n\t"
18926 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18927+
18928+#ifdef CONFIG_PAX_REFCOUNT
18929+ "jno 0f\n"
18930+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18931+ "int $4\n0:\n"
18932+ _ASM_EXTABLE(0b, 0b)
18933+#endif
18934+
18935 /*
18936 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18937 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18938@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18939 */
18940 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18941 {
18942- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18943+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18944+
18945+#ifdef CONFIG_PAX_REFCOUNT
18946+ "jno 0f\n"
18947+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18948+ "int $4\n0:\n"
18949+ _ASM_EXTABLE(0b, 0b)
18950+#endif
18951+
18952 : "+m" (sem->count)
18953 : "er" (delta));
18954 }
18955@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18956 */
18957 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18958 {
18959- return delta + xadd(&sem->count, delta);
18960+ return delta + xadd_check_overflow(&sem->count, delta);
18961 }
18962
18963 #endif /* __KERNEL__ */
18964diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18965index db257a5..b91bc77 100644
18966--- a/arch/x86/include/asm/segment.h
18967+++ b/arch/x86/include/asm/segment.h
18968@@ -73,10 +73,15 @@
18969 * 26 - ESPFIX small SS
18970 * 27 - per-cpu [ offset to per-cpu data area ]
18971 * 28 - stack_canary-20 [ for stack protector ]
18972- * 29 - unused
18973- * 30 - unused
18974+ * 29 - PCI BIOS CS
18975+ * 30 - PCI BIOS DS
18976 * 31 - TSS for double fault handler
18977 */
18978+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18979+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18980+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18981+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18982+
18983 #define GDT_ENTRY_TLS_MIN 6
18984 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18985
18986@@ -88,6 +93,8 @@
18987
18988 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18989
18990+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18991+
18992 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18993
18994 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18995@@ -113,6 +120,12 @@
18996 #define __KERNEL_STACK_CANARY 0
18997 #endif
18998
18999+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19000+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19001+
19002+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19003+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19004+
19005 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19006
19007 /*
19008@@ -140,7 +153,7 @@
19009 */
19010
19011 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19012-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19013+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19014
19015
19016 #else
19017@@ -164,6 +177,8 @@
19018 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19019 #define __USER32_DS __USER_DS
19020
19021+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19022+
19023 #define GDT_ENTRY_TSS 8 /* needs two entries */
19024 #define GDT_ENTRY_LDT 10 /* needs two entries */
19025 #define GDT_ENTRY_TLS_MIN 12
19026@@ -172,6 +187,8 @@
19027 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19028 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19029
19030+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19031+
19032 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19033 #define FS_TLS 0
19034 #define GS_TLS 1
19035@@ -179,12 +196,14 @@
19036 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19037 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19038
19039-#define GDT_ENTRIES 16
19040+#define GDT_ENTRIES 17
19041
19042 #endif
19043
19044 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19045+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19046 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19047+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19048 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19049 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19050 #ifndef CONFIG_PARAVIRT
19051@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19052 {
19053 unsigned long __limit;
19054 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19055- return __limit + 1;
19056+ return __limit;
19057 }
19058
19059 #endif /* !__ASSEMBLY__ */
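Selector values are descriptor index * 8, OR'd with the requested RPL. As a consistency check of the new PCI BIOS entries against the rewritten comment block -- assuming the usual i386 value GDT_ENTRY_KERNEL_BASE == 12, which is not shown in this hunk:

	GDT_ENTRY_PCIBIOS_CS = 12 + 17 = 29  =>  __PCIBIOS_CS = 29 * 8 = 0xe8
	GDT_ENTRY_PCIBIOS_DS = 12 + 18 = 30  =>  __PCIBIOS_DS = 30 * 8 = 0xf0

which lands exactly on the "29 - PCI BIOS CS" / "30 - PCI BIOS DS" descriptors documented above. (The amd64 side instead grows GDT_ENTRIES from 16 to 17 to make room for the UDEREF kernel DS at index 16.)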
19060diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19061index 8d3120f..352b440 100644
19062--- a/arch/x86/include/asm/smap.h
19063+++ b/arch/x86/include/asm/smap.h
19064@@ -25,11 +25,40 @@
19065
19066 #include <asm/alternative-asm.h>
19067
19068+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19069+#define ASM_PAX_OPEN_USERLAND \
19070+ 661: jmp 663f; \
19071+ .pushsection .altinstr_replacement, "a" ; \
19072+ 662: pushq %rax; nop; \
19073+ .popsection ; \
19074+ .pushsection .altinstructions, "a" ; \
19075+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19076+ .popsection ; \
19077+ call __pax_open_userland; \
19078+ popq %rax; \
19079+ 663:
19080+
19081+#define ASM_PAX_CLOSE_USERLAND \
19082+ 661: jmp 663f; \
19083+ .pushsection .altinstr_replacement, "a" ; \
19084+ 662: pushq %rax; nop; \
19085+ .popsection; \
19086+ .pushsection .altinstructions, "a" ; \
19087+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19088+ .popsection; \
19089+ call __pax_close_userland; \
19090+ popq %rax; \
19091+ 663:
19092+#else
19093+#define ASM_PAX_OPEN_USERLAND
19094+#define ASM_PAX_CLOSE_USERLAND
19095+#endif
19096+
19097 #ifdef CONFIG_X86_SMAP
19098
19099 #define ASM_CLAC \
19100 661: ASM_NOP3 ; \
19101- .pushsection .altinstr_replacement, "ax" ; \
19102+ .pushsection .altinstr_replacement, "a" ; \
19103 662: __ASM_CLAC ; \
19104 .popsection ; \
19105 .pushsection .altinstructions, "a" ; \
19106@@ -38,7 +67,7 @@
19107
19108 #define ASM_STAC \
19109 661: ASM_NOP3 ; \
19110- .pushsection .altinstr_replacement, "ax" ; \
19111+ .pushsection .altinstr_replacement, "a" ; \
19112 662: __ASM_STAC ; \
19113 .popsection ; \
19114 .pushsection .altinstructions, "a" ; \
19115@@ -56,6 +85,37 @@
19116
19117 #include <asm/alternative.h>
19118
19119+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19120+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19121+
19122+extern void __pax_open_userland(void);
19123+static __always_inline unsigned long pax_open_userland(void)
19124+{
19125+
19126+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19127+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19128+ :
19129+ : [open] "i" (__pax_open_userland)
19130+ : "memory", "rax");
19131+#endif
19132+
19133+ return 0;
19134+}
19135+
19136+extern void __pax_close_userland(void);
19137+static __always_inline unsigned long pax_close_userland(void)
19138+{
19139+
19140+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19141+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19142+ :
19143+ : [close] "i" (__pax_close_userland)
19144+ : "memory", "rax");
19145+#endif
19146+
19147+ return 0;
19148+}
19149+
19150 #ifdef CONFIG_X86_SMAP
19151
19152 static __always_inline void clac(void)
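Two separate things happen in the smap.h hunk. First, .altinstr_replacement is created with "a" instead of "ax": the replacement bytes are only ever copied over the original site by apply_alternatives(), never executed in place, so the section need not be executable. Second, each userland open/close hook is a 5-byte NOP (a short jmp in the asm flavour) that the alternatives framework rewrites into a call only on CPUs exposing the PaX-defined X86_FEATURE_STRONGUDEREF bit. Roughly what each altinstruction_entry above records (field layout paraphrased from this era's asm/alternative.h):

	struct alt_instr {
		s32 instr_offset;	/* 661: the original instruction */
		s32 repl_offset;	/* 662: the replacement bytes */
		u16 cpuid;		/* feature bit gating the rewrite */
		u8  instrlen;		/* length of the original */
		u8  replacementlen;	/* length of the replacement */
	};

The pushq/popq %rax around the call in the asm variants preserves the one register the __pax_*_userland helpers may clobber, matching the "rax" clobber declared in the C versions.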
19153diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19154index 8cd1cc3..827e09e 100644
19155--- a/arch/x86/include/asm/smp.h
19156+++ b/arch/x86/include/asm/smp.h
19157@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19158 /* cpus sharing the last level cache: */
19159 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19160 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19161-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19162+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19163
19164 static inline struct cpumask *cpu_sibling_mask(int cpu)
19165 {
19166@@ -78,7 +78,7 @@ struct smp_ops {
19167
19168 void (*send_call_func_ipi)(const struct cpumask *mask);
19169 void (*send_call_func_single_ipi)(int cpu);
19170-};
19171+} __no_const;
19172
19173 /* Globals due to paravirt */
19174 extern void set_cpu_sibling_map(int cpu);
19175@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19176 extern int safe_smp_processor_id(void);
19177
19178 #elif defined(CONFIG_X86_64_SMP)
19179-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19180-
19181-#define stack_smp_processor_id() \
19182-({ \
19183- struct thread_info *ti; \
19184- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19185- ti->cpu; \
19186-})
19187+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19188+#define stack_smp_processor_id() raw_smp_processor_id()
19189 #define safe_smp_processor_id() smp_processor_id()
19190
19191 #endif
19192diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19193index 6a99859..03cb807 100644
19194--- a/arch/x86/include/asm/stackprotector.h
19195+++ b/arch/x86/include/asm/stackprotector.h
19196@@ -47,7 +47,7 @@
19197 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19198 */
19199 #define GDT_STACK_CANARY_INIT \
19200- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19201+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19202
19203 /*
19204 * Initialize the stackprotector canary value.
19205@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19206
19207 static inline void load_stack_canary_segment(void)
19208 {
19209-#ifdef CONFIG_X86_32
19210+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19211 asm volatile ("mov %0, %%gs" : : "r" (0));
19212 #endif
19213 }
19214diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19215index 70bbe39..4ae2bd4 100644
19216--- a/arch/x86/include/asm/stacktrace.h
19217+++ b/arch/x86/include/asm/stacktrace.h
19218@@ -11,28 +11,20 @@
19219
19220 extern int kstack_depth_to_print;
19221
19222-struct thread_info;
19223+struct task_struct;
19224 struct stacktrace_ops;
19225
19226-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19227- unsigned long *stack,
19228- unsigned long bp,
19229- const struct stacktrace_ops *ops,
19230- void *data,
19231- unsigned long *end,
19232- int *graph);
19233+typedef unsigned long walk_stack_t(struct task_struct *task,
19234+ void *stack_start,
19235+ unsigned long *stack,
19236+ unsigned long bp,
19237+ const struct stacktrace_ops *ops,
19238+ void *data,
19239+ unsigned long *end,
19240+ int *graph);
19241
19242-extern unsigned long
19243-print_context_stack(struct thread_info *tinfo,
19244- unsigned long *stack, unsigned long bp,
19245- const struct stacktrace_ops *ops, void *data,
19246- unsigned long *end, int *graph);
19247-
19248-extern unsigned long
19249-print_context_stack_bp(struct thread_info *tinfo,
19250- unsigned long *stack, unsigned long bp,
19251- const struct stacktrace_ops *ops, void *data,
19252- unsigned long *end, int *graph);
19253+extern walk_stack_t print_context_stack;
19254+extern walk_stack_t print_context_stack_bp;
19255
19256 /* Generic stack tracer with callbacks */
19257
19258@@ -40,7 +32,7 @@ struct stacktrace_ops {
19259 void (*address)(void *data, unsigned long address, int reliable);
19260 /* On negative return stop dumping */
19261 int (*stack)(void *data, char *name);
19262- walk_stack_t walk_stack;
19263+ walk_stack_t *walk_stack;
19264 };
19265
19266 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19267diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19268index 751bf4b..a1278b5 100644
19269--- a/arch/x86/include/asm/switch_to.h
19270+++ b/arch/x86/include/asm/switch_to.h
19271@@ -112,7 +112,7 @@ do { \
19272 "call __switch_to\n\t" \
19273 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19274 __switch_canary \
19275- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19276+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19277 "movq %%rax,%%rdi\n\t" \
19278 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19279 "jnz ret_from_fork\n\t" \
19280@@ -123,7 +123,7 @@ do { \
19281 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19282 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19283 [_tif_fork] "i" (_TIF_FORK), \
19284- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19285+ [thread_info] "m" (current_tinfo), \
19286 [current_task] "m" (current_task) \
19287 __switch_canary_iparam \
19288 : "memory", "cc" __EXTRA_CLOBBER)
19289diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19290index 1d4e4f2..506db18 100644
19291--- a/arch/x86/include/asm/thread_info.h
19292+++ b/arch/x86/include/asm/thread_info.h
19293@@ -24,7 +24,6 @@ struct exec_domain;
19294 #include <linux/atomic.h>
19295
19296 struct thread_info {
19297- struct task_struct *task; /* main task structure */
19298 struct exec_domain *exec_domain; /* execution domain */
19299 __u32 flags; /* low level flags */
19300 __u32 status; /* thread synchronous flags */
19301@@ -32,13 +31,13 @@ struct thread_info {
19302 int saved_preempt_count;
19303 mm_segment_t addr_limit;
19304 void __user *sysenter_return;
19305+ unsigned long lowest_stack;
19306 unsigned int sig_on_uaccess_error:1;
19307 unsigned int uaccess_err:1; /* uaccess failed */
19308 };
19309
19310-#define INIT_THREAD_INFO(tsk) \
19311+#define INIT_THREAD_INFO \
19312 { \
19313- .task = &tsk, \
19314 .exec_domain = &default_exec_domain, \
19315 .flags = 0, \
19316 .cpu = 0, \
19317@@ -46,7 +45,7 @@ struct thread_info {
19318 .addr_limit = KERNEL_DS, \
19319 }
19320
19321-#define init_thread_info (init_thread_union.thread_info)
19322+#define init_thread_info (init_thread_union.stack)
19323 #define init_stack (init_thread_union.stack)
19324
19325 #else /* !__ASSEMBLY__ */
19326@@ -86,6 +85,7 @@ struct thread_info {
19327 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19328 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19329 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19330+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19331
19332 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19333 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19334@@ -109,17 +109,18 @@ struct thread_info {
19335 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19336 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19337 #define _TIF_X32 (1 << TIF_X32)
19338+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19339
19340 /* work to do in syscall_trace_enter() */
19341 #define _TIF_WORK_SYSCALL_ENTRY \
19342 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19343 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19344- _TIF_NOHZ)
19345+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19346
19347 /* work to do in syscall_trace_leave() */
19348 #define _TIF_WORK_SYSCALL_EXIT \
19349 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19350- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19351+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19352
19353 /* work to do on interrupt/exception return */
19354 #define _TIF_WORK_MASK \
19355@@ -130,7 +131,7 @@ struct thread_info {
19356 /* work to do on any return to user space */
19357 #define _TIF_ALLWORK_MASK \
19358 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19359- _TIF_NOHZ)
19360+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19361
19362 /* Only used for 64 bit */
19363 #define _TIF_DO_NOTIFY_MASK \
19364@@ -145,7 +146,6 @@ struct thread_info {
19365 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19366
19367 #define STACK_WARN (THREAD_SIZE/8)
19368-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19369
19370 /*
19371 * macros/functions for gaining access to the thread information structure
19372@@ -156,12 +156,11 @@ struct thread_info {
19373
19374 DECLARE_PER_CPU(unsigned long, kernel_stack);
19375
19376+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19377+
19378 static inline struct thread_info *current_thread_info(void)
19379 {
19380- struct thread_info *ti;
19381- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19382- KERNEL_STACK_OFFSET - THREAD_SIZE);
19383- return ti;
19384+ return this_cpu_read_stable(current_tinfo);
19385 }
19386
19387 static inline unsigned long current_stack_pointer(void)
19388@@ -179,14 +178,7 @@ static inline unsigned long current_stack_pointer(void)
19389
19390 /* how to get the thread information struct from ASM */
19391 #define GET_THREAD_INFO(reg) \
19392- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19393- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19394-
19395-/*
19396- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19397- * a certain register (to be used in assembler memory operands).
19398- */
19399-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19400+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19401
19402 #endif
19403
19404@@ -242,5 +234,12 @@ static inline bool is_ia32_task(void)
19405 extern void arch_task_cache_init(void);
19406 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19407 extern void arch_release_task_struct(struct task_struct *tsk);
19408+
19409+#define __HAVE_THREAD_FUNCTIONS
19410+#define task_thread_info(task) (&(task)->tinfo)
19411+#define task_stack_page(task) ((task)->stack)
19412+#define setup_thread_stack(p, org) do {} while (0)
19413+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19414+
19415 #endif
19416 #endif /* _ASM_X86_THREAD_INFO_H */
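The structural change driving most of this hunk: thread_info no longer sits at the bottom of the kernel stack but is embedded in task_struct (task_thread_info(task) becomes &(task)->tinfo, per the __HAVE_THREAD_FUNCTIONS block), which both removes the ->task back-pointer and changes how the current thread_info is found. Side by side, from the lines above:

	/* mainline 4.0: derive thread_info from the per-cpu stack base */
	ti = (void *)(this_cpu_read_stable(kernel_stack) +
		      KERNEL_STACK_OFFSET - THREAD_SIZE);

	/* this patch: read a pointer maintained across context switches */
	return this_cpu_read_stable(current_tinfo);

The same relocation is why KERNEL_STACK_OFFSET disappears, why end_of_stack() is redefined, and why the rsp-masking stack_smp_processor_id() trick was retired in the smp.h hunk earlier.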
19417diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19418index cd79194..e7a9491 100644
19419--- a/arch/x86/include/asm/tlbflush.h
19420+++ b/arch/x86/include/asm/tlbflush.h
19421@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19422
19423 static inline void __native_flush_tlb(void)
19424 {
19425+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19426+ u64 descriptor[2];
19427+
19428+ descriptor[0] = PCID_KERNEL;
19429+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19430+ return;
19431+ }
19432+
19433+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19434+ if (static_cpu_has(X86_FEATURE_PCID)) {
19435+ unsigned int cpu = raw_get_cpu();
19436+
19437+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19438+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19439+ raw_put_cpu_no_resched();
19440+ return;
19441+ }
19442+#endif
19443+
19444 native_write_cr3(native_read_cr3());
19445 }
19446
19447 static inline void __native_flush_tlb_global_irq_disabled(void)
19448 {
19449- unsigned long cr4;
19450+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19451+ u64 descriptor[2];
19452
19453- cr4 = this_cpu_read(cpu_tlbstate.cr4);
19454- /* clear PGE */
19455- native_write_cr4(cr4 & ~X86_CR4_PGE);
19456- /* write old PGE again and flush TLBs */
19457- native_write_cr4(cr4);
19458+ descriptor[0] = PCID_KERNEL;
19459+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19460+ } else {
19461+ unsigned long cr4;
19462+
19463+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
19464+ /* clear PGE */
19465+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19466+ /* write old PGE again and flush TLBs */
19467+ native_write_cr4(cr4);
19468+ }
19469 }
19470
19471 static inline void __native_flush_tlb_global(void)
19472@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19473
19474 static inline void __native_flush_tlb_single(unsigned long addr)
19475 {
19476+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19477+ u64 descriptor[2];
19478+
19479+ descriptor[0] = PCID_KERNEL;
19480+ descriptor[1] = addr;
19481+
19482+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19483+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19484+ if (addr < TASK_SIZE_MAX)
19485+ descriptor[1] += pax_user_shadow_base;
19486+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19487+ }
19488+
19489+ descriptor[0] = PCID_USER;
19490+ descriptor[1] = addr;
19491+#endif
19492+
19493+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19494+ return;
19495+ }
19496+
19497+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19498+ if (static_cpu_has(X86_FEATURE_PCID)) {
19499+ unsigned int cpu = raw_get_cpu();
19500+
19501+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19502+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19503+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19504+ raw_put_cpu_no_resched();
19505+
19506+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19507+ addr += pax_user_shadow_base;
19508+ }
19509+#endif
19510+
19511 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19512 }
19513
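The flush paths above now prefer INVPCID where available. The instruction consumes a 16-byte descriptor -- PCID in bits 0-11 of the first qword, the linear address in the second -- plus an invalidation type (the INVPCID_* constants from the processor.h hunk earlier) in a register; __ASM_INVPCID spells out the opcode bytes for "invpcid (%rdx),%rax" so the patch assembles even where the mnemonic is unsupported. A hedged wrapper showing the calling convention the hunks repeat inline (hypothetical helper; ring-0 only):

	typedef unsigned long long u64;

	static inline void invpcid(unsigned long type, unsigned long pcid,
				   unsigned long addr)
	{
		u64 descriptor[2] = { pcid, addr };

		/* invpcid (%rdx),%rax, as encoded by __ASM_INVPCID */
		asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"
			     : : "d" (descriptor), "a" (type) : "memory");
	}

	/* e.g. invpcid(INVPCID_SINGLE_ADDRESS, PCID_KERNEL, vaddr); */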
19514diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19515index ace9dec..3f9e253 100644
19516--- a/arch/x86/include/asm/uaccess.h
19517+++ b/arch/x86/include/asm/uaccess.h
19518@@ -7,6 +7,7 @@
19519 #include <linux/compiler.h>
19520 #include <linux/thread_info.h>
19521 #include <linux/string.h>
19522+#include <linux/spinlock.h>
19523 #include <asm/asm.h>
19524 #include <asm/page.h>
19525 #include <asm/smap.h>
19526@@ -29,7 +30,12 @@
19527
19528 #define get_ds() (KERNEL_DS)
19529 #define get_fs() (current_thread_info()->addr_limit)
19530+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19531+void __set_fs(mm_segment_t x);
19532+void set_fs(mm_segment_t x);
19533+#else
19534 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19535+#endif
19536
19537 #define segment_eq(a, b) ((a).seg == (b).seg)
19538
19539@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19540 * checks that the pointer is in the user space range - after calling
19541 * this function, memory access functions may still return -EFAULT.
19542 */
19543-#define access_ok(type, addr, size) \
19544- likely(!__range_not_ok(addr, size, user_addr_max()))
19545+extern int _cond_resched(void);
19546+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19547+#define access_ok(type, addr, size) \
19548+({ \
19549+ unsigned long __size = size; \
19550+ unsigned long __addr = (unsigned long)addr; \
19551+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19552+ if (__ret_ao && __size) { \
19553+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19554+ unsigned long __end_ao = __addr + __size - 1; \
19555+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19556+ while (__addr_ao <= __end_ao) { \
19557+ char __c_ao; \
19558+ __addr_ao += PAGE_SIZE; \
19559+ if (__size > PAGE_SIZE) \
19560+ _cond_resched(); \
19561+ if (__get_user(__c_ao, (char __user *)__addr)) \
19562+ break; \
19563+ if (type != VERIFY_WRITE) { \
19564+ __addr = __addr_ao; \
19565+ continue; \
19566+ } \
19567+ if (__put_user(__c_ao, (char __user *)__addr)) \
19568+ break; \
19569+ __addr = __addr_ao; \
19570+ } \
19571+ } \
19572+ } \
19573+ __ret_ao; \
19574+})
19575
19576 /*
19577 * The exception table consists of pairs of addresses relative to the
19578@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19579 extern int __get_user_bad(void);
19580
19581 /*
19582- * This is a type: either unsigned long, if the argument fits into
19583- * that type, or otherwise unsigned long long.
19584+ * This is a type: either (un)signed int, if the argument fits into
19585+ * that type, or otherwise (un)signed long long.
19586 */
19587 #define __inttype(x) \
19588-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19589+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19590+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19591+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19592
19593 /**
19594 * get_user: - Get a simple variable from user space.
19595@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19596 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19597 __chk_user_ptr(ptr); \
19598 might_fault(); \
19599+ pax_open_userland(); \
19600 asm volatile("call __get_user_%P3" \
19601 : "=a" (__ret_gu), "=r" (__val_gu) \
19602 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19603 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19604+ pax_close_userland(); \
19605 __ret_gu; \
19606 })
19607
19608@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19609 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19610 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19611
19612-
19613+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19614+#define __copyuser_seg "gs;"
19615+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19616+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19617+#else
19618+#define __copyuser_seg
19619+#define __COPYUSER_SET_ES
19620+#define __COPYUSER_RESTORE_ES
19621+#endif
19622
19623 #ifdef CONFIG_X86_32
19624 #define __put_user_asm_u64(x, addr, err, errret) \
19625 asm volatile(ASM_STAC "\n" \
19626- "1: movl %%eax,0(%2)\n" \
19627- "2: movl %%edx,4(%2)\n" \
19628+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19629+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19630 "3: " ASM_CLAC "\n" \
19631 ".section .fixup,\"ax\"\n" \
19632 "4: movl %3,%0\n" \
19633@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19634
19635 #define __put_user_asm_ex_u64(x, addr) \
19636 asm volatile(ASM_STAC "\n" \
19637- "1: movl %%eax,0(%1)\n" \
19638- "2: movl %%edx,4(%1)\n" \
19639+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19640+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19641 "3: " ASM_CLAC "\n" \
19642 _ASM_EXTABLE_EX(1b, 2b) \
19643 _ASM_EXTABLE_EX(2b, 3b) \
19644@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19645 __typeof__(*(ptr)) __pu_val; \
19646 __chk_user_ptr(ptr); \
19647 might_fault(); \
19648- __pu_val = x; \
19649+ __pu_val = (x); \
19650+ pax_open_userland(); \
19651 switch (sizeof(*(ptr))) { \
19652 case 1: \
19653 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19654@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19655 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19656 break; \
19657 } \
19658+ pax_close_userland(); \
19659 __ret_pu; \
19660 })
19661
19662@@ -355,8 +403,10 @@ do { \
19663 } while (0)
19664
19665 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19666+do { \
19667+ pax_open_userland(); \
19668 asm volatile(ASM_STAC "\n" \
19669- "1: mov"itype" %2,%"rtype"1\n" \
19670+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19671 "2: " ASM_CLAC "\n" \
19672 ".section .fixup,\"ax\"\n" \
19673 "3: mov %3,%0\n" \
19674@@ -364,8 +414,10 @@ do { \
19675 " jmp 2b\n" \
19676 ".previous\n" \
19677 _ASM_EXTABLE(1b, 3b) \
19678- : "=r" (err), ltype(x) \
19679- : "m" (__m(addr)), "i" (errret), "0" (err))
19680+ : "=r" (err), ltype (x) \
19681+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19682+ pax_close_userland(); \
19683+} while (0)
19684
19685 #define __get_user_size_ex(x, ptr, size) \
19686 do { \
19687@@ -389,7 +441,7 @@ do { \
19688 } while (0)
19689
19690 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19691- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19692+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19693 "2:\n" \
19694 _ASM_EXTABLE_EX(1b, 2b) \
19695 : ltype(x) : "m" (__m(addr)))
19696@@ -406,13 +458,24 @@ do { \
19697 int __gu_err; \
19698 unsigned long __gu_val; \
19699 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19700- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19701+ (x) = (__typeof__(*(ptr)))__gu_val; \
19702 __gu_err; \
19703 })
19704
19705 /* FIXME: this hack is definitely wrong -AK */
19706 struct __large_struct { unsigned long buf[100]; };
19707-#define __m(x) (*(struct __large_struct __user *)(x))
19708+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19709+#define ____m(x) \
19710+({ \
19711+ unsigned long ____x = (unsigned long)(x); \
19712+ if (____x < pax_user_shadow_base) \
19713+ ____x += pax_user_shadow_base; \
19714+ (typeof(x))____x; \
19715+})
19716+#else
19717+#define ____m(x) (x)
19718+#endif
19719+#define __m(x) (*(struct __large_struct __user *)____m(x))
19720
19721 /*
19722 * Tell gcc we read from memory instead of writing: this is because
19723@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19724 * aliasing issues.
19725 */
19726 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19727+do { \
19728+ pax_open_userland(); \
19729 asm volatile(ASM_STAC "\n" \
19730- "1: mov"itype" %"rtype"1,%2\n" \
19731+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19732 "2: " ASM_CLAC "\n" \
19733 ".section .fixup,\"ax\"\n" \
19734 "3: mov %3,%0\n" \
19735@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19736 ".previous\n" \
19737 _ASM_EXTABLE(1b, 3b) \
19738 : "=r"(err) \
19739- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19740+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19741+ pax_close_userland(); \
19742+} while (0)
19743
19744 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19745- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19746+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19747 "2:\n" \
19748 _ASM_EXTABLE_EX(1b, 2b) \
19749 : : ltype(x), "m" (__m(addr)))
19750@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19751 */
19752 #define uaccess_try do { \
19753 current_thread_info()->uaccess_err = 0; \
19754+ pax_open_userland(); \
19755 stac(); \
19756 barrier();
19757
19758 #define uaccess_catch(err) \
19759 clac(); \
19760+ pax_close_userland(); \
19761 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19762 } while (0)
19763
19764@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19765 * On error, the variable @x is set to zero.
19766 */
19767
19768+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19769+#define __get_user(x, ptr) get_user((x), (ptr))
19770+#else
19771 #define __get_user(x, ptr) \
19772 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19773+#endif
19774
19775 /**
19776 * __put_user: - Write a simple value into user space, with less checking.
19777@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19778 * Returns zero on success, or -EFAULT on error.
19779 */
19780
19781+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19782+#define __put_user(x, ptr) put_user((x), (ptr))
19783+#else
19784 #define __put_user(x, ptr) \
19785 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19786+#endif
19787
19788 #define __get_user_unaligned __get_user
19789 #define __put_user_unaligned __put_user
19790@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19791 #define get_user_ex(x, ptr) do { \
19792 unsigned long __gue_val; \
19793 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19794- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19795+ (x) = (__typeof__(*(ptr)))__gue_val; \
19796 } while (0)
19797
19798 #define put_user_try uaccess_try
19799@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19800 extern __must_check long strnlen_user(const char __user *str, long n);
19801
19802 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19803-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19804+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19805
19806 extern void __cmpxchg_wrong_size(void)
19807 __compiletime_error("Bad argument size for cmpxchg");
19808@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19809 __typeof__(ptr) __uval = (uval); \
19810 __typeof__(*(ptr)) __old = (old); \
19811 __typeof__(*(ptr)) __new = (new); \
19812+ pax_open_userland(); \
19813 switch (size) { \
19814 case 1: \
19815 { \
19816 asm volatile("\t" ASM_STAC "\n" \
19817- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19818+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19819 "2:\t" ASM_CLAC "\n" \
19820 "\t.section .fixup, \"ax\"\n" \
19821 "3:\tmov %3, %0\n" \
19822 "\tjmp 2b\n" \
19823 "\t.previous\n" \
19824 _ASM_EXTABLE(1b, 3b) \
19825- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19826+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19827 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19828 : "memory" \
19829 ); \
19830@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19831 case 2: \
19832 { \
19833 asm volatile("\t" ASM_STAC "\n" \
19834- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19835+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19836 "2:\t" ASM_CLAC "\n" \
19837 "\t.section .fixup, \"ax\"\n" \
19838 "3:\tmov %3, %0\n" \
19839 "\tjmp 2b\n" \
19840 "\t.previous\n" \
19841 _ASM_EXTABLE(1b, 3b) \
19842- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19843+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19844 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19845 : "memory" \
19846 ); \
19847@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19848 case 4: \
19849 { \
19850 asm volatile("\t" ASM_STAC "\n" \
19851- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19852+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19853 "2:\t" ASM_CLAC "\n" \
19854 "\t.section .fixup, \"ax\"\n" \
19855 "3:\tmov %3, %0\n" \
19856 "\tjmp 2b\n" \
19857 "\t.previous\n" \
19858 _ASM_EXTABLE(1b, 3b) \
19859- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19860+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19861 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19862 : "memory" \
19863 ); \
19864@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19865 __cmpxchg_wrong_size(); \
19866 \
19867 asm volatile("\t" ASM_STAC "\n" \
19868- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19869+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19870 "2:\t" ASM_CLAC "\n" \
19871 "\t.section .fixup, \"ax\"\n" \
19872 "3:\tmov %3, %0\n" \
19873 "\tjmp 2b\n" \
19874 "\t.previous\n" \
19875 _ASM_EXTABLE(1b, 3b) \
19876- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19877+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19878 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19879 : "memory" \
19880 ); \
19881@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19882 default: \
19883 __cmpxchg_wrong_size(); \
19884 } \
19885+ pax_close_userland(); \
19886 *__uval = __old; \
19887 __ret; \
19888 })
19889@@ -636,17 +715,6 @@ extern struct movsl_mask {
19890
19891 #define ARCH_HAS_NOCACHE_UACCESS 1
19892
19893-#ifdef CONFIG_X86_32
19894-# include <asm/uaccess_32.h>
19895-#else
19896-# include <asm/uaccess_64.h>
19897-#endif
19898-
19899-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19900- unsigned n);
19901-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19902- unsigned n);
19903-
19904 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19905 # define copy_user_diag __compiletime_error
19906 #else
19907@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19908 extern void copy_user_diag("copy_from_user() buffer size is too small")
19909 copy_from_user_overflow(void);
19910 extern void copy_user_diag("copy_to_user() buffer size is too small")
19911-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19912+copy_to_user_overflow(void);
19913
19914 #undef copy_user_diag
19915
19916@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19917
19918 extern void
19919 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19920-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19921+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19922 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19923
19924 #else
19925@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19926
19927 #endif
19928
19929+#ifdef CONFIG_X86_32
19930+# include <asm/uaccess_32.h>
19931+#else
19932+# include <asm/uaccess_64.h>
19933+#endif
19934+
19935 static inline unsigned long __must_check
19936 copy_from_user(void *to, const void __user *from, unsigned long n)
19937 {
19938- int sz = __compiletime_object_size(to);
19939+ size_t sz = __compiletime_object_size(to);
19940
19941 might_fault();
19942
19943@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19944 * case, and do only runtime checking for non-constant sizes.
19945 */
19946
19947- if (likely(sz < 0 || sz >= n))
19948- n = _copy_from_user(to, from, n);
19949- else if(__builtin_constant_p(n))
19950- copy_from_user_overflow();
19951- else
19952- __copy_from_user_overflow(sz, n);
19953+ if (likely(sz != (size_t)-1 && sz < n)) {
19954+ if(__builtin_constant_p(n))
19955+ copy_from_user_overflow();
19956+ else
19957+ __copy_from_user_overflow(sz, n);
19958+ } else if (access_ok(VERIFY_READ, from, n))
19959+ n = __copy_from_user(to, from, n);
19960+ else if ((long)n > 0)
19961+ memset(to, 0, n);
19962
19963 return n;
19964 }
19965@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19966 static inline unsigned long __must_check
19967 copy_to_user(void __user *to, const void *from, unsigned long n)
19968 {
19969- int sz = __compiletime_object_size(from);
19970+ size_t sz = __compiletime_object_size(from);
19971
19972 might_fault();
19973
19974 /* See the comment in copy_from_user() above. */
19975- if (likely(sz < 0 || sz >= n))
19976- n = _copy_to_user(to, from, n);
19977- else if(__builtin_constant_p(n))
19978- copy_to_user_overflow();
19979- else
19980- __copy_to_user_overflow(sz, n);
19981+ if (likely(sz != (size_t)-1 && sz < n)) {
19982+ if(__builtin_constant_p(n))
19983+ copy_to_user_overflow();
19984+ else
19985+ __copy_to_user_overflow(sz, n);
19986+ } else if (access_ok(VERIFY_WRITE, to, n))
19987+ n = __copy_to_user(to, from, n);
19988
19989 return n;
19990 }
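
Note on the uaccess.h hunks above: the rewritten copy_from_user()/copy_to_user() replace the old signed test (`sz < 0 || sz >= n` on an `int sz`) with an unsigned `size_t` test, and the copy_from_user() path now zeroes the destination when access_ok() fails, so a failed copy cannot leave stale kernel data in the buffer. A minimal userspace model of the new flow, assuming stand-in names (model_copy_from_user, a trivial access_ok()); the compile-time object size is passed in by hand here where the kernel derives it via __compiletime_object_size():

#include <stdio.h>
#include <string.h>

/* stand-in for the kernel's access_ok(); assumption for this sketch */
static int access_ok(const void *p, size_t n) { (void)n; return p != NULL; }

/* mirrors the patched copy_from_user() flow; sz is the compile-time
   destination size, (size_t)-1 meaning "unknown" */
static size_t model_copy_from_user(void *to, const void *from,
                                   size_t n, size_t sz)
{
        if (sz != (size_t)-1 && sz < n)
                return n;                 /* destination provably too small */
        else if (access_ok(from, n)) {
                memcpy(to, from, n);      /* would be __copy_from_user() */
                return 0;
        } else if ((long)n > 0)
                memset(to, 0, n);         /* don't leak stale kernel data */
        return n;
}

int main(void)
{
        char dst[8];
        const char src[16] = "0123456789abcde";
        size_t left;

        left = model_copy_from_user(dst, src, sizeof(dst), sizeof(dst));
        printf("in-bounds copy, %zu bytes left\n", left);        /* 0 */

        left = model_copy_from_user(dst, src, sizeof(src), sizeof(dst));
        printf("oversized copy refused, %zu bytes left\n", left); /* 16 */
        return 0;
}

The key design change is ordering: the overflow report now comes first and aborts the copy entirely, where the original fell through to _copy_from_user() in the "unknown size" case only.
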
19991diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19992index 3c03a5d..edb68ae 100644
19993--- a/arch/x86/include/asm/uaccess_32.h
19994+++ b/arch/x86/include/asm/uaccess_32.h
19995@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19996 * anything, so this is accurate.
19997 */
19998
19999-static __always_inline unsigned long __must_check
20000+static __always_inline __size_overflow(3) unsigned long __must_check
20001 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20002 {
20003+ if ((long)n < 0)
20004+ return n;
20005+
20006+ check_object_size(from, n, true);
20007+
20008 if (__builtin_constant_p(n)) {
20009 unsigned long ret;
20010
20011@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20012 __copy_to_user(void __user *to, const void *from, unsigned long n)
20013 {
20014 might_fault();
20015+
20016 return __copy_to_user_inatomic(to, from, n);
20017 }
20018
20019-static __always_inline unsigned long
20020+static __always_inline __size_overflow(3) unsigned long
20021 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20022 {
20023+ if ((long)n < 0)
20024+ return n;
20025+
20026 /* Avoid zeroing the tail if the copy fails..
20027 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20028 * but as the zeroing behaviour is only significant when n is not
20029@@ -137,6 +146,12 @@ static __always_inline unsigned long
20030 __copy_from_user(void *to, const void __user *from, unsigned long n)
20031 {
20032 might_fault();
20033+
20034+ if ((long)n < 0)
20035+ return n;
20036+
20037+ check_object_size(to, n, false);
20038+
20039 if (__builtin_constant_p(n)) {
20040 unsigned long ret;
20041
20042@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20043 const void __user *from, unsigned long n)
20044 {
20045 might_fault();
20046+
20047+ if ((long)n < 0)
20048+ return n;
20049+
20050 if (__builtin_constant_p(n)) {
20051 unsigned long ret;
20052
20053@@ -181,7 +200,10 @@ static __always_inline unsigned long
20054 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20055 unsigned long n)
20056 {
20057- return __copy_from_user_ll_nocache_nozero(to, from, n);
20058+ if ((long)n < 0)
20059+ return n;
20060+
20061+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20062 }
20063
20064 #endif /* _ASM_X86_UACCESS_32_H */
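
Every 32-bit helper in the uaccess_32.h hunks above now begins with `if ((long)n < 0) return n;`. On i386 this rejects any length of 2 GiB or more, which in practice only arises when an unsigned length underflows (the classic `len - 1` with `len == 0`); returning n reports total failure to the caller. A sketch of the guard, with hypothetical names:

#include <stdio.h>

/* the guard the patch adds at the top of each __copy_* helper */
static size_t guarded_copy(void *to, const void *from, size_t n)
{
        (void)to; (void)from;
        if ((long)n < 0)    /* n > LONG_MAX: an underflowed length */
                return n;   /* everything "left uncopied" */
        /* ... the actual copy would run here ... */
        return 0;
}

int main(void)
{
        size_t len = 0;
        char a[1], b[1];

        /* classic off-by-one: 0 - 1 wraps to SIZE_MAX */
        printf("wrapped length: %zu left\n", guarded_copy(a, b, len - 1));
        printf("sane length:    %zu left\n", guarded_copy(a, b, 1));
        return 0;
}
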
20065diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20066index f2f9b39..2ae1bf8 100644
20067--- a/arch/x86/include/asm/uaccess_64.h
20068+++ b/arch/x86/include/asm/uaccess_64.h
20069@@ -10,6 +10,9 @@
20070 #include <asm/alternative.h>
20071 #include <asm/cpufeature.h>
20072 #include <asm/page.h>
20073+#include <asm/pgtable.h>
20074+
20075+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20076
20077 /*
20078 * Copy To/From Userspace
20079@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20080 __must_check unsigned long
20081 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20082
20083-static __always_inline __must_check unsigned long
20084-copy_user_generic(void *to, const void *from, unsigned len)
20085+static __always_inline __must_check __size_overflow(3) unsigned long
20086+copy_user_generic(void *to, const void *from, unsigned long len)
20087 {
20088 unsigned ret;
20089
20090@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20091 }
20092
20093 __must_check unsigned long
20094-copy_in_user(void __user *to, const void __user *from, unsigned len);
20095+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20096
20097 static __always_inline __must_check
20098-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20099+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20100 {
20101- int ret = 0;
20102+ size_t sz = __compiletime_object_size(dst);
20103+ unsigned ret = 0;
20104+
20105+ if (size > INT_MAX)
20106+ return size;
20107+
20108+ check_object_size(dst, size, false);
20109+
20110+#ifdef CONFIG_PAX_MEMORY_UDEREF
20111+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20112+ return size;
20113+#endif
20114+
20115+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20116+ if(__builtin_constant_p(size))
20117+ copy_from_user_overflow();
20118+ else
20119+ __copy_from_user_overflow(sz, size);
20120+ return size;
20121+ }
20122
20123 if (!__builtin_constant_p(size))
20124- return copy_user_generic(dst, (__force void *)src, size);
20125+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20126 switch (size) {
20127- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20128+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20129 ret, "b", "b", "=q", 1);
20130 return ret;
20131- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20132+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20133 ret, "w", "w", "=r", 2);
20134 return ret;
20135- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20136+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20137 ret, "l", "k", "=r", 4);
20138 return ret;
20139- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20140+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20141 ret, "q", "", "=r", 8);
20142 return ret;
20143 case 10:
20144- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20145+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20146 ret, "q", "", "=r", 10);
20147 if (unlikely(ret))
20148 return ret;
20149 __get_user_asm(*(u16 *)(8 + (char *)dst),
20150- (u16 __user *)(8 + (char __user *)src),
20151+ (const u16 __user *)(8 + (const char __user *)src),
20152 ret, "w", "w", "=r", 2);
20153 return ret;
20154 case 16:
20155- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20156+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20157 ret, "q", "", "=r", 16);
20158 if (unlikely(ret))
20159 return ret;
20160 __get_user_asm(*(u64 *)(8 + (char *)dst),
20161- (u64 __user *)(8 + (char __user *)src),
20162+ (const u64 __user *)(8 + (const char __user *)src),
20163 ret, "q", "", "=r", 8);
20164 return ret;
20165 default:
20166- return copy_user_generic(dst, (__force void *)src, size);
20167+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20168 }
20169 }
20170
20171 static __always_inline __must_check
20172-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20173+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20174 {
20175 might_fault();
20176 return __copy_from_user_nocheck(dst, src, size);
20177 }
20178
20179 static __always_inline __must_check
20180-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20181+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20182 {
20183- int ret = 0;
20184+ size_t sz = __compiletime_object_size(src);
20185+ unsigned ret = 0;
20186+
20187+ if (size > INT_MAX)
20188+ return size;
20189+
20190+ check_object_size(src, size, true);
20191+
20192+#ifdef CONFIG_PAX_MEMORY_UDEREF
20193+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20194+ return size;
20195+#endif
20196+
20197+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20198+ if(__builtin_constant_p(size))
20199+ copy_to_user_overflow();
20200+ else
20201+ __copy_to_user_overflow(sz, size);
20202+ return size;
20203+ }
20204
20205 if (!__builtin_constant_p(size))
20206- return copy_user_generic((__force void *)dst, src, size);
20207+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20208 switch (size) {
20209- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20210+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20211 ret, "b", "b", "iq", 1);
20212 return ret;
20213- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20214+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20215 ret, "w", "w", "ir", 2);
20216 return ret;
20217- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20218+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20219 ret, "l", "k", "ir", 4);
20220 return ret;
20221- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20222+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20223 ret, "q", "", "er", 8);
20224 return ret;
20225 case 10:
20226- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20227+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20228 ret, "q", "", "er", 10);
20229 if (unlikely(ret))
20230 return ret;
20231 asm("":::"memory");
20232- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20233+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20234 ret, "w", "w", "ir", 2);
20235 return ret;
20236 case 16:
20237- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20238+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20239 ret, "q", "", "er", 16);
20240 if (unlikely(ret))
20241 return ret;
20242 asm("":::"memory");
20243- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20244+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20245 ret, "q", "", "er", 8);
20246 return ret;
20247 default:
20248- return copy_user_generic((__force void *)dst, src, size);
20249+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20250 }
20251 }
20252
20253 static __always_inline __must_check
20254-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20255+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20256 {
20257 might_fault();
20258 return __copy_to_user_nocheck(dst, src, size);
20259 }
20260
20261 static __always_inline __must_check
20262-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20263+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20264 {
20265- int ret = 0;
20266+ unsigned ret = 0;
20267
20268 might_fault();
20269+
20270+ if (size > INT_MAX)
20271+ return size;
20272+
20273+#ifdef CONFIG_PAX_MEMORY_UDEREF
20274+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20275+ return size;
20276+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20277+ return size;
20278+#endif
20279+
20280 if (!__builtin_constant_p(size))
20281- return copy_user_generic((__force void *)dst,
20282- (__force void *)src, size);
20283+ return copy_user_generic((__force_kernel void *)____m(dst),
20284+ (__force_kernel const void *)____m(src), size);
20285 switch (size) {
20286 case 1: {
20287 u8 tmp;
20288- __get_user_asm(tmp, (u8 __user *)src,
20289+ __get_user_asm(tmp, (const u8 __user *)src,
20290 ret, "b", "b", "=q", 1);
20291 if (likely(!ret))
20292 __put_user_asm(tmp, (u8 __user *)dst,
20293@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20294 }
20295 case 2: {
20296 u16 tmp;
20297- __get_user_asm(tmp, (u16 __user *)src,
20298+ __get_user_asm(tmp, (const u16 __user *)src,
20299 ret, "w", "w", "=r", 2);
20300 if (likely(!ret))
20301 __put_user_asm(tmp, (u16 __user *)dst,
20302@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20303
20304 case 4: {
20305 u32 tmp;
20306- __get_user_asm(tmp, (u32 __user *)src,
20307+ __get_user_asm(tmp, (const u32 __user *)src,
20308 ret, "l", "k", "=r", 4);
20309 if (likely(!ret))
20310 __put_user_asm(tmp, (u32 __user *)dst,
20311@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20312 }
20313 case 8: {
20314 u64 tmp;
20315- __get_user_asm(tmp, (u64 __user *)src,
20316+ __get_user_asm(tmp, (const u64 __user *)src,
20317 ret, "q", "", "=r", 8);
20318 if (likely(!ret))
20319 __put_user_asm(tmp, (u64 __user *)dst,
20320@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20321 return ret;
20322 }
20323 default:
20324- return copy_user_generic((__force void *)dst,
20325- (__force void *)src, size);
20326+ return copy_user_generic((__force_kernel void *)____m(dst),
20327+ (__force_kernel const void *)____m(src), size);
20328 }
20329 }
20330
20331-static __must_check __always_inline int
20332-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20333+static __must_check __always_inline unsigned long
20334+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20335 {
20336 return __copy_from_user_nocheck(dst, src, size);
20337 }
20338
20339-static __must_check __always_inline int
20340-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20341+static __must_check __always_inline unsigned long
20342+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20343 {
20344 return __copy_to_user_nocheck(dst, src, size);
20345 }
20346
20347-extern long __copy_user_nocache(void *dst, const void __user *src,
20348- unsigned size, int zerorest);
20349+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20350+ unsigned long size, int zerorest);
20351
20352-static inline int
20353-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20354+static inline unsigned long
20355+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20356 {
20357 might_fault();
20358+
20359+ if (size > INT_MAX)
20360+ return size;
20361+
20362+#ifdef CONFIG_PAX_MEMORY_UDEREF
20363+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20364+ return size;
20365+#endif
20366+
20367 return __copy_user_nocache(dst, src, size, 1);
20368 }
20369
20370-static inline int
20371+static inline unsigned long
20372 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20373- unsigned size)
20374+ unsigned long size)
20375 {
20376+ if (size > INT_MAX)
20377+ return size;
20378+
20379+#ifdef CONFIG_PAX_MEMORY_UDEREF
20380+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20381+ return size;
20382+#endif
20383+
20384 return __copy_user_nocache(dst, src, size, 0);
20385 }
20386
20387 unsigned long
20388-copy_user_handle_tail(char *to, char *from, unsigned len);
20389+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20390
20391 #endif /* _ASM_X86_UACCESS_64_H */
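
The uaccess_64.h hunks above widen the helpers' return type from int to unsigned long and cap request sizes at INT_MAX. The sketch below (assuming an LP64 target; both function names are invented) shows why the narrow type was dangerous: a 4 GiB "bytes not copied" result truncates to 0, i.e. apparent success:

#include <limits.h>
#include <stdio.h>

/* old prototype shape: the uncopied count truncates on return */
static int narrow_result(unsigned long uncopied)
{
        return uncopied;        /* implementation-defined truncation */
}

/* patched shape: unsigned long return plus an explicit INT_MAX cap */
static unsigned long wide_result(unsigned long size)
{
        if (size > INT_MAX)     /* refuse absurd sizes up front */
                return size;
        return 0;               /* pretend the copy succeeded */
}

int main(void)
{
        unsigned long huge = 0x100000000UL;   /* 4 GiB */

        printf("int return:           %d (looks like success!)\n",
               narrow_result(huge));
        printf("unsigned long return: %lu\n", wide_result(huge));
        return 0;
}
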
20392diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20393index 5b238981..77fdd78 100644
20394--- a/arch/x86/include/asm/word-at-a-time.h
20395+++ b/arch/x86/include/asm/word-at-a-time.h
20396@@ -11,7 +11,7 @@
20397 * and shift, for example.
20398 */
20399 struct word_at_a_time {
20400- const unsigned long one_bits, high_bits;
20401+ unsigned long one_bits, high_bits;
20402 };
20403
20404 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20405diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20406index f58a9c7..dc378042a 100644
20407--- a/arch/x86/include/asm/x86_init.h
20408+++ b/arch/x86/include/asm/x86_init.h
20409@@ -129,7 +129,7 @@ struct x86_init_ops {
20410 struct x86_init_timers timers;
20411 struct x86_init_iommu iommu;
20412 struct x86_init_pci pci;
20413-};
20414+} __no_const;
20415
20416 /**
20417 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20418@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20419 void (*setup_percpu_clockev)(void);
20420 void (*early_percpu_clock_init)(void);
20421 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20422-};
20423+} __no_const;
20424
20425 struct timespec;
20426
20427@@ -168,7 +168,7 @@ struct x86_platform_ops {
20428 void (*save_sched_clock_state)(void);
20429 void (*restore_sched_clock_state)(void);
20430 void (*apic_post_init)(void);
20431-};
20432+} __no_const;
20433
20434 struct pci_dev;
20435 struct msi_msg;
20436@@ -182,7 +182,7 @@ struct x86_msi_ops {
20437 void (*teardown_msi_irqs)(struct pci_dev *dev);
20438 void (*restore_msi_irqs)(struct pci_dev *dev);
20439 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20440-};
20441+} __no_const;
20442
20443 struct IO_APIC_route_entry;
20444 struct io_apic_irq_attr;
20445@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20446 unsigned int destination, int vector,
20447 struct io_apic_irq_attr *attr);
20448 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20449-};
20450+} __no_const;
20451
20452 extern struct x86_init_ops x86_init;
20453 extern struct x86_cpuinit_ops x86_cpuinit;
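
`__no_const` in the x86_init.h hunks is a marker consumed by the PaX constify GCC plugin: ops tables that are not so annotated get moved to read-only memory, so their function pointers cannot be redirected at runtime. Plain const C gives a similar effect for a table that never needs patching; a toy illustration, all names invented:

#include <stdio.h>

struct init_ops {
        void (*setup)(void);
};

static void real_setup(void) { puts("setup"); }

/* const lands the table in .rodata: overwriting .setup at runtime
   faults instead of redirecting control flow */
static const struct init_ops x86_ops = { .setup = real_setup };

int main(void)
{
        x86_ops.setup();
        /* x86_ops.setup = hijack;   -- rejected at compile time */
        return 0;
}
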
20454diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20455index 358dcd3..23c0bf1 100644
20456--- a/arch/x86/include/asm/xen/page.h
20457+++ b/arch/x86/include/asm/xen/page.h
20458@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20459 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20460 * cases needing an extended handling.
20461 */
20462-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20463+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20464 {
20465 unsigned long mfn;
20466
20467diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20468index c9a6d68..cb57f42 100644
20469--- a/arch/x86/include/asm/xsave.h
20470+++ b/arch/x86/include/asm/xsave.h
20471@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20472 if (unlikely(err))
20473 return -EFAULT;
20474
20475+ pax_open_userland();
20476 __asm__ __volatile__(ASM_STAC "\n"
20477- "1:"XSAVE"\n"
20478+ "1:"
20479+ __copyuser_seg
20480+ XSAVE"\n"
20481 "2: " ASM_CLAC "\n"
20482 xstate_fault
20483 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20484 : "memory");
20485+ pax_close_userland();
20486 return err;
20487 }
20488
20489@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20490 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20491 {
20492 int err = 0;
20493- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20494+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20495 u32 lmask = mask;
20496 u32 hmask = mask >> 32;
20497
20498+ pax_open_userland();
20499 __asm__ __volatile__(ASM_STAC "\n"
20500- "1:"XRSTOR"\n"
20501+ "1:"
20502+ __copyuser_seg
20503+ XRSTOR"\n"
20504 "2: " ASM_CLAC "\n"
20505 xstate_fault
20506 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20507 : "memory"); /* memory required? */
20508+ pax_close_userland();
20509 return err;
20510 }
20511
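
The pax_open_userland()/pax_close_userland() pairs added around XSAVE/XRSTOR above bracket the userland access so that, under UDEREF, the user address range is only reachable for the duration of the instruction. A toy model of that bracketing discipline; the flag and all function names are stand-ins, not the real PaX mechanism:

#include <assert.h>
#include <stdio.h>

static int userland_open;   /* models the per-CPU "userland is mapped" state */

static void open_userland(void)  { userland_open = 1; }
static void close_userland(void) { userland_open = 0; }

/* every userland dereference must happen inside an open/close window */
static unsigned char get_user_byte(const unsigned char *uptr)
{
        assert(userland_open && "userland touched outside open/close window");
        return *uptr;
}

int main(void)
{
        unsigned char user_buf = 42;

        open_userland();
        printf("%u\n", get_user_byte(&user_buf));
        close_userland();
        return 0;
}
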
20512diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20513index d993e33..8db1b18 100644
20514--- a/arch/x86/include/uapi/asm/e820.h
20515+++ b/arch/x86/include/uapi/asm/e820.h
20516@@ -58,7 +58,7 @@ struct e820map {
20517 #define ISA_START_ADDRESS 0xa0000
20518 #define ISA_END_ADDRESS 0x100000
20519
20520-#define BIOS_BEGIN 0x000a0000
20521+#define BIOS_BEGIN 0x000c0000
20522 #define BIOS_END 0x00100000
20523
20524 #define BIOS_ROM_BASE 0xffe00000
20525diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20526index 7b0a55a..ad115bf 100644
20527--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20528+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20529@@ -49,7 +49,6 @@
20530 #define EFLAGS 144
20531 #define RSP 152
20532 #define SS 160
20533-#define ARGOFFSET R11
20534 #endif /* __ASSEMBLY__ */
20535
20536 /* top of stack page */
20537diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20538index cdb1b70..426434c 100644
20539--- a/arch/x86/kernel/Makefile
20540+++ b/arch/x86/kernel/Makefile
20541@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20542 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20543 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20544 obj-y += probe_roms.o
20545-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20546+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20547 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20548 obj-$(CONFIG_X86_64) += mcount_64.o
20549 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20550diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20551index 803b684..68c64f1 100644
20552--- a/arch/x86/kernel/acpi/boot.c
20553+++ b/arch/x86/kernel/acpi/boot.c
20554@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20555 * If your system is blacklisted here, but you find that acpi=force
20556 * works for you, please contact linux-acpi@vger.kernel.org
20557 */
20558-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20559+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20560 /*
20561 * Boxes that need ACPI disabled
20562 */
20563@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20564 };
20565
20566 /* second table for DMI checks that should run after early-quirks */
20567-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20568+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20569 /*
20570 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20571 * which includes some code which overrides all temperature
20572diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20573index d1daead..acd77e2 100644
20574--- a/arch/x86/kernel/acpi/sleep.c
20575+++ b/arch/x86/kernel/acpi/sleep.c
20576@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20577 #else /* CONFIG_64BIT */
20578 #ifdef CONFIG_SMP
20579 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20580+
20581+ pax_open_kernel();
20582 early_gdt_descr.address =
20583 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20584+ pax_close_kernel();
20585+
20586 initial_gs = per_cpu_offset(smp_processor_id());
20587 #endif
20588 initial_code = (unsigned long)wakeup_long64;
20589diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20590index 665c6b7..eae4d56 100644
20591--- a/arch/x86/kernel/acpi/wakeup_32.S
20592+++ b/arch/x86/kernel/acpi/wakeup_32.S
20593@@ -29,13 +29,11 @@ wakeup_pmode_return:
20594 # and restore the stack ... but you need gdt for this to work
20595 movl saved_context_esp, %esp
20596
20597- movl %cs:saved_magic, %eax
20598- cmpl $0x12345678, %eax
20599+ cmpl $0x12345678, saved_magic
20600 jne bogus_magic
20601
20602 # jump to place where we left off
20603- movl saved_eip, %eax
20604- jmp *%eax
20605+ jmp *(saved_eip)
20606
20607 bogus_magic:
20608 jmp bogus_magic
20609diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20610index 703130f..27a155d 100644
20611--- a/arch/x86/kernel/alternative.c
20612+++ b/arch/x86/kernel/alternative.c
20613@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20614 */
20615 for (a = start; a < end; a++) {
20616 instr = (u8 *)&a->instr_offset + a->instr_offset;
20617+
20618+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20619+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20620+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20621+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20622+#endif
20623+
20624 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20625 BUG_ON(a->replacementlen > a->instrlen);
20626 BUG_ON(a->instrlen > sizeof(insnbuf));
20627@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20628 add_nops(insnbuf + a->replacementlen,
20629 a->instrlen - a->replacementlen);
20630
20631+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20632+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20633+ instr = ktva_ktla(instr);
20634+#endif
20635+
20636 text_poke_early(instr, insnbuf, a->instrlen);
20637 }
20638 }
20639@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20640 for (poff = start; poff < end; poff++) {
20641 u8 *ptr = (u8 *)poff + *poff;
20642
20643+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20644+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20645+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20646+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20647+#endif
20648+
20649 if (!*poff || ptr < text || ptr >= text_end)
20650 continue;
20651 /* turn DS segment override prefix into lock prefix */
20652- if (*ptr == 0x3e)
20653+ if (*ktla_ktva(ptr) == 0x3e)
20654 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20655 }
20656 mutex_unlock(&text_mutex);
20657@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20658 for (poff = start; poff < end; poff++) {
20659 u8 *ptr = (u8 *)poff + *poff;
20660
20661+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20662+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20663+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20664+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20665+#endif
20666+
20667 if (!*poff || ptr < text || ptr >= text_end)
20668 continue;
20669 /* turn lock prefix into DS segment override prefix */
20670- if (*ptr == 0xf0)
20671+ if (*ktla_ktva(ptr) == 0xf0)
20672 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20673 }
20674 mutex_unlock(&text_mutex);
20675@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20676
20677 BUG_ON(p->len > MAX_PATCH_LEN);
20678 /* prep the buffer with the original instructions */
20679- memcpy(insnbuf, p->instr, p->len);
20680+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20681 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20682 (unsigned long)p->instr, p->len);
20683
20684@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20685 if (!uniproc_patched || num_possible_cpus() == 1)
20686 free_init_pages("SMP alternatives",
20687 (unsigned long)__smp_locks,
20688- (unsigned long)__smp_locks_end);
20689+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20690 #endif
20691
20692 apply_paravirt(__parainstructions, __parainstructions_end);
20693@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20694 * instructions. And on the local CPU you need to be protected again NMI or MCE
20695 * handlers seeing an inconsistent instruction while you patch.
20696 */
20697-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20698+void *__kprobes text_poke_early(void *addr, const void *opcode,
20699 size_t len)
20700 {
20701 unsigned long flags;
20702 local_irq_save(flags);
20703- memcpy(addr, opcode, len);
20704+
20705+ pax_open_kernel();
20706+ memcpy(ktla_ktva(addr), opcode, len);
20707 sync_core();
20708+ pax_close_kernel();
20709+
20710 local_irq_restore(flags);
20711 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20712 that causes hangs on some VIA CPUs. */
20713@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20714 */
20715 void *text_poke(void *addr, const void *opcode, size_t len)
20716 {
20717- unsigned long flags;
20718- char *vaddr;
20719+ unsigned char *vaddr = ktla_ktva(addr);
20720 struct page *pages[2];
20721- int i;
20722+ size_t i;
20723
20724 if (!core_kernel_text((unsigned long)addr)) {
20725- pages[0] = vmalloc_to_page(addr);
20726- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20727+ pages[0] = vmalloc_to_page(vaddr);
20728+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20729 } else {
20730- pages[0] = virt_to_page(addr);
20731+ pages[0] = virt_to_page(vaddr);
20732 WARN_ON(!PageReserved(pages[0]));
20733- pages[1] = virt_to_page(addr + PAGE_SIZE);
20734+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20735 }
20736 BUG_ON(!pages[0]);
20737- local_irq_save(flags);
20738- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20739- if (pages[1])
20740- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20741- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20742- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20743- clear_fixmap(FIX_TEXT_POKE0);
20744- if (pages[1])
20745- clear_fixmap(FIX_TEXT_POKE1);
20746- local_flush_tlb();
20747- sync_core();
20748- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20749- that causes hangs on some VIA CPUs. */
20750+ text_poke_early(addr, opcode, len);
20751 for (i = 0; i < len; i++)
20752- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20753- local_irq_restore(flags);
20754+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20755 return addr;
20756 }
20757
20758@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20759 if (likely(!bp_patching_in_progress))
20760 return 0;
20761
20762- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20763+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20764 return 0;
20765
20766 /* set up the specified breakpoint handler */
20767@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20768 */
20769 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20770 {
20771- unsigned char int3 = 0xcc;
20772+ const unsigned char int3 = 0xcc;
20773
20774 bp_int3_handler = handler;
20775 bp_int3_addr = (u8 *)addr + sizeof(int3);
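
In the alternative.c hunks above, text_poke() now funnels through text_poke_early() on the writable alias (ktla_ktva()) and then re-reads every byte, turning a silently failed patch into an immediate BUG. A runnable model of that write-then-verify shape (poke/poke_early are invented names):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* stands in for text_poke_early(): write through the writable alias */
static void poke_early(void *addr, const void *opcode, size_t len)
{
        memcpy(addr, opcode, len);
}

static void *poke(void *addr, const void *opcode, size_t len)
{
        unsigned char *vaddr = addr;    /* the kernel uses ktla_ktva(addr) */
        size_t i;

        poke_early(addr, opcode, len);
        for (i = 0; i < len; i++)       /* fail hard if the write didn't land */
                assert(vaddr[i] == ((const unsigned char *)opcode)[i]);
        return addr;
}

int main(void)
{
        unsigned char text[4] = { 0x90, 0x90, 0x90, 0x90 };  /* NOPs */
        const unsigned char lock = 0xf0;                     /* LOCK prefix */

        poke(text, &lock, 1);
        printf("first byte now %#x\n", text[0]);
        return 0;
}
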
20776diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20777index ad3639a..bd4253c 100644
20778--- a/arch/x86/kernel/apic/apic.c
20779+++ b/arch/x86/kernel/apic/apic.c
20780@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20781 /*
20782 * Debug level, exported for io_apic.c
20783 */
20784-unsigned int apic_verbosity;
20785+int apic_verbosity;
20786
20787 int pic_mode;
20788
20789@@ -1918,7 +1918,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20790 apic_write(APIC_ESR, 0);
20791 v = apic_read(APIC_ESR);
20792 ack_APIC_irq();
20793- atomic_inc(&irq_err_count);
20794+ atomic_inc_unchecked(&irq_err_count);
20795
20796 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20797 smp_processor_id(), v);
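
atomic_inc_unchecked() in the apic.c hunk is the PaX opt-out from reference-counter overflow protection, used because irq_err_count is a pure statistics counter that may legitimately wrap. A userspace sketch of the distinction; the saturating policy in inc_checked() is only one illustrative choice, not how PaX actually traps overflows:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* a "checked" increment: refuse to wrap past INT_MAX */
static void inc_checked(atomic_int *v)
{
        if (atomic_fetch_add(v, 1) == INT_MAX)
                atomic_store(v, INT_MAX);   /* saturate (the kernel would warn) */
}

/* atomic_inc_unchecked(): plain wrap, fine for statistics */
static void inc_unchecked(atomic_int *v)
{
        atomic_fetch_add(v, 1);             /* atomics wrap silently per C11 */
}

int main(void)
{
        atomic_int a = INT_MAX, b = INT_MAX;

        inc_checked(&a);
        inc_unchecked(&b);
        printf("checked: %d, unchecked: %d\n",
               atomic_load(&a), atomic_load(&b));
        return 0;
}
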
20798diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20799index de918c4..32eed23 100644
20800--- a/arch/x86/kernel/apic/apic_flat_64.c
20801+++ b/arch/x86/kernel/apic/apic_flat_64.c
20802@@ -154,7 +154,7 @@ static int flat_probe(void)
20803 return 1;
20804 }
20805
20806-static struct apic apic_flat = {
20807+static struct apic apic_flat __read_only = {
20808 .name = "flat",
20809 .probe = flat_probe,
20810 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20811@@ -260,7 +260,7 @@ static int physflat_probe(void)
20812 return 0;
20813 }
20814
20815-static struct apic apic_physflat = {
20816+static struct apic apic_physflat __read_only = {
20817
20818 .name = "physical flat",
20819 .probe = physflat_probe,
20820diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20821index b205cdb..d8503ff 100644
20822--- a/arch/x86/kernel/apic/apic_noop.c
20823+++ b/arch/x86/kernel/apic/apic_noop.c
20824@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20825 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20826 }
20827
20828-struct apic apic_noop = {
20829+struct apic apic_noop __read_only = {
20830 .name = "noop",
20831 .probe = noop_probe,
20832 .acpi_madt_oem_check = NULL,
20833diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20834index c4a8d63..fe893ac 100644
20835--- a/arch/x86/kernel/apic/bigsmp_32.c
20836+++ b/arch/x86/kernel/apic/bigsmp_32.c
20837@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20838 return dmi_bigsmp;
20839 }
20840
20841-static struct apic apic_bigsmp = {
20842+static struct apic apic_bigsmp __read_only = {
20843
20844 .name = "bigsmp",
20845 .probe = probe_bigsmp,
20846diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20847index f4dc246..fbab133 100644
20848--- a/arch/x86/kernel/apic/io_apic.c
20849+++ b/arch/x86/kernel/apic/io_apic.c
20850@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20851 return ret;
20852 }
20853
20854-atomic_t irq_mis_count;
20855+atomic_unchecked_t irq_mis_count;
20856
20857 #ifdef CONFIG_GENERIC_PENDING_IRQ
20858 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20859@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20860 * at the cpu.
20861 */
20862 if (!(v & (1 << (i & 0x1f)))) {
20863- atomic_inc(&irq_mis_count);
20864+ atomic_inc_unchecked(&irq_mis_count);
20865
20866 eoi_ioapic_irq(irq, cfg);
20867 }
20868@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20869 ioapic_irqd_unmask(data, cfg, masked);
20870 }
20871
20872-static struct irq_chip ioapic_chip __read_mostly = {
20873+static struct irq_chip ioapic_chip = {
20874 .name = "IO-APIC",
20875 .irq_startup = startup_ioapic_irq,
20876 .irq_mask = mask_ioapic_irq,
20877@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20878 ack_APIC_irq();
20879 }
20880
20881-static struct irq_chip lapic_chip __read_mostly = {
20882+static struct irq_chip lapic_chip = {
20883 .name = "local-APIC",
20884 .irq_mask = mask_lapic_irq,
20885 .irq_unmask = unmask_lapic_irq,
20886diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20887index bda4886..f9c7195 100644
20888--- a/arch/x86/kernel/apic/probe_32.c
20889+++ b/arch/x86/kernel/apic/probe_32.c
20890@@ -72,7 +72,7 @@ static int probe_default(void)
20891 return 1;
20892 }
20893
20894-static struct apic apic_default = {
20895+static struct apic apic_default __read_only = {
20896
20897 .name = "default",
20898 .probe = probe_default,
20899diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20900index 6cedd79..023ff8e 100644
20901--- a/arch/x86/kernel/apic/vector.c
20902+++ b/arch/x86/kernel/apic/vector.c
20903@@ -21,7 +21,7 @@
20904
20905 static DEFINE_RAW_SPINLOCK(vector_lock);
20906
20907-void lock_vector_lock(void)
20908+void lock_vector_lock(void) __acquires(vector_lock)
20909 {
20910 /* Used to the online set of cpus does not change
20911 * during assign_irq_vector.
20912@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20913 raw_spin_lock(&vector_lock);
20914 }
20915
20916-void unlock_vector_lock(void)
20917+void unlock_vector_lock(void) __releases(vector_lock)
20918 {
20919 raw_spin_unlock(&vector_lock);
20920 }
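
The `__acquires`/`__releases` annotations added to lock_vector_lock()/unlock_vector_lock() above feed sparse's lock-context checking and compile away otherwise. A self-contained illustration with pthreads standing in for the raw spinlock; the empty macros mirror the non-checker kernel definitions:

#include <pthread.h>
#include <stdio.h>

#define __acquires(x)   /* sparse: function returns holding lock x */
#define __releases(x)   /* sparse: function returns having dropped x */

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_vector_lock(void)   __acquires(vector_lock);
static void unlock_vector_lock(void) __releases(vector_lock);

static void lock_vector_lock(void)   { pthread_mutex_lock(&vector_lock); }
static void unlock_vector_lock(void) { pthread_mutex_unlock(&vector_lock); }

int main(void)
{
        lock_vector_lock();
        puts("critical section");
        unlock_vector_lock();
        return 0;
}
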
20921diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20922index e658f21..b695a1a 100644
20923--- a/arch/x86/kernel/apic/x2apic_cluster.c
20924+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20925@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20926 return notifier_from_errno(err);
20927 }
20928
20929-static struct notifier_block __refdata x2apic_cpu_notifier = {
20930+static struct notifier_block x2apic_cpu_notifier = {
20931 .notifier_call = update_clusterinfo,
20932 };
20933
20934@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20935 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20936 }
20937
20938-static struct apic apic_x2apic_cluster = {
20939+static struct apic apic_x2apic_cluster __read_only = {
20940
20941 .name = "cluster x2apic",
20942 .probe = x2apic_cluster_probe,
20943diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20944index 6fae733..5ca17af 100644
20945--- a/arch/x86/kernel/apic/x2apic_phys.c
20946+++ b/arch/x86/kernel/apic/x2apic_phys.c
20947@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20948 return apic == &apic_x2apic_phys;
20949 }
20950
20951-static struct apic apic_x2apic_phys = {
20952+static struct apic apic_x2apic_phys __read_only = {
20953
20954 .name = "physical x2apic",
20955 .probe = x2apic_phys_probe,
20956diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20957index 8e9dcfd..c61b3e4 100644
20958--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20959+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20960@@ -348,7 +348,7 @@ static int uv_probe(void)
20961 return apic == &apic_x2apic_uv_x;
20962 }
20963
20964-static struct apic __refdata apic_x2apic_uv_x = {
20965+static struct apic apic_x2apic_uv_x __read_only = {
20966
20967 .name = "UV large system",
20968 .probe = uv_probe,
20969diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20970index 927ec92..de68f32 100644
20971--- a/arch/x86/kernel/apm_32.c
20972+++ b/arch/x86/kernel/apm_32.c
20973@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20974 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20975 * even though they are called in protected mode.
20976 */
20977-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20978+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20979 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20980
20981 static const char driver_version[] = "1.16ac"; /* no spaces */
20982@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20983 BUG_ON(cpu != 0);
20984 gdt = get_cpu_gdt_table(cpu);
20985 save_desc_40 = gdt[0x40 / 8];
20986+
20987+ pax_open_kernel();
20988 gdt[0x40 / 8] = bad_bios_desc;
20989+ pax_close_kernel();
20990
20991 apm_irq_save(flags);
20992 APM_DO_SAVE_SEGS;
20993@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
20994 &call->esi);
20995 APM_DO_RESTORE_SEGS;
20996 apm_irq_restore(flags);
20997+
20998+ pax_open_kernel();
20999 gdt[0x40 / 8] = save_desc_40;
21000+ pax_close_kernel();
21001+
21002 put_cpu();
21003
21004 return call->eax & 0xff;
21005@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21006 BUG_ON(cpu != 0);
21007 gdt = get_cpu_gdt_table(cpu);
21008 save_desc_40 = gdt[0x40 / 8];
21009+
21010+ pax_open_kernel();
21011 gdt[0x40 / 8] = bad_bios_desc;
21012+ pax_close_kernel();
21013
21014 apm_irq_save(flags);
21015 APM_DO_SAVE_SEGS;
21016@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21017 &call->eax);
21018 APM_DO_RESTORE_SEGS;
21019 apm_irq_restore(flags);
21020+
21021+ pax_open_kernel();
21022 gdt[0x40 / 8] = save_desc_40;
21023+ pax_close_kernel();
21024+
21025 put_cpu();
21026 return error;
21027 }
21028@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21029 return 0;
21030 }
21031
21032-static struct dmi_system_id __initdata apm_dmi_table[] = {
21033+static const struct dmi_system_id __initconst apm_dmi_table[] = {
21034 {
21035 print_if_true,
21036 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21037@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21038 * code to that CPU.
21039 */
21040 gdt = get_cpu_gdt_table(0);
21041+
21042+ pax_open_kernel();
21043 set_desc_base(&gdt[APM_CS >> 3],
21044 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21045 set_desc_base(&gdt[APM_CS_16 >> 3],
21046 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21047 set_desc_base(&gdt[APM_DS >> 3],
21048 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21049+ pax_close_kernel();
21050
21051 proc_create("apm", 0, NULL, &apm_file_ops);
21052
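
Each gdt[0x40 / 8] store in the apm_32.c paths above is now wrapped in pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection on otherwise read-only kernel structures. As an analogy only — the real mechanism toggles CR0.WP or PTEs, not mprotect() — the same discipline applied to a page in userspace:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);

        /* this page plays the role of the normally read-only GDT */
        unsigned char *gdt = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (gdt == MAP_FAILED)
                return 1;
        memset(gdt, 0, pagesz);
        mprotect(gdt, pagesz, PROT_READ);               /* steady state: RO */

        mprotect(gdt, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        gdt[0x40] = 0x93;        /* install the bad_bios_desc access byte */
        mprotect(gdt, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("slot 0x40 byte: %#x\n", gdt[0x40]);
        return 0;
}

Keeping the window as small as possible is the point: the descriptor swap is the only store that happens while the mapping is writable.
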
21053diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21054index 9f6b934..cf5ffb3 100644
21055--- a/arch/x86/kernel/asm-offsets.c
21056+++ b/arch/x86/kernel/asm-offsets.c
21057@@ -32,6 +32,8 @@ void common(void) {
21058 OFFSET(TI_flags, thread_info, flags);
21059 OFFSET(TI_status, thread_info, status);
21060 OFFSET(TI_addr_limit, thread_info, addr_limit);
21061+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21062+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21063
21064 BLANK();
21065 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21066@@ -52,8 +54,26 @@ void common(void) {
21067 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21068 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21069 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21070+
21071+#ifdef CONFIG_PAX_KERNEXEC
21072+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21073 #endif
21074
21075+#ifdef CONFIG_PAX_MEMORY_UDEREF
21076+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21077+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21078+#ifdef CONFIG_X86_64
21079+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21080+#endif
21081+#endif
21082+
21083+#endif
21084+
21085+ BLANK();
21086+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21087+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21088+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21089+
21090 #ifdef CONFIG_XEN
21091 BLANK();
21092 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
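
asm-offsets.c exists to export C structure offsets (here the new thread_info.lowest_stack and the thread.sp0 distance) to assembly: the build compiles this file and scrapes special asm markers into generated #defines. The same idea in directly runnable form, with printf standing in for the marker emission and a cut-down struct as an assumption:

#include <stddef.h>
#include <stdio.h>

/* a cut-down thread_info with the field the patch adds */
struct thread_info {
        unsigned long flags;
        unsigned long addr_limit;
        unsigned long lowest_stack;
};

#define DEFINE(sym, val) \
        printf("#define %-20s %lu\n", #sym, (unsigned long)(val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

int main(void)
{
        OFFSET(TI_flags, thread_info, flags);
        OFFSET(TI_addr_limit, thread_info, addr_limit);
        OFFSET(TI_lowest_stack, thread_info, lowest_stack); /* new PaX field */
        return 0;
}
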
21093diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21094index fdcbb4d..036dd93 100644
21095--- a/arch/x86/kernel/asm-offsets_64.c
21096+++ b/arch/x86/kernel/asm-offsets_64.c
21097@@ -80,6 +80,7 @@ int main(void)
21098 BLANK();
21099 #undef ENTRY
21100
21101+ DEFINE(TSS_size, sizeof(struct tss_struct));
21102 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21103 BLANK();
21104
21105diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21106index 80091ae..0c5184f 100644
21107--- a/arch/x86/kernel/cpu/Makefile
21108+++ b/arch/x86/kernel/cpu/Makefile
21109@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21110 CFLAGS_REMOVE_perf_event.o = -pg
21111 endif
21112
21113-# Make sure load_percpu_segment has no stackprotector
21114-nostackp := $(call cc-option, -fno-stack-protector)
21115-CFLAGS_common.o := $(nostackp)
21116-
21117 obj-y := intel_cacheinfo.o scattered.o topology.o
21118 obj-y += common.o
21119 obj-y += rdrand.o
21120diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21121index a220239..607fc38 100644
21122--- a/arch/x86/kernel/cpu/amd.c
21123+++ b/arch/x86/kernel/cpu/amd.c
21124@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21125 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21126 {
21127 /* AMD errata T13 (order #21922) */
21128- if ((c->x86 == 6)) {
21129+ if (c->x86 == 6) {
21130 /* Duron Rev A0 */
21131 if (c->x86_model == 3 && c->x86_mask == 0)
21132 size = 64;
21133diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21134index 2346c95..c061472 100644
21135--- a/arch/x86/kernel/cpu/common.c
21136+++ b/arch/x86/kernel/cpu/common.c
21137@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21138
21139 static const struct cpu_dev *this_cpu = &default_cpu;
21140
21141-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21142-#ifdef CONFIG_X86_64
21143- /*
21144- * We need valid kernel segments for data and code in long mode too
21145- * IRET will check the segment types kkeil 2000/10/28
21146- * Also sysret mandates a special GDT layout
21147- *
21148- * TLS descriptors are currently at a different place compared to i386.
21149- * Hopefully nobody expects them at a fixed place (Wine?)
21150- */
21151- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21152- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21153- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21154- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21155- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21156- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21157-#else
21158- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21159- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21160- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21161- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21162- /*
21163- * Segments used for calling PnP BIOS have byte granularity.
21164- * They code segments and data segments have fixed 64k limits,
21165- * the transfer segment sizes are set at run time.
21166- */
21167- /* 32-bit code */
21168- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21169- /* 16-bit code */
21170- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21171- /* 16-bit data */
21172- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21173- /* 16-bit data */
21174- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21175- /* 16-bit data */
21176- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21177- /*
21178- * The APM segments have byte granularity and their bases
21179- * are set at run time. All have 64k limits.
21180- */
21181- /* 32-bit code */
21182- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21183- /* 16-bit code */
21184- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21185- /* data */
21186- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21187-
21188- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21189- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21190- GDT_STACK_CANARY_INIT
21191-#endif
21192-} };
21193-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21194-
21195 static int __init x86_xsave_setup(char *s)
21196 {
21197 if (strlen(s))
21198@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21199 }
21200 }
21201
21202+#ifdef CONFIG_X86_64
21203+static __init int setup_disable_pcid(char *arg)
21204+{
21205+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21206+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21207+
21208+#ifdef CONFIG_PAX_MEMORY_UDEREF
21209+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21210+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21211+#endif
21212+
21213+ return 1;
21214+}
21215+__setup("nopcid", setup_disable_pcid);
21216+
21217+static void setup_pcid(struct cpuinfo_x86 *c)
21218+{
21219+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21220+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21221+
21222+#ifdef CONFIG_PAX_MEMORY_UDEREF
21223+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21224+ pax_open_kernel();
21225+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21226+ pax_close_kernel();
21227+ printk("PAX: slow and weak UDEREF enabled\n");
21228+ } else
21229+ printk("PAX: UDEREF disabled\n");
21230+#endif
21231+
21232+ return;
21233+ }
21234+
21235+ printk("PAX: PCID detected\n");
21236+ cr4_set_bits(X86_CR4_PCIDE);
21237+
21238+#ifdef CONFIG_PAX_MEMORY_UDEREF
21239+ pax_open_kernel();
21240+ clone_pgd_mask = ~(pgdval_t)0UL;
21241+ pax_close_kernel();
21242+ if (pax_user_shadow_base)
21243+ printk("PAX: weak UDEREF enabled\n");
21244+ else {
21245+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21246+ printk("PAX: strong UDEREF enabled\n");
21247+ }
21248+#endif
21249+
21250+ if (cpu_has(c, X86_FEATURE_INVPCID))
21251+ printk("PAX: INVPCID detected\n");
21252+}
21253+#endif
21254+
21255 /*
21256 * Some CPU features depend on higher CPUID levels, which may not always
21257 * be available due to CPUID level capping or broken virtualization
21258@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21259 {
21260 struct desc_ptr gdt_descr;
21261
21262- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21263+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21264 gdt_descr.size = GDT_SIZE - 1;
21265 load_gdt(&gdt_descr);
21266 /* Reload the per-cpu base */
21267@@ -897,6 +896,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21268 setup_smep(c);
21269 setup_smap(c);
21270
21271+#ifdef CONFIG_X86_32
21272+#ifdef CONFIG_PAX_PAGEEXEC
21273+ if (!(__supported_pte_mask & _PAGE_NX))
21274+ clear_cpu_cap(c, X86_FEATURE_PSE);
21275+#endif
21276+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21277+ clear_cpu_cap(c, X86_FEATURE_SEP);
21278+#endif
21279+#endif
21280+
21281+#ifdef CONFIG_X86_64
21282+ setup_pcid(c);
21283+#endif
21284+
21285 /*
21286 * The vendor-specific functions might have changed features.
21287 * Now we do "generic changes."
21288@@ -979,7 +992,7 @@ static void syscall32_cpu_init(void)
21289 void enable_sep_cpu(void)
21290 {
21291 int cpu = get_cpu();
21292- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21293+ struct tss_struct *tss = init_tss + cpu;
21294
21295 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21296 put_cpu();
21297@@ -1117,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
21298 }
21299 __setup("clearcpuid=", setup_disablecpuid);
21300
21301+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21302+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21303+
21304 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21305- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21306+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21307 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21308
21309 #ifdef CONFIG_X86_64
21310-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21311-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21312- (unsigned long) debug_idt_table };
21313+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21314+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21315
21316 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21317 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21318@@ -1307,7 +1322,7 @@ void cpu_init(void)
21319 */
21320 load_ucode_ap();
21321
21322- t = &per_cpu(init_tss, cpu);
21323+ t = init_tss + cpu;
21324 oist = &per_cpu(orig_ist, cpu);
21325
21326 #ifdef CONFIG_NUMA
21327@@ -1339,7 +1354,6 @@ void cpu_init(void)
21328 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21329 barrier();
21330
21331- x86_configure_nx();
21332 x2apic_setup();
21333
21334 /*
21335@@ -1391,7 +1405,7 @@ void cpu_init(void)
21336 {
21337 int cpu = smp_processor_id();
21338 struct task_struct *curr = current;
21339- struct tss_struct *t = &per_cpu(init_tss, cpu);
21340+ struct tss_struct *t = init_tss + cpu;
21341 struct thread_struct *thread = &curr->thread;
21342
21343 wait_for_master_cpu(cpu);
21344diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21345index 6596433..1ad6eaf 100644
21346--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21347+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21348@@ -1024,6 +1024,22 @@ static struct attribute *default_attrs[] = {
21349 };
21350
21351 #ifdef CONFIG_AMD_NB
21352+static struct attribute *default_attrs_amd_nb[] = {
21353+ &type.attr,
21354+ &level.attr,
21355+ &coherency_line_size.attr,
21356+ &physical_line_partition.attr,
21357+ &ways_of_associativity.attr,
21358+ &number_of_sets.attr,
21359+ &size.attr,
21360+ &shared_cpu_map.attr,
21361+ &shared_cpu_list.attr,
21362+ NULL,
21363+ NULL,
21364+ NULL,
21365+ NULL
21366+};
21367+
21368 static struct attribute **amd_l3_attrs(void)
21369 {
21370 static struct attribute **attrs;
21371@@ -1034,18 +1050,7 @@ static struct attribute **amd_l3_attrs(void)
21372
21373 n = ARRAY_SIZE(default_attrs);
21374
21375- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21376- n += 2;
21377-
21378- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21379- n += 1;
21380-
21381- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21382- if (attrs == NULL)
21383- return attrs = default_attrs;
21384-
21385- for (n = 0; default_attrs[n]; n++)
21386- attrs[n] = default_attrs[n];
21387+ attrs = default_attrs_amd_nb;
21388
21389 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21390 attrs[n++] = &cache_disable_0.attr;
21391@@ -1096,6 +1101,13 @@ static struct kobj_type ktype_cache = {
21392 .default_attrs = default_attrs,
21393 };
21394
21395+#ifdef CONFIG_AMD_NB
21396+static struct kobj_type ktype_cache_amd_nb = {
21397+ .sysfs_ops = &sysfs_ops,
21398+ .default_attrs = default_attrs_amd_nb,
21399+};
21400+#endif
21401+
21402 static struct kobj_type ktype_percpu_entry = {
21403 .sysfs_ops = &sysfs_ops,
21404 };
21405@@ -1161,20 +1173,26 @@ static int cache_add_dev(struct device *dev)
21406 return retval;
21407 }
21408
21409+#ifdef CONFIG_AMD_NB
21410+ amd_l3_attrs();
21411+#endif
21412+
21413 for (i = 0; i < num_cache_leaves; i++) {
21414+ struct kobj_type *ktype;
21415+
21416 this_object = INDEX_KOBJECT_PTR(cpu, i);
21417 this_object->cpu = cpu;
21418 this_object->index = i;
21419
21420 this_leaf = CPUID4_INFO_IDX(cpu, i);
21421
21422- ktype_cache.default_attrs = default_attrs;
21423+ ktype = &ktype_cache;
21424 #ifdef CONFIG_AMD_NB
21425 if (this_leaf->base.nb)
21426- ktype_cache.default_attrs = amd_l3_attrs();
21427+ ktype = &ktype_cache_amd_nb;
21428 #endif
21429 retval = kobject_init_and_add(&(this_object->kobj),
21430- &ktype_cache,
21431+ ktype,
21432 per_cpu(ici_cache_kobject, cpu),
21433 "index%1lu", i);
21434 if (unlikely(retval)) {
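
The intel_cacheinfo.c hunks above drop the kzalloc()'d attribute array in favor of a static default_attrs_amd_nb[] padded with NULL slots, filled in only when the AMD north-bridge features exist — no allocation, and the table can live in a fixed kobj_type. The shape of that pattern, reduced to strings with the feature flags hard-coded for the demo:

#include <stdio.h>

/* always-present attrs plus NULL slots reserved for the optional ones,
   mirroring the padded default_attrs_amd_nb[] the patch introduces */
static const char *amd_nb_attrs[] = {
        "type", "level", "size",
        NULL, NULL, NULL, NULL          /* room for the optional attrs */
};

int main(void)
{
        int n = 3;
        int has_index_disable = 1, has_partitioning = 1;

        if (has_index_disable) {
                amd_nb_attrs[n++] = "cache_disable_0";
                amd_nb_attrs[n++] = "cache_disable_1";
        }
        if (has_partitioning)
                amd_nb_attrs[n++] = "subcaches";

        for (int i = 0; amd_nb_attrs[i]; i++)
                puts(amd_nb_attrs[i]);
        return 0;
}
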
21435diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21436index 3c036cb..3b5677d 100644
21437--- a/arch/x86/kernel/cpu/mcheck/mce.c
21438+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21439@@ -47,6 +47,7 @@
21440 #include <asm/tlbflush.h>
21441 #include <asm/mce.h>
21442 #include <asm/msr.h>
21443+#include <asm/local.h>
21444
21445 #include "mce-internal.h"
21446
21447@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21448 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21449 m->cs, m->ip);
21450
21451- if (m->cs == __KERNEL_CS)
21452+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21453 print_symbol("{%s}", m->ip);
21454 pr_cont("\n");
21455 }
21456@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21457
21458 #define PANIC_TIMEOUT 5 /* 5 seconds */
21459
21460-static atomic_t mce_panicked;
21461+static atomic_unchecked_t mce_panicked;
21462
21463 static int fake_panic;
21464-static atomic_t mce_fake_panicked;
21465+static atomic_unchecked_t mce_fake_panicked;
21466
21467 /* Panic in progress. Enable interrupts and wait for final IPI */
21468 static void wait_for_panic(void)
21469@@ -318,7 +319,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21470 /*
21471 * Make sure only one CPU runs in machine check panic
21472 */
21473- if (atomic_inc_return(&mce_panicked) > 1)
21474+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21475 wait_for_panic();
21476 barrier();
21477
21478@@ -326,7 +327,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21479 console_verbose();
21480 } else {
21481 /* Don't log too much for fake panic */
21482- if (atomic_inc_return(&mce_fake_panicked) > 1)
21483+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21484 return;
21485 }
21486 /* First print corrected ones that are still unlogged */
21487@@ -365,7 +366,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21488 if (!fake_panic) {
21489 if (panic_timeout == 0)
21490 panic_timeout = mca_cfg.panic_timeout;
21491- panic(msg);
21492+ panic("%s", msg);
21493 } else
21494 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21495 }
21496@@ -743,7 +744,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21497 * might have been modified by someone else.
21498 */
21499 rmb();
21500- if (atomic_read(&mce_panicked))
21501+ if (atomic_read_unchecked(&mce_panicked))
21502 wait_for_panic();
21503 if (!mca_cfg.monarch_timeout)
21504 goto out;
21505@@ -1669,7 +1670,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21506 }
21507
21508 /* Call the installed machine check handler for this CPU setup. */
21509-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21510+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21511 unexpected_machine_check;
21512
21513 /*
21514@@ -1692,7 +1693,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21515 return;
21516 }
21517
21518+ pax_open_kernel();
21519 machine_check_vector = do_machine_check;
21520+ pax_close_kernel();
21521
21522 __mcheck_cpu_init_generic();
21523 __mcheck_cpu_init_vendor(c);
21524@@ -1706,7 +1709,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21525 */
21526
21527 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21528-static int mce_chrdev_open_count; /* #times opened */
21529+static local_t mce_chrdev_open_count; /* #times opened */
21530 static int mce_chrdev_open_exclu; /* already open exclusive? */
21531
21532 static int mce_chrdev_open(struct inode *inode, struct file *file)
21533@@ -1714,7 +1717,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21534 spin_lock(&mce_chrdev_state_lock);
21535
21536 if (mce_chrdev_open_exclu ||
21537- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21538+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21539 spin_unlock(&mce_chrdev_state_lock);
21540
21541 return -EBUSY;
21542@@ -1722,7 +1725,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21543
21544 if (file->f_flags & O_EXCL)
21545 mce_chrdev_open_exclu = 1;
21546- mce_chrdev_open_count++;
21547+ local_inc(&mce_chrdev_open_count);
21548
21549 spin_unlock(&mce_chrdev_state_lock);
21550
21551@@ -1733,7 +1736,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21552 {
21553 spin_lock(&mce_chrdev_state_lock);
21554
21555- mce_chrdev_open_count--;
21556+ local_dec(&mce_chrdev_open_count);
21557 mce_chrdev_open_exclu = 0;
21558
21559 spin_unlock(&mce_chrdev_state_lock);
21560@@ -2408,7 +2411,7 @@ static __init void mce_init_banks(void)
21561
21562 for (i = 0; i < mca_cfg.banks; i++) {
21563 struct mce_bank *b = &mce_banks[i];
21564- struct device_attribute *a = &b->attr;
21565+ device_attribute_no_const *a = &b->attr;
21566
21567 sysfs_attr_init(&a->attr);
21568 a->attr.name = b->attrname;
21569@@ -2515,7 +2518,7 @@ struct dentry *mce_get_debugfs_dir(void)
21570 static void mce_reset(void)
21571 {
21572 cpu_missing = 0;
21573- atomic_set(&mce_fake_panicked, 0);
21574+ atomic_set_unchecked(&mce_fake_panicked, 0);
21575 atomic_set(&mce_executing, 0);
21576 atomic_set(&mce_callin, 0);
21577 atomic_set(&global_nwo, 0);
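
Two counter conversions recur in the mce.c hunk above. mce_panicked and mce_fake_panicked become atomic_unchecked_t, the PaX type that exempts a deliberately incrementing counter from the REFCOUNT overflow trap, and the chrdev open count moves from a plain int to a local_t, making it a proper atomic cell instead of bare integer arithmetic. local_t is mainline (<asm/local.h>); a sketch of the open-count idiom as the patched code uses it:

#include <linux/spinlock.h>
#include <asm/local.h>

static DEFINE_SPINLOCK(example_state_lock);
static local_t example_open_count;

static int example_open(void)
{
	spin_lock(&example_state_lock);
	local_inc(&example_open_count);	/* was: example_open_count++ */
	spin_unlock(&example_state_lock);
	return 0;
}

static void example_release(void)
{
	spin_lock(&example_state_lock);
	local_dec(&example_open_count);
	spin_unlock(&example_state_lock);
}
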
21578diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21579index 737b0ad..09ec66e 100644
21580--- a/arch/x86/kernel/cpu/mcheck/p5.c
21581+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21582@@ -12,6 +12,7 @@
21583 #include <asm/tlbflush.h>
21584 #include <asm/mce.h>
21585 #include <asm/msr.h>
21586+#include <asm/pgtable.h>
21587
21588 /* By default disabled */
21589 int mce_p5_enabled __read_mostly;
21590@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21591 if (!cpu_has(c, X86_FEATURE_MCE))
21592 return;
21593
21594+ pax_open_kernel();
21595 machine_check_vector = pentium_machine_check;
21596+ pax_close_kernel();
21597 /* Make sure the vector pointer is visible before we enable MCEs: */
21598 wmb();
21599
21600diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21601index 44f1382..315b292 100644
21602--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21603+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21604@@ -11,6 +11,7 @@
21605 #include <asm/tlbflush.h>
21606 #include <asm/mce.h>
21607 #include <asm/msr.h>
21608+#include <asm/pgtable.h>
21609
21610 /* Machine check handler for WinChip C6: */
21611 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21612@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21613 {
21614 u32 lo, hi;
21615
21616+ pax_open_kernel();
21617 machine_check_vector = winchip_machine_check;
21618+ pax_close_kernel();
21619 /* Make sure the vector pointer is visible before we enable MCEs: */
21620 wmb();
21621
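
p5.c and winchip.c repeat the pattern introduced for mce.c above: once machine_check_vector is declared __read_only, the one-time assignment at init must happen inside a pax_open_kernel()/pax_close_kernel() window, which briefly lifts KERNEXEC's write protection. A sketch of the idiom, assuming the PaX-provided helpers and attribute (none of this exists in a vanilla tree):

static void example_default(void) { }

static void (*example_vector)(void) __read_only = example_default;

static void __init example_install(void (*handler)(void))
{
	pax_open_kernel();	/* make .data..read_only temporarily writable */
	example_vector = handler;
	pax_close_kernel();	/* restore write protection */
}
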
21622diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21623index 36a8361..e7058c2 100644
21624--- a/arch/x86/kernel/cpu/microcode/core.c
21625+++ b/arch/x86/kernel/cpu/microcode/core.c
21626@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21627 return NOTIFY_OK;
21628 }
21629
21630-static struct notifier_block __refdata mc_cpu_notifier = {
21631+static struct notifier_block mc_cpu_notifier = {
21632 .notifier_call = mc_cpu_callback,
21633 };
21634
21635diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21636index 746e7fd..8dc677e 100644
21637--- a/arch/x86/kernel/cpu/microcode/intel.c
21638+++ b/arch/x86/kernel/cpu/microcode/intel.c
21639@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21640
21641 static int get_ucode_user(void *to, const void *from, size_t n)
21642 {
21643- return copy_from_user(to, from, n);
21644+ return copy_from_user(to, (const void __force_user *)from, n);
21645 }
21646
21647 static enum ucode_state
21648 request_microcode_user(int cpu, const void __user *buf, size_t size)
21649 {
21650- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21651+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21652 }
21653
21654 static void microcode_fini_cpu(int cpu)
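
generic_load_microcode() uses one code path for kernel and user sources, distinguished only by the getter callback, so the hunk above re-annotates the pointers with __force_user/__force_kernel to keep the UDEREF accessors and sparse honest about which address space each pointer belongs to. Mainline spells the same idea with __force and __user; a sketch:

#include <linux/uaccess.h>

/* 'from' really points into userspace here despite its plain type;
 * the cast re-attaches the address-space annotation for sparse. */
static int get_ucode_user_example(void *to, const void *from, size_t n)
{
	return copy_from_user(to, (const void __user __force *)from, n);
}
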
21655diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21656index ea5f363..cb0e905 100644
21657--- a/arch/x86/kernel/cpu/mtrr/main.c
21658+++ b/arch/x86/kernel/cpu/mtrr/main.c
21659@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21660 u64 size_or_mask, size_and_mask;
21661 static bool mtrr_aps_delayed_init;
21662
21663-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21664+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21665
21666 const struct mtrr_ops *mtrr_if;
21667
21668diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21669index df5e41f..816c719 100644
21670--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21671+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21672@@ -25,7 +25,7 @@ struct mtrr_ops {
21673 int (*validate_add_page)(unsigned long base, unsigned long size,
21674 unsigned int type);
21675 int (*have_wrcomb)(void);
21676-};
21677+} __do_const;
21678
21679 extern int generic_get_free_region(unsigned long base, unsigned long size,
21680 int replace_reg);
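
struct mtrr_ops picks up __do_const, the constify-plugin attribute that forces every instance of an ops table into read-only memory, and the mtrr_ops[] pointer array is marked __read_only for the same reason: function-pointer tables are a favorite overwrite target. The effect is what plain const gives you by hand in mainline; a sketch with illustrative names:

struct example_ops {
	int (*probe)(void);
	void (*enable)(void);
};

static int  example_probe(void)  { return 0; }
static void example_enable(void) { }

/* 'const' places the instance in .rodata, so the pointers are immutable */
static const struct example_ops example_generic_ops = {
	.probe  = example_probe,
	.enable = example_enable,
};
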
21681diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21682index b71a7f8..534af0e 100644
21683--- a/arch/x86/kernel/cpu/perf_event.c
21684+++ b/arch/x86/kernel/cpu/perf_event.c
21685@@ -1376,7 +1376,7 @@ static void __init pmu_check_apic(void)
21686
21687 }
21688
21689-static struct attribute_group x86_pmu_format_group = {
21690+static attribute_group_no_const x86_pmu_format_group = {
21691 .name = "format",
21692 .attrs = NULL,
21693 };
21694@@ -1475,7 +1475,7 @@ static struct attribute *events_attr[] = {
21695 NULL,
21696 };
21697
21698-static struct attribute_group x86_pmu_events_group = {
21699+static attribute_group_no_const x86_pmu_events_group = {
21700 .name = "events",
21701 .attrs = events_attr,
21702 };
21703@@ -2037,7 +2037,7 @@ static unsigned long get_segment_base(unsigned int segment)
21704 if (idx > GDT_ENTRIES)
21705 return 0;
21706
21707- desc = raw_cpu_ptr(gdt_page.gdt);
21708+ desc = get_cpu_gdt_table(smp_processor_id());
21709 }
21710
21711 return get_desc_base(desc + idx);
21712@@ -2127,7 +2127,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21713 break;
21714
21715 perf_callchain_store(entry, frame.return_address);
21716- fp = frame.next_frame;
21717+ fp = (const void __force_user *)frame.next_frame;
21718 }
21719 }
21720
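
In the perf_event.c hunk above, get_segment_base() stops dereferencing raw_cpu_ptr(gdt_page.gdt) and goes through get_cpu_gdt_table() instead, since under PaX the per-CPU GDTs live in the read-only cpu_gdt_table array rather than in the percpu gdt_page. A sketch of the lookup as the patched code performs it, using the <asm/desc.h> helpers (the real code bounds-checks idx against GDT_ENTRIES first):

#include <linux/smp.h>
#include <asm/desc.h>

static unsigned long example_segment_base(unsigned int segment)
{
	unsigned int idx = segment >> 3;	/* selector -> descriptor index */
	struct desc_struct *desc = get_cpu_gdt_table(smp_processor_id());

	return get_desc_base(desc + idx);
}
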
21721diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21722index 97242a9..cf9c30e 100644
21723--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21724+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21725@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21726 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21727 {
21728 struct attribute **attrs;
21729- struct attribute_group *attr_group;
21730+ attribute_group_no_const *attr_group;
21731 int i = 0, j;
21732
21733 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21734diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21735index 2589906..1ca1000 100644
21736--- a/arch/x86/kernel/cpu/perf_event_intel.c
21737+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21738@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21739 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21740
21741 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21742- u64 capabilities;
21743+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21744
21745- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21746- x86_pmu.intel_cap.capabilities = capabilities;
21747+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21748+ x86_pmu.intel_cap.capabilities = capabilities;
21749 }
21750
21751 intel_ds_init();
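
Besides constifying the format/events attribute groups, the perf_event_intel.c hunk replaces a raw rdmsrl() of MSR_IA32_PERF_CAPABILITIES with rdmsrl_safe(), which catches the #GP raised when the MSR is absent (common under hypervisors) and reports failure instead of oopsing, letting the code keep the previously cached capabilities. A sketch of the idiom:

#include <asm/msr.h>

static u64 cached_caps;

static void example_read_caps(void)
{
	u64 old = cached_caps;

	/* rdmsrl_safe() returns non-zero if the read faulted */
	if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &cached_caps))
		cached_caps = old;	/* fall back to the saved value */
}
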
21752diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21753index c4bb8b8..9f7384d 100644
21754--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21755+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21756@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21757 NULL,
21758 };
21759
21760-static struct attribute_group rapl_pmu_events_group = {
21761+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21762 .name = "events",
21763 .attrs = NULL, /* patched at runtime */
21764 };
21765diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21766index c635b8b..b78835e 100644
21767--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21768+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21769@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21770 static int __init uncore_type_init(struct intel_uncore_type *type)
21771 {
21772 struct intel_uncore_pmu *pmus;
21773- struct attribute_group *attr_group;
21774+ attribute_group_no_const *attr_group;
21775 struct attribute **attrs;
21776 int i, j;
21777
21778diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21779index 6c8c1e7..515b98a 100644
21780--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21781+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21782@@ -114,7 +114,7 @@ struct intel_uncore_box {
21783 struct uncore_event_desc {
21784 struct kobj_attribute attr;
21785 const char *config;
21786-};
21787+} __do_const;
21788
21789 ssize_t uncore_event_show(struct kobject *kobj,
21790 struct kobj_attribute *attr, char *buf);
21791diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21792index 83741a7..bd3507d 100644
21793--- a/arch/x86/kernel/cpuid.c
21794+++ b/arch/x86/kernel/cpuid.c
21795@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21796 return notifier_from_errno(err);
21797 }
21798
21799-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21800+static struct notifier_block cpuid_class_cpu_notifier =
21801 {
21802 .notifier_call = cpuid_class_cpu_callback,
21803 };
21804diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21805index aceb2f9..c76d3e3 100644
21806--- a/arch/x86/kernel/crash.c
21807+++ b/arch/x86/kernel/crash.c
21808@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21809 #ifdef CONFIG_X86_32
21810 struct pt_regs fixed_regs;
21811
21812- if (!user_mode_vm(regs)) {
21813+ if (!user_mode(regs)) {
21814 crash_fixup_ss_esp(&fixed_regs, regs);
21815 regs = &fixed_regs;
21816 }
21817diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21818index afa64ad..dce67dd 100644
21819--- a/arch/x86/kernel/crash_dump_64.c
21820+++ b/arch/x86/kernel/crash_dump_64.c
21821@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21822 return -ENOMEM;
21823
21824 if (userbuf) {
21825- if (copy_to_user(buf, vaddr + offset, csize)) {
21826+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21827 iounmap(vaddr);
21828 return -EFAULT;
21829 }
21830diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21831index f6dfd93..892ade4 100644
21832--- a/arch/x86/kernel/doublefault.c
21833+++ b/arch/x86/kernel/doublefault.c
21834@@ -12,7 +12,7 @@
21835
21836 #define DOUBLEFAULT_STACKSIZE (1024)
21837 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21838-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21839+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21840
21841 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21842
21843@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21844 unsigned long gdt, tss;
21845
21846 native_store_gdt(&gdt_desc);
21847- gdt = gdt_desc.address;
21848+ gdt = (unsigned long)gdt_desc.address;
21849
21850 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21851
21852@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21853 /* 0x2 bit is always set */
21854 .flags = X86_EFLAGS_SF | 0x2,
21855 .sp = STACK_START,
21856- .es = __USER_DS,
21857+ .es = __KERNEL_DS,
21858 .cs = __KERNEL_CS,
21859 .ss = __KERNEL_DS,
21860- .ds = __USER_DS,
21861+ .ds = __KERNEL_DS,
21862 .fs = __KERNEL_PERCPU,
21863
21864 .__cr3 = __pa_nodebug(swapper_pg_dir),
21865diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21866index cf3df1d..b637d9a 100644
21867--- a/arch/x86/kernel/dumpstack.c
21868+++ b/arch/x86/kernel/dumpstack.c
21869@@ -2,6 +2,9 @@
21870 * Copyright (C) 1991, 1992 Linus Torvalds
21871 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21872 */
21873+#ifdef CONFIG_GRKERNSEC_HIDESYM
21874+#define __INCLUDED_BY_HIDESYM 1
21875+#endif
21876 #include <linux/kallsyms.h>
21877 #include <linux/kprobes.h>
21878 #include <linux/uaccess.h>
21879@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21880
21881 void printk_address(unsigned long address)
21882 {
21883- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21884+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21885 }
21886
21887 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21888 static void
21889 print_ftrace_graph_addr(unsigned long addr, void *data,
21890 const struct stacktrace_ops *ops,
21891- struct thread_info *tinfo, int *graph)
21892+ struct task_struct *task, int *graph)
21893 {
21894- struct task_struct *task;
21895 unsigned long ret_addr;
21896 int index;
21897
21898 if (addr != (unsigned long)return_to_handler)
21899 return;
21900
21901- task = tinfo->task;
21902 index = task->curr_ret_stack;
21903
21904 if (!task->ret_stack || index < *graph)
21905@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21906 static inline void
21907 print_ftrace_graph_addr(unsigned long addr, void *data,
21908 const struct stacktrace_ops *ops,
21909- struct thread_info *tinfo, int *graph)
21910+ struct task_struct *task, int *graph)
21911 { }
21912 #endif
21913
21914@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21915 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21916 */
21917
21918-static inline int valid_stack_ptr(struct thread_info *tinfo,
21919- void *p, unsigned int size, void *end)
21920+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21921 {
21922- void *t = tinfo;
21923 if (end) {
21924 if (p < end && p >= (end-THREAD_SIZE))
21925 return 1;
21926@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21927 }
21928
21929 unsigned long
21930-print_context_stack(struct thread_info *tinfo,
21931+print_context_stack(struct task_struct *task, void *stack_start,
21932 unsigned long *stack, unsigned long bp,
21933 const struct stacktrace_ops *ops, void *data,
21934 unsigned long *end, int *graph)
21935 {
21936 struct stack_frame *frame = (struct stack_frame *)bp;
21937
21938- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21939+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21940 unsigned long addr;
21941
21942 addr = *stack;
21943@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21944 } else {
21945 ops->address(data, addr, 0);
21946 }
21947- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21948+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21949 }
21950 stack++;
21951 }
21952@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21953 EXPORT_SYMBOL_GPL(print_context_stack);
21954
21955 unsigned long
21956-print_context_stack_bp(struct thread_info *tinfo,
21957+print_context_stack_bp(struct task_struct *task, void *stack_start,
21958 unsigned long *stack, unsigned long bp,
21959 const struct stacktrace_ops *ops, void *data,
21960 unsigned long *end, int *graph)
21961@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21962 struct stack_frame *frame = (struct stack_frame *)bp;
21963 unsigned long *ret_addr = &frame->return_address;
21964
21965- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21966+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21967 unsigned long addr = *ret_addr;
21968
21969 if (!__kernel_text_address(addr))
21970@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21971 ops->address(data, addr, 1);
21972 frame = frame->next_frame;
21973 ret_addr = &frame->return_address;
21974- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21975+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21976 }
21977
21978 return (unsigned long)frame;
21979@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21980 static void print_trace_address(void *data, unsigned long addr, int reliable)
21981 {
21982 touch_nmi_watchdog();
21983- printk(data);
21984+ printk("%s", (char *)data);
21985 printk_stack_address(addr, reliable);
21986 }
21987
21988@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
21989 EXPORT_SYMBOL_GPL(oops_begin);
21990 NOKPROBE_SYMBOL(oops_begin);
21991
21992+extern void gr_handle_kernel_exploit(void);
21993+
21994 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21995 {
21996 if (regs && kexec_should_crash(current))
21997@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21998 panic("Fatal exception in interrupt");
21999 if (panic_on_oops)
22000 panic("Fatal exception");
22001- do_exit(signr);
22002+
22003+ gr_handle_kernel_exploit();
22004+
22005+ do_group_exit(signr);
22006 }
22007 NOKPROBE_SYMBOL(oops_end);
22008
22009@@ -278,7 +282,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22010 print_modules();
22011 show_regs(regs);
22012 #ifdef CONFIG_X86_32
22013- if (user_mode_vm(regs)) {
22014+ if (user_mode(regs)) {
22015 sp = regs->sp;
22016 ss = regs->ss & 0xffff;
22017 } else {
22018@@ -307,7 +311,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22019 unsigned long flags = oops_begin();
22020 int sig = SIGSEGV;
22021
22022- if (!user_mode_vm(regs))
22023+ if (!user_mode(regs))
22024 report_bug(regs->ip, regs);
22025
22026 if (__die(str, regs, err))
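
The dumpstack.c walkers now receive the owning task plus an explicit stack_start (computed by masking the stack pointer with the THREAD_SIZE alignment) instead of a thread_info pointer, dropping the assumption that a thread_info sits at the base of the stack; valid_stack_ptr() only ever needed the raw base. A sketch of the bounds test this reduces to, mirroring the non-exception-stack branch:

/* A candidate pointer 'p' of 'size' bytes must lie entirely inside
 * [stack_start, stack_start + THREAD_SIZE). THREAD_SIZE comes from
 * <asm/page_32_types.h> / <asm/page_64_types.h>. */
static inline int example_valid_stack_ptr(void *stack_start, void *p,
					  unsigned int size)
{
	return p >= stack_start &&
	       (char *)p + size <= (char *)stack_start + THREAD_SIZE;
}
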
22027diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22028index 5abd4cd..c65733b 100644
22029--- a/arch/x86/kernel/dumpstack_32.c
22030+++ b/arch/x86/kernel/dumpstack_32.c
22031@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22032 bp = stack_frame(task, regs);
22033
22034 for (;;) {
22035- struct thread_info *context;
22036+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22037 void *end_stack;
22038
22039 end_stack = is_hardirq_stack(stack, cpu);
22040 if (!end_stack)
22041 end_stack = is_softirq_stack(stack, cpu);
22042
22043- context = task_thread_info(task);
22044- bp = ops->walk_stack(context, stack, bp, ops, data,
22045+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22046 end_stack, &graph);
22047
22048 /* Stop if not on irq stack */
22049@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22050 int i;
22051
22052 show_regs_print_info(KERN_EMERG);
22053- __show_regs(regs, !user_mode_vm(regs));
22054+ __show_regs(regs, !user_mode(regs));
22055
22056 /*
22057 * When in-kernel, we also print out the stack and code at the
22058 * time of the fault..
22059 */
22060- if (!user_mode_vm(regs)) {
22061+ if (!user_mode(regs)) {
22062 unsigned int code_prologue = code_bytes * 43 / 64;
22063 unsigned int code_len = code_bytes;
22064 unsigned char c;
22065 u8 *ip;
22066+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22067
22068 pr_emerg("Stack:\n");
22069 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22070
22071 pr_emerg("Code:");
22072
22073- ip = (u8 *)regs->ip - code_prologue;
22074+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22075 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22076 /* try starting at IP */
22077- ip = (u8 *)regs->ip;
22078+ ip = (u8 *)regs->ip + cs_base;
22079 code_len = code_len - code_prologue + 1;
22080 }
22081 for (i = 0; i < code_len; i++, ip++) {
22082@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22083 pr_cont(" Bad EIP value.");
22084 break;
22085 }
22086- if (ip == (u8 *)regs->ip)
22087+ if (ip == (u8 *)regs->ip + cs_base)
22088 pr_cont(" <%02x>", c);
22089 else
22090 pr_cont(" %02x", c);
22091@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22092 {
22093 unsigned short ud2;
22094
22095+ ip = ktla_ktva(ip);
22096 if (ip < PAGE_OFFSET)
22097 return 0;
22098 if (probe_kernel_address((unsigned short *)ip, ud2))
22099@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22100
22101 return ud2 == 0x0b0f;
22102 }
22103+
22104+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22105+void pax_check_alloca(unsigned long size)
22106+{
22107+ unsigned long sp = (unsigned long)&sp, stack_left;
22108+
22109+ /* all kernel stacks are of the same size */
22110+ stack_left = sp & (THREAD_SIZE - 1);
22111+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22112+}
22113+EXPORT_SYMBOL(pax_check_alloca);
22114+#endif
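
The added pax_check_alloca() leans on 32-bit kernel stacks being THREAD_SIZE-aligned: taking the address of a local and masking with THREAD_SIZE - 1 yields exactly the bytes still free below the current frame, and the BUG_ON refuses any variable-size allocation that would come within 256 bytes of the end. A worked example, assuming THREAD_SIZE = 8192:

/* A stack spanning 0xc1234000..0xc1235fff with sp = 0xc12347f0 gives
 * stack_left = 0xc12347f0 & 0x1fff = 0x07f0 = 2032 bytes still free,
 * so any alloca of 2032 - 256 = 1776 bytes or more trips the BUG_ON. */
#define EXAMPLE_THREAD_SIZE 8192UL

static unsigned long example_stack_left(unsigned long sp)
{
	return sp & (EXAMPLE_THREAD_SIZE - 1);
}
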
22115diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22116index ff86f19..73eabf4 100644
22117--- a/arch/x86/kernel/dumpstack_64.c
22118+++ b/arch/x86/kernel/dumpstack_64.c
22119@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22120 const struct stacktrace_ops *ops, void *data)
22121 {
22122 const unsigned cpu = get_cpu();
22123- struct thread_info *tinfo;
22124 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22125 unsigned long dummy;
22126 unsigned used = 0;
22127 int graph = 0;
22128 int done = 0;
22129+ void *stack_start;
22130
22131 if (!task)
22132 task = current;
22133@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22134 * current stack address. If the stacks consist of nested
22135 * exceptions
22136 */
22137- tinfo = task_thread_info(task);
22138 while (!done) {
22139 unsigned long *stack_end;
22140 enum stack_type stype;
22141@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22142 if (ops->stack(data, id) < 0)
22143 break;
22144
22145- bp = ops->walk_stack(tinfo, stack, bp, ops,
22146+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22147 data, stack_end, &graph);
22148 ops->stack(data, "<EOE>");
22149 /*
22150@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22151 * second-to-last pointer (index -2 to end) in the
22152 * exception stack:
22153 */
22154+ if ((u16)stack_end[-1] != __KERNEL_DS)
22155+ goto out;
22156 stack = (unsigned long *) stack_end[-2];
22157 done = 0;
22158 break;
22159@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22160
22161 if (ops->stack(data, "IRQ") < 0)
22162 break;
22163- bp = ops->walk_stack(tinfo, stack, bp,
22164+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22165 ops, data, stack_end, &graph);
22166 /*
22167 * We link to the next stack (which would be
22168@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22169 /*
22170 * This handles the process stack:
22171 */
22172- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22173+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22174+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22175+out:
22176 put_cpu();
22177 }
22178 EXPORT_SYMBOL(dump_trace);
22179@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22180 {
22181 unsigned short ud2;
22182
22183- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22184+ if (probe_kernel_address((unsigned short *)ip, ud2))
22185 return 0;
22186
22187 return ud2 == 0x0b0f;
22188 }
22189+
22190+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22191+void pax_check_alloca(unsigned long size)
22192+{
22193+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22194+ unsigned cpu, used;
22195+ char *id;
22196+
22197+ /* check the process stack first */
22198+ stack_start = (unsigned long)task_stack_page(current);
22199+ stack_end = stack_start + THREAD_SIZE;
22200+ if (likely(stack_start <= sp && sp < stack_end)) {
22201+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22202+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22203+ return;
22204+ }
22205+
22206+ cpu = get_cpu();
22207+
22208+ /* check the irq stacks */
22209+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22210+ stack_start = stack_end - IRQ_STACK_SIZE;
22211+ if (stack_start <= sp && sp < stack_end) {
22212+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22213+ put_cpu();
22214+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22215+ return;
22216+ }
22217+
22218+ /* check the exception stacks */
22219+ used = 0;
22220+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22221+ stack_start = stack_end - EXCEPTION_STKSZ;
22222+ if (stack_end && stack_start <= sp && sp < stack_end) {
22223+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22224+ put_cpu();
22225+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22226+ return;
22227+ }
22228+
22229+ put_cpu();
22230+
22231+ /* unknown stack */
22232+ BUG();
22233+}
22234+EXPORT_SYMBOL(pax_check_alloca);
22235+#endif
22236diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22237index 46201de..ebffabf 100644
22238--- a/arch/x86/kernel/e820.c
22239+++ b/arch/x86/kernel/e820.c
22240@@ -794,8 +794,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22241
22242 static void early_panic(char *msg)
22243 {
22244- early_printk(msg);
22245- panic(msg);
22246+ early_printk("%s", msg);
22247+ panic("%s", msg);
22248 }
22249
22250 static int userdef __initdata;
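
early_panic() used to hand its caller-supplied message to early_printk() and panic() as the format string itself; routing it through "%s" means a message that happens to contain % sequences can no longer steer the printf engine. The same one-line hardening appears in mce_panic() and print_trace_address() earlier in this patch. A sketch of the rule:

#include <linux/kernel.h>
#include <linux/printk.h>

static void example_fail(const char *msg)
{
	pr_emerg("%s\n", msg);	/* safe: msg is data, not a format */
	/* panic(msg) would interpret any '%' in msg; panic("%s", msg) won't */
	panic("%s", msg);
}
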
22251diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22252index a62536a..8444df4 100644
22253--- a/arch/x86/kernel/early_printk.c
22254+++ b/arch/x86/kernel/early_printk.c
22255@@ -7,6 +7,7 @@
22256 #include <linux/pci_regs.h>
22257 #include <linux/pci_ids.h>
22258 #include <linux/errno.h>
22259+#include <linux/sched.h>
22260 #include <asm/io.h>
22261 #include <asm/processor.h>
22262 #include <asm/fcntl.h>
22263diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22264index 31e2d5b..b31c76d 100644
22265--- a/arch/x86/kernel/entry_32.S
22266+++ b/arch/x86/kernel/entry_32.S
22267@@ -177,13 +177,154 @@
22268 /*CFI_REL_OFFSET gs, PT_GS*/
22269 .endm
22270 .macro SET_KERNEL_GS reg
22271+
22272+#ifdef CONFIG_CC_STACKPROTECTOR
22273 movl $(__KERNEL_STACK_CANARY), \reg
22274+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22275+ movl $(__USER_DS), \reg
22276+#else
22277+ xorl \reg, \reg
22278+#endif
22279+
22280 movl \reg, %gs
22281 .endm
22282
22283 #endif /* CONFIG_X86_32_LAZY_GS */
22284
22285-.macro SAVE_ALL
22286+.macro pax_enter_kernel
22287+#ifdef CONFIG_PAX_KERNEXEC
22288+ call pax_enter_kernel
22289+#endif
22290+.endm
22291+
22292+.macro pax_exit_kernel
22293+#ifdef CONFIG_PAX_KERNEXEC
22294+ call pax_exit_kernel
22295+#endif
22296+.endm
22297+
22298+#ifdef CONFIG_PAX_KERNEXEC
22299+ENTRY(pax_enter_kernel)
22300+#ifdef CONFIG_PARAVIRT
22301+ pushl %eax
22302+ pushl %ecx
22303+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22304+ mov %eax, %esi
22305+#else
22306+ mov %cr0, %esi
22307+#endif
22308+ bts $16, %esi
22309+ jnc 1f
22310+ mov %cs, %esi
22311+ cmp $__KERNEL_CS, %esi
22312+ jz 3f
22313+ ljmp $__KERNEL_CS, $3f
22314+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22315+2:
22316+#ifdef CONFIG_PARAVIRT
22317+ mov %esi, %eax
22318+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22319+#else
22320+ mov %esi, %cr0
22321+#endif
22322+3:
22323+#ifdef CONFIG_PARAVIRT
22324+ popl %ecx
22325+ popl %eax
22326+#endif
22327+ ret
22328+ENDPROC(pax_enter_kernel)
22329+
22330+ENTRY(pax_exit_kernel)
22331+#ifdef CONFIG_PARAVIRT
22332+ pushl %eax
22333+ pushl %ecx
22334+#endif
22335+ mov %cs, %esi
22336+ cmp $__KERNEXEC_KERNEL_CS, %esi
22337+ jnz 2f
22338+#ifdef CONFIG_PARAVIRT
22339+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22340+ mov %eax, %esi
22341+#else
22342+ mov %cr0, %esi
22343+#endif
22344+ btr $16, %esi
22345+ ljmp $__KERNEL_CS, $1f
22346+1:
22347+#ifdef CONFIG_PARAVIRT
22348+ mov %esi, %eax
22349+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22350+#else
22351+ mov %esi, %cr0
22352+#endif
22353+2:
22354+#ifdef CONFIG_PARAVIRT
22355+ popl %ecx
22356+ popl %eax
22357+#endif
22358+ ret
22359+ENDPROC(pax_exit_kernel)
22360+#endif
22361+
22362+ .macro pax_erase_kstack
22363+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22364+ call pax_erase_kstack
22365+#endif
22366+ .endm
22367+
22368+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22369+/*
22370+ * ebp: thread_info
22371+ */
22372+ENTRY(pax_erase_kstack)
22373+ pushl %edi
22374+ pushl %ecx
22375+ pushl %eax
22376+
22377+ mov TI_lowest_stack(%ebp), %edi
22378+ mov $-0xBEEF, %eax
22379+ std
22380+
22381+1: mov %edi, %ecx
22382+ and $THREAD_SIZE_asm - 1, %ecx
22383+ shr $2, %ecx
22384+ repne scasl
22385+ jecxz 2f
22386+
22387+ cmp $2*16, %ecx
22388+ jc 2f
22389+
22390+ mov $2*16, %ecx
22391+ repe scasl
22392+ jecxz 2f
22393+ jne 1b
22394+
22395+2: cld
22396+ or $2*4, %edi
22397+ mov %esp, %ecx
22398+ sub %edi, %ecx
22399+
22400+ cmp $THREAD_SIZE_asm, %ecx
22401+ jb 3f
22402+ ud2
22403+3:
22404+
22405+ shr $2, %ecx
22406+ rep stosl
22407+
22408+ mov TI_task_thread_sp0(%ebp), %edi
22409+ sub $128, %edi
22410+ mov %edi, TI_lowest_stack(%ebp)
22411+
22412+ popl %eax
22413+ popl %ecx
22414+ popl %edi
22415+ ret
22416+ENDPROC(pax_erase_kstack)
22417+#endif
22418+
22419+.macro __SAVE_ALL _DS
22420 cld
22421 PUSH_GS
22422 pushl_cfi %fs
22423@@ -206,7 +347,7 @@
22424 CFI_REL_OFFSET ecx, 0
22425 pushl_cfi %ebx
22426 CFI_REL_OFFSET ebx, 0
22427- movl $(__USER_DS), %edx
22428+ movl $\_DS, %edx
22429 movl %edx, %ds
22430 movl %edx, %es
22431 movl $(__KERNEL_PERCPU), %edx
22432@@ -214,6 +355,15 @@
22433 SET_KERNEL_GS %edx
22434 .endm
22435
22436+.macro SAVE_ALL
22437+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22438+ __SAVE_ALL __KERNEL_DS
22439+ pax_enter_kernel
22440+#else
22441+ __SAVE_ALL __USER_DS
22442+#endif
22443+.endm
22444+
22445 .macro RESTORE_INT_REGS
22446 popl_cfi %ebx
22447 CFI_RESTORE ebx
22448@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22449 popfl_cfi
22450 jmp syscall_exit
22451 CFI_ENDPROC
22452-END(ret_from_fork)
22453+ENDPROC(ret_from_fork)
22454
22455 ENTRY(ret_from_kernel_thread)
22456 CFI_STARTPROC
22457@@ -340,7 +490,15 @@ ret_from_intr:
22458 andl $SEGMENT_RPL_MASK, %eax
22459 #endif
22460 cmpl $USER_RPL, %eax
22461+
22462+#ifdef CONFIG_PAX_KERNEXEC
22463+ jae resume_userspace
22464+
22465+ pax_exit_kernel
22466+ jmp resume_kernel
22467+#else
22468 jb resume_kernel # not returning to v8086 or userspace
22469+#endif
22470
22471 ENTRY(resume_userspace)
22472 LOCKDEP_SYS_EXIT
22473@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22474 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22475 # int/exception return?
22476 jne work_pending
22477- jmp restore_all
22478-END(ret_from_exception)
22479+ jmp restore_all_pax
22480+ENDPROC(ret_from_exception)
22481
22482 #ifdef CONFIG_PREEMPT
22483 ENTRY(resume_kernel)
22484@@ -365,7 +523,7 @@ need_resched:
22485 jz restore_all
22486 call preempt_schedule_irq
22487 jmp need_resched
22488-END(resume_kernel)
22489+ENDPROC(resume_kernel)
22490 #endif
22491 CFI_ENDPROC
22492
22493@@ -395,30 +553,45 @@ sysenter_past_esp:
22494 /*CFI_REL_OFFSET cs, 0*/
22495 /*
22496 * Push current_thread_info()->sysenter_return to the stack.
22497- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22498- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22499 */
22500- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22501+ pushl_cfi $0
22502 CFI_REL_OFFSET eip, 0
22503
22504 pushl_cfi %eax
22505 SAVE_ALL
22506+ GET_THREAD_INFO(%ebp)
22507+ movl TI_sysenter_return(%ebp),%ebp
22508+ movl %ebp,PT_EIP(%esp)
22509 ENABLE_INTERRUPTS(CLBR_NONE)
22510
22511 /*
22512 * Load the potential sixth argument from user stack.
22513 * Careful about security.
22514 */
22515+ movl PT_OLDESP(%esp),%ebp
22516+
22517+#ifdef CONFIG_PAX_MEMORY_UDEREF
22518+ mov PT_OLDSS(%esp),%ds
22519+1: movl %ds:(%ebp),%ebp
22520+ push %ss
22521+ pop %ds
22522+#else
22523 cmpl $__PAGE_OFFSET-3,%ebp
22524 jae syscall_fault
22525 ASM_STAC
22526 1: movl (%ebp),%ebp
22527 ASM_CLAC
22528+#endif
22529+
22530 movl %ebp,PT_EBP(%esp)
22531 _ASM_EXTABLE(1b,syscall_fault)
22532
22533 GET_THREAD_INFO(%ebp)
22534
22535+#ifdef CONFIG_PAX_RANDKSTACK
22536+ pax_erase_kstack
22537+#endif
22538+
22539 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22540 jnz sysenter_audit
22541 sysenter_do_call:
22542@@ -434,12 +607,24 @@ sysenter_after_call:
22543 testl $_TIF_ALLWORK_MASK, %ecx
22544 jne sysexit_audit
22545 sysenter_exit:
22546+
22547+#ifdef CONFIG_PAX_RANDKSTACK
22548+ pushl_cfi %eax
22549+ movl %esp, %eax
22550+ call pax_randomize_kstack
22551+ popl_cfi %eax
22552+#endif
22553+
22554+ pax_erase_kstack
22555+
22556 /* if something modifies registers it must also disable sysexit */
22557 movl PT_EIP(%esp), %edx
22558 movl PT_OLDESP(%esp), %ecx
22559 xorl %ebp,%ebp
22560 TRACE_IRQS_ON
22561 1: mov PT_FS(%esp), %fs
22562+2: mov PT_DS(%esp), %ds
22563+3: mov PT_ES(%esp), %es
22564 PTGS_TO_GS
22565 ENABLE_INTERRUPTS_SYSEXIT
22566
22567@@ -453,6 +638,9 @@ sysenter_audit:
22568 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22569 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22570 call __audit_syscall_entry
22571+
22572+ pax_erase_kstack
22573+
22574 popl_cfi %ecx /* get that remapped edx off the stack */
22575 popl_cfi %ecx /* get that remapped esi off the stack */
22576 movl PT_EAX(%esp),%eax /* reload syscall number */
22577@@ -479,10 +667,16 @@ sysexit_audit:
22578
22579 CFI_ENDPROC
22580 .pushsection .fixup,"ax"
22581-2: movl $0,PT_FS(%esp)
22582+4: movl $0,PT_FS(%esp)
22583+ jmp 1b
22584+5: movl $0,PT_DS(%esp)
22585+ jmp 1b
22586+6: movl $0,PT_ES(%esp)
22587 jmp 1b
22588 .popsection
22589- _ASM_EXTABLE(1b,2b)
22590+ _ASM_EXTABLE(1b,4b)
22591+ _ASM_EXTABLE(2b,5b)
22592+ _ASM_EXTABLE(3b,6b)
22593 PTGS_TO_GS_EX
22594 ENDPROC(ia32_sysenter_target)
22595
22596@@ -493,6 +687,11 @@ ENTRY(system_call)
22597 pushl_cfi %eax # save orig_eax
22598 SAVE_ALL
22599 GET_THREAD_INFO(%ebp)
22600+
22601+#ifdef CONFIG_PAX_RANDKSTACK
22602+ pax_erase_kstack
22603+#endif
22604+
22605 # system call tracing in operation / emulation
22606 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22607 jnz syscall_trace_entry
22608@@ -512,6 +711,15 @@ syscall_exit:
22609 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22610 jne syscall_exit_work
22611
22612+restore_all_pax:
22613+
22614+#ifdef CONFIG_PAX_RANDKSTACK
22615+ movl %esp, %eax
22616+ call pax_randomize_kstack
22617+#endif
22618+
22619+ pax_erase_kstack
22620+
22621 restore_all:
22622 TRACE_IRQS_IRET
22623 restore_all_notrace:
22624@@ -566,14 +774,34 @@ ldt_ss:
22625 * compensating for the offset by changing to the ESPFIX segment with
22626 * a base address that matches for the difference.
22627 */
22628-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22629+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22630 mov %esp, %edx /* load kernel esp */
22631 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22632 mov %dx, %ax /* eax: new kernel esp */
22633 sub %eax, %edx /* offset (low word is 0) */
22634+#ifdef CONFIG_SMP
22635+ movl PER_CPU_VAR(cpu_number), %ebx
22636+ shll $PAGE_SHIFT_asm, %ebx
22637+ addl $cpu_gdt_table, %ebx
22638+#else
22639+ movl $cpu_gdt_table, %ebx
22640+#endif
22641 shr $16, %edx
22642- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22643- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22644+
22645+#ifdef CONFIG_PAX_KERNEXEC
22646+ mov %cr0, %esi
22647+ btr $16, %esi
22648+ mov %esi, %cr0
22649+#endif
22650+
22651+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22652+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22653+
22654+#ifdef CONFIG_PAX_KERNEXEC
22655+ bts $16, %esi
22656+ mov %esi, %cr0
22657+#endif
22658+
22659 pushl_cfi $__ESPFIX_SS
22660 pushl_cfi %eax /* new kernel esp */
22661 /* Disable interrupts, but do not irqtrace this section: we
22662@@ -603,20 +831,18 @@ work_resched:
22663 movl TI_flags(%ebp), %ecx
22664 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22665 # than syscall tracing?
22666- jz restore_all
22667+ jz restore_all_pax
22668 testb $_TIF_NEED_RESCHED, %cl
22669 jnz work_resched
22670
22671 work_notifysig: # deal with pending signals and
22672 # notify-resume requests
22673+ movl %esp, %eax
22674 #ifdef CONFIG_VM86
22675 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22676- movl %esp, %eax
22677 jne work_notifysig_v86 # returning to kernel-space or
22678 # vm86-space
22679 1:
22680-#else
22681- movl %esp, %eax
22682 #endif
22683 TRACE_IRQS_ON
22684 ENABLE_INTERRUPTS(CLBR_NONE)
22685@@ -637,7 +863,7 @@ work_notifysig_v86:
22686 movl %eax, %esp
22687 jmp 1b
22688 #endif
22689-END(work_pending)
22690+ENDPROC(work_pending)
22691
22692 # perform syscall exit tracing
22693 ALIGN
22694@@ -645,11 +871,14 @@ syscall_trace_entry:
22695 movl $-ENOSYS,PT_EAX(%esp)
22696 movl %esp, %eax
22697 call syscall_trace_enter
22698+
22699+ pax_erase_kstack
22700+
22701 /* What it returned is what we'll actually use. */
22702 cmpl $(NR_syscalls), %eax
22703 jnae syscall_call
22704 jmp syscall_exit
22705-END(syscall_trace_entry)
22706+ENDPROC(syscall_trace_entry)
22707
22708 # perform syscall exit tracing
22709 ALIGN
22710@@ -662,26 +891,30 @@ syscall_exit_work:
22711 movl %esp, %eax
22712 call syscall_trace_leave
22713 jmp resume_userspace
22714-END(syscall_exit_work)
22715+ENDPROC(syscall_exit_work)
22716 CFI_ENDPROC
22717
22718 RING0_INT_FRAME # can't unwind into user space anyway
22719 syscall_fault:
22720+#ifdef CONFIG_PAX_MEMORY_UDEREF
22721+ push %ss
22722+ pop %ds
22723+#endif
22724 ASM_CLAC
22725 GET_THREAD_INFO(%ebp)
22726 movl $-EFAULT,PT_EAX(%esp)
22727 jmp resume_userspace
22728-END(syscall_fault)
22729+ENDPROC(syscall_fault)
22730
22731 syscall_badsys:
22732 movl $-ENOSYS,%eax
22733 jmp syscall_after_call
22734-END(syscall_badsys)
22735+ENDPROC(syscall_badsys)
22736
22737 sysenter_badsys:
22738 movl $-ENOSYS,%eax
22739 jmp sysenter_after_call
22740-END(sysenter_badsys)
22741+ENDPROC(sysenter_badsys)
22742 CFI_ENDPROC
22743
22744 .macro FIXUP_ESPFIX_STACK
22745@@ -694,8 +927,15 @@ END(sysenter_badsys)
22746 */
22747 #ifdef CONFIG_X86_ESPFIX32
22748 /* fixup the stack */
22749- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22750- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22751+#ifdef CONFIG_SMP
22752+ movl PER_CPU_VAR(cpu_number), %ebx
22753+ shll $PAGE_SHIFT_asm, %ebx
22754+ addl $cpu_gdt_table, %ebx
22755+#else
22756+ movl $cpu_gdt_table, %ebx
22757+#endif
22758+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22759+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22760 shl $16, %eax
22761 addl %esp, %eax /* the adjusted stack pointer */
22762 pushl_cfi $__KERNEL_DS
22763@@ -751,7 +991,7 @@ vector=vector+1
22764 .endr
22765 2: jmp common_interrupt
22766 .endr
22767-END(irq_entries_start)
22768+ENDPROC(irq_entries_start)
22769
22770 .previous
22771 END(interrupt)
22772@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22773 pushl_cfi $do_coprocessor_error
22774 jmp error_code
22775 CFI_ENDPROC
22776-END(coprocessor_error)
22777+ENDPROC(coprocessor_error)
22778
22779 ENTRY(simd_coprocessor_error)
22780 RING0_INT_FRAME
22781@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22782 .section .altinstructions,"a"
22783 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22784 .previous
22785-.section .altinstr_replacement,"ax"
22786+.section .altinstr_replacement,"a"
22787 663: pushl $do_simd_coprocessor_error
22788 664:
22789 .previous
22790@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22791 #endif
22792 jmp error_code
22793 CFI_ENDPROC
22794-END(simd_coprocessor_error)
22795+ENDPROC(simd_coprocessor_error)
22796
22797 ENTRY(device_not_available)
22798 RING0_INT_FRAME
22799@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22800 pushl_cfi $do_device_not_available
22801 jmp error_code
22802 CFI_ENDPROC
22803-END(device_not_available)
22804+ENDPROC(device_not_available)
22805
22806 #ifdef CONFIG_PARAVIRT
22807 ENTRY(native_iret)
22808 iret
22809 _ASM_EXTABLE(native_iret, iret_exc)
22810-END(native_iret)
22811+ENDPROC(native_iret)
22812
22813 ENTRY(native_irq_enable_sysexit)
22814 sti
22815 sysexit
22816-END(native_irq_enable_sysexit)
22817+ENDPROC(native_irq_enable_sysexit)
22818 #endif
22819
22820 ENTRY(overflow)
22821@@ -860,7 +1100,7 @@ ENTRY(overflow)
22822 pushl_cfi $do_overflow
22823 jmp error_code
22824 CFI_ENDPROC
22825-END(overflow)
22826+ENDPROC(overflow)
22827
22828 ENTRY(bounds)
22829 RING0_INT_FRAME
22830@@ -869,7 +1109,7 @@ ENTRY(bounds)
22831 pushl_cfi $do_bounds
22832 jmp error_code
22833 CFI_ENDPROC
22834-END(bounds)
22835+ENDPROC(bounds)
22836
22837 ENTRY(invalid_op)
22838 RING0_INT_FRAME
22839@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22840 pushl_cfi $do_invalid_op
22841 jmp error_code
22842 CFI_ENDPROC
22843-END(invalid_op)
22844+ENDPROC(invalid_op)
22845
22846 ENTRY(coprocessor_segment_overrun)
22847 RING0_INT_FRAME
22848@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22849 pushl_cfi $do_coprocessor_segment_overrun
22850 jmp error_code
22851 CFI_ENDPROC
22852-END(coprocessor_segment_overrun)
22853+ENDPROC(coprocessor_segment_overrun)
22854
22855 ENTRY(invalid_TSS)
22856 RING0_EC_FRAME
22857@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22858 pushl_cfi $do_invalid_TSS
22859 jmp error_code
22860 CFI_ENDPROC
22861-END(invalid_TSS)
22862+ENDPROC(invalid_TSS)
22863
22864 ENTRY(segment_not_present)
22865 RING0_EC_FRAME
22866@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22867 pushl_cfi $do_segment_not_present
22868 jmp error_code
22869 CFI_ENDPROC
22870-END(segment_not_present)
22871+ENDPROC(segment_not_present)
22872
22873 ENTRY(stack_segment)
22874 RING0_EC_FRAME
22875@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22876 pushl_cfi $do_stack_segment
22877 jmp error_code
22878 CFI_ENDPROC
22879-END(stack_segment)
22880+ENDPROC(stack_segment)
22881
22882 ENTRY(alignment_check)
22883 RING0_EC_FRAME
22884@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22885 pushl_cfi $do_alignment_check
22886 jmp error_code
22887 CFI_ENDPROC
22888-END(alignment_check)
22889+ENDPROC(alignment_check)
22890
22891 ENTRY(divide_error)
22892 RING0_INT_FRAME
22893@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22894 pushl_cfi $do_divide_error
22895 jmp error_code
22896 CFI_ENDPROC
22897-END(divide_error)
22898+ENDPROC(divide_error)
22899
22900 #ifdef CONFIG_X86_MCE
22901 ENTRY(machine_check)
22902@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22903 pushl_cfi machine_check_vector
22904 jmp error_code
22905 CFI_ENDPROC
22906-END(machine_check)
22907+ENDPROC(machine_check)
22908 #endif
22909
22910 ENTRY(spurious_interrupt_bug)
22911@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22912 pushl_cfi $do_spurious_interrupt_bug
22913 jmp error_code
22914 CFI_ENDPROC
22915-END(spurious_interrupt_bug)
22916+ENDPROC(spurious_interrupt_bug)
22917
22918 #ifdef CONFIG_XEN
22919 /* Xen doesn't set %esp to be precisely what the normal sysenter
22920@@ -1057,7 +1297,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22921
22922 ENTRY(mcount)
22923 ret
22924-END(mcount)
22925+ENDPROC(mcount)
22926
22927 ENTRY(ftrace_caller)
22928 pushl %eax
22929@@ -1087,7 +1327,7 @@ ftrace_graph_call:
22930 .globl ftrace_stub
22931 ftrace_stub:
22932 ret
22933-END(ftrace_caller)
22934+ENDPROC(ftrace_caller)
22935
22936 ENTRY(ftrace_regs_caller)
22937 pushf /* push flags before compare (in cs location) */
22938@@ -1185,7 +1425,7 @@ trace:
22939 popl %ecx
22940 popl %eax
22941 jmp ftrace_stub
22942-END(mcount)
22943+ENDPROC(mcount)
22944 #endif /* CONFIG_DYNAMIC_FTRACE */
22945 #endif /* CONFIG_FUNCTION_TRACER */
22946
22947@@ -1203,7 +1443,7 @@ ENTRY(ftrace_graph_caller)
22948 popl %ecx
22949 popl %eax
22950 ret
22951-END(ftrace_graph_caller)
22952+ENDPROC(ftrace_graph_caller)
22953
22954 .globl return_to_handler
22955 return_to_handler:
22956@@ -1264,15 +1504,18 @@ error_code:
22957 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22958 REG_TO_PTGS %ecx
22959 SET_KERNEL_GS %ecx
22960- movl $(__USER_DS), %ecx
22961+ movl $(__KERNEL_DS), %ecx
22962 movl %ecx, %ds
22963 movl %ecx, %es
22964+
22965+ pax_enter_kernel
22966+
22967 TRACE_IRQS_OFF
22968 movl %esp,%eax # pt_regs pointer
22969 call *%edi
22970 jmp ret_from_exception
22971 CFI_ENDPROC
22972-END(page_fault)
22973+ENDPROC(page_fault)
22974
22975 /*
22976 * Debug traps and NMI can happen at the one SYSENTER instruction
22977@@ -1315,7 +1558,7 @@ debug_stack_correct:
22978 call do_debug
22979 jmp ret_from_exception
22980 CFI_ENDPROC
22981-END(debug)
22982+ENDPROC(debug)
22983
22984 /*
22985 * NMI is doubly nasty. It can happen _while_ we're handling
22986@@ -1355,6 +1598,9 @@ nmi_stack_correct:
22987 xorl %edx,%edx # zero error code
22988 movl %esp,%eax # pt_regs pointer
22989 call do_nmi
22990+
22991+ pax_exit_kernel
22992+
22993 jmp restore_all_notrace
22994 CFI_ENDPROC
22995
22996@@ -1392,13 +1638,16 @@ nmi_espfix_stack:
22997 FIXUP_ESPFIX_STACK # %eax == %esp
22998 xorl %edx,%edx # zero error code
22999 call do_nmi
23000+
23001+ pax_exit_kernel
23002+
23003 RESTORE_REGS
23004 lss 12+4(%esp), %esp # back to espfix stack
23005 CFI_ADJUST_CFA_OFFSET -24
23006 jmp irq_return
23007 #endif
23008 CFI_ENDPROC
23009-END(nmi)
23010+ENDPROC(nmi)
23011
23012 ENTRY(int3)
23013 RING0_INT_FRAME
23014@@ -1411,14 +1660,14 @@ ENTRY(int3)
23015 call do_int3
23016 jmp ret_from_exception
23017 CFI_ENDPROC
23018-END(int3)
23019+ENDPROC(int3)
23020
23021 ENTRY(general_protection)
23022 RING0_EC_FRAME
23023 pushl_cfi $do_general_protection
23024 jmp error_code
23025 CFI_ENDPROC
23026-END(general_protection)
23027+ENDPROC(general_protection)
23028
23029 #ifdef CONFIG_KVM_GUEST
23030 ENTRY(async_page_fault)
23031@@ -1427,6 +1676,6 @@ ENTRY(async_page_fault)
23032 pushl_cfi $do_async_page_fault
23033 jmp error_code
23034 CFI_ENDPROC
23035-END(async_page_fault)
23036+ENDPROC(async_page_fault)
23037 #endif
23038
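
Nearly every END() in entry_32.S above becomes ENDPROC(). The difference is small but deliberate: ENDPROC additionally emits .type name, @function, so the symbol carries function type information that tooling and KERNEXEC-style instrumentation can key off, while END only records the symbol size. A sketch of what the include/linux/linkage.h assembler macros of this era expand to, abbreviated rather than quoted verbatim:

#define END(name) \
	.size name, .-name

#define ENDPROC(name) \
	.type name, @function; \
	END(name)
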
23039diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23040index f0095a7..ec77893 100644
23041--- a/arch/x86/kernel/entry_64.S
23042+++ b/arch/x86/kernel/entry_64.S
23043@@ -59,6 +59,8 @@
23044 #include <asm/smap.h>
23045 #include <asm/pgtable_types.h>
23046 #include <linux/err.h>
23047+#include <asm/pgtable.h>
23048+#include <asm/alternative-asm.h>
23049
23050 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23051 #include <linux/elf-em.h>
23052@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23053 ENDPROC(native_usergs_sysret64)
23054 #endif /* CONFIG_PARAVIRT */
23055
23056+ .macro ljmpq sel, off
23057+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23058+ .byte 0x48; ljmp *1234f(%rip)
23059+ .pushsection .rodata
23060+ .align 16
23061+ 1234: .quad \off; .word \sel
23062+ .popsection
23063+#else
23064+ pushq $\sel
23065+ pushq $\off
23066+ lretq
23067+#endif
23068+ .endm
23069+
23070+ .macro pax_enter_kernel
23071+ pax_set_fptr_mask
23072+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23073+ call pax_enter_kernel
23074+#endif
23075+ .endm
23076+
23077+ .macro pax_exit_kernel
23078+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23079+ call pax_exit_kernel
23080+#endif
23081+
23082+ .endm
23083+
23084+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23085+ENTRY(pax_enter_kernel)
23086+ pushq %rdi
23087+
23088+#ifdef CONFIG_PARAVIRT
23089+ PV_SAVE_REGS(CLBR_RDI)
23090+#endif
23091+
23092+#ifdef CONFIG_PAX_KERNEXEC
23093+ GET_CR0_INTO_RDI
23094+ bts $16,%rdi
23095+ jnc 3f
23096+ mov %cs,%edi
23097+ cmp $__KERNEL_CS,%edi
23098+ jnz 2f
23099+1:
23100+#endif
23101+
23102+#ifdef CONFIG_PAX_MEMORY_UDEREF
23103+ 661: jmp 111f
23104+ .pushsection .altinstr_replacement, "a"
23105+ 662: ASM_NOP2
23106+ .popsection
23107+ .pushsection .altinstructions, "a"
23108+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23109+ .popsection
23110+ GET_CR3_INTO_RDI
23111+ cmp $0,%dil
23112+ jnz 112f
23113+ mov $__KERNEL_DS,%edi
23114+ mov %edi,%ss
23115+ jmp 111f
23116+112: cmp $1,%dil
23117+ jz 113f
23118+ ud2
23119+113: sub $4097,%rdi
23120+ bts $63,%rdi
23121+ SET_RDI_INTO_CR3
23122+ mov $__UDEREF_KERNEL_DS,%edi
23123+ mov %edi,%ss
23124+111:
23125+#endif
23126+
23127+#ifdef CONFIG_PARAVIRT
23128+ PV_RESTORE_REGS(CLBR_RDI)
23129+#endif
23130+
23131+ popq %rdi
23132+ pax_force_retaddr
23133+ retq
23134+
23135+#ifdef CONFIG_PAX_KERNEXEC
23136+2: ljmpq __KERNEL_CS,1b
23137+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23138+4: SET_RDI_INTO_CR0
23139+ jmp 1b
23140+#endif
23141+ENDPROC(pax_enter_kernel)
23142+
23143+ENTRY(pax_exit_kernel)
23144+ pushq %rdi
23145+
23146+#ifdef CONFIG_PARAVIRT
23147+ PV_SAVE_REGS(CLBR_RDI)
23148+#endif
23149+
23150+#ifdef CONFIG_PAX_KERNEXEC
23151+ mov %cs,%rdi
23152+ cmp $__KERNEXEC_KERNEL_CS,%edi
23153+ jz 2f
23154+ GET_CR0_INTO_RDI
23155+ bts $16,%rdi
23156+ jnc 4f
23157+1:
23158+#endif
23159+
23160+#ifdef CONFIG_PAX_MEMORY_UDEREF
23161+ 661: jmp 111f
23162+ .pushsection .altinstr_replacement, "a"
23163+ 662: ASM_NOP2
23164+ .popsection
23165+ .pushsection .altinstructions, "a"
23166+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23167+ .popsection
23168+ mov %ss,%edi
23169+ cmp $__UDEREF_KERNEL_DS,%edi
23170+ jnz 111f
23171+ GET_CR3_INTO_RDI
23172+ cmp $0,%dil
23173+ jz 112f
23174+ ud2
23175+112: add $4097,%rdi
23176+ bts $63,%rdi
23177+ SET_RDI_INTO_CR3
23178+ mov $__KERNEL_DS,%edi
23179+ mov %edi,%ss
23180+111:
23181+#endif
23182+
23183+#ifdef CONFIG_PARAVIRT
23184+ PV_RESTORE_REGS(CLBR_RDI);
23185+#endif
23186+
23187+ popq %rdi
23188+ pax_force_retaddr
23189+ retq
23190+
23191+#ifdef CONFIG_PAX_KERNEXEC
23192+2: GET_CR0_INTO_RDI
23193+ btr $16,%rdi
23194+ jnc 4f
23195+ ljmpq __KERNEL_CS,3f
23196+3: SET_RDI_INTO_CR0
23197+ jmp 1b
23198+4: ud2
23199+ jmp 4b
23200+#endif
23201+ENDPROC(pax_exit_kernel)
23202+#endif
23203+
23204+ .macro pax_enter_kernel_user
23205+ pax_set_fptr_mask
23206+#ifdef CONFIG_PAX_MEMORY_UDEREF
23207+ call pax_enter_kernel_user
23208+#endif
23209+ .endm
23210+
23211+ .macro pax_exit_kernel_user
23212+#ifdef CONFIG_PAX_MEMORY_UDEREF
23213+ call pax_exit_kernel_user
23214+#endif
23215+#ifdef CONFIG_PAX_RANDKSTACK
23216+ pushq %rax
23217+ pushq %r11
23218+ call pax_randomize_kstack
23219+ popq %r11
23220+ popq %rax
23221+#endif
23222+ .endm
23223+
23224+#ifdef CONFIG_PAX_MEMORY_UDEREF
23225+ENTRY(pax_enter_kernel_user)
23226+ pushq %rdi
23227+ pushq %rbx
23228+
23229+#ifdef CONFIG_PARAVIRT
23230+ PV_SAVE_REGS(CLBR_RDI)
23231+#endif
23232+
23233+ 661: jmp 111f
23234+ .pushsection .altinstr_replacement, "a"
23235+ 662: ASM_NOP2
23236+ .popsection
23237+ .pushsection .altinstructions, "a"
23238+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23239+ .popsection
23240+ GET_CR3_INTO_RDI
23241+ cmp $1,%dil
23242+ jnz 4f
23243+ sub $4097,%rdi
23244+ bts $63,%rdi
23245+ SET_RDI_INTO_CR3
23246+ jmp 3f
23247+111:
23248+
23249+ GET_CR3_INTO_RDI
23250+ mov %rdi,%rbx
23251+ add $__START_KERNEL_map,%rbx
23252+ sub phys_base(%rip),%rbx
23253+
23254+#ifdef CONFIG_PARAVIRT
23255+ cmpl $0, pv_info+PARAVIRT_enabled
23256+ jz 1f
23257+ pushq %rdi
23258+ i = 0
23259+ .rept USER_PGD_PTRS
23260+ mov i*8(%rbx),%rsi
23261+ mov $0,%sil
23262+ lea i*8(%rbx),%rdi
23263+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23264+ i = i + 1
23265+ .endr
23266+ popq %rdi
23267+ jmp 2f
23268+1:
23269+#endif
23270+
23271+ i = 0
23272+ .rept USER_PGD_PTRS
23273+ movb $0,i*8(%rbx)
23274+ i = i + 1
23275+ .endr
23276+
23277+2: SET_RDI_INTO_CR3
23278+
23279+#ifdef CONFIG_PAX_KERNEXEC
23280+ GET_CR0_INTO_RDI
23281+ bts $16,%rdi
23282+ SET_RDI_INTO_CR0
23283+#endif
23284+
23285+3:
23286+
23287+#ifdef CONFIG_PARAVIRT
23288+ PV_RESTORE_REGS(CLBR_RDI)
23289+#endif
23290+
23291+ popq %rbx
23292+ popq %rdi
23293+ pax_force_retaddr
23294+ retq
23295+4: ud2
23296+ENDPROC(pax_enter_kernel_user)
23297+
23298+ENTRY(pax_exit_kernel_user)
23299+ pushq %rdi
23300+ pushq %rbx
23301+
23302+#ifdef CONFIG_PARAVIRT
23303+ PV_SAVE_REGS(CLBR_RDI)
23304+#endif
23305+
23306+ GET_CR3_INTO_RDI
23307+ 661: jmp 1f
23308+ .pushsection .altinstr_replacement, "a"
23309+ 662: ASM_NOP2
23310+ .popsection
23311+ .pushsection .altinstructions, "a"
23312+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23313+ .popsection
23314+ cmp $0,%dil
23315+ jnz 3f
23316+ add $4097,%rdi
23317+ bts $63,%rdi
23318+ SET_RDI_INTO_CR3
23319+ jmp 2f
23320+1:
23321+
23322+ mov %rdi,%rbx
23323+
23324+#ifdef CONFIG_PAX_KERNEXEC
23325+ GET_CR0_INTO_RDI
23326+ btr $16,%rdi
23327+ jnc 3f
23328+ SET_RDI_INTO_CR0
23329+#endif
23330+
23331+ add $__START_KERNEL_map,%rbx
23332+ sub phys_base(%rip),%rbx
23333+
23334+#ifdef CONFIG_PARAVIRT
23335+ cmpl $0, pv_info+PARAVIRT_enabled
23336+ jz 1f
23337+ i = 0
23338+ .rept USER_PGD_PTRS
23339+ mov i*8(%rbx),%rsi
23340+ mov $0x67,%sil
23341+ lea i*8(%rbx),%rdi
23342+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23343+ i = i + 1
23344+ .endr
23345+ jmp 2f
23346+1:
23347+#endif
23348+
23349+ i = 0
23350+ .rept USER_PGD_PTRS
23351+ movb $0x67,i*8(%rbx)
23352+ i = i + 1
23353+ .endr
23354+2:
23355+
23356+#ifdef CONFIG_PARAVIRT
23357+ PV_RESTORE_REGS(CLBR_RDI)
23358+#endif
23359+
23360+ popq %rbx
23361+ popq %rdi
23362+ pax_force_retaddr
23363+ retq
23364+3: ud2
23365+ENDPROC(pax_exit_kernel_user)
23366+#endif
23367+
23368+ .macro pax_enter_kernel_nmi
23369+ pax_set_fptr_mask
23370+
23371+#ifdef CONFIG_PAX_KERNEXEC
23372+ GET_CR0_INTO_RDI
23373+ bts $16,%rdi
23374+ jc 110f
23375+ SET_RDI_INTO_CR0
23376+ or $2,%ebx
23377+110:
23378+#endif
23379+
23380+#ifdef CONFIG_PAX_MEMORY_UDEREF
23381+ 661: jmp 111f
23382+ .pushsection .altinstr_replacement, "a"
23383+ 662: ASM_NOP2
23384+ .popsection
23385+ .pushsection .altinstructions, "a"
23386+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23387+ .popsection
23388+ GET_CR3_INTO_RDI
23389+ cmp $0,%dil
23390+ jz 111f
23391+ sub $4097,%rdi
23392+ or $4,%ebx
23393+ bts $63,%rdi
23394+ SET_RDI_INTO_CR3
23395+ mov $__UDEREF_KERNEL_DS,%edi
23396+ mov %edi,%ss
23397+111:
23398+#endif
23399+ .endm
23400+
23401+ .macro pax_exit_kernel_nmi
23402+#ifdef CONFIG_PAX_KERNEXEC
23403+ btr $1,%ebx
23404+ jnc 110f
23405+ GET_CR0_INTO_RDI
23406+ btr $16,%rdi
23407+ SET_RDI_INTO_CR0
23408+110:
23409+#endif
23410+
23411+#ifdef CONFIG_PAX_MEMORY_UDEREF
23412+ btr $2,%ebx
23413+ jnc 111f
23414+ GET_CR3_INTO_RDI
23415+ add $4097,%rdi
23416+ bts $63,%rdi
23417+ SET_RDI_INTO_CR3
23418+ mov $__KERNEL_DS,%edi
23419+ mov %edi,%ss
23420+111:
23421+#endif
23422+ .endm
23423+
23424+ .macro pax_erase_kstack
23425+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23426+ call pax_erase_kstack
23427+#endif
23428+ .endm
23429+
23430+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23431+ENTRY(pax_erase_kstack)
23432+ pushq %rdi
23433+ pushq %rcx
23434+ pushq %rax
23435+ pushq %r11
23436+
23437+ GET_THREAD_INFO(%r11)
23438+ mov TI_lowest_stack(%r11), %rdi
23439+ mov $-0xBEEF, %rax
23440+ std
23441+
23442+1: mov %edi, %ecx
23443+ and $THREAD_SIZE_asm - 1, %ecx
23444+ shr $3, %ecx
23445+ repne scasq
23446+ jecxz 2f
23447+
23448+ cmp $2*8, %ecx
23449+ jc 2f
23450+
23451+ mov $2*8, %ecx
23452+ repe scasq
23453+ jecxz 2f
23454+ jne 1b
23455+
23456+2: cld
23457+ or $2*8, %rdi
23458+ mov %esp, %ecx
23459+ sub %edi, %ecx
23460+
23461+ cmp $THREAD_SIZE_asm, %rcx
23462+ jb 3f
23463+ ud2
23464+3:
23465+
23466+ shr $3, %ecx
23467+ rep stosq
23468+
23469+ mov TI_task_thread_sp0(%r11), %rdi
23470+ sub $256, %rdi
23471+ mov %rdi, TI_lowest_stack(%r11)
23472+
23473+ popq %r11
23474+ popq %rax
23475+ popq %rcx
23476+ popq %rdi
23477+ pax_force_retaddr
23478+ ret
23479+ENDPROC(pax_erase_kstack)
23480+#endif
23481
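The pax_erase_kstack routine above implements STACKLEAK's per-syscall stack sanitizing: it scans downward (std + scasq) from the task's recorded low-water mark looking for a run of seventeen consecutive poison qwords (-0xBEEF), refills everything from there up to the live stack pointer with the poison value, then resets TI_lowest_stack near the top of the stack. A rough C rendering of that scan follows; every name and constant here is assumed from the assembly rather than taken from a header, and the +2 adjustment only approximates the "or $2*8,%rdi" alignment step.

    #include <stdint.h>

    #define THREAD_SIZE   (16UL * 1024)
    #define STACK_POISON  0xffffffffffff4111UL   /* -0xBEEF as a qword */

    /* Illustrative sketch only; the real routine runs in assembly on
     * the task's own stack and updates TI_lowest_stack afterwards.
     */
    static void erase_kstack(uint64_t *lowest, uint64_t *sp)
    {
        uint64_t *p = lowest;

        /* scan down (cf. std + repne/repe scasq) for 17 consecutive
         * poison qwords, i.e. the region poisoned by an earlier pass
         */
        while (((uintptr_t)p & (THREAD_SIZE - 1)) != 0) {
            if (*p != STACK_POISON) {
                p--;
                continue;
            }
            int run = 1;
            while (run < 17 &&
                   ((uintptr_t)(p - run) & (THREAD_SIZE - 1)) != 0 &&
                   p[-run] == STACK_POISON)
                run++;
            if (run == 17)
                break;              /* hit the already-poisoned region */
            p -= run;
        }

        /* refill from just above that point up to the current stack
         * pointer (cf. cld + rep stosq)
         */
        for (p += 2; p < sp; p++)
            *p = STACK_POISON;
    }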
23482 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23483 #ifdef CONFIG_TRACE_IRQFLAGS
23484@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23485 .endm
23486
23487 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23488- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23489+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23490 jnc 1f
23491 TRACE_IRQS_ON_DEBUG
23492 1:
23493@@ -243,9 +670,52 @@ ENTRY(save_paranoid)
23494 js 1f /* negative -> in kernel */
23495 SWAPGS
23496 xorl %ebx,%ebx
23497-1: ret
23498+1:
23499+#ifdef CONFIG_PAX_MEMORY_UDEREF
23500+ testb $3, CS+8(%rsp)
23501+ jnz 1f
23502+ pax_enter_kernel
23503+ jmp 2f
23504+1: pax_enter_kernel_user
23505+2:
23506+#else
23507+ pax_enter_kernel
23508+#endif
23509+ pax_force_retaddr
23510+ ret
23511 CFI_ENDPROC
23512-END(save_paranoid)
23513+ENDPROC(save_paranoid)
23514+
23515+ENTRY(save_paranoid_nmi)
23516+ XCPT_FRAME 1 RDI+8
23517+ cld
23518+ movq_cfi rdi, RDI+8
23519+ movq_cfi rsi, RSI+8
23520+ movq_cfi rdx, RDX+8
23521+ movq_cfi rcx, RCX+8
23522+ movq_cfi rax, RAX+8
23523+ movq_cfi r8, R8+8
23524+ movq_cfi r9, R9+8
23525+ movq_cfi r10, R10+8
23526+ movq_cfi r11, R11+8
23527+ movq_cfi rbx, RBX+8
23528+ movq_cfi rbp, RBP+8
23529+ movq_cfi r12, R12+8
23530+ movq_cfi r13, R13+8
23531+ movq_cfi r14, R14+8
23532+ movq_cfi r15, R15+8
23533+ movl $1,%ebx
23534+ movl $MSR_GS_BASE,%ecx
23535+ rdmsr
23536+ testl %edx,%edx
23537+ js 1f /* negative -> in kernel */
23538+ SWAPGS
23539+ xorl %ebx,%ebx
23540+1: pax_enter_kernel_nmi
23541+ pax_force_retaddr
23542+ ret
23543+ CFI_ENDPROC
23544+ENDPROC(save_paranoid_nmi)
23545
23546 /*
23547 * A newly forked process directly context switches into this address.
23548@@ -266,7 +736,7 @@ ENTRY(ret_from_fork)
23549
23550 RESTORE_REST
23551
23552- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23553+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23554 jz 1f
23555
23556 /*
23557@@ -279,15 +749,13 @@ ENTRY(ret_from_fork)
23558 jmp int_ret_from_sys_call
23559
23560 1:
23561- subq $REST_SKIP, %rsp # leave space for volatiles
23562- CFI_ADJUST_CFA_OFFSET REST_SKIP
23563 movq %rbp, %rdi
23564 call *%rbx
23565 movl $0, RAX(%rsp)
23566 RESTORE_REST
23567 jmp int_ret_from_sys_call
23568 CFI_ENDPROC
23569-END(ret_from_fork)
23570+ENDPROC(ret_from_fork)
23571
23572 /*
23573 * System call entry. Up to 6 arguments in registers are supported.
23574@@ -324,7 +792,7 @@ END(ret_from_fork)
23575 ENTRY(system_call)
23576 CFI_STARTPROC simple
23577 CFI_SIGNAL_FRAME
23578- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23579+ CFI_DEF_CFA rsp,0
23580 CFI_REGISTER rip,rcx
23581 /*CFI_REGISTER rflags,r11*/
23582 SWAPGS_UNSAFE_STACK
23583@@ -337,16 +805,23 @@ GLOBAL(system_call_after_swapgs)
23584
23585 movq %rsp,PER_CPU_VAR(old_rsp)
23586 movq PER_CPU_VAR(kernel_stack),%rsp
23587+ SAVE_ARGS 8*6, 0, rax_enosys=1
23588+ pax_enter_kernel_user
23589+
23590+#ifdef CONFIG_PAX_RANDKSTACK
23591+ pax_erase_kstack
23592+#endif
23593+
23594 /*
23595 * No need to follow this irqs off/on section - it's straight
23596 * and short:
23597 */
23598 ENABLE_INTERRUPTS(CLBR_NONE)
23599- SAVE_ARGS 8, 0, rax_enosys=1
23600 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23601 movq %rcx,RIP-ARGOFFSET(%rsp)
23602 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23603- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23604+ GET_THREAD_INFO(%rcx)
23605+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23606 jnz tracesys
23607 system_call_fastpath:
23608 #if __SYSCALL_MASK == ~0
23609@@ -376,10 +851,13 @@ ret_from_sys_call:
23610 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23611 * very bad.
23612 */
23613- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23614+ GET_THREAD_INFO(%rcx)
23615+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23616 	jnz int_ret_from_sys_call_fixup	/* Go to the slow path */
23617
23618 CFI_REMEMBER_STATE
23619+ pax_exit_kernel_user
23620+ pax_erase_kstack
23621 /*
23622 * sysretq will re-enable interrupts:
23623 */
23624@@ -399,12 +877,15 @@ int_ret_from_sys_call_fixup:
23625
23626 /* Do syscall tracing */
23627 tracesys:
23628- leaq -REST_SKIP(%rsp), %rdi
23629+ movq %rsp, %rdi
23630 movq $AUDIT_ARCH_X86_64, %rsi
23631 call syscall_trace_enter_phase1
23632 test %rax, %rax
23633 jnz tracesys_phase2 /* if needed, run the slow path */
23634- LOAD_ARGS 0 /* else restore clobbered regs */
23635+
23636+ pax_erase_kstack
23637+
23638+ LOAD_ARGS /* else restore clobbered regs */
23639 jmp system_call_fastpath /* and return to the fast path */
23640
23641 tracesys_phase2:
23642@@ -415,12 +896,14 @@ tracesys_phase2:
23643 movq %rax,%rdx
23644 call syscall_trace_enter_phase2
23645
23646+ pax_erase_kstack
23647+
23648 /*
23649 * Reload arg registers from stack in case ptrace changed them.
23650 * We don't reload %rax because syscall_trace_entry_phase2() returned
23651 * the value it wants us to use in the table lookup.
23652 */
23653- LOAD_ARGS ARGOFFSET, 1
23654+ LOAD_ARGS 1
23655 RESTORE_REST
23656 #if __SYSCALL_MASK == ~0
23657 cmpq $__NR_syscall_max,%rax
23658@@ -451,7 +934,9 @@ GLOBAL(int_with_check)
23659 andl %edi,%edx
23660 jnz int_careful
23661 andl $~TS_COMPAT,TI_status(%rcx)
23662- jmp retint_swapgs
23663+ pax_exit_kernel_user
23664+ pax_erase_kstack
23665+ jmp retint_swapgs_pax
23666
23667 /* Either reschedule or signal or syscall exit tracking needed. */
23668 /* First do a reschedule test. */
23669@@ -497,7 +982,7 @@ int_restore_rest:
23670 TRACE_IRQS_OFF
23671 jmp int_with_check
23672 CFI_ENDPROC
23673-END(system_call)
23674+ENDPROC(system_call)
23675
23676 .macro FORK_LIKE func
23677 ENTRY(stub_\func)
23678@@ -510,9 +995,10 @@ ENTRY(stub_\func)
23679 DEFAULT_FRAME 0 8 /* offset 8: return address */
23680 call sys_\func
23681 RESTORE_TOP_OF_STACK %r11, 8
23682- ret $REST_SKIP /* pop extended registers */
23683+ pax_force_retaddr
23684+ ret
23685 CFI_ENDPROC
23686-END(stub_\func)
23687+ENDPROC(stub_\func)
23688 .endm
23689
23690 .macro FIXED_FRAME label,func
23691@@ -522,9 +1008,10 @@ ENTRY(\label)
23692 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23693 call \func
23694 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23695+ pax_force_retaddr
23696 ret
23697 CFI_ENDPROC
23698-END(\label)
23699+ENDPROC(\label)
23700 .endm
23701
23702 FORK_LIKE clone
23703@@ -543,7 +1030,7 @@ ENTRY(stub_execve)
23704 RESTORE_REST
23705 jmp int_ret_from_sys_call
23706 CFI_ENDPROC
23707-END(stub_execve)
23708+ENDPROC(stub_execve)
23709
23710 ENTRY(stub_execveat)
23711 CFI_STARTPROC
23712@@ -557,7 +1044,7 @@ ENTRY(stub_execveat)
23713 RESTORE_REST
23714 jmp int_ret_from_sys_call
23715 CFI_ENDPROC
23716-END(stub_execveat)
23717+ENDPROC(stub_execveat)
23718
23719 /*
23720 * sigreturn is special because it needs to restore all registers on return.
23721@@ -574,7 +1061,7 @@ ENTRY(stub_rt_sigreturn)
23722 RESTORE_REST
23723 jmp int_ret_from_sys_call
23724 CFI_ENDPROC
23725-END(stub_rt_sigreturn)
23726+ENDPROC(stub_rt_sigreturn)
23727
23728 #ifdef CONFIG_X86_X32_ABI
23729 ENTRY(stub_x32_rt_sigreturn)
23730@@ -588,7 +1075,7 @@ ENTRY(stub_x32_rt_sigreturn)
23731 RESTORE_REST
23732 jmp int_ret_from_sys_call
23733 CFI_ENDPROC
23734-END(stub_x32_rt_sigreturn)
23735+ENDPROC(stub_x32_rt_sigreturn)
23736
23737 ENTRY(stub_x32_execve)
23738 CFI_STARTPROC
23739@@ -602,7 +1089,7 @@ ENTRY(stub_x32_execve)
23740 RESTORE_REST
23741 jmp int_ret_from_sys_call
23742 CFI_ENDPROC
23743-END(stub_x32_execve)
23744+ENDPROC(stub_x32_execve)
23745
23746 ENTRY(stub_x32_execveat)
23747 CFI_STARTPROC
23748@@ -616,7 +1103,7 @@ ENTRY(stub_x32_execveat)
23749 RESTORE_REST
23750 jmp int_ret_from_sys_call
23751 CFI_ENDPROC
23752-END(stub_x32_execveat)
23753+ENDPROC(stub_x32_execveat)
23754
23755 #endif
23756
23757@@ -653,7 +1140,7 @@ vector=vector+1
23758 2: jmp common_interrupt
23759 .endr
23760 CFI_ENDPROC
23761-END(irq_entries_start)
23762+ENDPROC(irq_entries_start)
23763
23764 .previous
23765 END(interrupt)
23766@@ -670,28 +1157,29 @@ END(interrupt)
23767 /* 0(%rsp): ~(interrupt number) */
23768 .macro interrupt func
23769 /* reserve pt_regs for scratch regs and rbp */
23770- subq $ORIG_RAX-RBP, %rsp
23771- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23772+ subq $ORIG_RAX, %rsp
23773+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23774 cld
23775- /* start from rbp in pt_regs and jump over */
23776- movq_cfi rdi, (RDI-RBP)
23777- movq_cfi rsi, (RSI-RBP)
23778- movq_cfi rdx, (RDX-RBP)
23779- movq_cfi rcx, (RCX-RBP)
23780- movq_cfi rax, (RAX-RBP)
23781- movq_cfi r8, (R8-RBP)
23782- movq_cfi r9, (R9-RBP)
23783- movq_cfi r10, (R10-RBP)
23784- movq_cfi r11, (R11-RBP)
23785+ /* start from r15 in pt_regs and jump over */
23786+ movq_cfi rdi, RDI
23787+ movq_cfi rsi, RSI
23788+ movq_cfi rdx, RDX
23789+ movq_cfi rcx, RCX
23790+ movq_cfi rax, RAX
23791+ movq_cfi r8, R8
23792+ movq_cfi r9, R9
23793+ movq_cfi r10, R10
23794+ movq_cfi r11, R11
23795+ movq_cfi r12, R12
23796
23797 /* Save rbp so that we can unwind from get_irq_regs() */
23798- movq_cfi rbp, 0
23799+ movq_cfi rbp, RBP
23800
23801 /* Save previous stack value */
23802 movq %rsp, %rsi
23803
23804- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23805- testl $3, CS-RBP(%rsi)
23806+ movq %rsp,%rdi /* arg1 for handler */
23807+ testb $3, CS(%rsi)
23808 je 1f
23809 SWAPGS
23810 /*
23811@@ -711,6 +1199,18 @@ END(interrupt)
23812 0x06 /* DW_OP_deref */, \
23813 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23814 0x22 /* DW_OP_plus */
23815+
23816+#ifdef CONFIG_PAX_MEMORY_UDEREF
23817+ testb $3, CS(%rdi)
23818+ jnz 1f
23819+ pax_enter_kernel
23820+ jmp 2f
23821+1: pax_enter_kernel_user
23822+2:
23823+#else
23824+ pax_enter_kernel
23825+#endif
23826+
23827 /* We entered an interrupt context - irqs are off: */
23828 TRACE_IRQS_OFF
23829
23830@@ -735,14 +1235,14 @@ ret_from_intr:
23831
23832 /* Restore saved previous stack */
23833 popq %rsi
23834- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23835- leaq ARGOFFSET-RBP(%rsi), %rsp
23836+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23837+ movq %rsi, %rsp
23838 CFI_DEF_CFA_REGISTER rsp
23839- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23840+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23841
23842 exit_intr:
23843 GET_THREAD_INFO(%rcx)
23844- testl $3,CS-ARGOFFSET(%rsp)
23845+ testb $3,CS-ARGOFFSET(%rsp)
23846 je retint_kernel
23847
23848 /* Interrupt came from user space */
23849@@ -764,14 +1264,16 @@ retint_swapgs: /* return to user-space */
23850 * The iretq could re-enable interrupts:
23851 */
23852 DISABLE_INTERRUPTS(CLBR_ANY)
23853+ pax_exit_kernel_user
23854+retint_swapgs_pax:
23855 TRACE_IRQS_IRETQ
23856
23857 /*
23858 * Try to use SYSRET instead of IRET if we're returning to
23859 * a completely clean 64-bit userspace context.
23860 */
23861- movq (RCX-R11)(%rsp), %rcx
23862- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
23863+ movq (RCX-ARGOFFSET)(%rsp), %rcx
23864+ cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
23865 jne opportunistic_sysret_failed
23866
23867 /*
23868@@ -792,7 +1294,7 @@ retint_swapgs: /* return to user-space */
23869 shr $__VIRTUAL_MASK_SHIFT, %rcx
23870 jnz opportunistic_sysret_failed
23871
23872- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
23873+ cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
23874 jne opportunistic_sysret_failed
23875
23876 movq (R11-ARGOFFSET)(%rsp), %r11
23877@@ -838,6 +1340,27 @@ opportunistic_sysret_failed:
23878
23879 retint_restore_args: /* return to kernel space */
23880 DISABLE_INTERRUPTS(CLBR_ANY)
23881+ pax_exit_kernel
23882+
23883+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23884+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23885+	 * namely calling EFI runtime services with a phys mapping. We start
23886+	 * off with NOPs and patch in the real instrumentation (BTS/OR)
23887+	 * before starting any userland process, even before bringing up
23888+	 * the APs.
23889+ */
23890+ .pushsection .altinstr_replacement, "a"
23891+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23892+ 602:
23893+ .popsection
23894+ 603: .fill 602b-601b, 1, 0x90
23895+ .pushsection .altinstructions, "a"
23896+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23897+ .popsection
23898+#else
23899+ pax_force_retaddr (RIP-ARGOFFSET)
23900+#endif
23901+
23902 /*
23903 * The iretq could re-enable interrupts:
23904 */
23905@@ -875,15 +1398,15 @@ native_irq_return_ldt:
23906 SWAPGS
23907 movq PER_CPU_VAR(espfix_waddr),%rdi
23908 movq %rax,(0*8)(%rdi) /* RAX */
23909- movq (2*8)(%rsp),%rax /* RIP */
23910+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23911 movq %rax,(1*8)(%rdi)
23912- movq (3*8)(%rsp),%rax /* CS */
23913+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23914 movq %rax,(2*8)(%rdi)
23915- movq (4*8)(%rsp),%rax /* RFLAGS */
23916+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23917 movq %rax,(3*8)(%rdi)
23918- movq (6*8)(%rsp),%rax /* SS */
23919+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23920 movq %rax,(5*8)(%rdi)
23921- movq (5*8)(%rsp),%rax /* RSP */
23922+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23923 movq %rax,(4*8)(%rdi)
23924 andl $0xffff0000,%eax
23925 popq_cfi %rdi
23926@@ -937,7 +1460,7 @@ ENTRY(retint_kernel)
23927 jmp exit_intr
23928 #endif
23929 CFI_ENDPROC
23930-END(common_interrupt)
23931+ENDPROC(common_interrupt)
23932
23933 /*
23934 * APIC interrupts.
23935@@ -951,7 +1474,7 @@ ENTRY(\sym)
23936 interrupt \do_sym
23937 jmp ret_from_intr
23938 CFI_ENDPROC
23939-END(\sym)
23940+ENDPROC(\sym)
23941 .endm
23942
23943 #ifdef CONFIG_TRACING
23944@@ -1024,7 +1547,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23945 /*
23946 * Exception entry points.
23947 */
23948-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23949+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23950
23951 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23952 ENTRY(\sym)
23953@@ -1080,6 +1603,12 @@ ENTRY(\sym)
23954 .endif
23955
23956 .if \shift_ist != -1
23957+#ifdef CONFIG_SMP
23958+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23959+ lea init_tss(%r13), %r13
23960+#else
23961+ lea init_tss(%rip), %r13
23962+#endif
23963 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23964 .endif
23965
23966@@ -1126,7 +1655,7 @@ ENTRY(\sym)
23967 .endif
23968
23969 CFI_ENDPROC
23970-END(\sym)
23971+ENDPROC(\sym)
23972 .endm
23973
23974 #ifdef CONFIG_TRACING
23975@@ -1167,9 +1696,10 @@ gs_change:
23976 2: mfence /* workaround */
23977 SWAPGS
23978 popfq_cfi
23979+ pax_force_retaddr
23980 ret
23981 CFI_ENDPROC
23982-END(native_load_gs_index)
23983+ENDPROC(native_load_gs_index)
23984
23985 _ASM_EXTABLE(gs_change,bad_gs)
23986 .section .fixup,"ax"
23987@@ -1197,9 +1727,10 @@ ENTRY(do_softirq_own_stack)
23988 CFI_DEF_CFA_REGISTER rsp
23989 CFI_ADJUST_CFA_OFFSET -8
23990 decl PER_CPU_VAR(irq_count)
23991+ pax_force_retaddr
23992 ret
23993 CFI_ENDPROC
23994-END(do_softirq_own_stack)
23995+ENDPROC(do_softirq_own_stack)
23996
23997 #ifdef CONFIG_XEN
23998 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
23999@@ -1240,7 +1771,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24000 #endif
24001 jmp error_exit
24002 CFI_ENDPROC
24003-END(xen_do_hypervisor_callback)
24004+ENDPROC(xen_do_hypervisor_callback)
24005
24006 /*
24007 * Hypervisor uses this for application faults while it executes.
24008@@ -1299,7 +1830,7 @@ ENTRY(xen_failsafe_callback)
24009 SAVE_ALL
24010 jmp error_exit
24011 CFI_ENDPROC
24012-END(xen_failsafe_callback)
24013+ENDPROC(xen_failsafe_callback)
24014
24015 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24016 xen_hvm_callback_vector xen_evtchn_do_upcall
24017@@ -1344,18 +1875,25 @@ ENTRY(paranoid_exit)
24018 DEFAULT_FRAME
24019 DISABLE_INTERRUPTS(CLBR_NONE)
24020 TRACE_IRQS_OFF_DEBUG
24021- testl %ebx,%ebx /* swapgs needed? */
24022+ testl $1,%ebx /* swapgs needed? */
24023 jnz paranoid_restore
24024+#ifdef CONFIG_PAX_MEMORY_UDEREF
24025+ pax_exit_kernel_user
24026+#else
24027+ pax_exit_kernel
24028+#endif
24029 TRACE_IRQS_IRETQ 0
24030 SWAPGS_UNSAFE_STACK
24031 RESTORE_ALL 8
24032 INTERRUPT_RETURN
24033 paranoid_restore:
24034+ pax_exit_kernel
24035 TRACE_IRQS_IRETQ_DEBUG 0
24036 RESTORE_ALL 8
24037+ pax_force_retaddr_bts
24038 INTERRUPT_RETURN
24039 CFI_ENDPROC
24040-END(paranoid_exit)
24041+ENDPROC(paranoid_exit)
24042
24043 /*
24044 * Exception entry point. This expects an error code/orig_rax on the stack.
24045@@ -1382,12 +1920,23 @@ ENTRY(error_entry)
24046 movq %r14, R14+8(%rsp)
24047 movq %r15, R15+8(%rsp)
24048 xorl %ebx,%ebx
24049- testl $3,CS+8(%rsp)
24050+ testb $3,CS+8(%rsp)
24051 je error_kernelspace
24052 error_swapgs:
24053 SWAPGS
24054 error_sti:
24055+#ifdef CONFIG_PAX_MEMORY_UDEREF
24056+ testb $3, CS+8(%rsp)
24057+ jnz 1f
24058+ pax_enter_kernel
24059+ jmp 2f
24060+1: pax_enter_kernel_user
24061+2:
24062+#else
24063+ pax_enter_kernel
24064+#endif
24065 TRACE_IRQS_OFF
24066+ pax_force_retaddr
24067 ret
24068
24069 /*
24070@@ -1422,7 +1971,7 @@ error_bad_iret:
24071 decl %ebx /* Return to usergs */
24072 jmp error_sti
24073 CFI_ENDPROC
24074-END(error_entry)
24075+ENDPROC(error_entry)
24076
24077
24078 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24079@@ -1433,7 +1982,7 @@ ENTRY(error_exit)
24080 DISABLE_INTERRUPTS(CLBR_NONE)
24081 TRACE_IRQS_OFF
24082 GET_THREAD_INFO(%rcx)
24083- testl %eax,%eax
24084+ testl $1,%eax
24085 jne retint_kernel
24086 LOCKDEP_SYS_EXIT_IRQ
24087 movl TI_flags(%rcx),%edx
24088@@ -1442,7 +1991,7 @@ ENTRY(error_exit)
24089 jnz retint_careful
24090 jmp retint_swapgs
24091 CFI_ENDPROC
24092-END(error_exit)
24093+ENDPROC(error_exit)
24094
24095 /*
24096 * Test if a given stack is an NMI stack or not.
24097@@ -1500,9 +2049,11 @@ ENTRY(nmi)
24098 * If %cs was not the kernel segment, then the NMI triggered in user
24099 * space, which means it is definitely not nested.
24100 */
24101+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24102+ je 1f
24103 cmpl $__KERNEL_CS, 16(%rsp)
24104 jne first_nmi
24105-
24106+1:
24107 /*
24108 * Check the special variable on the stack to see if NMIs are
24109 * executing.
24110@@ -1536,8 +2087,7 @@ nested_nmi:
24111
24112 1:
24113 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24114- leaq -1*8(%rsp), %rdx
24115- movq %rdx, %rsp
24116+ subq $8, %rsp
24117 CFI_ADJUST_CFA_OFFSET 1*8
24118 leaq -10*8(%rsp), %rdx
24119 pushq_cfi $__KERNEL_DS
24120@@ -1555,6 +2105,7 @@ nested_nmi_out:
24121 CFI_RESTORE rdx
24122
24123 /* No need to check faults here */
24124+# pax_force_retaddr_bts
24125 INTERRUPT_RETURN
24126
24127 CFI_RESTORE_STATE
24128@@ -1651,13 +2202,13 @@ end_repeat_nmi:
24129 subq $ORIG_RAX-R15, %rsp
24130 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24131 /*
24132- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24133+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24134 * as we should not be calling schedule in NMI context.
24135 * Even with normal interrupts enabled. An NMI should not be
24136 * setting NEED_RESCHED or anything that normal interrupts and
24137 * exceptions might do.
24138 */
24139- call save_paranoid
24140+ call save_paranoid_nmi
24141 DEFAULT_FRAME 0
24142
24143 /*
24144@@ -1667,9 +2218,9 @@ end_repeat_nmi:
24145 * NMI itself takes a page fault, the page fault that was preempted
24146 * will read the information from the NMI page fault and not the
24147 * origin fault. Save it off and restore it if it changes.
24148- * Use the r12 callee-saved register.
24149+ * Use the r13 callee-saved register.
24150 */
24151- movq %cr2, %r12
24152+ movq %cr2, %r13
24153
24154 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24155 movq %rsp,%rdi
24156@@ -1678,29 +2229,34 @@ end_repeat_nmi:
24157
24158 /* Did the NMI take a page fault? Restore cr2 if it did */
24159 movq %cr2, %rcx
24160- cmpq %rcx, %r12
24161+ cmpq %rcx, %r13
24162 je 1f
24163- movq %r12, %cr2
24164+ movq %r13, %cr2
24165 1:
24166
24167- testl %ebx,%ebx /* swapgs needed? */
24168+ testl $1,%ebx /* swapgs needed? */
24169 jnz nmi_restore
24170 nmi_swapgs:
24171 SWAPGS_UNSAFE_STACK
24172 nmi_restore:
24173+ pax_exit_kernel_nmi
24174 /* Pop the extra iret frame at once */
24175 RESTORE_ALL 6*8
24176+ testb $3, 8(%rsp)
24177+ jnz 1f
24178+ pax_force_retaddr_bts
24179+1:
24180
24181 /* Clear the NMI executing stack variable */
24182 movq $0, 5*8(%rsp)
24183 jmp irq_return
24184 CFI_ENDPROC
24185-END(nmi)
24186+ENDPROC(nmi)
24187
24188 ENTRY(ignore_sysret)
24189 CFI_STARTPROC
24190 mov $-ENOSYS,%eax
24191 sysret
24192 CFI_ENDPROC
24193-END(ignore_sysret)
24194+ENDPROC(ignore_sysret)
24195
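The pax_enter_kernel_user/pax_exit_kernel_user pair above implements UDEREF two ways. On PCID hardware it switches CR3 between kernel and user shadow PGDs that sit one page apart (the +/-4097 is one page plus PCID 1), with bit 63 set to suppress the TLB flush. On non-PCID hardware it instead toggles the flag byte of every user-half PGD entry in place: entry clears it, unmapping userland while in the kernel, and exit restores 0x67, i.e. _PAGE_TABLE. A minimal C sketch of the non-PCID toggling, with the entry count and flag value assumed from the assembly:

    #include <stdint.h>

    #define USER_PGD_PTRS     256   /* assumed: user half of a 512-entry PGD */
    #define PAGE_TABLE_FLAGS  0x67  /* PRESENT|RW|USER|ACCESSED|DIRTY */

    /* entry path: hide userland (cf. "movb $0,i*8(%rbx)") */
    static void udref_close_userland(uint64_t *pgd)
    {
        for (unsigned int i = 0; i < USER_PGD_PTRS; i++)
            ((uint8_t *)&pgd[i])[0] = 0;
    }

    /* exit path: expose userland again (cf. "movb $0x67,i*8(%rbx)") */
    static void udref_open_userland(uint64_t *pgd)
    {
        for (unsigned int i = 0; i < USER_PGD_PTRS; i++)
            ((uint8_t *)&pgd[i])[0] = PAGE_TABLE_FLAGS;
    }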
24196diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24197index f5d0730..5bce89c 100644
24198--- a/arch/x86/kernel/espfix_64.c
24199+++ b/arch/x86/kernel/espfix_64.c
24200@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24201 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24202 static void *espfix_pages[ESPFIX_MAX_PAGES];
24203
24204-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24205- __aligned(PAGE_SIZE);
24206+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24207
24208 static unsigned int page_random, slot_random;
24209
24210@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24211 void __init init_espfix_bsp(void)
24212 {
24213 pgd_t *pgd_p;
24214+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24215
24216 /* Install the espfix pud into the kernel page directory */
24217- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24218+ pgd_p = &init_level4_pgt[index];
24219 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24220
24221+#ifdef CONFIG_PAX_PER_CPU_PGD
24222+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24223+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24224+#endif
24225+
24226 /* Randomize the locations */
24227 init_espfix_random();
24228
24229@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24230 set_pte(&pte_p[n*PTE_STRIDE], pte);
24231
24232 /* Job is done for this CPU and any CPU which shares this page */
24233- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24234+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24235
24236 unlock_done:
24237 mutex_unlock(&espfix_init_mutex);
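clone_pgd_range(), which the PAX_PER_CPU_PGD hunk above uses to mirror the freshly populated espfix slot into CPU 0's kernel and user shadow PGDs, is in mainline just a memcpy over top-level entries. A self-contained sketch mirroring that definition:

    #include <string.h>

    typedef struct { unsigned long pgd; } pgd_t;

    /* copy `count` top-level (PGD) slots from src to dst */
    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
        memcpy(dst, src, count * sizeof(pgd_t));
    }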
24238diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24239index 8b7b0a5..2395f29 100644
24240--- a/arch/x86/kernel/ftrace.c
24241+++ b/arch/x86/kernel/ftrace.c
24242@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24243 * kernel identity mapping to modify code.
24244 */
24245 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24246- ip = (unsigned long)__va(__pa_symbol(ip));
24247+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24248
24249 return ip;
24250 }
24251@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24252 {
24253 unsigned char replaced[MCOUNT_INSN_SIZE];
24254
24255+ ip = ktla_ktva(ip);
24256+
24257 /*
24258 * Note: Due to modules and __init, code can
24259 * disappear and change, we need to protect against faulting
24260@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24261 unsigned char old[MCOUNT_INSN_SIZE];
24262 int ret;
24263
24264- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24265+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24266
24267 ftrace_update_func = ip;
24268 /* Make sure the breakpoints see the ftrace_update_func update */
24269@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24270 unsigned char replaced[MCOUNT_INSN_SIZE];
24271 unsigned char brk = BREAKPOINT_INSTRUCTION;
24272
24273- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24274+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24275 return -EFAULT;
24276
24277 /* Make sure it is what we expect it to be */
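The ktla_ktva() calls threaded through the ftrace hunks above convert a kernel text linear address (what the CPU executes at under KERNEXEC's biased i386 code segment) into the virtual address where those bytes can actually be read or probed; where the feature is off, the macro degenerates to an identity map. A hedged sketch of the shape, with the offset value an assumption:

    /* Illustrative only: without KERNEXEC the bias is zero and both
     * macros are identity maps; with it, text reads must be rebased.
     */
    #ifndef __KERNEL_TEXT_OFFSET
    #define __KERNEL_TEXT_OFFSET 0UL  /* assumed; non-zero under i386 KERNEXEC */
    #endif

    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)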
24278diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24279index c4f8d46..2d63ae2 100644
24280--- a/arch/x86/kernel/head64.c
24281+++ b/arch/x86/kernel/head64.c
24282@@ -68,12 +68,12 @@ again:
24283 pgd = *pgd_p;
24284
24285 /*
24286- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24287- * critical -- __PAGE_OFFSET would point us back into the dynamic
24288+ * The use of __early_va rather than __va here is critical:
24289+ * __va would point us back into the dynamic
24290 * range and we might end up looping forever...
24291 */
24292 if (pgd)
24293- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24294+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24295 else {
24296 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24297 reset_early_page_tables();
24298@@ -83,13 +83,13 @@ again:
24299 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24300 for (i = 0; i < PTRS_PER_PUD; i++)
24301 pud_p[i] = 0;
24302- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24303+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24304 }
24305 pud_p += pud_index(address);
24306 pud = *pud_p;
24307
24308 if (pud)
24309- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24310+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24311 else {
24312 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24313 reset_early_page_tables();
24314@@ -99,7 +99,7 @@ again:
24315 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24316 for (i = 0; i < PTRS_PER_PMD; i++)
24317 pmd_p[i] = 0;
24318- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24319+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24320 }
24321 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24322 pmd_p[pmd_index(address)] = pmd;
24323@@ -180,7 +180,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24324 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24325 early_printk("Kernel alive\n");
24326
24327- clear_page(init_level4_pgt);
24328 /* set init_level4_pgt kernel high mapping*/
24329 init_level4_pgt[511] = early_level4_pgt[511];
24330
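The rewritten head64.c lines replace the open-coded phys-to-virt arithmetic with __early_va(); at this point in boot only the kernel-image mapping is usable, so the translation is exactly the one the removed lines spelled out. A sketch, with the definition assumed from that arithmetic:

    /* Assumed from the removed lines above: during early boot only the
     * __START_KERNEL_map image mapping exists, so phys -> virt is
     * phys + __START_KERNEL_map - phys_base (and __pa inverts it for
     * addresses in that range).
     */
    extern unsigned long phys_base;
    #define __START_KERNEL_map 0xffffffff80000000UL

    #define __early_va(x) \
        ((void *)((unsigned long)(x) + __START_KERNEL_map - phys_base))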
24331diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24332index f36bd42..0ab4474 100644
24333--- a/arch/x86/kernel/head_32.S
24334+++ b/arch/x86/kernel/head_32.S
24335@@ -26,6 +26,12 @@
24336 /* Physical address */
24337 #define pa(X) ((X) - __PAGE_OFFSET)
24338
24339+#ifdef CONFIG_PAX_KERNEXEC
24340+#define ta(X) (X)
24341+#else
24342+#define ta(X) ((X) - __PAGE_OFFSET)
24343+#endif
24344+
24345 /*
24346 * References to members of the new_cpu_data structure.
24347 */
24348@@ -55,11 +61,7 @@
24349  * and smaller than max_low_pfn, otherwise we will waste some page table entries
24350 */
24351
24352-#if PTRS_PER_PMD > 1
24353-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24354-#else
24355-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24356-#endif
24357+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24358
24359 /* Number of possible pages in the lowmem region */
24360 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24361@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24362 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24363
24364 /*
24365+ * Real beginning of normal "text" segment
24366+ */
24367+ENTRY(stext)
24368+ENTRY(_stext)
24369+
24370+/*
24371 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24372 * %esi points to the real-mode code as a 32-bit pointer.
24373 * CS and DS must be 4 GB flat segments, but we don't depend on
24374@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24375 * can.
24376 */
24377 __HEAD
24378+
24379+#ifdef CONFIG_PAX_KERNEXEC
24380+ jmp startup_32
24381+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24382+.fill PAGE_SIZE-5,1,0xcc
24383+#endif
24384+
24385 ENTRY(startup_32)
24386 movl pa(stack_start),%ecx
24387
24388@@ -106,6 +121,59 @@ ENTRY(startup_32)
24389 2:
24390 leal -__PAGE_OFFSET(%ecx),%esp
24391
24392+#ifdef CONFIG_SMP
24393+ movl $pa(cpu_gdt_table),%edi
24394+ movl $__per_cpu_load,%eax
24395+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24396+ rorl $16,%eax
24397+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24398+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24399+ movl $__per_cpu_end - 1,%eax
24400+ subl $__per_cpu_start,%eax
24401+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24402+#endif
24403+
24404+#ifdef CONFIG_PAX_MEMORY_UDEREF
24405+ movl $NR_CPUS,%ecx
24406+ movl $pa(cpu_gdt_table),%edi
24407+1:
24408+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24409+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24410+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24411+ addl $PAGE_SIZE_asm,%edi
24412+ loop 1b
24413+#endif
24414+
24415+#ifdef CONFIG_PAX_KERNEXEC
24416+ movl $pa(boot_gdt),%edi
24417+ movl $__LOAD_PHYSICAL_ADDR,%eax
24418+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24419+ rorl $16,%eax
24420+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24421+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24422+ rorl $16,%eax
24423+
24424+ ljmp $(__BOOT_CS),$1f
24425+1:
24426+
24427+ movl $NR_CPUS,%ecx
24428+ movl $pa(cpu_gdt_table),%edi
24429+ addl $__PAGE_OFFSET,%eax
24430+1:
24431+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24432+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24433+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24434+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24435+ rorl $16,%eax
24436+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24437+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24438+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24439+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24440+ rorl $16,%eax
24441+ addl $PAGE_SIZE_asm,%edi
24442+ loop 1b
24443+#endif
24444+
24445 /*
24446 * Clear BSS first so that there are no surprises...
24447 */
24448@@ -201,8 +269,11 @@ ENTRY(startup_32)
24449 movl %eax, pa(max_pfn_mapped)
24450
24451 /* Do early initialization of the fixmap area */
24452- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24453- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24454+#ifdef CONFIG_COMPAT_VDSO
24455+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24456+#else
24457+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24458+#endif
24459 #else /* Not PAE */
24460
24461 page_pde_offset = (__PAGE_OFFSET >> 20);
24462@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24463 movl %eax, pa(max_pfn_mapped)
24464
24465 /* Do early initialization of the fixmap area */
24466- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24467- movl %eax,pa(initial_page_table+0xffc)
24468+#ifdef CONFIG_COMPAT_VDSO
24469+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24470+#else
24471+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24472+#endif
24473 #endif
24474
24475 #ifdef CONFIG_PARAVIRT
24476@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24477 cmpl $num_subarch_entries, %eax
24478 jae bad_subarch
24479
24480- movl pa(subarch_entries)(,%eax,4), %eax
24481- subl $__PAGE_OFFSET, %eax
24482- jmp *%eax
24483+ jmp *pa(subarch_entries)(,%eax,4)
24484
24485 bad_subarch:
24486 WEAK(lguest_entry)
24487@@ -261,10 +333,10 @@ WEAK(xen_entry)
24488 __INITDATA
24489
24490 subarch_entries:
24491- .long default_entry /* normal x86/PC */
24492- .long lguest_entry /* lguest hypervisor */
24493- .long xen_entry /* Xen hypervisor */
24494- .long default_entry /* Moorestown MID */
24495+ .long ta(default_entry) /* normal x86/PC */
24496+ .long ta(lguest_entry) /* lguest hypervisor */
24497+ .long ta(xen_entry) /* Xen hypervisor */
24498+ .long ta(default_entry) /* Moorestown MID */
24499 num_subarch_entries = (. - subarch_entries) / 4
24500 .previous
24501 #else
24502@@ -354,6 +426,7 @@ default_entry:
24503 movl pa(mmu_cr4_features),%eax
24504 movl %eax,%cr4
24505
24506+#ifdef CONFIG_X86_PAE
24507 testb $X86_CR4_PAE, %al # check if PAE is enabled
24508 jz enable_paging
24509
24510@@ -382,6 +455,9 @@ default_entry:
24511 /* Make changes effective */
24512 wrmsr
24513
24514+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24515+#endif
24516+
24517 enable_paging:
24518
24519 /*
24520@@ -449,14 +525,20 @@ is486:
24521 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24522 movl %eax,%ss # after changing gdt.
24523
24524- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24525+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24526 movl %eax,%ds
24527 movl %eax,%es
24528
24529 movl $(__KERNEL_PERCPU), %eax
24530 movl %eax,%fs # set this cpu's percpu
24531
24532+#ifdef CONFIG_CC_STACKPROTECTOR
24533 movl $(__KERNEL_STACK_CANARY),%eax
24534+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24535+ movl $(__USER_DS),%eax
24536+#else
24537+ xorl %eax,%eax
24538+#endif
24539 movl %eax,%gs
24540
24541 xorl %eax,%eax # Clear LDT
24542@@ -512,8 +594,11 @@ setup_once:
24543 * relocation. Manually set base address in stack canary
24544 * segment descriptor.
24545 */
24546- movl $gdt_page,%eax
24547+ movl $cpu_gdt_table,%eax
24548 movl $stack_canary,%ecx
24549+#ifdef CONFIG_SMP
24550+ addl $__per_cpu_load,%ecx
24551+#endif
24552 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24553 shrl $16, %ecx
24554 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24555@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24556 cmpl $2,(%esp) # X86_TRAP_NMI
24557 je is_nmi # Ignore NMI
24558
24559- cmpl $2,%ss:early_recursion_flag
24560+ cmpl $1,%ss:early_recursion_flag
24561 je hlt_loop
24562 incl %ss:early_recursion_flag
24563
24564@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24565 pushl (20+6*4)(%esp) /* trapno */
24566 pushl $fault_msg
24567 call printk
24568-#endif
24569 call dump_stack
24570+#endif
24571 hlt_loop:
24572 hlt
24573 jmp hlt_loop
24574@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24575 /* This is the default interrupt "handler" :-) */
24576 ALIGN
24577 ignore_int:
24578- cld
24579 #ifdef CONFIG_PRINTK
24580+ cmpl $2,%ss:early_recursion_flag
24581+ je hlt_loop
24582+ incl %ss:early_recursion_flag
24583+ cld
24584 pushl %eax
24585 pushl %ecx
24586 pushl %edx
24587@@ -617,9 +705,6 @@ ignore_int:
24588 movl $(__KERNEL_DS),%eax
24589 movl %eax,%ds
24590 movl %eax,%es
24591- cmpl $2,early_recursion_flag
24592- je hlt_loop
24593- incl early_recursion_flag
24594 pushl 16(%esp)
24595 pushl 24(%esp)
24596 pushl 32(%esp)
24597@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24598 /*
24599 * BSS section
24600 */
24601-__PAGE_ALIGNED_BSS
24602- .align PAGE_SIZE
24603 #ifdef CONFIG_X86_PAE
24604+.section .initial_pg_pmd,"a",@progbits
24605 initial_pg_pmd:
24606 .fill 1024*KPMDS,4,0
24607 #else
24608+.section .initial_page_table,"a",@progbits
24609 ENTRY(initial_page_table)
24610 .fill 1024,4,0
24611 #endif
24612+.section .initial_pg_fixmap,"a",@progbits
24613 initial_pg_fixmap:
24614 .fill 1024,4,0
24615+.section .empty_zero_page,"a",@progbits
24616 ENTRY(empty_zero_page)
24617 .fill 4096,1,0
24618+.section .swapper_pg_dir,"a",@progbits
24619 ENTRY(swapper_pg_dir)
24620+#ifdef CONFIG_X86_PAE
24621+ .fill 4,8,0
24622+#else
24623 .fill 1024,4,0
24624+#endif
24625
24626 /*
24627 * This starts the data section.
24628 */
24629 #ifdef CONFIG_X86_PAE
24630-__PAGE_ALIGNED_DATA
24631- /* Page-aligned for the benefit of paravirt? */
24632- .align PAGE_SIZE
24633+.section .initial_page_table,"a",@progbits
24634 ENTRY(initial_page_table)
24635 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24636 # if KPMDS == 3
24637@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24638 # error "Kernel PMDs should be 1, 2 or 3"
24639 # endif
24640 .align PAGE_SIZE /* needs to be page-sized too */
24641+
24642+#ifdef CONFIG_PAX_PER_CPU_PGD
24643+ENTRY(cpu_pgd)
24644+ .rept 2*NR_CPUS
24645+ .fill 4,8,0
24646+ .endr
24647+#endif
24648+
24649 #endif
24650
24651 .data
24652 .balign 4
24653 ENTRY(stack_start)
24654- .long init_thread_union+THREAD_SIZE
24655+ .long init_thread_union+THREAD_SIZE-8
24656
24657 __INITRODATA
24658 int_msg:
24659@@ -727,7 +825,7 @@ fault_msg:
24660 * segment size, and 32-bit linear address value:
24661 */
24662
24663- .data
24664+.section .rodata,"a",@progbits
24665 .globl boot_gdt_descr
24666 .globl idt_descr
24667
24668@@ -736,7 +834,7 @@ fault_msg:
24669 .word 0 # 32 bit align gdt_desc.address
24670 boot_gdt_descr:
24671 .word __BOOT_DS+7
24672- .long boot_gdt - __PAGE_OFFSET
24673+ .long pa(boot_gdt)
24674
24675 .word 0 # 32-bit align idt_desc.address
24676 idt_descr:
24677@@ -747,7 +845,7 @@ idt_descr:
24678 .word 0 # 32 bit align gdt_desc.address
24679 ENTRY(early_gdt_descr)
24680 .word GDT_ENTRIES*8-1
24681- .long gdt_page /* Overwritten for secondary CPUs */
24682+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24683
24684 /*
24685 * The boot_gdt must mirror the equivalent in setup.S and is
24686@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24687 .align L1_CACHE_BYTES
24688 ENTRY(boot_gdt)
24689 .fill GDT_ENTRY_BOOT_CS,8,0
24690- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24691- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24692+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24693+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24694+
24695+ .align PAGE_SIZE_asm
24696+ENTRY(cpu_gdt_table)
24697+ .rept NR_CPUS
24698+ .quad 0x0000000000000000 /* NULL descriptor */
24699+ .quad 0x0000000000000000 /* 0x0b reserved */
24700+ .quad 0x0000000000000000 /* 0x13 reserved */
24701+ .quad 0x0000000000000000 /* 0x1b reserved */
24702+
24703+#ifdef CONFIG_PAX_KERNEXEC
24704+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24705+#else
24706+ .quad 0x0000000000000000 /* 0x20 unused */
24707+#endif
24708+
24709+ .quad 0x0000000000000000 /* 0x28 unused */
24710+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24711+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24712+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24713+ .quad 0x0000000000000000 /* 0x4b reserved */
24714+ .quad 0x0000000000000000 /* 0x53 reserved */
24715+ .quad 0x0000000000000000 /* 0x5b reserved */
24716+
24717+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24718+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24719+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24720+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24721+
24722+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24723+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24724+
24725+ /*
24726+ * Segments used for calling PnP BIOS have byte granularity.
24727+ * The code segments and data segments have fixed 64k limits,
24728+ * the transfer segment sizes are set at run time.
24729+ */
24730+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24731+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24732+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24733+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24734+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24735+
24736+ /*
24737+ * The APM segments have byte granularity and their bases
24738+ * are set at run time. All have 64k limits.
24739+ */
24740+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24741+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24742+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24743+
24744+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24745+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24746+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24747+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24748+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24749+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24750+
24751+ /* Be sure this is zeroed to avoid false validations in Xen */
24752+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24753+ .endr
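The descriptor quads filled into cpu_gdt_table above pack base, limit, access byte, and flags in the usual x86 layout; 0x00cf9b000000ffff, for example, is a present, DPL-0, execute/read code segment with base 0 and a page-granular 4 GiB limit. A small standalone decoder for sanity-checking such entries (field layout per the Intel SDM):

    #include <stdio.h>
    #include <stdint.h>

    static void decode_gdt_entry(uint64_t d)
    {
        uint32_t limit = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
        uint32_t base  = (uint32_t)((d >> 16) & 0xffffff) |
                         (uint32_t)(((d >> 56) & 0xff) << 24);
        unsigned access = (unsigned)((d >> 40) & 0xff);  /* P, DPL, S, type */
        unsigned flags  = (unsigned)((d >> 52) & 0x0f);  /* G, D/B, L, AVL */

        printf("base=%#x limit=%#x access=%#04x flags=%#x (%s granularity)\n",
               (unsigned)base, (unsigned)limit, access, flags,
               (flags & 0x8) ? "4K" : "byte");
    }

    int main(void)
    {
        decode_gdt_entry(0x00cf9b000000ffffULL); /* kernel 4GB code */
        decode_gdt_entry(0x00cf93000000ffffULL); /* kernel 4GB data */
        return 0;
    }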
24754diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24755index 6fd514d9..320367e 100644
24756--- a/arch/x86/kernel/head_64.S
24757+++ b/arch/x86/kernel/head_64.S
24758@@ -20,6 +20,8 @@
24759 #include <asm/processor-flags.h>
24760 #include <asm/percpu.h>
24761 #include <asm/nops.h>
24762+#include <asm/cpufeature.h>
24763+#include <asm/alternative-asm.h>
24764
24765 #ifdef CONFIG_PARAVIRT
24766 #include <asm/asm-offsets.h>
24767@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24768 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24769 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24770 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24771+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24772+L3_VMALLOC_START = pud_index(VMALLOC_START)
24773+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24774+L3_VMALLOC_END = pud_index(VMALLOC_END)
24775+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24776+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24777
24778 .text
24779 __HEAD
24780@@ -89,11 +97,26 @@ startup_64:
24781 * Fixup the physical addresses in the page table
24782 */
24783 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24784+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24785+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24786+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24787+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24788+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24789
24790- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24791- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24792+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24793+#ifndef CONFIG_XEN
24794+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24795+#endif
24796
24797+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24798+
24799+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24800+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24801+
24802+ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
24803+ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
24804 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24805+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24806
24807 /*
24808 * Set up the identity mapping for the switchover. These
24809@@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
24810 * after the boot processor executes this code.
24811 */
24812
24813+ orq $-1, %rbp
24814 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24815 1:
24816
24817- /* Enable PAE mode and PGE */
24818- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24819+ /* Enable PAE mode and PSE/PGE */
24820+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24821 movq %rcx, %cr4
24822
24823 /* Setup early boot stage 4 level pagetables. */
24824@@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
24825 movl $MSR_EFER, %ecx
24826 rdmsr
24827 btsl $_EFER_SCE, %eax /* Enable System Call */
24828- btl $20,%edi /* No Execute supported? */
24829+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24830 jnc 1f
24831 btsl $_EFER_NX, %eax
24832+ cmpq $-1, %rbp
24833+ je 1f
24834 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24835+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24836+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24837+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24838+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24839+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
24840+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
24841+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24842+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24843+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24844 1: wrmsr /* Make changes effective */
24845
24846 /* Setup cr0 */
24847@@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
24848 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24849 * address given in m16:64.
24850 */
24851+ pax_set_fptr_mask
24852 movq initial_code(%rip),%rax
24853 pushq $0 # fake return address to stop unwinder
24854 pushq $__KERNEL_CS # set correct cs
24855@@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
24856 .quad INIT_PER_CPU_VAR(irq_stack_union)
24857
24858 GLOBAL(stack_start)
24859- .quad init_thread_union+THREAD_SIZE-8
24860+ .quad init_thread_union+THREAD_SIZE-16
24861 .word 0
24862 __FINITDATA
24863
24864@@ -391,7 +427,7 @@ ENTRY(early_idt_handler)
24865 call dump_stack
24866 #ifdef CONFIG_KALLSYMS
24867 leaq early_idt_ripmsg(%rip),%rdi
24868- movq 40(%rsp),%rsi # %rip again
24869+ movq 88(%rsp),%rsi # %rip again
24870 call __print_symbol
24871 #endif
24872 #endif /* EARLY_PRINTK */
24873@@ -420,6 +456,7 @@ ENDPROC(early_idt_handler)
24874 early_recursion_flag:
24875 .long 0
24876
24877+ .section .rodata,"a",@progbits
24878 #ifdef CONFIG_EARLY_PRINTK
24879 early_idt_msg:
24880 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24881@@ -447,29 +484,52 @@ NEXT_PAGE(early_level4_pgt)
24882 NEXT_PAGE(early_dynamic_pgts)
24883 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24884
24885- .data
24886+ .section .rodata,"a",@progbits
24887
24888-#ifndef CONFIG_XEN
24889 NEXT_PAGE(init_level4_pgt)
24890- .fill 512,8,0
24891-#else
24892-NEXT_PAGE(init_level4_pgt)
24893- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24894 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24895 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24896+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24897+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24898+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24899+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24900+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24901+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24902 .org init_level4_pgt + L4_START_KERNEL*8, 0
24903 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24904 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24905
24906+#ifdef CONFIG_PAX_PER_CPU_PGD
24907+NEXT_PAGE(cpu_pgd)
24908+ .rept 2*NR_CPUS
24909+ .fill 512,8,0
24910+ .endr
24911+#endif
24912+
24913 NEXT_PAGE(level3_ident_pgt)
24914 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24915+#ifdef CONFIG_XEN
24916 .fill 511, 8, 0
24917+#else
24918+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24919+ .fill 510,8,0
24920+#endif
24921+
24922+NEXT_PAGE(level3_vmalloc_start_pgt)
24923+ .fill 512,8,0
24924+
24925+NEXT_PAGE(level3_vmalloc_end_pgt)
24926+ .fill 512,8,0
24927+
24928+NEXT_PAGE(level3_vmemmap_pgt)
24929+ .fill L3_VMEMMAP_START,8,0
24930+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24931+
24932 NEXT_PAGE(level2_ident_pgt)
24933- /* Since I easily can, map the first 1G.
24934+ /* Since I easily can, map the first 2G.
24935 * Don't set NX because code runs from these pages.
24936 */
24937- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24938-#endif
24939+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24940
24941 NEXT_PAGE(level3_kernel_pgt)
24942 .fill L3_START_KERNEL,8,0
24943@@ -477,6 +537,9 @@ NEXT_PAGE(level3_kernel_pgt)
24944 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24945 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24946
24947+NEXT_PAGE(level2_vmemmap_pgt)
24948+ .fill 512,8,0
24949+
24950 NEXT_PAGE(level2_kernel_pgt)
24951 /*
24952 * 512 MB kernel mapping. We spend a full page on this pagetable
24953@@ -492,23 +555,61 @@ NEXT_PAGE(level2_kernel_pgt)
24954 KERNEL_IMAGE_SIZE/PMD_SIZE)
24955
24956 NEXT_PAGE(level2_fixmap_pgt)
24957- .fill 506,8,0
24958- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24959- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24960- .fill 5,8,0
24961+ .fill 504,8,0
24962+ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
24963+ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
24964+ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
24965+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24966+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24967+ .fill 4,8,0
24968
24969 NEXT_PAGE(level1_fixmap_pgt)
24970+ .fill 3*512,8,0
24971+
24972+NEXT_PAGE(level1_vsyscall_pgt)
24973 .fill 512,8,0
24974
24975 #undef PMDS
24976
24977- .data
24978+ .align PAGE_SIZE
24979+ENTRY(cpu_gdt_table)
24980+ .rept NR_CPUS
24981+ .quad 0x0000000000000000 /* NULL descriptor */
24982+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24983+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24984+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24985+ .quad 0x00cffb000000ffff /* __USER32_CS */
24986+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24987+ .quad 0x00affb000000ffff /* __USER_CS */
24988+
24989+#ifdef CONFIG_PAX_KERNEXEC
24990+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24991+#else
24992+ .quad 0x0 /* unused */
24993+#endif
24994+
24995+ .quad 0,0 /* TSS */
24996+ .quad 0,0 /* LDT */
24997+ .quad 0,0,0 /* three TLS descriptors */
24998+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24999+ /* asm/segment.h:GDT_ENTRIES must match this */
25000+
25001+#ifdef CONFIG_PAX_MEMORY_UDEREF
25002+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25003+#else
25004+ .quad 0x0 /* unused */
25005+#endif
25006+
25007+ /* zero the remaining page */
25008+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25009+ .endr
25010+
25011 .align 16
25012 .globl early_gdt_descr
25013 early_gdt_descr:
25014 .word GDT_ENTRIES*8-1
25015 early_gdt_descr_base:
25016- .quad INIT_PER_CPU_VAR(gdt_page)
25017+ .quad cpu_gdt_table
25018
25019 ENTRY(phys_base)
25020 /* This must match the first entry in level2_kernel_pgt */
25021@@ -532,8 +633,8 @@ NEXT_PAGE(kasan_zero_pud)
25022
25023
25024 #include "../../x86/xen/xen-head.S"
25025-
25026- __PAGE_ALIGNED_BSS
25027+
25028+ .section .rodata,"a",@progbits
25029 NEXT_PAGE(empty_zero_page)
25030 .skip PAGE_SIZE
25031
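Once secondary_startup_64 above confirms NX support and enables EFER.NX, it sets _PAGE_BIT_NX (bit 63) directly on the data-only top-level slots (direct map, vmalloc, vmemmap, fixmap) instead of leaving them executable until later fixups. In C terms (illustrative; the real code runs in assembly before any C environment exists):

    #include <stdint.h>

    #define _PAGE_BIT_NX 63

    /* cf. the "btsq $_PAGE_BIT_NX,..." sequence above */
    static inline void pgd_set_nx(uint64_t *entry)
    {
        *entry |= 1ULL << _PAGE_BIT_NX;
    }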
25032diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25033index 05fd74f..c3548b1 100644
25034--- a/arch/x86/kernel/i386_ksyms_32.c
25035+++ b/arch/x86/kernel/i386_ksyms_32.c
25036@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25037 EXPORT_SYMBOL(cmpxchg8b_emu);
25038 #endif
25039
25040+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25041+
25042 /* Networking helper routines. */
25043 EXPORT_SYMBOL(csum_partial_copy_generic);
25044+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25045+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25046
25047 EXPORT_SYMBOL(__get_user_1);
25048 EXPORT_SYMBOL(__get_user_2);
25049@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25050 EXPORT_SYMBOL(___preempt_schedule_context);
25051 #endif
25052 #endif
25053+
25054+#ifdef CONFIG_PAX_KERNEXEC
25055+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25056+#endif
25057+
25058+#ifdef CONFIG_PAX_PER_CPU_PGD
25059+EXPORT_SYMBOL(cpu_pgd);
25060+#endif
25061diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25062index d5651fc..29c740d 100644
25063--- a/arch/x86/kernel/i387.c
25064+++ b/arch/x86/kernel/i387.c
25065@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25066 static inline bool interrupted_user_mode(void)
25067 {
25068 struct pt_regs *regs = get_irq_regs();
25069- return regs && user_mode_vm(regs);
25070+ return regs && user_mode(regs);
25071 }
25072
25073 /*
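The i387.c hunk folds user_mode_vm() into user_mode(); in this tree the test reduces to the privilege level in the saved CS (32-bit VM86 contexts need an extra flags check, omitted here). Roughly, as a sketch with the struct trimmed to the one field used:

    struct pt_regs { unsigned long cs; /* ... */ };

    /* RPL 3 in the saved CS means the interrupted context was userland */
    static inline int user_mode(const struct pt_regs *regs)
    {
        return (regs->cs & 3) == 3;
    }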
25074diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25075index e7cc537..67d7372 100644
25076--- a/arch/x86/kernel/i8259.c
25077+++ b/arch/x86/kernel/i8259.c
25078@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25079 static void make_8259A_irq(unsigned int irq)
25080 {
25081 disable_irq_nosync(irq);
25082- io_apic_irqs &= ~(1<<irq);
25083+ io_apic_irqs &= ~(1UL<<irq);
25084 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25085 enable_irq(irq);
25086 }
25087@@ -208,7 +208,7 @@ spurious_8259A_irq:
25088 "spurious 8259A interrupt: IRQ%d.\n", irq);
25089 spurious_irq_mask |= irqmask;
25090 }
25091- atomic_inc(&irq_err_count);
25092+ atomic_inc_unchecked(&irq_err_count);
25093 /*
25094 * Theoretically we do not have to handle this IRQ,
25095 * but in Linux this does not cause problems and is
25096@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25097 /* (slave's support for AEOI in flat mode is to be investigated) */
25098 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25099
25100+ pax_open_kernel();
25101 if (auto_eoi)
25102 /*
25103 * In AEOI mode we just have to mask the interrupt
25104 * when acking.
25105 */
25106- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25107+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25108 else
25109- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25110+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25111+ pax_close_kernel();
25112
25113 udelay(100); /* wait for 8259A to initialize */
25114
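The init_8259A() hunk shows the recurring pax_open_kernel()/pax_close_kernel() pattern: under KERNEXEC, ops structures such as irq_chip live in read-only memory, so the rare legitimate write is bracketed by calls that briefly lift write protection (conceptually, toggling CR0.WP). A sketch of the pattern, with the stubs and trimmed types standing in for the real implementation:

    struct irq_chip { void (*irq_mask_ack)(unsigned int irq); /* ... */ };

    /* conceptual stubs: lift / restore kernel write protection */
    static void pax_open_kernel(void)  { /* e.g. clear CR0.WP */ }
    static void pax_close_kernel(void) { /* e.g. restore CR0.WP */ }

    static void set_irq_mask_ack(struct irq_chip *chip,
                                 void (*fn)(unsigned int))
    {
        pax_open_kernel();
        *(void **)&chip->irq_mask_ack = (void *)fn;  /* cf. the hunk above */
        pax_close_kernel();
    }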
25115diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25116index a979b5b..1d6db75 100644
25117--- a/arch/x86/kernel/io_delay.c
25118+++ b/arch/x86/kernel/io_delay.c
25119@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25120 * Quirk table for systems that misbehave (lock up, etc.) if port
25121 * 0x80 is used:
25122 */
25123-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25124+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25125 {
25126 .callback = dmi_io_delay_0xed_port,
25127 .ident = "Compaq Presario V6000",
25128diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25129index 4ddaf66..49d5c18 100644
25130--- a/arch/x86/kernel/ioport.c
25131+++ b/arch/x86/kernel/ioport.c
25132@@ -6,6 +6,7 @@
25133 #include <linux/sched.h>
25134 #include <linux/kernel.h>
25135 #include <linux/capability.h>
25136+#include <linux/security.h>
25137 #include <linux/errno.h>
25138 #include <linux/types.h>
25139 #include <linux/ioport.h>
25140@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25141 return -EINVAL;
25142 if (turn_on && !capable(CAP_SYS_RAWIO))
25143 return -EPERM;
25144+#ifdef CONFIG_GRKERNSEC_IO
25145+ if (turn_on && grsec_disable_privio) {
25146+ gr_handle_ioperm();
25147+ return -ENODEV;
25148+ }
25149+#endif
25150
25151 /*
25152 * If it's the first ioperm() call in this thread's lifetime, set the
25153@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25154 * because the ->io_bitmap_max value must match the bitmap
25155 * contents:
25156 */
25157- tss = &per_cpu(init_tss, get_cpu());
25158+ tss = init_tss + get_cpu();
25159
25160 if (turn_on)
25161 bitmap_clear(t->io_bitmap_ptr, from, num);
25162@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25163 if (level > old) {
25164 if (!capable(CAP_SYS_RAWIO))
25165 return -EPERM;
25166+#ifdef CONFIG_GRKERNSEC_IO
25167+ if (grsec_disable_privio) {
25168+ gr_handle_iopl();
25169+ return -ENODEV;
25170+ }
25171+#endif
25172 }
25173 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25174 t->iopl = level << 12;
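
The GRKERNSEC_IO gate above covers both sys_ioperm() and sys_iopl(): with grsec_disable_privio set, even CAP_SYS_RAWIO holders are refused with -ENODEV and the attempt is logged via gr_handle_ioperm()/gr_handle_iopl(). (The per_cpu(init_tss, ...) -> init_tss + cpu rewrite is fallout of init_tss becoming a plain NR_CPUS array in the process.c hunk further below.) A hypothetical userland probe — run as root, and note that on a kernel without this gate it would actually grant port access:

    #include <sys/io.h>
    #include <stdio.h>

    int main(void)
    {
        if (ioperm(0x80, 1, 1) < 0)
            perror("ioperm");   /* expect ENODEV when GRKERNSEC_IO is active */
        return 0;
    }
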
25175diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25176index 67b1cbe..6ad4cbc 100644
25177--- a/arch/x86/kernel/irq.c
25178+++ b/arch/x86/kernel/irq.c
25179@@ -22,7 +22,7 @@
25180 #define CREATE_TRACE_POINTS
25181 #include <asm/trace/irq_vectors.h>
25182
25183-atomic_t irq_err_count;
25184+atomic_unchecked_t irq_err_count;
25185
25186 /* Function pointer for generic interrupt vector handling */
25187 void (*x86_platform_ipi_callback)(void) = NULL;
25188@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25189 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25190 seq_puts(p, " Hypervisor callback interrupts\n");
25191 #endif
25192- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25193+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25194 #if defined(CONFIG_X86_IO_APIC)
25195- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25196+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25197 #endif
25198 return 0;
25199 }
25200@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25201
25202 u64 arch_irq_stat(void)
25203 {
25204- u64 sum = atomic_read(&irq_err_count);
25205+ u64 sum = atomic_read_unchecked(&irq_err_count);
25206 return sum;
25207 }
25208
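
irq_err_count (and irq_mis_count, declared elsewhere) become atomic_unchecked_t because under PAX_REFCOUNT the ordinary atomic_t operations detect overflow and saturate, as a reference-count-overflow mitigation; pure statistics counters that may legitimately wrap opt out through the _unchecked variants. A sketch of the opt-out type's x86 shape, assuming PaX's usual definition:

    typedef struct {
        int counter;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* plain locked increment: no overflow check, wraparound allowed */
        asm volatile("lock incl %0" : "+m" (v->counter));
    }
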
25209diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25210index 28d28f5..e6cc9ae 100644
25211--- a/arch/x86/kernel/irq_32.c
25212+++ b/arch/x86/kernel/irq_32.c
25213@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25214
25215 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25216
25217+extern void gr_handle_kernel_exploit(void);
25218+
25219 int sysctl_panic_on_stackoverflow __read_mostly;
25220
25221 /* Debugging check for stack overflow: is there less than 1KB free? */
25222@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25223 __asm__ __volatile__("andl %%esp,%0" :
25224 "=r" (sp) : "0" (THREAD_SIZE - 1));
25225
25226- return sp < (sizeof(struct thread_info) + STACK_WARN);
25227+ return sp < STACK_WARN;
25228 }
25229
25230 static void print_stack_overflow(void)
25231 {
25232 printk(KERN_WARNING "low stack detected by irq handler\n");
25233 dump_stack();
25234+ gr_handle_kernel_exploit();
25235 if (sysctl_panic_on_stackoverflow)
25236 panic("low stack detected by irq handler - check messages\n");
25237 }
25238@@ -77,10 +80,9 @@ static inline void *current_stack(void)
25239 static inline int
25240 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25241 {
25242- struct irq_stack *curstk, *irqstk;
25243+ struct irq_stack *irqstk;
25244 u32 *isp, *prev_esp, arg1, arg2;
25245
25246- curstk = (struct irq_stack *) current_stack();
25247 irqstk = __this_cpu_read(hardirq_stack);
25248
25249 /*
25250@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25251 * handler) we can't do that and just have to keep using the
25252 * current stack (which is the irq stack already after all)
25253 */
25254- if (unlikely(curstk == irqstk))
25255+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25256 return 0;
25257
25258- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25259+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25260
25261 /* Save the next esp at the bottom of the stack */
25262 prev_esp = (u32 *)irqstk;
25263 *prev_esp = current_stack_pointer();
25264
25265+#ifdef CONFIG_PAX_MEMORY_UDEREF
25266+ __set_fs(MAKE_MM_SEG(0));
25267+#endif
25268+
25269 if (unlikely(overflow))
25270 call_on_stack(print_stack_overflow, isp);
25271
25272@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25273 : "0" (irq), "1" (desc), "2" (isp),
25274 "D" (desc->handle_irq)
25275 : "memory", "cc", "ecx");
25276+
25277+#ifdef CONFIG_PAX_MEMORY_UDEREF
25278+ __set_fs(current_thread_info()->addr_limit);
25279+#endif
25280+
25281 return 1;
25282 }
25283
25284@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25285 */
25286 void irq_ctx_init(int cpu)
25287 {
25288- struct irq_stack *irqstk;
25289-
25290 if (per_cpu(hardirq_stack, cpu))
25291 return;
25292
25293- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25294- THREADINFO_GFP,
25295- THREAD_SIZE_ORDER));
25296- per_cpu(hardirq_stack, cpu) = irqstk;
25297-
25298- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25299- THREADINFO_GFP,
25300- THREAD_SIZE_ORDER));
25301- per_cpu(softirq_stack, cpu) = irqstk;
25302-
25303- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25304- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25305+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25306+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25307 }
25308
25309 void do_softirq_own_stack(void)
25310 {
25311- struct thread_info *curstk;
25312 struct irq_stack *irqstk;
25313 u32 *isp, *prev_esp;
25314
25315- curstk = current_stack();
25316 irqstk = __this_cpu_read(softirq_stack);
25317
25318 /* build the stack frame on the softirq stack */
25319@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25320 prev_esp = (u32 *)irqstk;
25321 *prev_esp = current_stack_pointer();
25322
25323+#ifdef CONFIG_PAX_MEMORY_UDEREF
25324+ __set_fs(MAKE_MM_SEG(0));
25325+#endif
25326+
25327 call_on_stack(__do_softirq, isp);
25328+
25329+#ifdef CONFIG_PAX_MEMORY_UDEREF
25330+ __set_fs(current_thread_info()->addr_limit);
25331+#endif
25332+
25333 }
25334
25335 bool handle_irq(unsigned irq, struct pt_regs *regs)
25336@@ -165,7 +171,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25337 if (unlikely(!desc))
25338 return false;
25339
25340- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25341+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25342 if (unlikely(overflow))
25343 print_stack_overflow();
25344 desc->handle_irq(irq, desc);
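
Two separate concerns meet in irq_32.c. The user_mode_vm() -> user_mode() substitution recurs throughout this patch: PaX folds the VM86 check into user_mode() itself, retiring the _vm variant (mainline performed the same consolidation shortly after 4.0). The PAX_MEMORY_UDEREF bracketing is the interesting part: on i386 UDEREF backs addr_limit with a real segment limit, so while a handler runs on the per-cpu IRQ or softirq stack the userland window is closed completely, and the interrupted task's limit is restored on the way out. The discipline in isolation, with handler as a stand-in:

    #ifdef CONFIG_PAX_MEMORY_UDEREF
        __set_fs(MAKE_MM_SEG(0));   /* no userland dereference from IRQ context */
    #endif
        call_on_stack(handler, isp);
    #ifdef CONFIG_PAX_MEMORY_UDEREF
        __set_fs(current_thread_info()->addr_limit);    /* restore task's limit */
    #endif
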
25345diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25346index e4b503d..824fce8 100644
25347--- a/arch/x86/kernel/irq_64.c
25348+++ b/arch/x86/kernel/irq_64.c
25349@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25350 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25351 EXPORT_PER_CPU_SYMBOL(irq_regs);
25352
25353+extern void gr_handle_kernel_exploit(void);
25354+
25355 int sysctl_panic_on_stackoverflow;
25356
25357 /*
25358@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25359 u64 estack_top, estack_bottom;
25360 u64 curbase = (u64)task_stack_page(current);
25361
25362- if (user_mode_vm(regs))
25363+ if (user_mode(regs))
25364 return;
25365
25366 if (regs->sp >= curbase + sizeof(struct thread_info) +
25367@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25368 irq_stack_top, irq_stack_bottom,
25369 estack_top, estack_bottom);
25370
25371+ gr_handle_kernel_exploit();
25372+
25373 if (sysctl_panic_on_stackoverflow)
25374 panic("low stack detected by irq handler - check messages\n");
25375 #endif
25376diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25377index 26d5a55..a01160a 100644
25378--- a/arch/x86/kernel/jump_label.c
25379+++ b/arch/x86/kernel/jump_label.c
25380@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25381 * Jump label is enabled for the first time.
25382 * So we expect a default_nop...
25383 */
25384- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25385+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25386 != 0))
25387 bug_at((void *)entry->code, __LINE__);
25388 } else {
25389@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25390 * ...otherwise expect an ideal_nop. Otherwise
25391 * something went horribly wrong.
25392 */
25393- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25394+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25395 != 0))
25396 bug_at((void *)entry->code, __LINE__);
25397 }
25398@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25399 * are converting the default nop to the ideal nop.
25400 */
25401 if (init) {
25402- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25403+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25404 bug_at((void *)entry->code, __LINE__);
25405 } else {
25406 code.jump = 0xe9;
25407 code.offset = entry->target -
25408 (entry->code + JUMP_LABEL_NOP_SIZE);
25409- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25410+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25411 bug_at((void *)entry->code, __LINE__);
25412 }
25413 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
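
All the memcmp()/memcpy() probes of patched jump sites now go through ktla_ktva() because KERNEXEC on i386 maps the kernel text twice: entry->code keeps the execution address, while byte-level reads and writes of the instructions must use the alias. The conversion macros are defined roughly as follows in this patch family (identity mappings whenever KERNEXEC is off or on amd64):

    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
    #define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)
    #define ktva_ktla(addr) (addr)
    #endif

The same substitution explains the kgdb.c, kprobes and module.c hunks below: read code bytes via ktla_ktva(), and convert slot addresses back with ktva_ktla() when they must match what the CPU will actually execute.
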
25414diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25415index 25ecd56..e12482f 100644
25416--- a/arch/x86/kernel/kgdb.c
25417+++ b/arch/x86/kernel/kgdb.c
25418@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25419 #ifdef CONFIG_X86_32
25420 switch (regno) {
25421 case GDB_SS:
25422- if (!user_mode_vm(regs))
25423+ if (!user_mode(regs))
25424 *(unsigned long *)mem = __KERNEL_DS;
25425 break;
25426 case GDB_SP:
25427- if (!user_mode_vm(regs))
25428+ if (!user_mode(regs))
25429 *(unsigned long *)mem = kernel_stack_pointer(regs);
25430 break;
25431 case GDB_GS:
25432@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25433 bp->attr.bp_addr = breakinfo[breakno].addr;
25434 bp->attr.bp_len = breakinfo[breakno].len;
25435 bp->attr.bp_type = breakinfo[breakno].type;
25436- info->address = breakinfo[breakno].addr;
25437+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25438+ info->address = ktla_ktva(breakinfo[breakno].addr);
25439+ else
25440+ info->address = breakinfo[breakno].addr;
25441 info->len = breakinfo[breakno].len;
25442 info->type = breakinfo[breakno].type;
25443 val = arch_install_hw_breakpoint(bp);
25444@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25445 case 'k':
25446 /* clear the trace bit */
25447 linux_regs->flags &= ~X86_EFLAGS_TF;
25448- atomic_set(&kgdb_cpu_doing_single_step, -1);
25449+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25450
25451 /* set the trace bit if we're stepping */
25452 if (remcomInBuffer[0] == 's') {
25453 linux_regs->flags |= X86_EFLAGS_TF;
25454- atomic_set(&kgdb_cpu_doing_single_step,
25455+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25456 raw_smp_processor_id());
25457 }
25458
25459@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25460
25461 switch (cmd) {
25462 case DIE_DEBUG:
25463- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25464+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25465 if (user_mode(regs))
25466 return single_step_cont(regs, args);
25467 break;
25468@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25469 #endif /* CONFIG_DEBUG_RODATA */
25470
25471 bpt->type = BP_BREAKPOINT;
25472- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25473+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25474 BREAK_INSTR_SIZE);
25475 if (err)
25476 return err;
25477- err = probe_kernel_write((char *)bpt->bpt_addr,
25478+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25479 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25480 #ifdef CONFIG_DEBUG_RODATA
25481 if (!err)
25482@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25483 return -EBUSY;
25484 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25485 BREAK_INSTR_SIZE);
25486- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25487+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25488 if (err)
25489 return err;
25490 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25491@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25492 if (mutex_is_locked(&text_mutex))
25493 goto knl_write;
25494 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25495- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25496+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25497 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25498 goto knl_write;
25499 return err;
25500 knl_write:
25501 #endif /* CONFIG_DEBUG_RODATA */
25502- return probe_kernel_write((char *)bpt->bpt_addr,
25503+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25504 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25505 }
25506
25507diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25508index 4e3d5a9..03fffd8 100644
25509--- a/arch/x86/kernel/kprobes/core.c
25510+++ b/arch/x86/kernel/kprobes/core.c
25511@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25512 s32 raddr;
25513 } __packed *insn;
25514
25515- insn = (struct __arch_relative_insn *)from;
25516+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25517+
25518+ pax_open_kernel();
25519 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25520 insn->op = op;
25521+ pax_close_kernel();
25522 }
25523
25524 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25525@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25526 kprobe_opcode_t opcode;
25527 kprobe_opcode_t *orig_opcodes = opcodes;
25528
25529- if (search_exception_tables((unsigned long)opcodes))
25530+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25531 return 0; /* Page fault may occur on this address. */
25532
25533 retry:
25534@@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25535 * Fortunately, we know that the original code is the ideal 5-byte
25536 * long NOP.
25537 */
25538- memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25539+ memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25540 if (faddr)
25541 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25542 else
25543 buf[0] = kp->opcode;
25544- return (unsigned long)buf;
25545+ return ktva_ktla((unsigned long)buf);
25546 }
25547
25548 /*
25549@@ -364,7 +367,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25550 /* Another subsystem puts a breakpoint, failed to recover */
25551 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25552 return 0;
25553+ pax_open_kernel();
25554 memcpy(dest, insn.kaddr, insn.length);
25555+ pax_close_kernel();
25556
25557 #ifdef CONFIG_X86_64
25558 if (insn_rip_relative(&insn)) {
25559@@ -391,7 +396,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25560 return 0;
25561 }
25562 disp = (u8 *) dest + insn_offset_displacement(&insn);
25563+ pax_open_kernel();
25564 *(s32 *) disp = (s32) newdisp;
25565+ pax_close_kernel();
25566 }
25567 #endif
25568 return insn.length;
25569@@ -533,7 +540,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25570 * nor set current_kprobe, because it doesn't use single
25571 * stepping.
25572 */
25573- regs->ip = (unsigned long)p->ainsn.insn;
25574+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25575 preempt_enable_no_resched();
25576 return;
25577 }
25578@@ -550,9 +557,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25579 regs->flags &= ~X86_EFLAGS_IF;
25580 /* single step inline if the instruction is an int3 */
25581 if (p->opcode == BREAKPOINT_INSTRUCTION)
25582- regs->ip = (unsigned long)p->addr;
25583+ regs->ip = ktla_ktva((unsigned long)p->addr);
25584 else
25585- regs->ip = (unsigned long)p->ainsn.insn;
25586+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25587 }
25588 NOKPROBE_SYMBOL(setup_singlestep);
25589
25590@@ -602,7 +609,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25591 struct kprobe *p;
25592 struct kprobe_ctlblk *kcb;
25593
25594- if (user_mode_vm(regs))
25595+ if (user_mode(regs))
25596 return 0;
25597
25598 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25599@@ -637,7 +644,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25600 setup_singlestep(p, regs, kcb, 0);
25601 return 1;
25602 }
25603- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25604+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25605 /*
25606 * The breakpoint instruction was removed right
25607 * after we hit it. Another cpu has removed
25608@@ -684,6 +691,9 @@ static void __used kretprobe_trampoline_holder(void)
25609 " movq %rax, 152(%rsp)\n"
25610 RESTORE_REGS_STRING
25611 " popfq\n"
25612+#ifdef KERNEXEC_PLUGIN
25613+ " btsq $63,(%rsp)\n"
25614+#endif
25615 #else
25616 " pushf\n"
25617 SAVE_REGS_STRING
25618@@ -824,7 +834,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25619 struct kprobe_ctlblk *kcb)
25620 {
25621 unsigned long *tos = stack_addr(regs);
25622- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25623+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25624 unsigned long orig_ip = (unsigned long)p->addr;
25625 kprobe_opcode_t *insn = p->ainsn.insn;
25626
25627@@ -1007,7 +1017,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25628 struct die_args *args = data;
25629 int ret = NOTIFY_DONE;
25630
25631- if (args->regs && user_mode_vm(args->regs))
25632+ if (args->regs && user_mode(args->regs))
25633 return ret;
25634
25635 if (val == DIE_GPF) {
25636diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25637index 7b3b9d1..e2478b91 100644
25638--- a/arch/x86/kernel/kprobes/opt.c
25639+++ b/arch/x86/kernel/kprobes/opt.c
25640@@ -79,6 +79,7 @@ found:
25641 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25642 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25643 {
25644+ pax_open_kernel();
25645 #ifdef CONFIG_X86_64
25646 *addr++ = 0x48;
25647 *addr++ = 0xbf;
25648@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25649 *addr++ = 0xb8;
25650 #endif
25651 *(unsigned long *)addr = val;
25652+ pax_close_kernel();
25653 }
25654
25655 asm (
25656@@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25657 * Verify if the address gap is in 2GB range, because this uses
25658 * a relative jump.
25659 */
25660- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25661+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25662 if (abs(rel) > 0x7fffffff) {
25663 __arch_remove_optimized_kprobe(op, 0);
25664 return -ERANGE;
25665@@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25666 op->optinsn.size = ret;
25667
25668 /* Copy arch-dep-instance from template */
25669- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25670+ pax_open_kernel();
25671+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25672+ pax_close_kernel();
25673
25674 /* Set probe information */
25675 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25676
25677 /* Set probe function call */
25678- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25679+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25680
25681 /* Set returning jmp instruction at the tail of out-of-line buffer */
25682- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25683+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25684 (u8 *)op->kp.addr + op->optinsn.size);
25685
25686 flush_icache_range((unsigned long) buf,
25687@@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25688 WARN_ON(kprobe_disabled(&op->kp));
25689
25690 /* Backup instructions which will be replaced by jump address */
25691- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25692+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25693 RELATIVE_ADDR_SIZE);
25694
25695 insn_buf[0] = RELATIVEJUMP_OPCODE;
25696@@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25697 /* This kprobe is really able to run optimized path. */
25698 op = container_of(p, struct optimized_kprobe, kp);
25699 /* Detour through copied instructions */
25700- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25701+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25702 if (!reenter)
25703 reset_current_kprobe();
25704 preempt_enable_no_resched();
25705diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25706index c2bedae..25e7ab60 100644
25707--- a/arch/x86/kernel/ksysfs.c
25708+++ b/arch/x86/kernel/ksysfs.c
25709@@ -184,7 +184,7 @@ out:
25710
25711 static struct kobj_attribute type_attr = __ATTR_RO(type);
25712
25713-static struct bin_attribute data_attr = {
25714+static bin_attribute_no_const data_attr __read_only = {
25715 .attr = {
25716 .name = "data",
25717 .mode = S_IRUGO,
25718diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25719index c37886d..d851d32 100644
25720--- a/arch/x86/kernel/ldt.c
25721+++ b/arch/x86/kernel/ldt.c
25722@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25723 if (reload) {
25724 #ifdef CONFIG_SMP
25725 preempt_disable();
25726- load_LDT(pc);
25727+ load_LDT_nolock(pc);
25728 if (!cpumask_equal(mm_cpumask(current->mm),
25729 cpumask_of(smp_processor_id())))
25730 smp_call_function(flush_ldt, current->mm, 1);
25731 preempt_enable();
25732 #else
25733- load_LDT(pc);
25734+ load_LDT_nolock(pc);
25735 #endif
25736 }
25737 if (oldsize) {
25738@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25739 return err;
25740
25741 for (i = 0; i < old->size; i++)
25742- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25743+ write_ldt_entry(new->ldt, i, old->ldt + i);
25744 return 0;
25745 }
25746
25747@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25748 retval = copy_ldt(&mm->context, &old_mm->context);
25749 mutex_unlock(&old_mm->context.lock);
25750 }
25751+
25752+ if (tsk == current) {
25753+ mm->context.vdso = 0;
25754+
25755+#ifdef CONFIG_X86_32
25756+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25757+ mm->context.user_cs_base = 0UL;
25758+ mm->context.user_cs_limit = ~0UL;
25759+
25760+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25761+ cpus_clear(mm->context.cpu_user_cs_mask);
25762+#endif
25763+
25764+#endif
25765+#endif
25766+
25767+ }
25768+
25769 return retval;
25770 }
25771
25772@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25773 }
25774 }
25775
25776+#ifdef CONFIG_PAX_SEGMEXEC
25777+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25778+ error = -EINVAL;
25779+ goto out_unlock;
25780+ }
25781+#endif
25782+
25783 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25784 error = -EINVAL;
25785 goto out_unlock;
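
The SEGMEXEC check in write_ldt() closes a bypass: PAX_SEGMEXEC enforces non-executable pages on pre-NX i386 hardware by splitting the userland address space into separate data and code halves with segmentation, and a task that could install its own code descriptor via modify_ldt() would sidestep that split. A hypothetical userland probe of the new behavior (struct user_desc and MODIFY_LDT_CONTENTS_CODE are from the stock uapi header):

    #include <asm/ldt.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        struct user_desc d;
        memset(&d, 0, sizeof(d));
        d.limit          = 0xfffff;
        d.seg_32bit      = 1;
        d.limit_in_pages = 1;
        d.contents       = MODIFY_LDT_CONTENTS_CODE;  /* the rejected case */
        if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)
            perror("modify_ldt");  /* EINVAL for SEGMEXEC tasks */
        return 0;
    }
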
25786diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
25787index ff3c3101d..d7c0cd8 100644
25788--- a/arch/x86/kernel/livepatch.c
25789+++ b/arch/x86/kernel/livepatch.c
25790@@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25791 int ret, numpages, size = 4;
25792 bool readonly;
25793 unsigned long val;
25794- unsigned long core = (unsigned long)mod->module_core;
25795- unsigned long core_ro_size = mod->core_ro_size;
25796- unsigned long core_size = mod->core_size;
25797+ unsigned long core_rx = (unsigned long)mod->module_core_rx;
25798+ unsigned long core_rw = (unsigned long)mod->module_core_rw;
25799+ unsigned long core_size_rx = mod->core_size_rx;
25800+ unsigned long core_size_rw = mod->core_size_rw;
25801
25802 switch (type) {
25803 case R_X86_64_NONE:
25804@@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25805 return -EINVAL;
25806 }
25807
25808- if (loc < core || loc >= core + core_size)
25809+ if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
25810+ (loc < core_rw || loc >= core_rw + core_size_rw))
25811 /* loc does not point to any symbol inside the module */
25812 return -EINVAL;
25813
25814- if (loc < core + core_ro_size)
25815+ if (loc < core_rx + core_size_rx)
25816 readonly = true;
25817 else
25818 readonly = false;
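
The livepatch hunk is mechanical fallout of PaX's module layout: struct module's single module_core/core_size (with core_ro_size marking the read-only prefix) becomes a pair of images, module_core_rx for text and module_core_rw for data. A relocation target is therefore valid if it falls in either image, and read-only exactly when it falls in the RX one. Restated as a hypothetical helper:

    static bool in_image(unsigned long loc, unsigned long base, unsigned long size)
    {
        return loc >= base && loc < base + size;
    }

    /* valid:    in_image(loc, core_rx, core_size_rx) ||
     *           in_image(loc, core_rw, core_size_rw)
     * readonly: in_image(loc, core_rx, core_size_rx) */
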
25819diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25820index 469b23d..5449cfe 100644
25821--- a/arch/x86/kernel/machine_kexec_32.c
25822+++ b/arch/x86/kernel/machine_kexec_32.c
25823@@ -26,7 +26,7 @@
25824 #include <asm/cacheflush.h>
25825 #include <asm/debugreg.h>
25826
25827-static void set_idt(void *newidt, __u16 limit)
25828+static void set_idt(struct desc_struct *newidt, __u16 limit)
25829 {
25830 struct desc_ptr curidt;
25831
25832@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25833 }
25834
25835
25836-static void set_gdt(void *newgdt, __u16 limit)
25837+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25838 {
25839 struct desc_ptr curgdt;
25840
25841@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25842 }
25843
25844 control_page = page_address(image->control_code_page);
25845- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25846+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25847
25848 relocate_kernel_ptr = control_page;
25849 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25850diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25851index 94ea120..4154cea 100644
25852--- a/arch/x86/kernel/mcount_64.S
25853+++ b/arch/x86/kernel/mcount_64.S
25854@@ -7,7 +7,7 @@
25855 #include <linux/linkage.h>
25856 #include <asm/ptrace.h>
25857 #include <asm/ftrace.h>
25858-
25859+#include <asm/alternative-asm.h>
25860
25861 .code64
25862 .section .entry.text, "ax"
25863@@ -148,8 +148,9 @@
25864 #ifdef CONFIG_DYNAMIC_FTRACE
25865
25866 ENTRY(function_hook)
25867+ pax_force_retaddr
25868 retq
25869-END(function_hook)
25870+ENDPROC(function_hook)
25871
25872 ENTRY(ftrace_caller)
25873 /* save_mcount_regs fills in first two parameters */
25874@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25875 #endif
25876
25877 GLOBAL(ftrace_stub)
25878+ pax_force_retaddr
25879 retq
25880-END(ftrace_caller)
25881+ENDPROC(ftrace_caller)
25882
25883 ENTRY(ftrace_regs_caller)
25884 /* Save the current flags before any operations that can change them */
25885@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25886
25887 jmp ftrace_return
25888
25889-END(ftrace_regs_caller)
25890+ENDPROC(ftrace_regs_caller)
25891
25892
25893 #else /* ! CONFIG_DYNAMIC_FTRACE */
25894@@ -272,18 +274,20 @@ fgraph_trace:
25895 #endif
25896
25897 GLOBAL(ftrace_stub)
25898+ pax_force_retaddr
25899 retq
25900
25901 trace:
25902 /* save_mcount_regs fills in first two parameters */
25903 save_mcount_regs
25904
25905+ pax_force_fptr ftrace_trace_function
25906 call *ftrace_trace_function
25907
25908 restore_mcount_regs
25909
25910 jmp fgraph_trace
25911-END(function_hook)
25912+ENDPROC(function_hook)
25913 #endif /* CONFIG_DYNAMIC_FTRACE */
25914 #endif /* CONFIG_FUNCTION_TRACER */
25915
25916@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25917
25918 restore_mcount_regs
25919
25920+ pax_force_retaddr
25921 retq
25922-END(ftrace_graph_caller)
25923+ENDPROC(ftrace_graph_caller)
25924
25925 GLOBAL(return_to_handler)
25926 subq $24, %rsp
25927@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25928 movq 8(%rsp), %rdx
25929 movq (%rsp), %rax
25930 addq $24, %rsp
25931+ pax_force_fptr %rdi
25932 jmp *%rdi
25933+ENDPROC(return_to_handler)
25934 #endif
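
The pax_force_retaddr/pax_force_fptr annotations in mcount_64.S belong to the KERNEXEC gcc-plugin instrumentation on amd64: just before a ret or an indirect jump, bit 63 of the return address or target pointer is forced on, so a value corrupted to point at userland becomes non-canonical and faults instead of transferring control. With the plugin's "bts" method the macros expand roughly to (a sketch, per the asm/alternative-asm.h additions this patch makes elsewhere):

    .macro pax_force_retaddr
        btsq $63, (%rsp)    /* pin the saved return address into kernel space */
    .endm

    .macro pax_force_fptr ptr
        btsq $63, \ptr      /* same for an indirect jump/call target */
    .endm

The END -> ENDPROC changes are companion cleanups, giving the symbols proper ELF function type and size annotations.
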
25935diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25936index d1ac80b..f593701 100644
25937--- a/arch/x86/kernel/module.c
25938+++ b/arch/x86/kernel/module.c
25939@@ -82,17 +82,17 @@ static unsigned long int get_module_load_offset(void)
25940 }
25941 #endif
25942
25943-void *module_alloc(unsigned long size)
25944+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25945 {
25946 void *p;
25947
25948- if (PAGE_ALIGN(size) > MODULES_LEN)
25949+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25950 return NULL;
25951
25952 p = __vmalloc_node_range(size, MODULE_ALIGN,
25953 MODULES_VADDR + get_module_load_offset(),
25954- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25955- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
25956+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25957+ prot, 0, NUMA_NO_NODE,
25958 __builtin_return_address(0));
25959 if (p && (kasan_module_alloc(p, size) < 0)) {
25960 vfree(p);
25961@@ -102,6 +102,51 @@ void *module_alloc(unsigned long size)
25962 return p;
25963 }
25964
25965+void *module_alloc(unsigned long size)
25966+{
25967+
25968+#ifdef CONFIG_PAX_KERNEXEC
25969+ return __module_alloc(size, PAGE_KERNEL);
25970+#else
25971+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25972+#endif
25973+
25974+}
25975+
25976+#ifdef CONFIG_PAX_KERNEXEC
25977+#ifdef CONFIG_X86_32
25978+void *module_alloc_exec(unsigned long size)
25979+{
25980+ struct vm_struct *area;
25981+
25982+ if (size == 0)
25983+ return NULL;
25984+
25985+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25986+ return area ? area->addr : NULL;
25987+}
25988+EXPORT_SYMBOL(module_alloc_exec);
25989+
25990+void module_memfree_exec(void *module_region)
25991+{
25992+ vunmap(module_region);
25993+}
25994+EXPORT_SYMBOL(module_memfree_exec);
25995+#else
25996+void module_memfree_exec(void *module_region)
25997+{
25998+ module_memfree(module_region);
25999+}
26000+EXPORT_SYMBOL(module_memfree_exec);
26001+
26002+void *module_alloc_exec(unsigned long size)
26003+{
26004+ return __module_alloc(size, PAGE_KERNEL_RX);
26005+}
26006+EXPORT_SYMBOL(module_alloc_exec);
26007+#endif
26008+#endif
26009+
26010 #ifdef CONFIG_X86_32
26011 int apply_relocate(Elf32_Shdr *sechdrs,
26012 const char *strtab,
26013@@ -112,14 +157,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26014 unsigned int i;
26015 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26016 Elf32_Sym *sym;
26017- uint32_t *location;
26018+ uint32_t *plocation, location;
26019
26020 DEBUGP("Applying relocate section %u to %u\n",
26021 relsec, sechdrs[relsec].sh_info);
26022 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26023 /* This is where to make the change */
26024- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26025- + rel[i].r_offset;
26026+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26027+ location = (uint32_t)plocation;
26028+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26029+ plocation = ktla_ktva((void *)plocation);
26030 /* This is the symbol it is referring to. Note that all
26031 undefined symbols have been resolved. */
26032 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26033@@ -128,11 +175,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26034 switch (ELF32_R_TYPE(rel[i].r_info)) {
26035 case R_386_32:
26036 /* We add the value into the location given */
26037- *location += sym->st_value;
26038+ pax_open_kernel();
26039+ *plocation += sym->st_value;
26040+ pax_close_kernel();
26041 break;
26042 case R_386_PC32:
26043 /* Add the value, subtract its position */
26044- *location += sym->st_value - (uint32_t)location;
26045+ pax_open_kernel();
26046+ *plocation += sym->st_value - location;
26047+ pax_close_kernel();
26048 break;
26049 default:
26050 pr_err("%s: Unknown relocation: %u\n",
26051@@ -177,21 +228,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26052 case R_X86_64_NONE:
26053 break;
26054 case R_X86_64_64:
26055+ pax_open_kernel();
26056 *(u64 *)loc = val;
26057+ pax_close_kernel();
26058 break;
26059 case R_X86_64_32:
26060+ pax_open_kernel();
26061 *(u32 *)loc = val;
26062+ pax_close_kernel();
26063 if (val != *(u32 *)loc)
26064 goto overflow;
26065 break;
26066 case R_X86_64_32S:
26067+ pax_open_kernel();
26068 *(s32 *)loc = val;
26069+ pax_close_kernel();
26070 if ((s64)val != *(s32 *)loc)
26071 goto overflow;
26072 break;
26073 case R_X86_64_PC32:
26074 val -= (u64)loc;
26075+ pax_open_kernel();
26076 *(u32 *)loc = val;
26077+ pax_close_kernel();
26078+
26079 #if 0
26080 if ((s64)val != *(s32 *)loc)
26081 goto overflow;
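
Note the two-address discipline in the i386 apply_relocate() above: location keeps the execution address (what PC-relative arithmetic must use), while the store goes through plocation, pushed through ktla_ktva() for SHF_EXECINSTR sections so the write lands on the writable alias of the read-only text, inside a pax_open_kernel() window. The rule in isolation (i386 KERNEXEC assumed; sym_value is the resolved symbol):

    uint32_t  va = (uint32_t)loc;                             /* execution address */
    uint32_t *wa = (uint32_t *)ktla_ktva((unsigned long)loc); /* write address */

    pax_open_kernel();
    *wa += sym_value - va;    /* R_386_PC32: relative to where the code runs */
    pax_close_kernel();
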
26082diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26083index 113e707..0a690e1 100644
26084--- a/arch/x86/kernel/msr.c
26085+++ b/arch/x86/kernel/msr.c
26086@@ -39,6 +39,7 @@
26087 #include <linux/notifier.h>
26088 #include <linux/uaccess.h>
26089 #include <linux/gfp.h>
26090+#include <linux/grsecurity.h>
26091
26092 #include <asm/processor.h>
26093 #include <asm/msr.h>
26094@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26095 int err = 0;
26096 ssize_t bytes = 0;
26097
26098+#ifdef CONFIG_GRKERNSEC_KMEM
26099+ gr_handle_msr_write();
26100+ return -EPERM;
26101+#endif
26102+
26103 if (count % 8)
26104 return -EINVAL; /* Invalid chunk size */
26105
26106@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26107 err = -EBADF;
26108 break;
26109 }
26110+#ifdef CONFIG_GRKERNSEC_KMEM
26111+ gr_handle_msr_write();
26112+ return -EPERM;
26113+#endif
26114 if (copy_from_user(&regs, uregs, sizeof regs)) {
26115 err = -EFAULT;
26116 break;
26117@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26118 return notifier_from_errno(err);
26119 }
26120
26121-static struct notifier_block __refdata msr_class_cpu_notifier = {
26122+static struct notifier_block msr_class_cpu_notifier = {
26123 .notifier_call = msr_class_cpu_callback,
26124 };
26125
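
With GRKERNSEC_KMEM, both MSR write paths — the write() handler and the X86_IOC_WRMSR_REGS ioctl — log through gr_handle_msr_write() and bail out with -EPERM before any wrmsr is attempted; reads stay available. A hypothetical probe (as root; on a kernel without this gate the write would really be issued, so treat this strictly as an illustration):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val = 0;
        int fd = open("/dev/cpu/0/msr", O_WRONLY);
        if (fd < 0) { perror("open"); return 1; }
        /* the pwrite offset selects the MSR number */
        if (pwrite(fd, &val, sizeof(val), 0x1a0) < 0)
            perror("pwrite");   /* EPERM under GRKERNSEC_KMEM */
        close(fd);
        return 0;
    }
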
26126diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26127index c3e985d..110a36a 100644
26128--- a/arch/x86/kernel/nmi.c
26129+++ b/arch/x86/kernel/nmi.c
26130@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26131
26132 static void nmi_max_handler(struct irq_work *w)
26133 {
26134- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26135+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26136 int remainder_ns, decimal_msecs;
26137- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26138+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26139
26140 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26141 decimal_msecs = remainder_ns / 1000;
26142
26143 printk_ratelimited(KERN_INFO
26144 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26145- a->handler, whole_msecs, decimal_msecs);
26146+ n->action->handler, whole_msecs, decimal_msecs);
26147 }
26148
26149 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26150@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26151 delta = sched_clock() - delta;
26152 trace_nmi_handler(a->handler, (int)delta, thishandled);
26153
26154- if (delta < nmi_longest_ns || delta < a->max_duration)
26155+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26156 continue;
26157
26158- a->max_duration = delta;
26159- irq_work_queue(&a->irq_work);
26160+ a->work->max_duration = delta;
26161+ irq_work_queue(&a->work->irq_work);
26162 }
26163
26164 rcu_read_unlock();
26165@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26166 }
26167 NOKPROBE_SYMBOL(nmi_handle);
26168
26169-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26170+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26171 {
26172 struct nmi_desc *desc = nmi_to_desc(type);
26173 unsigned long flags;
26174@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26175 if (!action->handler)
26176 return -EINVAL;
26177
26178- init_irq_work(&action->irq_work, nmi_max_handler);
26179+ action->work->action = action;
26180+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26181
26182 spin_lock_irqsave(&desc->lock, flags);
26183
26184@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26185 * event confuses some handlers (kdump uses this flag)
26186 */
26187 if (action->flags & NMI_FLAG_FIRST)
26188- list_add_rcu(&action->list, &desc->head);
26189+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26190 else
26191- list_add_tail_rcu(&action->list, &desc->head);
26192+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26193
26194 spin_unlock_irqrestore(&desc->lock, flags);
26195 return 0;
26196@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26197 if (!strcmp(n->name, name)) {
26198 WARN(in_nmi(),
26199 "Trying to free NMI (%s) from NMI context!\n", n->name);
26200- list_del_rcu(&n->list);
26201+ pax_list_del_rcu((struct list_head *)&n->list);
26202 break;
26203 }
26204 }
26205@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26206 dotraplinkage notrace void
26207 do_nmi(struct pt_regs *regs, long error_code)
26208 {
26209+
26210+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26211+ if (!user_mode(regs)) {
26212+ unsigned long cs = regs->cs & 0xFFFF;
26213+ unsigned long ip = ktva_ktla(regs->ip);
26214+
26215+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26216+ regs->ip = ip;
26217+ }
26218+#endif
26219+
26220 nmi_nesting_preprocess(regs);
26221
26222 nmi_enter();
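
The nmiaction reshuffle exists so the action descriptors themselves can be registered as const (note the new __register_nmi_handler() signature): the mutable profiling state — the deferred irq_work and the max_duration high-water mark — moves into a companion struct nmiwork carrying a back-pointer to its action, and the RCU list splices go through pax_list_add_rcu()/pax_list_del_rcu(), which open a write window for the otherwise read-only list nodes. The companion structure, approximately as this patch adds it in asm/nmi.h:

    struct nmiwork {
        const struct nmiaction *action; /* back-pointer for the warning printk */
        u64 max_duration;               /* slowest observed handler, in ns */
        struct irq_work irq_work;       /* defers the warning out of NMI context */
    };
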
26223diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26224index 6d9582e..f746287 100644
26225--- a/arch/x86/kernel/nmi_selftest.c
26226+++ b/arch/x86/kernel/nmi_selftest.c
26227@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26228 {
26229 /* trap all the unknown NMIs we may generate */
26230 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26231- __initdata);
26232+ __initconst);
26233 }
26234
26235 static void __init cleanup_nmi_testsuite(void)
26236@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26237 unsigned long timeout;
26238
26239 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26240- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26241+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26242 nmi_fail = FAILURE;
26243 return;
26244 }
26245diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26246index bbb6c73..24a58ef 100644
26247--- a/arch/x86/kernel/paravirt-spinlocks.c
26248+++ b/arch/x86/kernel/paravirt-spinlocks.c
26249@@ -8,7 +8,7 @@
26250
26251 #include <asm/paravirt.h>
26252
26253-struct pv_lock_ops pv_lock_ops = {
26254+struct pv_lock_ops pv_lock_ops __read_only = {
26255 #ifdef CONFIG_SMP
26256 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26257 .unlock_kick = paravirt_nop,
26258diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26259index 548d25f..f8fb99c 100644
26260--- a/arch/x86/kernel/paravirt.c
26261+++ b/arch/x86/kernel/paravirt.c
26262@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26263 {
26264 return x;
26265 }
26266+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26267+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26268+#endif
26269
26270 void __init default_banner(void)
26271 {
26272@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26273
26274 if (opfunc == NULL)
26275 /* If there's no function, patch it with a ud2a (BUG) */
26276- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26277- else if (opfunc == _paravirt_nop)
26278+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26279+ else if (opfunc == (void *)_paravirt_nop)
26280 /* If the operation is a nop, then nop the callsite */
26281 ret = paravirt_patch_nop();
26282
26283 /* identity functions just return their single argument */
26284- else if (opfunc == _paravirt_ident_32)
26285+ else if (opfunc == (void *)_paravirt_ident_32)
26286 ret = paravirt_patch_ident_32(insnbuf, len);
26287- else if (opfunc == _paravirt_ident_64)
26288+ else if (opfunc == (void *)_paravirt_ident_64)
26289 ret = paravirt_patch_ident_64(insnbuf, len);
26290+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26291+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26292+ ret = paravirt_patch_ident_64(insnbuf, len);
26293+#endif
26294
26295 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26296 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26297@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26298 if (insn_len > len || start == NULL)
26299 insn_len = len;
26300 else
26301- memcpy(insnbuf, start, insn_len);
26302+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26303
26304 return insn_len;
26305 }
26306@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26307 return this_cpu_read(paravirt_lazy_mode);
26308 }
26309
26310-struct pv_info pv_info = {
26311+struct pv_info pv_info __read_only = {
26312 .name = "bare hardware",
26313 .paravirt_enabled = 0,
26314 .kernel_rpl = 0,
26315@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26316 #endif
26317 };
26318
26319-struct pv_init_ops pv_init_ops = {
26320+struct pv_init_ops pv_init_ops __read_only = {
26321 .patch = native_patch,
26322 };
26323
26324-struct pv_time_ops pv_time_ops = {
26325+struct pv_time_ops pv_time_ops __read_only = {
26326 .sched_clock = native_sched_clock,
26327 .steal_clock = native_steal_clock,
26328 };
26329
26330-__visible struct pv_irq_ops pv_irq_ops = {
26331+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26332 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26333 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26334 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26335@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26336 #endif
26337 };
26338
26339-__visible struct pv_cpu_ops pv_cpu_ops = {
26340+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26341 .cpuid = native_cpuid,
26342 .get_debugreg = native_get_debugreg,
26343 .set_debugreg = native_set_debugreg,
26344@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26345 NOKPROBE_SYMBOL(native_set_debugreg);
26346 NOKPROBE_SYMBOL(native_load_idt);
26347
26348-struct pv_apic_ops pv_apic_ops = {
26349+struct pv_apic_ops pv_apic_ops __read_only = {
26350 #ifdef CONFIG_X86_LOCAL_APIC
26351 .startup_ipi_hook = paravirt_nop,
26352 #endif
26353 };
26354
26355-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26356+#ifdef CONFIG_X86_32
26357+#ifdef CONFIG_X86_PAE
26358+/* 64-bit pagetable entries */
26359+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26360+#else
26361 /* 32-bit pagetable entries */
26362 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26363+#endif
26364 #else
26365 /* 64-bit pagetable entries */
26366 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26367 #endif
26368
26369-struct pv_mmu_ops pv_mmu_ops = {
26370+struct pv_mmu_ops pv_mmu_ops __read_only = {
26371
26372 .read_cr2 = native_read_cr2,
26373 .write_cr2 = native_write_cr2,
26374@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26375 .make_pud = PTE_IDENT,
26376
26377 .set_pgd = native_set_pgd,
26378+ .set_pgd_batched = native_set_pgd_batched,
26379 #endif
26380 #endif /* PAGETABLE_LEVELS >= 3 */
26381
26382@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26383 },
26384
26385 .set_fixmap = native_set_fixmap,
26386+
26387+#ifdef CONFIG_PAX_KERNEXEC
26388+ .pax_open_kernel = native_pax_open_kernel,
26389+ .pax_close_kernel = native_pax_close_kernel,
26390+#endif
26391+
26392 };
26393
26394 EXPORT_SYMBOL_GPL(pv_time_ops);
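
Constifying the pv_*_ops tables is one of the larger attack-surface reductions in this patch: they are function-pointer dispatch tables consulted on nearly every privileged operation, and a single arbitrary write into them has long been a favorite escalation primitive. __read_only, roughly as this patch family defines it,

    #define __read_only __attribute__((__section__(".data..read_only")))

places each table in a section that KERNEXEC write-protects after init. The PAE-specific PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64) addition is support plumbing: with 64-bit page-table entries on 32-bit PAE, PTE_IDENT must be a callee-save thunk around the 64-bit identity function rather than the bare 32-bit one.
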
26395diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26396index a1da673..b6f5831 100644
26397--- a/arch/x86/kernel/paravirt_patch_64.c
26398+++ b/arch/x86/kernel/paravirt_patch_64.c
26399@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26400 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26401 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26402 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26403+
26404+#ifndef CONFIG_PAX_MEMORY_UDEREF
26405 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26406+#endif
26407+
26408 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26409 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26410
26411@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26412 PATCH_SITE(pv_mmu_ops, read_cr3);
26413 PATCH_SITE(pv_mmu_ops, write_cr3);
26414 PATCH_SITE(pv_cpu_ops, clts);
26415+
26416+#ifndef CONFIG_PAX_MEMORY_UDEREF
26417 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26418+#endif
26419+
26420 PATCH_SITE(pv_cpu_ops, wbinvd);
26421
26422 patch_site:
26423diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26424index 0497f71..7186c0d 100644
26425--- a/arch/x86/kernel/pci-calgary_64.c
26426+++ b/arch/x86/kernel/pci-calgary_64.c
26427@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26428 tce_space = be64_to_cpu(readq(target));
26429 tce_space = tce_space & TAR_SW_BITS;
26430
26431- tce_space = tce_space & (~specified_table_size);
26432+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26433 info->tce_space = (u64 *)__va(tce_space);
26434 }
26435 }
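
The Calgary fix is an integer-promotion repair (assuming specified_table_size keeps its upstream unsigned int declaration): ~specified_table_size is complemented at 32 bits and then zero-extended, so the AND wipes the upper half of the 64-bit tce_space. Worked through with a hypothetical table size of 0x2000:

    tce_space                            = 0x0000123456780000
    ~specified_table_size  (32-bit ~)    = 0xFFFFDFFF -> 0x00000000FFFFDFFF
    tce_space & mask                     = 0x0000000056780000   /* high half lost */

    ~(unsigned long)specified_table_size = 0xFFFFFFFFFFFFDFFF
    tce_space & mask                     = 0x0000123456780000   /* preserved */
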
26436diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26437index 35ccf75..7a15747 100644
26438--- a/arch/x86/kernel/pci-iommu_table.c
26439+++ b/arch/x86/kernel/pci-iommu_table.c
26440@@ -2,7 +2,7 @@
26441 #include <asm/iommu_table.h>
26442 #include <linux/string.h>
26443 #include <linux/kallsyms.h>
26444-
26445+#include <linux/sched.h>
26446
26447 #define DEBUG 1
26448
26449diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26450index 77dd0ad..9ec4723 100644
26451--- a/arch/x86/kernel/pci-swiotlb.c
26452+++ b/arch/x86/kernel/pci-swiotlb.c
26453@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26454 struct dma_attrs *attrs)
26455 {
26456 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26457- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26458+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26459 else
26460 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26461 }
26462diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26463index a388bb8..97064ad 100644
26464--- a/arch/x86/kernel/process.c
26465+++ b/arch/x86/kernel/process.c
26466@@ -38,7 +38,8 @@
26467 * section. Since TSS's are completely CPU-local, we want them
26468 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26469 */
26470-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26471+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26472+EXPORT_SYMBOL(init_tss);
26473
26474 #ifdef CONFIG_X86_64
26475 static DEFINE_PER_CPU(unsigned char, is_idle);
26476@@ -96,7 +97,7 @@ void arch_task_cache_init(void)
26477 task_xstate_cachep =
26478 kmem_cache_create("task_xstate", xstate_size,
26479 __alignof__(union thread_xstate),
26480- SLAB_PANIC | SLAB_NOTRACK, NULL);
26481+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26482 setup_xstate_comp();
26483 }
26484
26485@@ -110,7 +111,7 @@ void exit_thread(void)
26486 unsigned long *bp = t->io_bitmap_ptr;
26487
26488 if (bp) {
26489- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26490+ struct tss_struct *tss = init_tss + get_cpu();
26491
26492 t->io_bitmap_ptr = NULL;
26493 clear_thread_flag(TIF_IO_BITMAP);
26494@@ -130,6 +131,9 @@ void flush_thread(void)
26495 {
26496 struct task_struct *tsk = current;
26497
26498+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26499+ loadsegment(gs, 0);
26500+#endif
26501 flush_ptrace_hw_breakpoint(tsk);
26502 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26503 drop_init_fpu(tsk);
26504@@ -276,7 +280,7 @@ static void __exit_idle(void)
26505 void exit_idle(void)
26506 {
26507 /* idle loop has pid 0 */
26508- if (current->pid)
26509+ if (task_pid_nr(current))
26510 return;
26511 __exit_idle();
26512 }
26513@@ -329,7 +333,7 @@ bool xen_set_default_idle(void)
26514 return ret;
26515 }
26516 #endif
26517-void stop_this_cpu(void *dummy)
26518+__noreturn void stop_this_cpu(void *dummy)
26519 {
26520 local_irq_disable();
26521 /*
26522@@ -508,16 +512,37 @@ static int __init idle_setup(char *str)
26523 }
26524 early_param("idle", idle_setup);
26525
26526-unsigned long arch_align_stack(unsigned long sp)
26527+#ifdef CONFIG_PAX_RANDKSTACK
26528+void pax_randomize_kstack(struct pt_regs *regs)
26529 {
26530- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26531- sp -= get_random_int() % 8192;
26532- return sp & ~0xf;
26533-}
26534+ struct thread_struct *thread = &current->thread;
26535+ unsigned long time;
26536
26537-unsigned long arch_randomize_brk(struct mm_struct *mm)
26538-{
26539- unsigned long range_end = mm->brk + 0x02000000;
26540- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26541-}
26542+ if (!randomize_va_space)
26543+ return;
26544+
26545+ if (v8086_mode(regs))
26546+ return;
26547
26548+ rdtscl(time);
26549+
26550+ /* P4 seems to return a 0 LSB, ignore it */
26551+#ifdef CONFIG_MPENTIUM4
26552+ time &= 0x3EUL;
26553+ time <<= 2;
26554+#elif defined(CONFIG_X86_64)
26555+ time &= 0xFUL;
26556+ time <<= 4;
26557+#else
26558+ time &= 0x1FUL;
26559+ time <<= 3;
26560+#endif
26561+
26562+ thread->sp0 ^= time;
26563+ load_sp0(init_tss + smp_processor_id(), thread);
26564+
26565+#ifdef CONFIG_X86_64
26566+ this_cpu_write(kernel_stack, thread->sp0);
26567+#endif
26568+}
26569+#endif
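
PAX_RANDKSTACK supplants upstream's exec-time arch_align_stack()/arch_randomize_brk() with per-syscall rerandomization: pax_randomize_kstack(), invoked from the syscall exit path elsewhere in this patch, XORs low TSC bits into thread->sp0 and reloads it into the TSS (and, on amd64, the kernel_stack per-cpu variable), so the kernel stack top moves on every return to userland. The masks trade entropy for alignment; worked out per case:

    x86_64:     time &= 0xF;  time <<= 4;   /* 16 offsets, 16-byte steps, 256 B window */
    i386:       time &= 0x1F; time <<= 3;   /* 32 offsets,  8-byte steps, 256 B window */
    Pentium 4:  time &= 0x3E; time <<= 2;   /* TSC bit 0 reads 0 on P4, so the mask
                                               starts at bit 1: 32 offsets, 8-byte steps */
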
26570diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26571index 603c4f9..3a105d7 100644
26572--- a/arch/x86/kernel/process_32.c
26573+++ b/arch/x86/kernel/process_32.c
26574@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26575 unsigned long thread_saved_pc(struct task_struct *tsk)
26576 {
26577 return ((unsigned long *)tsk->thread.sp)[3];
26578+//XXX return tsk->thread.eip;
26579 }
26580
26581 void __show_regs(struct pt_regs *regs, int all)
26582@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26583 unsigned long sp;
26584 unsigned short ss, gs;
26585
26586- if (user_mode_vm(regs)) {
26587+ if (user_mode(regs)) {
26588 sp = regs->sp;
26589 ss = regs->ss & 0xffff;
26590- gs = get_user_gs(regs);
26591 } else {
26592 sp = kernel_stack_pointer(regs);
26593 savesegment(ss, ss);
26594- savesegment(gs, gs);
26595 }
26596+ gs = get_user_gs(regs);
26597
26598 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26599 (u16)regs->cs, regs->ip, regs->flags,
26600- smp_processor_id());
26601+ raw_smp_processor_id());
26602 print_symbol("EIP is at %s\n", regs->ip);
26603
26604 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26605@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26606 int copy_thread(unsigned long clone_flags, unsigned long sp,
26607 unsigned long arg, struct task_struct *p)
26608 {
26609- struct pt_regs *childregs = task_pt_regs(p);
26610+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26611 struct task_struct *tsk;
26612 int err;
26613
26614 p->thread.sp = (unsigned long) childregs;
26615 p->thread.sp0 = (unsigned long) (childregs+1);
26616+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26617 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26618
26619 if (unlikely(p->flags & PF_KTHREAD)) {
26620 /* kernel thread */
26621 memset(childregs, 0, sizeof(struct pt_regs));
26622 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26623- task_user_gs(p) = __KERNEL_STACK_CANARY;
26624- childregs->ds = __USER_DS;
26625- childregs->es = __USER_DS;
26626+ savesegment(gs, childregs->gs);
26627+ childregs->ds = __KERNEL_DS;
26628+ childregs->es = __KERNEL_DS;
26629 childregs->fs = __KERNEL_PERCPU;
26630 childregs->bx = sp; /* function */
26631 childregs->bp = arg;
26632@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26633 struct thread_struct *prev = &prev_p->thread,
26634 *next = &next_p->thread;
26635 int cpu = smp_processor_id();
26636- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26637+ struct tss_struct *tss = init_tss + cpu;
26638 fpu_switch_t fpu;
26639
26640 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26641@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26642 */
26643 lazy_save_gs(prev->gs);
26644
26645+#ifdef CONFIG_PAX_MEMORY_UDEREF
26646+ __set_fs(task_thread_info(next_p)->addr_limit);
26647+#endif
26648+
26649 /*
26650 * Load the per-thread Thread-Local Storage descriptor.
26651 */
26652@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26653 */
26654 arch_end_context_switch(next_p);
26655
26656- this_cpu_write(kernel_stack,
26657- (unsigned long)task_stack_page(next_p) +
26658- THREAD_SIZE - KERNEL_STACK_OFFSET);
26659+ this_cpu_write(current_task, next_p);
26660+ this_cpu_write(current_tinfo, &next_p->tinfo);
26661+ this_cpu_write(kernel_stack, next->sp0);
26662
26663 /*
26664 * Restore %gs if needed (which is common)
26665@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26666
26667 switch_fpu_finish(next_p, fpu);
26668
26669- this_cpu_write(current_task, next_p);
26670-
26671 return prev_p;
26672 }
26673
26674@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26675 } while (count++ < 16);
26676 return 0;
26677 }
26678-
26679diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26680index 67fcc43..0d2c630 100644
26681--- a/arch/x86/kernel/process_64.c
26682+++ b/arch/x86/kernel/process_64.c
26683@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26684 struct pt_regs *childregs;
26685 struct task_struct *me = current;
26686
26687- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26688+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26689 childregs = task_pt_regs(p);
26690 p->thread.sp = (unsigned long) childregs;
26691 p->thread.usersp = me->thread.usersp;
26692+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26693 set_tsk_thread_flag(p, TIF_FORK);
26694 p->thread.io_bitmap_ptr = NULL;
26695
26696@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26697 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26698 savesegment(es, p->thread.es);
26699 savesegment(ds, p->thread.ds);
26700+ savesegment(ss, p->thread.ss);
26701+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26702 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26703
26704 if (unlikely(p->flags & PF_KTHREAD)) {
26705@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26706 struct thread_struct *prev = &prev_p->thread;
26707 struct thread_struct *next = &next_p->thread;
26708 int cpu = smp_processor_id();
26709- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26710+ struct tss_struct *tss = init_tss + cpu;
26711 unsigned fsindex, gsindex;
26712 fpu_switch_t fpu;
26713
26714@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26715 if (unlikely(next->ds | prev->ds))
26716 loadsegment(ds, next->ds);
26717
26718+ savesegment(ss, prev->ss);
26719+ if (unlikely(next->ss != prev->ss))
26720+ loadsegment(ss, next->ss);
26721+
26722 /*
26723 * Switch FS and GS.
26724 *
26725@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26726 prev->usersp = this_cpu_read(old_rsp);
26727 this_cpu_write(old_rsp, next->usersp);
26728 this_cpu_write(current_task, next_p);
26729+ this_cpu_write(current_tinfo, &next_p->tinfo);
26730
26731 /*
26732 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26733@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26734 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26735 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26736
26737- this_cpu_write(kernel_stack,
26738- (unsigned long)task_stack_page(next_p) +
26739- THREAD_SIZE - KERNEL_STACK_OFFSET);
26740+ this_cpu_write(kernel_stack, next->sp0);
26741
26742 /*
26743 * Now maybe reload the debug registers and handle I/O bitmaps
26744@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26745 if (!p || p == current || p->state == TASK_RUNNING)
26746 return 0;
26747 stack = (unsigned long)task_stack_page(p);
26748- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26749+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26750 return 0;
26751 fp = *(u64 *)(p->thread.sp);
26752 do {
26753- if (fp < (unsigned long)stack ||
26754- fp >= (unsigned long)stack+THREAD_SIZE)
26755+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26756 return 0;
26757 ip = *(u64 *)(fp+8);
26758 if (!in_sched_functions(ip))
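
The get_wchan() hunk above tightens the walk so that both the saved stack pointer and every frame pointer must leave room for the 16 bytes this patch reserves at the top of the stack, plus the u64 holding the saved frame pointer itself. A minimal userspace sketch of that bounded frame-pointer walk; THREAD_SIZE, the fabricated frame, and the ip == 0 stand-in for in_sched_functions() are assumptions for illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 16384UL             /* assumed: 16 KiB kernel stack */

/* Bounded frame-pointer walk in the style of get_wchan(): every frame
 * must lie inside the stack area, minus the 16 bytes reserved at the
 * top and the u64 holding the saved frame pointer itself. */
static uint64_t walk_frames(const uint8_t *stack, uintptr_t fp)
{
        uintptr_t base = (uintptr_t)stack;
        int count = 0;

        do {
                if (fp < base || fp > base + THREAD_SIZE - 16 - sizeof(uint64_t))
                        return 0;                       /* frame out of bounds */
                uint64_t ip = *(const uint64_t *)(fp + 8); /* saved return address */
                if (ip != 0)            /* stand-in for !in_sched_functions(ip) */
                        return ip;
                fp = *(const uint64_t *)fp;     /* follow the saved frame pointer */
        } while (count++ < 16);
        return 0;
}

int main(void)
{
        uint8_t *stack = calloc(1, THREAD_SIZE);
        uintptr_t fp = (uintptr_t)stack + 0x100;        /* fabricate one frame */
        *(uint64_t *)(fp + 8) = 0xdeadbeef;
        printf("wchan = %#llx\n", (unsigned long long)walk_frames(stack, fp));
        free(stack);
        return 0;
}
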
26759diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26760index e510618..5165ac0 100644
26761--- a/arch/x86/kernel/ptrace.c
26762+++ b/arch/x86/kernel/ptrace.c
26763@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26764 unsigned long sp = (unsigned long)&regs->sp;
26765 u32 *prev_esp;
26766
26767- if (context == (sp & ~(THREAD_SIZE - 1)))
26768+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26769 return sp;
26770
26771- prev_esp = (u32 *)(context);
26772+ prev_esp = *(u32 **)(context);
26773 if (prev_esp)
26774 return (unsigned long)prev_esp;
26775
26776@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26777 if (child->thread.gs != value)
26778 return do_arch_prctl(child, ARCH_SET_GS, value);
26779 return 0;
26780+
26781+ case offsetof(struct user_regs_struct,ip):
26782+ /*
26783+ * Protect against any attempt to set ip to an
26784+ * impossible address. There are dragons lurking if the
26785+ * address is noncanonical. (This explicitly allows
26786+ * setting ip to TASK_SIZE_MAX, because user code can do
26787+ * that all by itself by running off the end of its
26788+ * address space.
26789+ */
26790+ if (value > TASK_SIZE_MAX)
26791+ return -EIO;
26792+ break;
26793+
26794 #endif
26795 }
26796
26797@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26798 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26799 {
26800 int i;
26801- int dr7 = 0;
26802+ unsigned long dr7 = 0;
26803 struct arch_hw_breakpoint *info;
26804
26805 for (i = 0; i < HBP_NUM; i++) {
26806@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26807 unsigned long addr, unsigned long data)
26808 {
26809 int ret;
26810- unsigned long __user *datap = (unsigned long __user *)data;
26811+ unsigned long __user *datap = (__force unsigned long __user *)data;
26812
26813 switch (request) {
26814 /* read the word at location addr in the USER area. */
26815@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26816 if ((int) addr < 0)
26817 return -EIO;
26818 ret = do_get_thread_area(child, addr,
26819- (struct user_desc __user *)data);
26820+ (__force struct user_desc __user *) data);
26821 break;
26822
26823 case PTRACE_SET_THREAD_AREA:
26824 if ((int) addr < 0)
26825 return -EIO;
26826 ret = do_set_thread_area(child, addr,
26827- (struct user_desc __user *)data, 0);
26828+ (__force struct user_desc __user *) data, 0);
26829 break;
26830 #endif
26831
26832@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26833
26834 #ifdef CONFIG_X86_64
26835
26836-static struct user_regset x86_64_regsets[] __read_mostly = {
26837+static user_regset_no_const x86_64_regsets[] __read_only = {
26838 [REGSET_GENERAL] = {
26839 .core_note_type = NT_PRSTATUS,
26840 .n = sizeof(struct user_regs_struct) / sizeof(long),
26841@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26842 #endif /* CONFIG_X86_64 */
26843
26844 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26845-static struct user_regset x86_32_regsets[] __read_mostly = {
26846+static user_regset_no_const x86_32_regsets[] __read_only = {
26847 [REGSET_GENERAL] = {
26848 .core_note_type = NT_PRSTATUS,
26849 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26850@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26851 */
26852 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26853
26854-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26855+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26856 {
26857 #ifdef CONFIG_X86_64
26858 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26859@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26860 memset(info, 0, sizeof(*info));
26861 info->si_signo = SIGTRAP;
26862 info->si_code = si_code;
26863- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26864+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26865 }
26866
26867 void user_single_step_siginfo(struct task_struct *tsk,
26868@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26869 }
26870 }
26871
26872+#ifdef CONFIG_GRKERNSEC_SETXID
26873+extern void gr_delayed_cred_worker(void);
26874+#endif
26875+
26876 /*
26877 * We can return 0 to resume the syscall or anything else to go to phase
26878 * 2. If we resume the syscall, we need to put something appropriate in
26879@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26880
26881 BUG_ON(regs != task_pt_regs(current));
26882
26883+#ifdef CONFIG_GRKERNSEC_SETXID
26884+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26885+ gr_delayed_cred_worker();
26886+#endif
26887+
26888 /*
26889 * If we stepped into a sysenter/syscall insn, it trapped in
26890 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26891@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26892 */
26893 user_exit();
26894
26895+#ifdef CONFIG_GRKERNSEC_SETXID
26896+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26897+ gr_delayed_cred_worker();
26898+#endif
26899+
26900 audit_syscall_exit(regs);
26901
26902 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
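
The putreg() hunk refuses to plant an instruction pointer above TASK_SIZE_MAX through ptrace, since a noncanonical %rip can trap in awkward places. A minimal sketch of the same validate-before-store pattern; the TASK_SIZE_MAX value below assumes the usual x86-64 47-bit user address space with 4 KiB pages.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed: x86-64 with a 47-bit user address space and 4 KiB pages. */
#define TASK_SIZE_MAX ((1ULL << 47) - 4096)

struct regs { uint64_t ip; };

/* Refuse to plant a noncanonical instruction pointer via ptrace.
 * TASK_SIZE_MAX itself stays legal: user code can reach it on its own
 * by running off the end of its address space. */
static int set_ip(struct regs *r, uint64_t value)
{
        if (value > TASK_SIZE_MAX)
                return -EIO;
        r->ip = value;
        return 0;
}

int main(void)
{
        struct regs r = { 0 };
        printf("%d\n", set_ip(&r, 0x400000));                   /* 0: accepted */
        printf("%d\n", set_ip(&r, 0xffff800000000000ULL));      /* -EIO: noncanonical */
        return 0;
}
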
26903diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26904index e5ecd20..60f7eef 100644
26905--- a/arch/x86/kernel/pvclock.c
26906+++ b/arch/x86/kernel/pvclock.c
26907@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26908 reset_hung_task_detector();
26909 }
26910
26911-static atomic64_t last_value = ATOMIC64_INIT(0);
26912+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26913
26914 void pvclock_resume(void)
26915 {
26916- atomic64_set(&last_value, 0);
26917+ atomic64_set_unchecked(&last_value, 0);
26918 }
26919
26920 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26921@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26922 * updating at the same time, and one of them could be slightly behind,
26923 * making the assumption that last_value always go forward fail to hold.
26924 */
26925- last = atomic64_read(&last_value);
26926+ last = atomic64_read_unchecked(&last_value);
26927 do {
26928 if (ret < last)
26929 return last;
26930- last = atomic64_cmpxchg(&last_value, last, ret);
26931+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26932 } while (unlikely(last != ret));
26933
26934 return ret;
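
The pvclock loop above is a lock-free monotonicity clamp: read the last published timestamp, return it if it is newer, otherwise race to publish our own. The *_unchecked conversions only opt this counter out of PaX's overflow instrumentation; the algorithm is unchanged. A standalone sketch with C11 atomics rather than the kernel's atomic64 API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* Return max(ret, every value previously returned): if another thread
 * published a later timestamp, report that instead; otherwise try to
 * publish ours, retrying if someone beat us to it. */
static uint64_t monotonic_clamp(uint64_t ret)
{
        uint64_t last = atomic_load(&last_value);

        do {
                if (ret < last)
                        return last;
        } while (!atomic_compare_exchange_weak(&last_value, &last, ret));
        return ret;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)monotonic_clamp(100)); /* 100 */
        printf("%llu\n", (unsigned long long)monotonic_clamp(90));  /* 100: clamped */
        return 0;
}
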
26935diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26936index 86db4bc..a50a54a 100644
26937--- a/arch/x86/kernel/reboot.c
26938+++ b/arch/x86/kernel/reboot.c
26939@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26940
26941 void __noreturn machine_real_restart(unsigned int type)
26942 {
26943+
26944+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26945+ struct desc_struct *gdt;
26946+#endif
26947+
26948 local_irq_disable();
26949
26950 /*
26951@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26952
26953 /* Jump to the identity-mapped low memory code */
26954 #ifdef CONFIG_X86_32
26955- asm volatile("jmpl *%0" : :
26956+
26957+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26958+ gdt = get_cpu_gdt_table(smp_processor_id());
26959+ pax_open_kernel();
26960+#ifdef CONFIG_PAX_MEMORY_UDEREF
26961+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26962+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26963+ loadsegment(ds, __KERNEL_DS);
26964+ loadsegment(es, __KERNEL_DS);
26965+ loadsegment(ss, __KERNEL_DS);
26966+#endif
26967+#ifdef CONFIG_PAX_KERNEXEC
26968+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26969+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26970+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26971+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26972+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26973+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26974+#endif
26975+ pax_close_kernel();
26976+#endif
26977+
26978+ asm volatile("ljmpl *%0" : :
26979 "rm" (real_mode_header->machine_real_restart_asm),
26980 "a" (type));
26981 #else
26982@@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
26983 /*
26984 * This is a single dmi_table handling all reboot quirks.
26985 */
26986-static struct dmi_system_id __initdata reboot_dmi_table[] = {
26987+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
26988
26989 /* Acer */
26990 { /* Handle reboot issue on Acer Aspire one */
26991@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26992 * This means that this function can never return, it can misbehave
26993 * by not rebooting properly and hanging.
26994 */
26995-static void native_machine_emergency_restart(void)
26996+static void __noreturn native_machine_emergency_restart(void)
26997 {
26998 int i;
26999 int attempt = 0;
27000@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27001 #endif
27002 }
27003
27004-static void __machine_emergency_restart(int emergency)
27005+static void __noreturn __machine_emergency_restart(int emergency)
27006 {
27007 reboot_emergency = emergency;
27008 machine_ops.emergency_restart();
27009 }
27010
27011-static void native_machine_restart(char *__unused)
27012+static void __noreturn native_machine_restart(char *__unused)
27013 {
27014 pr_notice("machine restart\n");
27015
27016@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27017 __machine_emergency_restart(0);
27018 }
27019
27020-static void native_machine_halt(void)
27021+static void __noreturn native_machine_halt(void)
27022 {
27023 /* Stop other cpus and apics */
27024 machine_shutdown();
27025@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27026 stop_this_cpu(NULL);
27027 }
27028
27029-static void native_machine_power_off(void)
27030+static void __noreturn native_machine_power_off(void)
27031 {
27032 if (pm_power_off) {
27033 if (!reboot_force)
27034@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27035 }
27036 /* A fallback in case there is no PM info available */
27037 tboot_shutdown(TB_SHUTDOWN_HALT);
27038+ unreachable();
27039 }
27040
27041-struct machine_ops machine_ops = {
27042+struct machine_ops machine_ops __read_only = {
27043 .power_off = native_machine_power_off,
27044 .shutdown = native_machine_shutdown,
27045 .emergency_restart = native_machine_emergency_restart,
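
Marking machine_ops __read_only closes off a classic target: a writable global table of function pointers invoked at reboot time. The nearest portable analogue is a const-qualified ops structure, sketched below; the kernel's __read_only goes further by write-protecting the page after init rather than relying on the type system.

#include <stdio.h>

struct machine_ops {
        void (*power_off)(void);
        void (*halt)(void);
};

static void my_power_off(void) { puts("power off"); }
static void my_halt(void)      { puts("halt"); }

/* const places the table in .rodata, so a stray (or malicious) write
 * to an ops pointer faults instead of silently hijacking control flow. */
static const struct machine_ops machine_ops = {
        .power_off = my_power_off,
        .halt      = my_halt,
};

int main(void)
{
        machine_ops.power_off();
        /* machine_ops.halt = my_power_off;  -- rejected at compile time:
         * assignment of member in read-only object */
        return 0;
}
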
27046diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27047index c8e41e9..64049ef 100644
27048--- a/arch/x86/kernel/reboot_fixups_32.c
27049+++ b/arch/x86/kernel/reboot_fixups_32.c
27050@@ -57,7 +57,7 @@ struct device_fixup {
27051 unsigned int vendor;
27052 unsigned int device;
27053 void (*reboot_fixup)(struct pci_dev *);
27054-};
27055+} __do_const;
27056
27057 /*
27058 * PCI ids solely used for fixups_table go here
27059diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27060index 3fd2c69..a444264 100644
27061--- a/arch/x86/kernel/relocate_kernel_64.S
27062+++ b/arch/x86/kernel/relocate_kernel_64.S
27063@@ -96,8 +96,7 @@ relocate_kernel:
27064
27065 /* jump to identity mapped page */
27066 addq $(identity_mapped - relocate_kernel), %r8
27067- pushq %r8
27068- ret
27069+ jmp *%r8
27070
27071 identity_mapped:
27072 /* set return address to 0 if not preserving context */
27073diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27074index 0a2421c..11f3f36 100644
27075--- a/arch/x86/kernel/setup.c
27076+++ b/arch/x86/kernel/setup.c
27077@@ -111,6 +111,7 @@
27078 #include <asm/mce.h>
27079 #include <asm/alternative.h>
27080 #include <asm/prom.h>
27081+#include <asm/boot.h>
27082
27083 /*
27084 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27085@@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27086 #endif
27087
27088
27089-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27090-__visible unsigned long mmu_cr4_features;
27091+#ifdef CONFIG_X86_64
27092+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27093+#elif defined(CONFIG_X86_PAE)
27094+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27095 #else
27096-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27097+__visible unsigned long mmu_cr4_features __read_only;
27098 #endif
27099
27100 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27101@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27102 * area (640->1Mb) as ram even though it is not.
27103 * take them out.
27104 */
27105- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27106+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27107
27108 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27109 }
27110@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27111 /* called before trim_bios_range() to spare extra sanitize */
27112 static void __init e820_add_kernel_range(void)
27113 {
27114- u64 start = __pa_symbol(_text);
27115+ u64 start = __pa_symbol(ktla_ktva(_text));
27116 u64 size = __pa_symbol(_end) - start;
27117
27118 /*
27119@@ -855,8 +858,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27120
27121 void __init setup_arch(char **cmdline_p)
27122 {
27123+#ifdef CONFIG_X86_32
27124+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27125+#else
27126 memblock_reserve(__pa_symbol(_text),
27127 (unsigned long)__bss_stop - (unsigned long)_text);
27128+#endif
27129
27130 early_reserve_initrd();
27131
27132@@ -954,16 +961,16 @@ void __init setup_arch(char **cmdline_p)
27133
27134 if (!boot_params.hdr.root_flags)
27135 root_mountflags &= ~MS_RDONLY;
27136- init_mm.start_code = (unsigned long) _text;
27137- init_mm.end_code = (unsigned long) _etext;
27138+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27139+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27140 init_mm.end_data = (unsigned long) _edata;
27141 init_mm.brk = _brk_end;
27142
27143 mpx_mm_init(&init_mm);
27144
27145- code_resource.start = __pa_symbol(_text);
27146- code_resource.end = __pa_symbol(_etext)-1;
27147- data_resource.start = __pa_symbol(_etext);
27148+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27149+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27150+ data_resource.start = __pa_symbol(_sdata);
27151 data_resource.end = __pa_symbol(_edata)-1;
27152 bss_resource.start = __pa_symbol(__bss_start);
27153 bss_resource.end = __pa_symbol(__bss_stop)-1;
27154diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27155index e4fcb87..9c06c55 100644
27156--- a/arch/x86/kernel/setup_percpu.c
27157+++ b/arch/x86/kernel/setup_percpu.c
27158@@ -21,19 +21,17 @@
27159 #include <asm/cpu.h>
27160 #include <asm/stackprotector.h>
27161
27162-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27163+#ifdef CONFIG_SMP
27164+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27165 EXPORT_PER_CPU_SYMBOL(cpu_number);
27166+#endif
27167
27168-#ifdef CONFIG_X86_64
27169 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27170-#else
27171-#define BOOT_PERCPU_OFFSET 0
27172-#endif
27173
27174 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27175 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27176
27177-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27178+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27179 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27180 };
27181 EXPORT_SYMBOL(__per_cpu_offset);
27182@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27183 {
27184 #ifdef CONFIG_NEED_MULTIPLE_NODES
27185 pg_data_t *last = NULL;
27186- unsigned int cpu;
27187+ int cpu;
27188
27189 for_each_possible_cpu(cpu) {
27190 int node = early_cpu_to_node(cpu);
27191@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27192 {
27193 #ifdef CONFIG_X86_32
27194 struct desc_struct gdt;
27195+ unsigned long base = per_cpu_offset(cpu);
27196
27197- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27198- 0x2 | DESCTYPE_S, 0x8);
27199- gdt.s = 1;
27200+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27201+ 0x83 | DESCTYPE_S, 0xC);
27202 write_gdt_entry(get_cpu_gdt_table(cpu),
27203 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27204 #endif
27205@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27206 /* alrighty, percpu areas up and running */
27207 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27208 for_each_possible_cpu(cpu) {
27209+#ifdef CONFIG_CC_STACKPROTECTOR
27210+#ifdef CONFIG_X86_32
27211+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27212+#endif
27213+#endif
27214 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27215 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27216 per_cpu(cpu_number, cpu) = cpu;
27217@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27218 */
27219 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27220 #endif
27221+#ifdef CONFIG_CC_STACKPROTECTOR
27222+#ifdef CONFIG_X86_32
27223+ if (!cpu)
27224+ per_cpu(stack_canary.canary, cpu) = canary;
27225+#endif
27226+#endif
27227 /*
27228 * Up to this point, the boot CPU has been using .init.data
27229 * area. Reload any changed state for the boot CPU.
27230diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27231index e504246..ba10432 100644
27232--- a/arch/x86/kernel/signal.c
27233+++ b/arch/x86/kernel/signal.c
27234@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27235 * Align the stack pointer according to the i386 ABI,
27236 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27237 */
27238- sp = ((sp + 4) & -16ul) - 4;
27239+ sp = ((sp - 12) & -16ul) - 4;
27240 #else /* !CONFIG_X86_32 */
27241 sp = round_down(sp, 16) - 8;
27242 #endif
27243@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27244 }
27245
27246 if (current->mm->context.vdso)
27247- restorer = current->mm->context.vdso +
27248- selected_vdso32->sym___kernel_sigreturn;
27249+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27250 else
27251- restorer = &frame->retcode;
27252+ restorer = (void __user *)&frame->retcode;
27253 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27254 restorer = ksig->ka.sa.sa_restorer;
27255
27256@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27257 * reasons and because gdb uses it as a signature to notice
27258 * signal handler stack frames.
27259 */
27260- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27261+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27262
27263 if (err)
27264 return -EFAULT;
27265@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27266 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27267
27268 /* Set up to return from userspace. */
27269- restorer = current->mm->context.vdso +
27270- selected_vdso32->sym___kernel_rt_sigreturn;
27271+ if (current->mm->context.vdso)
27272+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27273+ else
27274+ restorer = (void __user *)&frame->retcode;
27275 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27276 restorer = ksig->ka.sa.sa_restorer;
27277 put_user_ex(restorer, &frame->pretcode);
27278@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27279 * reasons and because gdb uses it as a signature to notice
27280 * signal handler stack frames.
27281 */
27282- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27283+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27284 } put_user_catch(err);
27285
27286 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27287@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27288 {
27289 int usig = signr_convert(ksig->sig);
27290 sigset_t *set = sigmask_to_save();
27291- compat_sigset_t *cset = (compat_sigset_t *) set;
27292+ sigset_t sigcopy;
27293+ compat_sigset_t *cset;
27294+
27295+ sigcopy = *set;
27296+
27297+ cset = (compat_sigset_t *) &sigcopy;
27298
27299 /* Set up the stack frame */
27300 if (is_ia32_frame()) {
27301@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27302 } else if (is_x32_frame()) {
27303 return x32_setup_rt_frame(ksig, cset, regs);
27304 } else {
27305- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27306+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27307 }
27308 }
27309
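
Both alignment formulas in the signal.c hunk satisfy the i386 ABI condition ((sp + 4) & 15) == 0 stated in the comment; what changes is the headroom. Rounding down from sp - 12 instead of sp + 4 always lands at least 16 bytes below the incoming sp, whereas the old formula could return sp itself. A quick self-checking demonstration (this rationale is inferred from the arithmetic, not stated in the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        for (unsigned long sp = 0xbffff000UL; sp < 0xbffff040UL; sp++) {
                unsigned long old_sp = ((sp + 4) & -16UL) - 4;
                unsigned long new_sp = ((sp - 12) & -16UL) - 4;

                /* Both meet the ABI: on handler entry (sp + 4) is 16-aligned. */
                assert(((old_sp + 4) & 15) == 0);
                assert(((new_sp + 4) & 15) == 0);

                /* But the old formula can return sp itself, while the new
                 * one always leaves at least 16 bytes below sp. */
                assert(old_sp <= sp);
                assert(new_sp <= sp - 16);
        }
        puts("ok");
        return 0;
}
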
27310diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27311index be8e1bd..a3d93fa 100644
27312--- a/arch/x86/kernel/smp.c
27313+++ b/arch/x86/kernel/smp.c
27314@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27315
27316 __setup("nonmi_ipi", nonmi_ipi_setup);
27317
27318-struct smp_ops smp_ops = {
27319+struct smp_ops smp_ops __read_only = {
27320 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27321 .smp_prepare_cpus = native_smp_prepare_cpus,
27322 .smp_cpus_done = native_smp_cpus_done,
27323diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27324index febc6aa..37d8edf 100644
27325--- a/arch/x86/kernel/smpboot.c
27326+++ b/arch/x86/kernel/smpboot.c
27327@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27328
27329 enable_start_cpu0 = 0;
27330
27331-#ifdef CONFIG_X86_32
27332+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27333+ barrier();
27334+
27335 /* switch away from the initial page table */
27336+#ifdef CONFIG_PAX_PER_CPU_PGD
27337+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27338+#else
27339 load_cr3(swapper_pg_dir);
27340+#endif
27341 __flush_tlb_all();
27342-#endif
27343
27344- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27345- barrier();
27346 /*
27347 * Check TSC synchronization with the BP:
27348 */
27349@@ -800,8 +803,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27350 alternatives_enable_smp();
27351
27352 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27353- (THREAD_SIZE + task_stack_page(idle))) - 1);
27354+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27355 per_cpu(current_task, cpu) = idle;
27356+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27357
27358 #ifdef CONFIG_X86_32
27359 /* Stack for startup_32 can be just as for start_secondary onwards */
27360@@ -810,10 +814,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27361 clear_tsk_thread_flag(idle, TIF_FORK);
27362 initial_gs = per_cpu_offset(cpu);
27363 #endif
27364- per_cpu(kernel_stack, cpu) =
27365- (unsigned long)task_stack_page(idle) -
27366- KERNEL_STACK_OFFSET + THREAD_SIZE;
27367+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27368+ pax_open_kernel();
27369 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27370+ pax_close_kernel();
27371 initial_code = (unsigned long)start_secondary;
27372 stack_start = idle->thread.sp;
27373
27374@@ -953,6 +957,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27375 /* the FPU context is blank, nobody can own it */
27376 __cpu_disable_lazy_restore(cpu);
27377
27378+#ifdef CONFIG_PAX_PER_CPU_PGD
27379+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27380+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27381+ KERNEL_PGD_PTRS);
27382+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27383+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27384+ KERNEL_PGD_PTRS);
27385+#endif
27386+
27387 err = do_boot_cpu(apicid, cpu, tidle);
27388 if (err) {
27389 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
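
Under PAX_PER_CPU_PGD each CPU boots on its own top-level page table, so before the AP starts, the kernel half of swapper_pg_dir is copied into both the kernel and user per-CPU variants. clone_pgd_range() is at bottom a memcpy of top-level entries; a hedged sketch with illustrative sizes (PTRS_PER_PGD and KERNEL_PGD_BOUNDARY below are assumptions chosen for the example, not this kernel's exact values):

#include <stdint.h>
#include <string.h>

typedef uint64_t pgd_t;                 /* assumed: 8-byte top-level entries */

#define PTRS_PER_PGD        512
#define KERNEL_PGD_BOUNDARY 256         /* illustrative: first kernel slot */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

/* Copy the kernel mappings from the reference page table into a
 * per-CPU one, leaving the user half untouched. */
static void clone_kernel_half(pgd_t *dst, const pgd_t *src)
{
        memcpy(dst + KERNEL_PGD_BOUNDARY,
               src + KERNEL_PGD_BOUNDARY,
               KERNEL_PGD_PTRS * sizeof(pgd_t));
}

int main(void)
{
        static pgd_t swapper[PTRS_PER_PGD], percpu[PTRS_PER_PGD];
        swapper[300] = 0x1234;
        clone_kernel_half(percpu, swapper);
        return percpu[300] == 0x1234 ? 0 : 1;
}
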
27390diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27391index 9b4d51d..5d28b58 100644
27392--- a/arch/x86/kernel/step.c
27393+++ b/arch/x86/kernel/step.c
27394@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27395 struct desc_struct *desc;
27396 unsigned long base;
27397
27398- seg &= ~7UL;
27399+ seg >>= 3;
27400
27401 mutex_lock(&child->mm->context.lock);
27402- if (unlikely((seg >> 3) >= child->mm->context.size))
27403+ if (unlikely(seg >= child->mm->context.size))
27404 addr = -1L; /* bogus selector, access would fault */
27405 else {
27406 desc = child->mm->context.ldt + seg;
27407@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27408 addr += base;
27409 }
27410 mutex_unlock(&child->mm->context.lock);
27411- }
27412+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27413+ addr = ktla_ktva(addr);
27414
27415 return addr;
27416 }
27417@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27418 unsigned char opcode[15];
27419 unsigned long addr = convert_ip_to_linear(child, regs);
27420
27421+ if (addr == -EINVAL)
27422+ return 0;
27423+
27424 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27425 for (i = 0; i < copied; i++) {
27426 switch (opcode[i]) {
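
convert_ip_to_linear() resolves a code-segment selector plus offset into a linear address; the hunk swaps the mask-then-compare for a single shift of the selector and, under KERNEXEC, routes kernel CS addresses through ktla_ktva(). The descriptor-base arithmetic follows the classic 8-byte x86 descriptor layout, sketched here as plain C:

#include <stdint.h>
#include <stdio.h>

/* Classic 8-byte x86 segment descriptor: the 32-bit base is split
 * across three fields. */
struct desc {
        uint16_t limit0;
        uint16_t base0;
        uint8_t  base1;
        uint8_t  type_attrs;
        uint8_t  limit1_flags;
        uint8_t  base2;
};

static uint32_t desc_base(const struct desc *d)
{
        return d->base0 | ((uint32_t)d->base1 << 16) | ((uint32_t)d->base2 << 24);
}

/* A selector's low 3 bits are the RPL and table bit; seg >> 3 is the
 * descriptor index, which must be bounds-checked against the table
 * size before the lookup, exactly as the hunk above does. */
static uint32_t to_linear(const struct desc *ldt, unsigned nents,
                          uint16_t seg, uint32_t ip)
{
        unsigned idx = seg >> 3;

        if (idx >= nents)
                return (uint32_t)-1;    /* bogus selector, would fault */
        return ip + desc_base(&ldt[idx]);
}

int main(void)
{
        struct desc ldt[2] = { { 0 }, { .base0 = 0, .base1 = 0x10, .base2 = 0 } };
        printf("%#x\n", to_linear(ldt, 2, (1 << 3) | 7, 0x1234)); /* 0x101234 */
        return 0;
}
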
27427diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27428new file mode 100644
27429index 0000000..5877189
27430--- /dev/null
27431+++ b/arch/x86/kernel/sys_i386_32.c
27432@@ -0,0 +1,189 @@
27433+/*
27434+ * This file contains various random system calls that
27435+ * have a non-standard calling sequence on the Linux/i386
27436+ * platform.
27437+ */
27438+
27439+#include <linux/errno.h>
27440+#include <linux/sched.h>
27441+#include <linux/mm.h>
27442+#include <linux/fs.h>
27443+#include <linux/smp.h>
27444+#include <linux/sem.h>
27445+#include <linux/msg.h>
27446+#include <linux/shm.h>
27447+#include <linux/stat.h>
27448+#include <linux/syscalls.h>
27449+#include <linux/mman.h>
27450+#include <linux/file.h>
27451+#include <linux/utsname.h>
27452+#include <linux/ipc.h>
27453+#include <linux/elf.h>
27454+
27455+#include <linux/uaccess.h>
27456+#include <linux/unistd.h>
27457+
27458+#include <asm/syscalls.h>
27459+
27460+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27461+{
27462+ unsigned long pax_task_size = TASK_SIZE;
27463+
27464+#ifdef CONFIG_PAX_SEGMEXEC
27465+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27466+ pax_task_size = SEGMEXEC_TASK_SIZE;
27467+#endif
27468+
27469+ if (flags & MAP_FIXED)
27470+ if (len > pax_task_size || addr > pax_task_size - len)
27471+ return -EINVAL;
27472+
27473+ return 0;
27474+}
27475+
27476+/*
27477+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27478+ */
27479+static unsigned long get_align_mask(void)
27480+{
27481+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27482+ return 0;
27483+
27484+ if (!(current->flags & PF_RANDOMIZE))
27485+ return 0;
27486+
27487+ return va_align.mask;
27488+}
27489+
27490+unsigned long
27491+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27492+ unsigned long len, unsigned long pgoff, unsigned long flags)
27493+{
27494+ struct mm_struct *mm = current->mm;
27495+ struct vm_area_struct *vma;
27496+ unsigned long pax_task_size = TASK_SIZE;
27497+ struct vm_unmapped_area_info info;
27498+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27499+
27500+#ifdef CONFIG_PAX_SEGMEXEC
27501+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27502+ pax_task_size = SEGMEXEC_TASK_SIZE;
27503+#endif
27504+
27505+ pax_task_size -= PAGE_SIZE;
27506+
27507+ if (len > pax_task_size)
27508+ return -ENOMEM;
27509+
27510+ if (flags & MAP_FIXED)
27511+ return addr;
27512+
27513+#ifdef CONFIG_PAX_RANDMMAP
27514+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27515+#endif
27516+
27517+ if (addr) {
27518+ addr = PAGE_ALIGN(addr);
27519+ if (pax_task_size - len >= addr) {
27520+ vma = find_vma(mm, addr);
27521+ if (check_heap_stack_gap(vma, addr, len, offset))
27522+ return addr;
27523+ }
27524+ }
27525+
27526+ info.flags = 0;
27527+ info.length = len;
27528+ info.align_mask = filp ? get_align_mask() : 0;
27529+ info.align_offset = pgoff << PAGE_SHIFT;
27530+ info.threadstack_offset = offset;
27531+
27532+#ifdef CONFIG_PAX_PAGEEXEC
27533+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27534+ info.low_limit = 0x00110000UL;
27535+ info.high_limit = mm->start_code;
27536+
27537+#ifdef CONFIG_PAX_RANDMMAP
27538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27539+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27540+#endif
27541+
27542+ if (info.low_limit < info.high_limit) {
27543+ addr = vm_unmapped_area(&info);
27544+ if (!IS_ERR_VALUE(addr))
27545+ return addr;
27546+ }
27547+ } else
27548+#endif
27549+
27550+ info.low_limit = mm->mmap_base;
27551+ info.high_limit = pax_task_size;
27552+
27553+ return vm_unmapped_area(&info);
27554+}
27555+
27556+unsigned long
27557+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27558+ const unsigned long len, const unsigned long pgoff,
27559+ const unsigned long flags)
27560+{
27561+ struct vm_area_struct *vma;
27562+ struct mm_struct *mm = current->mm;
27563+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27564+ struct vm_unmapped_area_info info;
27565+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27566+
27567+#ifdef CONFIG_PAX_SEGMEXEC
27568+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27569+ pax_task_size = SEGMEXEC_TASK_SIZE;
27570+#endif
27571+
27572+ pax_task_size -= PAGE_SIZE;
27573+
27574+ /* requested length too big for entire address space */
27575+ if (len > pax_task_size)
27576+ return -ENOMEM;
27577+
27578+ if (flags & MAP_FIXED)
27579+ return addr;
27580+
27581+#ifdef CONFIG_PAX_PAGEEXEC
27582+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27583+ goto bottomup;
27584+#endif
27585+
27586+#ifdef CONFIG_PAX_RANDMMAP
27587+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27588+#endif
27589+
27590+ /* requesting a specific address */
27591+ if (addr) {
27592+ addr = PAGE_ALIGN(addr);
27593+ if (pax_task_size - len >= addr) {
27594+ vma = find_vma(mm, addr);
27595+ if (check_heap_stack_gap(vma, addr, len, offset))
27596+ return addr;
27597+ }
27598+ }
27599+
27600+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27601+ info.length = len;
27602+ info.low_limit = PAGE_SIZE;
27603+ info.high_limit = mm->mmap_base;
27604+ info.align_mask = filp ? get_align_mask() : 0;
27605+ info.align_offset = pgoff << PAGE_SHIFT;
27606+ info.threadstack_offset = offset;
27607+
27608+ addr = vm_unmapped_area(&info);
27609+ if (!(addr & ~PAGE_MASK))
27610+ return addr;
27611+ VM_BUG_ON(addr != -ENOMEM);
27612+
27613+bottomup:
27614+ /*
27615+ * A failed mmap() very likely causes application failure,
27616+ * so fall back to the bottom-up function here. This scenario
27617+ * can happen with large stack limits and large mmap()
27618+ * allocations.
27619+ */
27620+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27621+}
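
Every placement decision in this new file funnels through check_heap_stack_gap(), defined elsewhere in the patch. Its essential question, whether [addr, addr + len) fits below the next VMA with the randomized threadstack gap to spare, can be sketched as below; this is a simplification, since the real helper also special-cases stack VMAs and growth direction. Note also the design choice at the end of the file: a failed top-down search falls back to the bottom-up allocator rather than failing the mmap().

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

/* Accept addr only if the mapping fits entirely before the next VMA,
 * leaving at least `gap` bytes of slack (a simplified reading of
 * check_heap_stack_gap(); the real one also handles stack growth). */
static bool gap_ok(const struct vma *next, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
        if (!next)
                return true;            /* nothing above the candidate */
        return addr + len + gap <= next->vm_start;
}

int main(void)
{
        struct vma v = { 0x800000, 0x900000 };
        printf("%d\n", gap_ok(&v, 0x700000, 0x10000, 0x1000)); /* 1: fits */
        printf("%d\n", gap_ok(&v, 0x7ff800, 0x10000, 0x1000)); /* 0: collides */
        return 0;
}
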
27622diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27623index 30277e2..5664a29 100644
27624--- a/arch/x86/kernel/sys_x86_64.c
27625+++ b/arch/x86/kernel/sys_x86_64.c
27626@@ -81,8 +81,8 @@ out:
27627 return error;
27628 }
27629
27630-static void find_start_end(unsigned long flags, unsigned long *begin,
27631- unsigned long *end)
27632+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27633+ unsigned long *begin, unsigned long *end)
27634 {
27635 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27636 unsigned long new_begin;
27637@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27638 *begin = new_begin;
27639 }
27640 } else {
27641- *begin = current->mm->mmap_legacy_base;
27642+ *begin = mm->mmap_legacy_base;
27643 *end = TASK_SIZE;
27644 }
27645 }
27646@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27647 struct vm_area_struct *vma;
27648 struct vm_unmapped_area_info info;
27649 unsigned long begin, end;
27650+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27651
27652 if (flags & MAP_FIXED)
27653 return addr;
27654
27655- find_start_end(flags, &begin, &end);
27656+ find_start_end(mm, flags, &begin, &end);
27657
27658 if (len > end)
27659 return -ENOMEM;
27660
27661+#ifdef CONFIG_PAX_RANDMMAP
27662+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27663+#endif
27664+
27665 if (addr) {
27666 addr = PAGE_ALIGN(addr);
27667 vma = find_vma(mm, addr);
27668- if (end - len >= addr &&
27669- (!vma || addr + len <= vma->vm_start))
27670+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27671 return addr;
27672 }
27673
27674@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27675 info.high_limit = end;
27676 info.align_mask = filp ? get_align_mask() : 0;
27677 info.align_offset = pgoff << PAGE_SHIFT;
27678+ info.threadstack_offset = offset;
27679 return vm_unmapped_area(&info);
27680 }
27681
27682@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27683 struct mm_struct *mm = current->mm;
27684 unsigned long addr = addr0;
27685 struct vm_unmapped_area_info info;
27686+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27687
27688 /* requested length too big for entire address space */
27689 if (len > TASK_SIZE)
27690@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27691 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27692 goto bottomup;
27693
27694+#ifdef CONFIG_PAX_RANDMMAP
27695+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27696+#endif
27697+
27698 /* requesting a specific address */
27699 if (addr) {
27700 addr = PAGE_ALIGN(addr);
27701 vma = find_vma(mm, addr);
27702- if (TASK_SIZE - len >= addr &&
27703- (!vma || addr + len <= vma->vm_start))
27704+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27705 return addr;
27706 }
27707
27708@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27709 info.high_limit = mm->mmap_base;
27710 info.align_mask = filp ? get_align_mask() : 0;
27711 info.align_offset = pgoff << PAGE_SHIFT;
27712+ info.threadstack_offset = offset;
27713 addr = vm_unmapped_area(&info);
27714 if (!(addr & ~PAGE_MASK))
27715 return addr;
27716diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27717index 91a4496..42fc304 100644
27718--- a/arch/x86/kernel/tboot.c
27719+++ b/arch/x86/kernel/tboot.c
27720@@ -44,6 +44,7 @@
27721 #include <asm/setup.h>
27722 #include <asm/e820.h>
27723 #include <asm/io.h>
27724+#include <asm/tlbflush.h>
27725
27726 #include "../realmode/rm/wakeup.h"
27727
27728@@ -221,7 +222,7 @@ static int tboot_setup_sleep(void)
27729
27730 void tboot_shutdown(u32 shutdown_type)
27731 {
27732- void (*shutdown)(void);
27733+ void (* __noreturn shutdown)(void);
27734
27735 if (!tboot_enabled())
27736 return;
27737@@ -242,8 +243,9 @@ void tboot_shutdown(u32 shutdown_type)
27738 tboot->shutdown_type = shutdown_type;
27739
27740 switch_to_tboot_pt();
27741+ cr4_clear_bits(X86_CR4_PCIDE);
27742
27743- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27744+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27745 shutdown();
27746
27747 /* should not reach here */
27748@@ -310,7 +312,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27749 return -ENODEV;
27750 }
27751
27752-static atomic_t ap_wfs_count;
27753+static atomic_unchecked_t ap_wfs_count;
27754
27755 static int tboot_wait_for_aps(int num_aps)
27756 {
27757@@ -334,9 +336,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27758 {
27759 switch (action) {
27760 case CPU_DYING:
27761- atomic_inc(&ap_wfs_count);
27762+ atomic_inc_unchecked(&ap_wfs_count);
27763 if (num_online_cpus() == 1)
27764- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27765+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27766 return NOTIFY_BAD;
27767 break;
27768 }
27769@@ -422,7 +424,7 @@ static __init int tboot_late_init(void)
27770
27771 tboot_create_trampoline();
27772
27773- atomic_set(&ap_wfs_count, 0);
27774+ atomic_set_unchecked(&ap_wfs_count, 0);
27775 register_hotcpu_notifier(&tboot_cpu_notifier);
27776
27777 #ifdef CONFIG_DEBUG_FS
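
The ap_wfs_count conversions look mechanical, but they encode a real distinction: under PAX_REFCOUNT, plain atomic_inc() is instrumented to trap on overflow, and counters for which wrapping is harmless are moved to the *_unchecked variants to opt out. A userspace sketch of the checked/unchecked split using a compiler builtin (the kernel implements this with asm-level instrumentation, not __builtin_add_overflow):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked increment: trap on signed overflow, the PAX_REFCOUNT policy
 * for reference counts that must never wrap. */
static int inc_checked(int v)
{
        int out;

        if (__builtin_add_overflow(v, 1, &out)) {
                fprintf(stderr, "refcount overflow\n");
                abort();
        }
        return out;
}

/* Unchecked increment: wrapping is acceptable for plain statistics,
 * so no instrumentation is wanted, the *_unchecked case. */
static int inc_unchecked(int v)
{
        return (int)((unsigned)v + 1);  /* well-defined wraparound */
}

int main(void)
{
        printf("%d\n", inc_unchecked(INT_MAX)); /* INT_MIN: wraps quietly */
        printf("%d\n", inc_checked(1));         /* 2 */
        inc_checked(INT_MAX);                   /* aborts */
        return 0;
}
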
27778diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27779index 25adc0e..1df4349 100644
27780--- a/arch/x86/kernel/time.c
27781+++ b/arch/x86/kernel/time.c
27782@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27783 {
27784 unsigned long pc = instruction_pointer(regs);
27785
27786- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27787+ if (!user_mode(regs) && in_lock_functions(pc)) {
27788 #ifdef CONFIG_FRAME_POINTER
27789- return *(unsigned long *)(regs->bp + sizeof(long));
27790+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27791 #else
27792 unsigned long *sp =
27793 (unsigned long *)kernel_stack_pointer(regs);
27794@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27795 * or above a saved flags. Eflags has bits 22-31 zero,
27796 * kernel addresses don't.
27797 */
27798+
27799+#ifdef CONFIG_PAX_KERNEXEC
27800+ return ktla_ktva(sp[0]);
27801+#else
27802 if (sp[0] >> 22)
27803 return sp[0];
27804 if (sp[1] >> 22)
27805 return sp[1];
27806 #endif
27807+
27808+#endif
27809 }
27810 return pc;
27811 }
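
The heuristic being bypassed in profile_pc() relies on an architectural fact: a saved EFLAGS word always has bits 22..31 clear, while 32-bit kernel text sits at 0xc0000000 and above, so value >> 22 cleanly separates the two cases. Under KERNEXEC the patch instead translates sp[0] unconditionally via ktla_ktva(). The discriminator, demonstrated:

#include <stdio.h>

/* On i386, EFLAGS bits 22..31 are architecturally zero, while kernel
 * addresses (default 3G/1G split) start at 0xc0000000, so the top ten
 * bits tell saved flags apart from a return address. */
static int looks_like_kernel_address(unsigned long v)
{
        return (v >> 22) != 0;
}

int main(void)
{
        printf("%d\n", looks_like_kernel_address(0x00000246UL)); /* 0: EFLAGS */
        printf("%d\n", looks_like_kernel_address(0xc10a5b30UL)); /* 1: text */
        return 0;
}
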
27812diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27813index 7fc5e84..c6e445a 100644
27814--- a/arch/x86/kernel/tls.c
27815+++ b/arch/x86/kernel/tls.c
27816@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27817 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27818 return -EINVAL;
27819
27820+#ifdef CONFIG_PAX_SEGMEXEC
27821+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27822+ return -EINVAL;
27823+#endif
27824+
27825 set_tls_desc(p, idx, &info, 1);
27826
27827 return 0;
27828@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27829
27830 if (kbuf)
27831 info = kbuf;
27832- else if (__copy_from_user(infobuf, ubuf, count))
27833+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27834 return -EFAULT;
27835 else
27836 info = infobuf;
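
The regset_tls_set() change is a textbook bounds fix: count is user-controlled, so it must be clamped to the destination buffer before __copy_from_user() runs, or the copy overruns infobuf on the stack. The same pattern in plain C, with memcpy standing in for the user-copy primitive:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the fixed __copy_from_user() call site: reject a
 * caller-supplied length larger than the destination before copying
 * a single byte. */
static int copy_in(void *dst, size_t dst_size, const void *src, size_t count)
{
        if (count > dst_size)
                return -EFAULT;
        memcpy(dst, src, count);
        return 0;
}

int main(void)
{
        char buf[16], payload[64] = "x";

        printf("%d\n", copy_in(buf, sizeof buf, payload, 8));  /* 0: ok */
        printf("%d\n", copy_in(buf, sizeof buf, payload, 64)); /* -EFAULT */
        return 0;
}
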
27837diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27838index 1c113db..287b42e 100644
27839--- a/arch/x86/kernel/tracepoint.c
27840+++ b/arch/x86/kernel/tracepoint.c
27841@@ -9,11 +9,11 @@
27842 #include <linux/atomic.h>
27843
27844 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27845-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27846+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27847 (unsigned long) trace_idt_table };
27848
27849 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27850-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27851+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27852
27853 static int trace_irq_vector_refcount;
27854 static DEFINE_MUTEX(irq_vector_mutex);
27855diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27856index 4ff5d16..736e3e1 100644
27857--- a/arch/x86/kernel/traps.c
27858+++ b/arch/x86/kernel/traps.c
27859@@ -68,7 +68,7 @@
27860 #include <asm/proto.h>
27861
27862 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27863-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27864+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27865 #else
27866 #include <asm/processor-flags.h>
27867 #include <asm/setup.h>
27868@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27869 #endif
27870
27871 /* Must be page-aligned because the real IDT is used in a fixmap. */
27872-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27873+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27874
27875 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27876 EXPORT_SYMBOL_GPL(used_vectors);
27877@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
27878 {
27879 enum ctx_state prev_state;
27880
27881- if (user_mode_vm(regs)) {
27882+ if (user_mode(regs)) {
27883 /* Other than that, we're just an exception. */
27884 prev_state = exception_enter();
27885 } else {
27886@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27887 /* Must be before exception_exit. */
27888 preempt_count_sub(HARDIRQ_OFFSET);
27889
27890- if (user_mode_vm(regs))
27891+ if (user_mode(regs))
27892 return exception_exit(prev_state);
27893 else
27894 rcu_nmi_exit();
27895@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27896 *
27897 * IST exception handlers normally cannot schedule. As a special
27898 * exception, if the exception interrupted userspace code (i.e.
27899- * user_mode_vm(regs) would return true) and the exception was not
27900+ * user_mode(regs) would return true) and the exception was not
27901 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
27902 * begins a non-atomic section within an ist_enter()/ist_exit() region.
27903 * Callers are responsible for enabling interrupts themselves inside
27904@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27905 */
27906 void ist_begin_non_atomic(struct pt_regs *regs)
27907 {
27908- BUG_ON(!user_mode_vm(regs));
27909+ BUG_ON(!user_mode(regs));
27910
27911 /*
27912 * Sanity check: we need to be on the normal thread stack. This
27913@@ -191,11 +191,11 @@ void ist_end_non_atomic(void)
27914 }
27915
27916 static nokprobe_inline int
27917-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27918+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27919 struct pt_regs *regs, long error_code)
27920 {
27921 #ifdef CONFIG_X86_32
27922- if (regs->flags & X86_VM_MASK) {
27923+ if (v8086_mode(regs)) {
27924 /*
27925 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27926 * On nmi (interrupt 2), do_trap should not be called.
27927@@ -208,12 +208,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27928 return -1;
27929 }
27930 #endif
27931- if (!user_mode(regs)) {
27932+ if (!user_mode_novm(regs)) {
27933 if (!fixup_exception(regs)) {
27934 tsk->thread.error_code = error_code;
27935 tsk->thread.trap_nr = trapnr;
27936+
27937+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27938+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27939+ str = "PAX: suspicious stack segment fault";
27940+#endif
27941+
27942 die(str, regs, error_code);
27943 }
27944+
27945+#ifdef CONFIG_PAX_REFCOUNT
27946+ if (trapnr == X86_TRAP_OF)
27947+ pax_report_refcount_overflow(regs);
27948+#endif
27949+
27950 return 0;
27951 }
27952
27953@@ -252,7 +264,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27954 }
27955
27956 static void
27957-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27958+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27959 long error_code, siginfo_t *info)
27960 {
27961 struct task_struct *tsk = current;
27962@@ -276,7 +288,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27963 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27964 printk_ratelimit()) {
27965 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27966- tsk->comm, tsk->pid, str,
27967+ tsk->comm, task_pid_nr(tsk), str,
27968 regs->ip, regs->sp, error_code);
27969 print_vma_addr(" in ", regs->ip);
27970 pr_cont("\n");
27971@@ -358,6 +370,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27972 tsk->thread.error_code = error_code;
27973 tsk->thread.trap_nr = X86_TRAP_DF;
27974
27975+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27976+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27977+ die("grsec: kernel stack overflow detected", regs, error_code);
27978+#endif
27979+
27980 #ifdef CONFIG_DOUBLEFAULT
27981 df_debug(regs, error_code);
27982 #endif
27983@@ -384,7 +401,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27984 goto exit;
27985 conditional_sti(regs);
27986
27987- if (!user_mode_vm(regs))
27988+ if (!user_mode(regs))
27989 die("bounds", regs, error_code);
27990
27991 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
27992@@ -463,7 +480,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27993 conditional_sti(regs);
27994
27995 #ifdef CONFIG_X86_32
27996- if (regs->flags & X86_VM_MASK) {
27997+ if (v8086_mode(regs)) {
27998 local_irq_enable();
27999 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28000 goto exit;
28001@@ -471,18 +488,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28002 #endif
28003
28004 tsk = current;
28005- if (!user_mode(regs)) {
28006+ if (!user_mode_novm(regs)) {
28007 if (fixup_exception(regs))
28008 goto exit;
28009
28010 tsk->thread.error_code = error_code;
28011 tsk->thread.trap_nr = X86_TRAP_GP;
28012 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28013- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28014+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28015+
28016+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28017+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28018+ die("PAX: suspicious general protection fault", regs, error_code);
28019+ else
28020+#endif
28021+
28022 die("general protection fault", regs, error_code);
28023+ }
28024 goto exit;
28025 }
28026
28027+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28028+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28029+ struct mm_struct *mm = tsk->mm;
28030+ unsigned long limit;
28031+
28032+ down_write(&mm->mmap_sem);
28033+ limit = mm->context.user_cs_limit;
28034+ if (limit < TASK_SIZE) {
28035+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28036+ up_write(&mm->mmap_sem);
28037+ return;
28038+ }
28039+ up_write(&mm->mmap_sem);
28040+ }
28041+#endif
28042+
28043 tsk->thread.error_code = error_code;
28044 tsk->thread.trap_nr = X86_TRAP_GP;
28045
28046@@ -581,13 +622,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28047 container_of(task_pt_regs(current),
28048 struct bad_iret_stack, regs);
28049
28050+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28051+ new_stack = s;
28052+
28053 /* Copy the IRET target to the new stack. */
28054 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28055
28056 /* Copy the remainder of the stack from the current stack. */
28057 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28058
28059- BUG_ON(!user_mode_vm(&new_stack->regs));
28060+ BUG_ON(!user_mode(&new_stack->regs));
28061 return new_stack;
28062 }
28063 NOKPROBE_SYMBOL(fixup_bad_iret);
28064@@ -637,7 +681,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28065 * then it's very likely the result of an icebp/int01 trap.
28066 * User wants a sigtrap for that.
28067 */
28068- if (!dr6 && user_mode_vm(regs))
28069+ if (!dr6 && user_mode(regs))
28070 user_icebp = 1;
28071
28072 /* Catch kmemcheck conditions first of all! */
28073@@ -673,7 +717,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28074 /* It's safe to allow irq's after DR6 has been saved */
28075 preempt_conditional_sti(regs);
28076
28077- if (regs->flags & X86_VM_MASK) {
28078+ if (v8086_mode(regs)) {
28079 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28080 X86_TRAP_DB);
28081 preempt_conditional_cli(regs);
28082@@ -688,7 +732,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28083 * We already checked v86 mode above, so we can check for kernel mode
28084 * by just checking the CPL of CS.
28085 */
28086- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28087+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28088 tsk->thread.debugreg6 &= ~DR_STEP;
28089 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28090 regs->flags &= ~X86_EFLAGS_TF;
28091@@ -721,7 +765,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28092 return;
28093 conditional_sti(regs);
28094
28095- if (!user_mode_vm(regs))
28096+ if (!user_mode(regs))
28097 {
28098 if (!fixup_exception(regs)) {
28099 task->thread.error_code = error_code;
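
The GRKERNSEC_KSTACKOVERFLOW check added to do_double_fault() reads: if the faulting sp sits within one page below the base of the task's stack (tsk->stack is the lowest address of the stack area), the double fault was almost certainly a stack overrun into the guard area, so die with an explicit message. The unsigned arithmetic, spelled out:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* The stack grows down toward stack_base. After an overrun, sp lies
 * just below stack_base, so stack_base - sp is a small positive
 * number; for any sp still inside the stack, the unsigned difference
 * wraps to something enormous and the test fails. */
static int overflowed(unsigned long stack_base, unsigned long sp)
{
        return stack_base - sp <= PAGE_SIZE;
}

int main(void)
{
        unsigned long base = 0xffff880012340000UL;      /* illustrative base */

        printf("%d\n", overflowed(base, base - 64));     /* 1: just overran */
        printf("%d\n", overflowed(base, base + 0x3f00)); /* 0: still inside */
        return 0;
}
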
28100diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28101index 5054497..139f8f8 100644
28102--- a/arch/x86/kernel/tsc.c
28103+++ b/arch/x86/kernel/tsc.c
28104@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28105 */
28106 smp_wmb();
28107
28108- ACCESS_ONCE(c2n->head) = data;
28109+ ACCESS_ONCE_RW(c2n->head) = data;
28110 }
28111
28112 /*
28113diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28114index 81f8adb0..fff670e 100644
28115--- a/arch/x86/kernel/uprobes.c
28116+++ b/arch/x86/kernel/uprobes.c
28117@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28118 int ret = NOTIFY_DONE;
28119
28120 /* We are only interested in userspace traps */
28121- if (regs && !user_mode_vm(regs))
28122+ if (regs && !user_mode(regs))
28123 return NOTIFY_DONE;
28124
28125 switch (val) {
28126@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28127
28128 if (nleft != rasize) {
28129 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28130- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28131+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28132
28133 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28134 }
28135diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28136index b9242ba..50c5edd 100644
28137--- a/arch/x86/kernel/verify_cpu.S
28138+++ b/arch/x86/kernel/verify_cpu.S
28139@@ -20,6 +20,7 @@
28140 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28141 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28142 * arch/x86/kernel/head_32.S: processor startup
28143+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28144 *
28145 * verify_cpu, returns the status of longmode and SSE in register %eax.
28146 * 0: Success 1: Failure
28147diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28148index e8edcf5..27f9344 100644
28149--- a/arch/x86/kernel/vm86_32.c
28150+++ b/arch/x86/kernel/vm86_32.c
28151@@ -44,6 +44,7 @@
28152 #include <linux/ptrace.h>
28153 #include <linux/audit.h>
28154 #include <linux/stddef.h>
28155+#include <linux/grsecurity.h>
28156
28157 #include <asm/uaccess.h>
28158 #include <asm/io.h>
28159@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28160 do_exit(SIGSEGV);
28161 }
28162
28163- tss = &per_cpu(init_tss, get_cpu());
28164+ tss = init_tss + get_cpu();
28165 current->thread.sp0 = current->thread.saved_sp0;
28166 current->thread.sysenter_cs = __KERNEL_CS;
28167 load_sp0(tss, &current->thread);
28168@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28169
28170 if (tsk->thread.saved_sp0)
28171 return -EPERM;
28172+
28173+#ifdef CONFIG_GRKERNSEC_VM86
28174+ if (!capable(CAP_SYS_RAWIO)) {
28175+ gr_handle_vm86();
28176+ return -EPERM;
28177+ }
28178+#endif
28179+
28180 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28181 offsetof(struct kernel_vm86_struct, vm86plus) -
28182 sizeof(info.regs));
28183@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28184 int tmp;
28185 struct vm86plus_struct __user *v86;
28186
28187+#ifdef CONFIG_GRKERNSEC_VM86
28188+ if (!capable(CAP_SYS_RAWIO)) {
28189+ gr_handle_vm86();
28190+ return -EPERM;
28191+ }
28192+#endif
28193+
28194 tsk = current;
28195 switch (cmd) {
28196 case VM86_REQUEST_IRQ:
28197@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28198 tsk->thread.saved_fs = info->regs32->fs;
28199 tsk->thread.saved_gs = get_user_gs(info->regs32);
28200
28201- tss = &per_cpu(init_tss, get_cpu());
28202+ tss = init_tss + get_cpu();
28203 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28204 if (cpu_has_sep)
28205 tsk->thread.sysenter_cs = 0;
28206@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28207 goto cannot_handle;
28208 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28209 goto cannot_handle;
28210- intr_ptr = (unsigned long __user *) (i << 2);
28211+ intr_ptr = (__force unsigned long __user *) (i << 2);
28212 if (get_user(segoffs, intr_ptr))
28213 goto cannot_handle;
28214 if ((segoffs >> 16) == BIOSSEG)
28215diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28216index 00bf300..129df8e 100644
28217--- a/arch/x86/kernel/vmlinux.lds.S
28218+++ b/arch/x86/kernel/vmlinux.lds.S
28219@@ -26,6 +26,13 @@
28220 #include <asm/page_types.h>
28221 #include <asm/cache.h>
28222 #include <asm/boot.h>
28223+#include <asm/segment.h>
28224+
28225+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28226+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28227+#else
28228+#define __KERNEL_TEXT_OFFSET 0
28229+#endif
28230
28231 #undef i386 /* in case the preprocessor is a 32bit one */
28232
28233@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28234
28235 PHDRS {
28236 text PT_LOAD FLAGS(5); /* R_E */
28237+#ifdef CONFIG_X86_32
28238+ module PT_LOAD FLAGS(5); /* R_E */
28239+#endif
28240+#ifdef CONFIG_XEN
28241+ rodata PT_LOAD FLAGS(5); /* R_E */
28242+#else
28243+ rodata PT_LOAD FLAGS(4); /* R__ */
28244+#endif
28245 data PT_LOAD FLAGS(6); /* RW_ */
28246-#ifdef CONFIG_X86_64
28247+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28248 #ifdef CONFIG_SMP
28249 percpu PT_LOAD FLAGS(6); /* RW_ */
28250 #endif
28251+ text.init PT_LOAD FLAGS(5); /* R_E */
28252+ text.exit PT_LOAD FLAGS(5); /* R_E */
28253 init PT_LOAD FLAGS(7); /* RWE */
28254-#endif
28255 note PT_NOTE FLAGS(0); /* ___ */
28256 }
28257
28258 SECTIONS
28259 {
28260 #ifdef CONFIG_X86_32
28261- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28262- phys_startup_32 = startup_32 - LOAD_OFFSET;
28263+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28264 #else
28265- . = __START_KERNEL;
28266- phys_startup_64 = startup_64 - LOAD_OFFSET;
28267+ . = __START_KERNEL;
28268 #endif
28269
28270 /* Text and read-only data */
28271- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28272- _text = .;
28273+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28274 /* bootstrapping code */
28275+#ifdef CONFIG_X86_32
28276+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28277+#else
28278+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28279+#endif
28280+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28281+ _text = .;
28282 HEAD_TEXT
28283 . = ALIGN(8);
28284 _stext = .;
28285@@ -104,13 +124,47 @@ SECTIONS
28286 IRQENTRY_TEXT
28287 *(.fixup)
28288 *(.gnu.warning)
28289- /* End of text section */
28290- _etext = .;
28291 } :text = 0x9090
28292
28293- NOTES :text :note
28294+ . += __KERNEL_TEXT_OFFSET;
28295
28296- EXCEPTION_TABLE(16) :text = 0x9090
28297+#ifdef CONFIG_X86_32
28298+ . = ALIGN(PAGE_SIZE);
28299+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28300+
28301+#ifdef CONFIG_PAX_KERNEXEC
28302+ MODULES_EXEC_VADDR = .;
28303+ BYTE(0)
28304+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28305+ . = ALIGN(HPAGE_SIZE) - 1;
28306+ MODULES_EXEC_END = .;
28307+#endif
28308+
28309+ } :module
28310+#endif
28311+
28312+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28313+ /* End of text section */
28314+ BYTE(0)
28315+ _etext = . - __KERNEL_TEXT_OFFSET;
28316+ }
28317+
28318+#ifdef CONFIG_X86_32
28319+ . = ALIGN(PAGE_SIZE);
28320+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28321+ . = ALIGN(PAGE_SIZE);
28322+ *(.empty_zero_page)
28323+ *(.initial_pg_fixmap)
28324+ *(.initial_pg_pmd)
28325+ *(.initial_page_table)
28326+ *(.swapper_pg_dir)
28327+ } :rodata
28328+#endif
28329+
28330+ . = ALIGN(PAGE_SIZE);
28331+ NOTES :rodata :note
28332+
28333+ EXCEPTION_TABLE(16) :rodata
28334
28335 #if defined(CONFIG_DEBUG_RODATA)
28336 /* .text should occupy whole number of pages */
28337@@ -122,16 +176,20 @@ SECTIONS
28338
28339 /* Data */
28340 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28341+
28342+#ifdef CONFIG_PAX_KERNEXEC
28343+ . = ALIGN(HPAGE_SIZE);
28344+#else
28345+ . = ALIGN(PAGE_SIZE);
28346+#endif
28347+
28348 /* Start of data section */
28349 _sdata = .;
28350
28351 /* init_task */
28352 INIT_TASK_DATA(THREAD_SIZE)
28353
28354-#ifdef CONFIG_X86_32
28355- /* 32 bit has nosave before _edata */
28356 NOSAVE_DATA
28357-#endif
28358
28359 PAGE_ALIGNED_DATA(PAGE_SIZE)
28360
28361@@ -174,12 +232,19 @@ SECTIONS
28362 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28363
28364 /* Init code and data - will be freed after init */
28365- . = ALIGN(PAGE_SIZE);
28366 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28367+ BYTE(0)
28368+
28369+#ifdef CONFIG_PAX_KERNEXEC
28370+ . = ALIGN(HPAGE_SIZE);
28371+#else
28372+ . = ALIGN(PAGE_SIZE);
28373+#endif
28374+
28375 __init_begin = .; /* paired with __init_end */
28376- }
28377+ } :init.begin
28378
28379-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28380+#ifdef CONFIG_SMP
28381 /*
28382 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28383 * output PHDR, so the next output section - .init.text - should
28384@@ -190,12 +255,27 @@ SECTIONS
28385 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28386 #endif
28387
28388- INIT_TEXT_SECTION(PAGE_SIZE)
28389-#ifdef CONFIG_X86_64
28390- :init
28391-#endif
28392+ . = ALIGN(PAGE_SIZE);
28393+ init_begin = .;
28394+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28395+ VMLINUX_SYMBOL(_sinittext) = .;
28396+ INIT_TEXT
28397+ . = ALIGN(PAGE_SIZE);
28398+ } :text.init
28399
28400- INIT_DATA_SECTION(16)
28401+ /*
28402+ * .exit.text is discarded at runtime, not link time, to deal with
28403+ * references from .altinstructions and .eh_frame
28404+ */
28405+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28406+ EXIT_TEXT
28407+ VMLINUX_SYMBOL(_einittext) = .;
28408+ . = ALIGN(16);
28409+ } :text.exit
28410+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28411+
28412+ . = ALIGN(PAGE_SIZE);
28413+ INIT_DATA_SECTION(16) :init
28414
28415 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28416 __x86_cpu_dev_start = .;
28417@@ -266,19 +346,12 @@ SECTIONS
28418 }
28419
28420 . = ALIGN(8);
28421- /*
28422- * .exit.text is discard at runtime, not link time, to deal with
28423- * references from .altinstructions and .eh_frame
28424- */
28425- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28426- EXIT_TEXT
28427- }
28428
28429 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28430 EXIT_DATA
28431 }
28432
28433-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28434+#ifndef CONFIG_SMP
28435 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28436 #endif
28437
28438@@ -297,16 +370,10 @@ SECTIONS
28439 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28440 __smp_locks = .;
28441 *(.smp_locks)
28442- . = ALIGN(PAGE_SIZE);
28443 __smp_locks_end = .;
28444+ . = ALIGN(PAGE_SIZE);
28445 }
28446
28447-#ifdef CONFIG_X86_64
28448- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28449- NOSAVE_DATA
28450- }
28451-#endif
28452-
28453 /* BSS */
28454 . = ALIGN(PAGE_SIZE);
28455 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28456@@ -322,6 +389,7 @@ SECTIONS
28457 __brk_base = .;
28458 . += 64 * 1024; /* 64k alignment slop space */
28459 *(.brk_reservation) /* areas brk users have reserved */
28460+ . = ALIGN(HPAGE_SIZE);
28461 __brk_limit = .;
28462 }
28463
28464@@ -348,13 +416,12 @@ SECTIONS
28465 * for the boot processor.
28466 */
28467 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28468-INIT_PER_CPU(gdt_page);
28469 INIT_PER_CPU(irq_stack_union);
28470
28471 /*
28472 * Build-time check on the image size:
28473 */
28474-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28475+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28476 "kernel image bigger than KERNEL_IMAGE_SIZE");
28477
28478 #ifdef CONFIG_SMP
28479diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28480index 2dcc6ff..082dc7a 100644
28481--- a/arch/x86/kernel/vsyscall_64.c
28482+++ b/arch/x86/kernel/vsyscall_64.c
28483@@ -38,15 +38,13 @@
28484 #define CREATE_TRACE_POINTS
28485 #include "vsyscall_trace.h"
28486
28487-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28488+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28489
28490 static int __init vsyscall_setup(char *str)
28491 {
28492 if (str) {
28493 if (!strcmp("emulate", str))
28494 vsyscall_mode = EMULATE;
28495- else if (!strcmp("native", str))
28496- vsyscall_mode = NATIVE;
28497 else if (!strcmp("none", str))
28498 vsyscall_mode = NONE;
28499 else
28500@@ -264,8 +262,7 @@ do_ret:
28501 return true;
28502
28503 sigsegv:
28504- force_sig(SIGSEGV, current);
28505- return true;
28506+ do_group_exit(SIGKILL);
28507 }
28508
28509 /*
28510@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28511 static struct vm_area_struct gate_vma = {
28512 .vm_start = VSYSCALL_ADDR,
28513 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28514- .vm_page_prot = PAGE_READONLY_EXEC,
28515- .vm_flags = VM_READ | VM_EXEC,
28516+ .vm_page_prot = PAGE_READONLY,
28517+ .vm_flags = VM_READ,
28518 .vm_ops = &gate_vma_ops,
28519 };
28520
28521@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28522 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28523
28524 if (vsyscall_mode != NONE)
28525- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28526- vsyscall_mode == NATIVE
28527- ? PAGE_KERNEL_VSYSCALL
28528- : PAGE_KERNEL_VVAR);
28529+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28530
28531 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28532 (unsigned long)VSYSCALL_ADDR);
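/*
 * Editorial aside: a userspace sketch of what the vsyscall hunks above
 * preserve. The patch drops the NATIVE mode entirely, keeps the legacy
 * page mapped read-only and non-executable, and emulates calls into it
 * (killing the task outright on a bad entry instead of raising SIGSEGV).
 * This demo assumes an x86-64 kernel booted with vsyscall=emulate; the
 * fixed ABI address below is the legacy gettimeofday entry.
 */
#include <stdio.h>
#include <sys/time.h>

typedef int (*vgtod_fn)(struct timeval *tv, void *tz);

int main(void)
{
	vgtod_fn vgtod = (vgtod_fn)0xffffffffff600000UL; /* legacy vsyscall slot */
	struct timeval tv;

	if (vgtod(&tv, NULL) == 0)	/* faults; the kernel emulates the call */
		printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}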
28533diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28534index 37d8fa4..66e319a 100644
28535--- a/arch/x86/kernel/x8664_ksyms_64.c
28536+++ b/arch/x86/kernel/x8664_ksyms_64.c
28537@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28538 EXPORT_SYMBOL(copy_user_generic_unrolled);
28539 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28540 EXPORT_SYMBOL(__copy_user_nocache);
28541-EXPORT_SYMBOL(_copy_from_user);
28542-EXPORT_SYMBOL(_copy_to_user);
28543
28544 EXPORT_SYMBOL(copy_page);
28545 EXPORT_SYMBOL(clear_page);
28546@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28547 EXPORT_SYMBOL(___preempt_schedule_context);
28548 #endif
28549 #endif
28550+
28551+#ifdef CONFIG_PAX_PER_CPU_PGD
28552+EXPORT_SYMBOL(cpu_pgd);
28553+#endif
28554diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28555index 234b072..b7ab191 100644
28556--- a/arch/x86/kernel/x86_init.c
28557+++ b/arch/x86/kernel/x86_init.c
28558@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28559 static void default_nmi_init(void) { };
28560 static int default_i8042_detect(void) { return 1; };
28561
28562-struct x86_platform_ops x86_platform = {
28563+struct x86_platform_ops x86_platform __read_only = {
28564 .calibrate_tsc = native_calibrate_tsc,
28565 .get_wallclock = mach_get_cmos_time,
28566 .set_wallclock = mach_set_rtc_mmss,
28567@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28568 EXPORT_SYMBOL_GPL(x86_platform);
28569
28570 #if defined(CONFIG_PCI_MSI)
28571-struct x86_msi_ops x86_msi = {
28572+struct x86_msi_ops x86_msi __read_only = {
28573 .setup_msi_irqs = native_setup_msi_irqs,
28574 .compose_msi_msg = native_compose_msi_msg,
28575 .teardown_msi_irq = native_teardown_msi_irq,
28576@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28577 }
28578 #endif
28579
28580-struct x86_io_apic_ops x86_io_apic_ops = {
28581+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28582 .init = native_io_apic_init_mappings,
28583 .read = native_io_apic_read,
28584 .write = native_io_apic_write,
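/*
 * Editorial aside: a sketch of what the __read_only tag added above is
 * for. Under KERNEXEC these ops tables end up in memory that is mapped
 * read-only after boot, so an exploit cannot quietly repoint hooks such
 * as x86_platform.get_wallclock. The section name below is illustrative;
 * the patch's real macro lives in its compiler headers.
 */
#define __read_only __attribute__((__section__(".data..read_only")))

struct demo_ops {
	int (*probe)(void);
};

static int demo_probe(void) { return 0; }

/* filled in at build time; later writes need an explicit open/close window */
static struct demo_ops demo_platform __read_only = {
	.probe = demo_probe,
};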
28585diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28586index cdc6cf9..e04f495 100644
28587--- a/arch/x86/kernel/xsave.c
28588+++ b/arch/x86/kernel/xsave.c
28589@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28590
28591 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28592 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28593- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28594+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28595
28596 if (!use_xsave())
28597 return err;
28598
28599- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28600+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28601
28602 /*
28603 * Read the xstate_bv which we copied (directly from the cpu or
28604 * from the state in task struct) to the user buffers.
28605 */
28606- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28607+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28608
28609 /*
28610 * For legacy compatible, we always set FP/SSE bits in the bit
28611@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28612 */
28613 xstate_bv |= XSTATE_FPSSE;
28614
28615- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28616+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28617
28618 return err;
28619 }
28620@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28621 {
28622 int err;
28623
28624+ buf = (struct xsave_struct __user *)____m(buf);
28625 if (use_xsave())
28626 err = xsave_user(buf);
28627 else if (use_fxsr())
28628@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28629 */
28630 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28631 {
28632+ buf = (void __user *)____m(buf);
28633 if (use_xsave()) {
28634 if ((unsigned long)buf % 64 || fx_only) {
28635 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28636diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28637index 8a80737..bac4961 100644
28638--- a/arch/x86/kvm/cpuid.c
28639+++ b/arch/x86/kvm/cpuid.c
28640@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28641 struct kvm_cpuid2 *cpuid,
28642 struct kvm_cpuid_entry2 __user *entries)
28643 {
28644- int r;
28645+ int r, i;
28646
28647 r = -E2BIG;
28648 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28649 goto out;
28650 r = -EFAULT;
28651- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28652- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28653+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28654 goto out;
28655+ for (i = 0; i < cpuid->nent; ++i) {
28656+ struct kvm_cpuid_entry2 cpuid_entry;
28657+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28658+ goto out;
28659+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28660+ }
28661 vcpu->arch.cpuid_nent = cpuid->nent;
28662 kvm_apic_set_version(vcpu);
28663 kvm_x86_ops->cpuid_update(vcpu);
28664@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28665 struct kvm_cpuid2 *cpuid,
28666 struct kvm_cpuid_entry2 __user *entries)
28667 {
28668- int r;
28669+ int r, i;
28670
28671 r = -E2BIG;
28672 if (cpuid->nent < vcpu->arch.cpuid_nent)
28673 goto out;
28674 r = -EFAULT;
28675- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28676- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28677+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28678 goto out;
28679+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28680+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28681+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28682+ goto out;
28683+ }
28684 return 0;
28685
28686 out:
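/*
 * Editorial aside: the shape of the cpuid copy rewrite above, as a
 * sketch. Instead of one copy_from_user() whose size is computed from
 * a user-supplied count, the range is validated once with access_ok()
 * and each entry is staged through a fixed-size stack slot, so the
 * kernel-side write is bounded per iteration. Function name is mine.
 */
static int copy_cpuid_entries_bounded(struct kvm_cpuid_entry2 *dst,
				      const struct kvm_cpuid_entry2 __user *src,
				      unsigned int nent, unsigned int max)
{
	unsigned int i;

	if (nent > max)
		return -E2BIG;
	if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
		return -EFAULT;
	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 tmp;	/* bounded staging buffer */

		if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
			return -EFAULT;
		dst[i] = tmp;
	}
	return 0;
}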
28687diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28688index 106c015..2db7161 100644
28689--- a/arch/x86/kvm/emulate.c
28690+++ b/arch/x86/kvm/emulate.c
28691@@ -3572,7 +3572,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28692 int cr = ctxt->modrm_reg;
28693 u64 efer = 0;
28694
28695- static u64 cr_reserved_bits[] = {
28696+ static const u64 cr_reserved_bits[] = {
28697 0xffffffff00000000ULL,
28698 0, 0, 0, /* CR3 checked later */
28699 CR4_RESERVED_BITS,
28700diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28701index 4ee827d..a14eff9 100644
28702--- a/arch/x86/kvm/lapic.c
28703+++ b/arch/x86/kvm/lapic.c
28704@@ -56,7 +56,7 @@
28705 #define APIC_BUS_CYCLE_NS 1
28706
28707 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28708-#define apic_debug(fmt, arg...)
28709+#define apic_debug(fmt, arg...) do {} while (0)
28710
28711 #define APIC_LVT_NUM 6
28712 /* 14 is the version for Xeon and Pentium 8.4.8*/
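/*
 * Editorial aside: why the empty apic_debug() body above becomes
 * do {} while (0). An expansion to nothing lets a missing semicolon
 * silently splice the next statement into the if-body; the do/while
 * form is exactly one statement and forces the ';'. Sketch:
 */
#define dbg_off(fmt, ...)			/* expands to nothing  */
#define dbg_safe(fmt, ...) do {} while (0)	/* one full statement  */

extern void handle(int x);

static void demo(int x)
{
	if (x)
		dbg_off("x set")	/* missing ';' still compiles...     */
	handle(x);			/* ...and becomes the if-body, a bug */

	if (x)
		dbg_safe("x set");	/* the bare form above would not parse */
	handle(x);			/* runs unconditionally, as intended   */
}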
28713diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28714index fd49c86..77e1aa0 100644
28715--- a/arch/x86/kvm/paging_tmpl.h
28716+++ b/arch/x86/kvm/paging_tmpl.h
28717@@ -343,7 +343,7 @@ retry_walk:
28718 if (unlikely(kvm_is_error_hva(host_addr)))
28719 goto error;
28720
28721- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28722+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28723 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28724 goto error;
28725 walker->ptep_user[walker->level - 1] = ptep_user;
28726diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28727index cc618c8..3f72f76 100644
28728--- a/arch/x86/kvm/svm.c
28729+++ b/arch/x86/kvm/svm.c
28730@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28731 int cpu = raw_smp_processor_id();
28732
28733 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28734+
28735+ pax_open_kernel();
28736 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28737+ pax_close_kernel();
28738+
28739 load_TR_desc();
28740 }
28741
28742@@ -3964,6 +3968,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28743 #endif
28744 #endif
28745
28746+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28747+ __set_fs(current_thread_info()->addr_limit);
28748+#endif
28749+
28750 reload_tss(vcpu);
28751
28752 local_irq_disable();
28753diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28754index a60bd3a..748e856 100644
28755--- a/arch/x86/kvm/vmx.c
28756+++ b/arch/x86/kvm/vmx.c
28757@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28758 #endif
28759 }
28760
28761-static void vmcs_clear_bits(unsigned long field, u32 mask)
28762+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28763 {
28764 vmcs_writel(field, vmcs_readl(field) & ~mask);
28765 }
28766
28767-static void vmcs_set_bits(unsigned long field, u32 mask)
28768+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28769 {
28770 vmcs_writel(field, vmcs_readl(field) | mask);
28771 }
28772@@ -1705,7 +1705,11 @@ static void reload_tss(void)
28773 struct desc_struct *descs;
28774
28775 descs = (void *)gdt->address;
28776+
28777+ pax_open_kernel();
28778 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28779+ pax_close_kernel();
28780+
28781 load_TR_desc();
28782 }
28783
28784@@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28785 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28786 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28787
28788+#ifdef CONFIG_PAX_PER_CPU_PGD
28789+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28790+#endif
28791+
28792 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28793 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28794 vmx->loaded_vmcs->cpu = cpu;
28795@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28796 * reads and returns guest's timestamp counter "register"
28797 * guest_tsc = host_tsc + tsc_offset -- 21.3
28798 */
28799-static u64 guest_read_tsc(void)
28800+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28801 {
28802 u64 host_tsc, tsc_offset;
28803
28804@@ -4466,7 +4474,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28805 unsigned long cr4;
28806
28807 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28808+
28809+#ifndef CONFIG_PAX_PER_CPU_PGD
28810 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28811+#endif
28812
28813 /* Save the most likely value for this task's CR4 in the VMCS. */
28814 cr4 = cr4_read_shadow();
28815@@ -4493,7 +4504,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28816 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28817 vmx->host_idt_base = dt.address;
28818
28819- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28820+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28821
28822 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28823 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28824@@ -6104,11 +6115,17 @@ static __init int hardware_setup(void)
28825 * page upon invalidation. No need to do anything if not
28826 * using the APIC_ACCESS_ADDR VMCS field.
28827 */
28828- if (!flexpriority_enabled)
28829- kvm_x86_ops->set_apic_access_page_addr = NULL;
28830+ if (!flexpriority_enabled) {
28831+ pax_open_kernel();
28832+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28833+ pax_close_kernel();
28834+ }
28835
28836- if (!cpu_has_vmx_tpr_shadow())
28837- kvm_x86_ops->update_cr8_intercept = NULL;
28838+ if (!cpu_has_vmx_tpr_shadow()) {
28839+ pax_open_kernel();
28840+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28841+ pax_close_kernel();
28842+ }
28843
28844 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28845 kvm_disable_largepages();
28846@@ -6119,14 +6136,16 @@ static __init int hardware_setup(void)
28847 if (!cpu_has_vmx_apicv())
28848 enable_apicv = 0;
28849
28850+ pax_open_kernel();
28851 if (enable_apicv)
28852- kvm_x86_ops->update_cr8_intercept = NULL;
28853+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28854 else {
28855- kvm_x86_ops->hwapic_irr_update = NULL;
28856- kvm_x86_ops->hwapic_isr_update = NULL;
28857- kvm_x86_ops->deliver_posted_interrupt = NULL;
28858- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28859+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28860+ *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
28861+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28862+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28863 }
28864+ pax_close_kernel();
28865
28866 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
28867 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
28868@@ -6179,10 +6198,12 @@ static __init int hardware_setup(void)
28869 enable_pml = 0;
28870
28871 if (!enable_pml) {
28872- kvm_x86_ops->slot_enable_log_dirty = NULL;
28873- kvm_x86_ops->slot_disable_log_dirty = NULL;
28874- kvm_x86_ops->flush_log_dirty = NULL;
28875- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28876+ pax_open_kernel();
28877+ *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
28878+ *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
28879+ *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
28880+ *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28881+ pax_close_kernel();
28882 }
28883
28884 return alloc_kvm_area();
28885@@ -8227,6 +8248,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28886 "jmp 2f \n\t"
28887 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28888 "2: "
28889+
28890+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28891+ "ljmp %[cs],$3f\n\t"
28892+ "3: "
28893+#endif
28894+
28895 /* Save guest registers, load host registers, keep flags */
28896 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28897 "pop %0 \n\t"
28898@@ -8279,6 +8306,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28899 #endif
28900 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28901 [wordsize]"i"(sizeof(ulong))
28902+
28903+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28904+ ,[cs]"i"(__KERNEL_CS)
28905+#endif
28906+
28907 : "cc", "memory"
28908 #ifdef CONFIG_X86_64
28909 , "rax", "rbx", "rdi", "rsi"
28910@@ -8292,7 +8324,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28911 if (debugctlmsr)
28912 update_debugctlmsr(debugctlmsr);
28913
28914-#ifndef CONFIG_X86_64
28915+#ifdef CONFIG_X86_32
28916 /*
28917 * The sysexit path does not restore ds/es, so we must set them to
28918 * a reasonable value ourselves.
28919@@ -8301,8 +8333,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28920 * may be executed in interrupt context, which saves and restore segments
28921 * around it, nullifying its effect.
28922 */
28923- loadsegment(ds, __USER_DS);
28924- loadsegment(es, __USER_DS);
28925+ loadsegment(ds, __KERNEL_DS);
28926+ loadsegment(es, __KERNEL_DS);
28927+ loadsegment(ss, __KERNEL_DS);
28928+
28929+#ifdef CONFIG_PAX_KERNEXEC
28930+ loadsegment(fs, __KERNEL_PERCPU);
28931+#endif
28932+
28933+#ifdef CONFIG_PAX_MEMORY_UDEREF
28934+ __set_fs(current_thread_info()->addr_limit);
28935+#endif
28936+
28937 #endif
28938
28939 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
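/*
 * Editorial aside: the pax_open_kernel()/pax_close_kernel() brackets
 * added throughout vmx.c above make the read-only kvm_x86_ops table
 * briefly writable (the *(void **)& casts sidestep the const typing
 * for just that store). A common x86 implementation strategy is to
 * toggle CR0.WP with preemption off; this sketch assumes that
 * strategy, and the real PaX helpers are more involved.
 */
static inline void demo_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~(1UL << 16)));
	/* CR0.WP clear: ring-0 writes ignore page-table read-only bits */
}

static inline void demo_close_kernel(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 | (1UL << 16)));
	preempt_enable();
}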
28940diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28941index e222ba5..6f0f2de 100644
28942--- a/arch/x86/kvm/x86.c
28943+++ b/arch/x86/kvm/x86.c
28944@@ -1897,8 +1897,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28945 {
28946 struct kvm *kvm = vcpu->kvm;
28947 int lm = is_long_mode(vcpu);
28948- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28949- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28950+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28951+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28952 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28953 : kvm->arch.xen_hvm_config.blob_size_32;
28954 u32 page_num = data & ~PAGE_MASK;
28955@@ -2835,6 +2835,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28956 if (n < msr_list.nmsrs)
28957 goto out;
28958 r = -EFAULT;
28959+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28960+ goto out;
28961 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28962 num_msrs_to_save * sizeof(u32)))
28963 goto out;
28964@@ -5739,7 +5741,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28965 };
28966 #endif
28967
28968-int kvm_arch_init(void *opaque)
28969+int kvm_arch_init(const void *opaque)
28970 {
28971 int r;
28972 struct kvm_x86_ops *ops = opaque;
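/*
 * Editorial aside: the added num_msrs_to_save check above is a
 * clamp-before-copy pattern; a sketch with illustrative names. If the
 * runtime counter could ever exceed the true capacity of the array it
 * describes, the copy_to_user() sized from it would leak adjacent
 * kernel memory, so the count is rejected first.
 */
static long export_u32_list(u32 __user *dst, const u32 *src,
			    unsigned int count, unsigned int capacity)
{
	if (count > capacity)	/* inconsistent counter: refuse to copy */
		return -EFAULT;
	if (copy_to_user(dst, src, count * sizeof(*src)))
		return -EFAULT;
	return 0;
}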
28973diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28974index ac4453d..1f43bf3 100644
28975--- a/arch/x86/lguest/boot.c
28976+++ b/arch/x86/lguest/boot.c
28977@@ -1340,9 +1340,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28978 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28979 * Launcher to reboot us.
28980 */
28981-static void lguest_restart(char *reason)
28982+static __noreturn void lguest_restart(char *reason)
28983 {
28984 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28985+ BUG();
28986 }
28987
28988 /*G:050
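/*
 * Editorial aside: the shape of the lguest change above. __noreturn
 * lets the compiler discard dead code after call sites, and the BUG()
 * enforces the promise if the shutdown hypercall ever returns. In this
 * stand-alone sketch firmware_reset() is hypothetical and an endless
 * loop stands in for BUG().
 */
#define __noreturn __attribute__((noreturn))

extern void firmware_reset(const char *reason);	/* hypothetical */

static __noreturn void demo_restart(const char *reason)
{
	firmware_reset(reason);	/* expected not to return */
	for (;;)
		;		/* keep the noreturn contract regardless */
}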
28989diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28990index 00933d5..3a64af9 100644
28991--- a/arch/x86/lib/atomic64_386_32.S
28992+++ b/arch/x86/lib/atomic64_386_32.S
28993@@ -48,6 +48,10 @@ BEGIN(read)
28994 movl (v), %eax
28995 movl 4(v), %edx
28996 RET_ENDP
28997+BEGIN(read_unchecked)
28998+ movl (v), %eax
28999+ movl 4(v), %edx
29000+RET_ENDP
29001 #undef v
29002
29003 #define v %esi
29004@@ -55,6 +59,10 @@ BEGIN(set)
29005 movl %ebx, (v)
29006 movl %ecx, 4(v)
29007 RET_ENDP
29008+BEGIN(set_unchecked)
29009+ movl %ebx, (v)
29010+ movl %ecx, 4(v)
29011+RET_ENDP
29012 #undef v
29013
29014 #define v %esi
29015@@ -70,6 +78,20 @@ RET_ENDP
29016 BEGIN(add)
29017 addl %eax, (v)
29018 adcl %edx, 4(v)
29019+
29020+#ifdef CONFIG_PAX_REFCOUNT
29021+ jno 0f
29022+ subl %eax, (v)
29023+ sbbl %edx, 4(v)
29024+ int $4
29025+0:
29026+ _ASM_EXTABLE(0b, 0b)
29027+#endif
29028+
29029+RET_ENDP
29030+BEGIN(add_unchecked)
29031+ addl %eax, (v)
29032+ adcl %edx, 4(v)
29033 RET_ENDP
29034 #undef v
29035
29036@@ -77,6 +99,24 @@ RET_ENDP
29037 BEGIN(add_return)
29038 addl (v), %eax
29039 adcl 4(v), %edx
29040+
29041+#ifdef CONFIG_PAX_REFCOUNT
29042+ into
29043+1234:
29044+ _ASM_EXTABLE(1234b, 2f)
29045+#endif
29046+
29047+ movl %eax, (v)
29048+ movl %edx, 4(v)
29049+
29050+#ifdef CONFIG_PAX_REFCOUNT
29051+2:
29052+#endif
29053+
29054+RET_ENDP
29055+BEGIN(add_return_unchecked)
29056+ addl (v), %eax
29057+ adcl 4(v), %edx
29058 movl %eax, (v)
29059 movl %edx, 4(v)
29060 RET_ENDP
29061@@ -86,6 +126,20 @@ RET_ENDP
29062 BEGIN(sub)
29063 subl %eax, (v)
29064 sbbl %edx, 4(v)
29065+
29066+#ifdef CONFIG_PAX_REFCOUNT
29067+ jno 0f
29068+ addl %eax, (v)
29069+ adcl %edx, 4(v)
29070+ int $4
29071+0:
29072+ _ASM_EXTABLE(0b, 0b)
29073+#endif
29074+
29075+RET_ENDP
29076+BEGIN(sub_unchecked)
29077+ subl %eax, (v)
29078+ sbbl %edx, 4(v)
29079 RET_ENDP
29080 #undef v
29081
29082@@ -96,6 +150,27 @@ BEGIN(sub_return)
29083 sbbl $0, %edx
29084 addl (v), %eax
29085 adcl 4(v), %edx
29086+
29087+#ifdef CONFIG_PAX_REFCOUNT
29088+ into
29089+1234:
29090+ _ASM_EXTABLE(1234b, 2f)
29091+#endif
29092+
29093+ movl %eax, (v)
29094+ movl %edx, 4(v)
29095+
29096+#ifdef CONFIG_PAX_REFCOUNT
29097+2:
29098+#endif
29099+
29100+RET_ENDP
29101+BEGIN(sub_return_unchecked)
29102+ negl %edx
29103+ negl %eax
29104+ sbbl $0, %edx
29105+ addl (v), %eax
29106+ adcl 4(v), %edx
29107 movl %eax, (v)
29108 movl %edx, 4(v)
29109 RET_ENDP
29110@@ -105,6 +180,20 @@ RET_ENDP
29111 BEGIN(inc)
29112 addl $1, (v)
29113 adcl $0, 4(v)
29114+
29115+#ifdef CONFIG_PAX_REFCOUNT
29116+ jno 0f
29117+ subl $1, (v)
29118+ sbbl $0, 4(v)
29119+ int $4
29120+0:
29121+ _ASM_EXTABLE(0b, 0b)
29122+#endif
29123+
29124+RET_ENDP
29125+BEGIN(inc_unchecked)
29126+ addl $1, (v)
29127+ adcl $0, 4(v)
29128 RET_ENDP
29129 #undef v
29130
29131@@ -114,6 +203,26 @@ BEGIN(inc_return)
29132 movl 4(v), %edx
29133 addl $1, %eax
29134 adcl $0, %edx
29135+
29136+#ifdef CONFIG_PAX_REFCOUNT
29137+ into
29138+1234:
29139+ _ASM_EXTABLE(1234b, 2f)
29140+#endif
29141+
29142+ movl %eax, (v)
29143+ movl %edx, 4(v)
29144+
29145+#ifdef CONFIG_PAX_REFCOUNT
29146+2:
29147+#endif
29148+
29149+RET_ENDP
29150+BEGIN(inc_return_unchecked)
29151+ movl (v), %eax
29152+ movl 4(v), %edx
29153+ addl $1, %eax
29154+ adcl $0, %edx
29155 movl %eax, (v)
29156 movl %edx, 4(v)
29157 RET_ENDP
29158@@ -123,6 +232,20 @@ RET_ENDP
29159 BEGIN(dec)
29160 subl $1, (v)
29161 sbbl $0, 4(v)
29162+
29163+#ifdef CONFIG_PAX_REFCOUNT
29164+ jno 0f
29165+ addl $1, (v)
29166+ adcl $0, 4(v)
29167+ int $4
29168+0:
29169+ _ASM_EXTABLE(0b, 0b)
29170+#endif
29171+
29172+RET_ENDP
29173+BEGIN(dec_unchecked)
29174+ subl $1, (v)
29175+ sbbl $0, 4(v)
29176 RET_ENDP
29177 #undef v
29178
29179@@ -132,6 +255,26 @@ BEGIN(dec_return)
29180 movl 4(v), %edx
29181 subl $1, %eax
29182 sbbl $0, %edx
29183+
29184+#ifdef CONFIG_PAX_REFCOUNT
29185+ into
29186+1234:
29187+ _ASM_EXTABLE(1234b, 2f)
29188+#endif
29189+
29190+ movl %eax, (v)
29191+ movl %edx, 4(v)
29192+
29193+#ifdef CONFIG_PAX_REFCOUNT
29194+2:
29195+#endif
29196+
29197+RET_ENDP
29198+BEGIN(dec_return_unchecked)
29199+ movl (v), %eax
29200+ movl 4(v), %edx
29201+ subl $1, %eax
29202+ sbbl $0, %edx
29203 movl %eax, (v)
29204 movl %edx, 4(v)
29205 RET_ENDP
29206@@ -143,6 +286,13 @@ BEGIN(add_unless)
29207 adcl %edx, %edi
29208 addl (v), %eax
29209 adcl 4(v), %edx
29210+
29211+#ifdef CONFIG_PAX_REFCOUNT
29212+ into
29213+1234:
29214+ _ASM_EXTABLE(1234b, 2f)
29215+#endif
29216+
29217 cmpl %eax, %ecx
29218 je 3f
29219 1:
29220@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29221 1:
29222 addl $1, %eax
29223 adcl $0, %edx
29224+
29225+#ifdef CONFIG_PAX_REFCOUNT
29226+ into
29227+1234:
29228+ _ASM_EXTABLE(1234b, 2f)
29229+#endif
29230+
29231 movl %eax, (v)
29232 movl %edx, 4(v)
29233 movl $1, %eax
29234@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29235 movl 4(v), %edx
29236 subl $1, %eax
29237 sbbl $0, %edx
29238+
29239+#ifdef CONFIG_PAX_REFCOUNT
29240+ into
29241+1234:
29242+ _ASM_EXTABLE(1234b, 1f)
29243+#endif
29244+
29245 js 1f
29246 movl %eax, (v)
29247 movl %edx, 4(v)
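/*
 * Editorial aside: a C rendering of the checked/unchecked split that
 * the PAX_REFCOUNT hunks above implement in assembly. The checked ops
 * detect signed overflow (the jno/into sequences), undo the update and
 * trap with "int $4" so the kernel can kill the offender; *_unchecked
 * keeps plain wrapping semantics for counters that may legitimately
 * wrap. __builtin_add_overflow stands in for the flag checks; locking
 * and atomicity are elided in this sketch.
 */
extern void refcount_overflow_trap(void);	/* stand-in for int $4 */

typedef struct { long long counter; } atomic64_demo_t;

static void atomic64_demo_add(long long i, atomic64_demo_t *v)
{
	long long res;

	if (__builtin_add_overflow(v->counter, i, &res))
		refcount_overflow_trap();	/* leave *v un-poisoned */
	else
		v->counter = res;
}

static void atomic64_demo_add_unchecked(long long i, atomic64_demo_t *v)
{
	v->counter += i;			/* may wrap, by design */
}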
29248diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29249index f5cc9eb..51fa319 100644
29250--- a/arch/x86/lib/atomic64_cx8_32.S
29251+++ b/arch/x86/lib/atomic64_cx8_32.S
29252@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29253 CFI_STARTPROC
29254
29255 read64 %ecx
29256+ pax_force_retaddr
29257 ret
29258 CFI_ENDPROC
29259 ENDPROC(atomic64_read_cx8)
29260
29261+ENTRY(atomic64_read_unchecked_cx8)
29262+ CFI_STARTPROC
29263+
29264+ read64 %ecx
29265+ pax_force_retaddr
29266+ ret
29267+ CFI_ENDPROC
29268+ENDPROC(atomic64_read_unchecked_cx8)
29269+
29270 ENTRY(atomic64_set_cx8)
29271 CFI_STARTPROC
29272
29273@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29274 cmpxchg8b (%esi)
29275 jne 1b
29276
29277+ pax_force_retaddr
29278 ret
29279 CFI_ENDPROC
29280 ENDPROC(atomic64_set_cx8)
29281
29282+ENTRY(atomic64_set_unchecked_cx8)
29283+ CFI_STARTPROC
29284+
29285+1:
29286+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29287+ * are atomic on 586 and newer */
29288+ cmpxchg8b (%esi)
29289+ jne 1b
29290+
29291+ pax_force_retaddr
29292+ ret
29293+ CFI_ENDPROC
29294+ENDPROC(atomic64_set_unchecked_cx8)
29295+
29296 ENTRY(atomic64_xchg_cx8)
29297 CFI_STARTPROC
29298
29299@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29300 cmpxchg8b (%esi)
29301 jne 1b
29302
29303+ pax_force_retaddr
29304 ret
29305 CFI_ENDPROC
29306 ENDPROC(atomic64_xchg_cx8)
29307
29308-.macro addsub_return func ins insc
29309-ENTRY(atomic64_\func\()_return_cx8)
29310+.macro addsub_return func ins insc unchecked=""
29311+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29312 CFI_STARTPROC
29313 SAVE ebp
29314 SAVE ebx
29315@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29316 movl %edx, %ecx
29317 \ins\()l %esi, %ebx
29318 \insc\()l %edi, %ecx
29319+
29320+.ifb \unchecked
29321+#ifdef CONFIG_PAX_REFCOUNT
29322+ into
29323+2:
29324+ _ASM_EXTABLE(2b, 3f)
29325+#endif
29326+.endif
29327+
29328 LOCK_PREFIX
29329 cmpxchg8b (%ebp)
29330 jne 1b
29331-
29332-10:
29333 movl %ebx, %eax
29334 movl %ecx, %edx
29335+
29336+.ifb \unchecked
29337+#ifdef CONFIG_PAX_REFCOUNT
29338+3:
29339+#endif
29340+.endif
29341+
29342 RESTORE edi
29343 RESTORE esi
29344 RESTORE ebx
29345 RESTORE ebp
29346+ pax_force_retaddr
29347 ret
29348 CFI_ENDPROC
29349-ENDPROC(atomic64_\func\()_return_cx8)
29350+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29351 .endm
29352
29353 addsub_return add add adc
29354 addsub_return sub sub sbb
29355+addsub_return add add adc _unchecked
29356+addsub_return sub sub sbb _unchecked
29357
29358-.macro incdec_return func ins insc
29359-ENTRY(atomic64_\func\()_return_cx8)
29360+.macro incdec_return func ins insc unchecked=""
29361+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29362 CFI_STARTPROC
29363 SAVE ebx
29364
29365@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29366 movl %edx, %ecx
29367 \ins\()l $1, %ebx
29368 \insc\()l $0, %ecx
29369+
29370+.ifb \unchecked
29371+#ifdef CONFIG_PAX_REFCOUNT
29372+ into
29373+2:
29374+ _ASM_EXTABLE(2b, 3f)
29375+#endif
29376+.endif
29377+
29378 LOCK_PREFIX
29379 cmpxchg8b (%esi)
29380 jne 1b
29381
29382-10:
29383 movl %ebx, %eax
29384 movl %ecx, %edx
29385+
29386+.ifb \unchecked
29387+#ifdef CONFIG_PAX_REFCOUNT
29388+3:
29389+#endif
29390+.endif
29391+
29392 RESTORE ebx
29393+ pax_force_retaddr
29394 ret
29395 CFI_ENDPROC
29396-ENDPROC(atomic64_\func\()_return_cx8)
29397+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29398 .endm
29399
29400 incdec_return inc add adc
29401 incdec_return dec sub sbb
29402+incdec_return inc add adc _unchecked
29403+incdec_return dec sub sbb _unchecked
29404
29405 ENTRY(atomic64_dec_if_positive_cx8)
29406 CFI_STARTPROC
29407@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29408 movl %edx, %ecx
29409 subl $1, %ebx
29410 sbb $0, %ecx
29411+
29412+#ifdef CONFIG_PAX_REFCOUNT
29413+ into
29414+1234:
29415+ _ASM_EXTABLE(1234b, 2f)
29416+#endif
29417+
29418 js 2f
29419 LOCK_PREFIX
29420 cmpxchg8b (%esi)
29421@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29422 movl %ebx, %eax
29423 movl %ecx, %edx
29424 RESTORE ebx
29425+ pax_force_retaddr
29426 ret
29427 CFI_ENDPROC
29428 ENDPROC(atomic64_dec_if_positive_cx8)
29429@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29430 movl %edx, %ecx
29431 addl %ebp, %ebx
29432 adcl %edi, %ecx
29433+
29434+#ifdef CONFIG_PAX_REFCOUNT
29435+ into
29436+1234:
29437+ _ASM_EXTABLE(1234b, 3f)
29438+#endif
29439+
29440 LOCK_PREFIX
29441 cmpxchg8b (%esi)
29442 jne 1b
29443@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29444 CFI_ADJUST_CFA_OFFSET -8
29445 RESTORE ebx
29446 RESTORE ebp
29447+ pax_force_retaddr
29448 ret
29449 4:
29450 cmpl %edx, 4(%esp)
29451@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29452 xorl %ecx, %ecx
29453 addl $1, %ebx
29454 adcl %edx, %ecx
29455+
29456+#ifdef CONFIG_PAX_REFCOUNT
29457+ into
29458+1234:
29459+ _ASM_EXTABLE(1234b, 3f)
29460+#endif
29461+
29462 LOCK_PREFIX
29463 cmpxchg8b (%esi)
29464 jne 1b
29465@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29466 movl $1, %eax
29467 3:
29468 RESTORE ebx
29469+ pax_force_retaddr
29470 ret
29471 CFI_ENDPROC
29472 ENDPROC(atomic64_inc_not_zero_cx8)
29473diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29474index e78b8eee..7e173a8 100644
29475--- a/arch/x86/lib/checksum_32.S
29476+++ b/arch/x86/lib/checksum_32.S
29477@@ -29,7 +29,8 @@
29478 #include <asm/dwarf2.h>
29479 #include <asm/errno.h>
29480 #include <asm/asm.h>
29481-
29482+#include <asm/segment.h>
29483+
29484 /*
29485 * computes a partial checksum, e.g. for TCP/UDP fragments
29486 */
29487@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29488
29489 #define ARGBASE 16
29490 #define FP 12
29491-
29492-ENTRY(csum_partial_copy_generic)
29493+
29494+ENTRY(csum_partial_copy_generic_to_user)
29495 CFI_STARTPROC
29496+
29497+#ifdef CONFIG_PAX_MEMORY_UDEREF
29498+ pushl_cfi %gs
29499+ popl_cfi %es
29500+ jmp csum_partial_copy_generic
29501+#endif
29502+
29503+ENTRY(csum_partial_copy_generic_from_user)
29504+
29505+#ifdef CONFIG_PAX_MEMORY_UDEREF
29506+ pushl_cfi %gs
29507+ popl_cfi %ds
29508+#endif
29509+
29510+ENTRY(csum_partial_copy_generic)
29511 subl $4,%esp
29512 CFI_ADJUST_CFA_OFFSET 4
29513 pushl_cfi %edi
29514@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29515 jmp 4f
29516 SRC(1: movw (%esi), %bx )
29517 addl $2, %esi
29518-DST( movw %bx, (%edi) )
29519+DST( movw %bx, %es:(%edi) )
29520 addl $2, %edi
29521 addw %bx, %ax
29522 adcl $0, %eax
29523@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29524 SRC(1: movl (%esi), %ebx )
29525 SRC( movl 4(%esi), %edx )
29526 adcl %ebx, %eax
29527-DST( movl %ebx, (%edi) )
29528+DST( movl %ebx, %es:(%edi) )
29529 adcl %edx, %eax
29530-DST( movl %edx, 4(%edi) )
29531+DST( movl %edx, %es:4(%edi) )
29532
29533 SRC( movl 8(%esi), %ebx )
29534 SRC( movl 12(%esi), %edx )
29535 adcl %ebx, %eax
29536-DST( movl %ebx, 8(%edi) )
29537+DST( movl %ebx, %es:8(%edi) )
29538 adcl %edx, %eax
29539-DST( movl %edx, 12(%edi) )
29540+DST( movl %edx, %es:12(%edi) )
29541
29542 SRC( movl 16(%esi), %ebx )
29543 SRC( movl 20(%esi), %edx )
29544 adcl %ebx, %eax
29545-DST( movl %ebx, 16(%edi) )
29546+DST( movl %ebx, %es:16(%edi) )
29547 adcl %edx, %eax
29548-DST( movl %edx, 20(%edi) )
29549+DST( movl %edx, %es:20(%edi) )
29550
29551 SRC( movl 24(%esi), %ebx )
29552 SRC( movl 28(%esi), %edx )
29553 adcl %ebx, %eax
29554-DST( movl %ebx, 24(%edi) )
29555+DST( movl %ebx, %es:24(%edi) )
29556 adcl %edx, %eax
29557-DST( movl %edx, 28(%edi) )
29558+DST( movl %edx, %es:28(%edi) )
29559
29560 lea 32(%esi), %esi
29561 lea 32(%edi), %edi
29562@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29563 shrl $2, %edx # This clears CF
29564 SRC(3: movl (%esi), %ebx )
29565 adcl %ebx, %eax
29566-DST( movl %ebx, (%edi) )
29567+DST( movl %ebx, %es:(%edi) )
29568 lea 4(%esi), %esi
29569 lea 4(%edi), %edi
29570 dec %edx
29571@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29572 jb 5f
29573 SRC( movw (%esi), %cx )
29574 leal 2(%esi), %esi
29575-DST( movw %cx, (%edi) )
29576+DST( movw %cx, %es:(%edi) )
29577 leal 2(%edi), %edi
29578 je 6f
29579 shll $16,%ecx
29580 SRC(5: movb (%esi), %cl )
29581-DST( movb %cl, (%edi) )
29582+DST( movb %cl, %es:(%edi) )
29583 6: addl %ecx, %eax
29584 adcl $0, %eax
29585 7:
29586@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29587
29588 6001:
29589 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29590- movl $-EFAULT, (%ebx)
29591+ movl $-EFAULT, %ss:(%ebx)
29592
29593 # zero the complete destination - computing the rest
29594 # is too much work
29595@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29596
29597 6002:
29598 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29599- movl $-EFAULT,(%ebx)
29600+ movl $-EFAULT,%ss:(%ebx)
29601 jmp 5000b
29602
29603 .previous
29604
29605+ pushl_cfi %ss
29606+ popl_cfi %ds
29607+ pushl_cfi %ss
29608+ popl_cfi %es
29609 popl_cfi %ebx
29610 CFI_RESTORE ebx
29611 popl_cfi %esi
29612@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29613 popl_cfi %ecx # equivalent to addl $4,%esp
29614 ret
29615 CFI_ENDPROC
29616-ENDPROC(csum_partial_copy_generic)
29617+ENDPROC(csum_partial_copy_generic_to_user)
29618
29619 #else
29620
29621 /* Version for PentiumII/PPro */
29622
29623 #define ROUND1(x) \
29624+ nop; nop; nop; \
29625 SRC(movl x(%esi), %ebx ) ; \
29626 addl %ebx, %eax ; \
29627- DST(movl %ebx, x(%edi) ) ;
29628+ DST(movl %ebx, %es:x(%edi)) ;
29629
29630 #define ROUND(x) \
29631+ nop; nop; nop; \
29632 SRC(movl x(%esi), %ebx ) ; \
29633 adcl %ebx, %eax ; \
29634- DST(movl %ebx, x(%edi) ) ;
29635+ DST(movl %ebx, %es:x(%edi)) ;
29636
29637 #define ARGBASE 12
29638-
29639-ENTRY(csum_partial_copy_generic)
29640+
29641+ENTRY(csum_partial_copy_generic_to_user)
29642 CFI_STARTPROC
29643+
29644+#ifdef CONFIG_PAX_MEMORY_UDEREF
29645+ pushl_cfi %gs
29646+ popl_cfi %es
29647+ jmp csum_partial_copy_generic
29648+#endif
29649+
29650+ENTRY(csum_partial_copy_generic_from_user)
29651+
29652+#ifdef CONFIG_PAX_MEMORY_UDEREF
29653+ pushl_cfi %gs
29654+ popl_cfi %ds
29655+#endif
29656+
29657+ENTRY(csum_partial_copy_generic)
29658 pushl_cfi %ebx
29659 CFI_REL_OFFSET ebx, 0
29660 pushl_cfi %edi
29661@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29662 subl %ebx, %edi
29663 lea -1(%esi),%edx
29664 andl $-32,%edx
29665- lea 3f(%ebx,%ebx), %ebx
29666+ lea 3f(%ebx,%ebx,2), %ebx
29667 testl %esi, %esi
29668 jmp *%ebx
29669 1: addl $64,%esi
29670@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29671 jb 5f
29672 SRC( movw (%esi), %dx )
29673 leal 2(%esi), %esi
29674-DST( movw %dx, (%edi) )
29675+DST( movw %dx, %es:(%edi) )
29676 leal 2(%edi), %edi
29677 je 6f
29678 shll $16,%edx
29679 5:
29680 SRC( movb (%esi), %dl )
29681-DST( movb %dl, (%edi) )
29682+DST( movb %dl, %es:(%edi) )
29683 6: addl %edx, %eax
29684 adcl $0, %eax
29685 7:
29686 .section .fixup, "ax"
29687 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29688- movl $-EFAULT, (%ebx)
29689+ movl $-EFAULT, %ss:(%ebx)
29690 # zero the complete destination (computing the rest is too much work)
29691 movl ARGBASE+8(%esp),%edi # dst
29692 movl ARGBASE+12(%esp),%ecx # len
29693@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29694 rep; stosb
29695 jmp 7b
29696 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29697- movl $-EFAULT, (%ebx)
29698+ movl $-EFAULT, %ss:(%ebx)
29699 jmp 7b
29700 .previous
29701
29702+#ifdef CONFIG_PAX_MEMORY_UDEREF
29703+ pushl_cfi %ss
29704+ popl_cfi %ds
29705+ pushl_cfi %ss
29706+ popl_cfi %es
29707+#endif
29708+
29709 popl_cfi %esi
29710 CFI_RESTORE esi
29711 popl_cfi %edi
29712@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29713 CFI_RESTORE ebx
29714 ret
29715 CFI_ENDPROC
29716-ENDPROC(csum_partial_copy_generic)
29717+ENDPROC(csum_partial_copy_generic_to_user)
29718
29719 #undef ROUND
29720 #undef ROUND1
29721diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29722index f2145cf..cea889d 100644
29723--- a/arch/x86/lib/clear_page_64.S
29724+++ b/arch/x86/lib/clear_page_64.S
29725@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29726 movl $4096/8,%ecx
29727 xorl %eax,%eax
29728 rep stosq
29729+ pax_force_retaddr
29730 ret
29731 CFI_ENDPROC
29732 ENDPROC(clear_page_c)
29733@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29734 movl $4096,%ecx
29735 xorl %eax,%eax
29736 rep stosb
29737+ pax_force_retaddr
29738 ret
29739 CFI_ENDPROC
29740 ENDPROC(clear_page_c_e)
29741@@ -43,6 +45,7 @@ ENTRY(clear_page)
29742 leaq 64(%rdi),%rdi
29743 jnz .Lloop
29744 nop
29745+ pax_force_retaddr
29746 ret
29747 CFI_ENDPROC
29748 .Lclear_page_end:
29749@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29750
29751 #include <asm/cpufeature.h>
29752
29753- .section .altinstr_replacement,"ax"
29754+ .section .altinstr_replacement,"a"
29755 1: .byte 0xeb /* jmp <disp8> */
29756 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29757 2: .byte 0xeb /* jmp <disp8> */
29758diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29759index 40a1725..5d12ac4 100644
29760--- a/arch/x86/lib/cmpxchg16b_emu.S
29761+++ b/arch/x86/lib/cmpxchg16b_emu.S
29762@@ -8,6 +8,7 @@
29763 #include <linux/linkage.h>
29764 #include <asm/dwarf2.h>
29765 #include <asm/percpu.h>
29766+#include <asm/alternative-asm.h>
29767
29768 .text
29769
29770@@ -46,12 +47,14 @@ CFI_STARTPROC
29771 CFI_REMEMBER_STATE
29772 popfq_cfi
29773 mov $1, %al
29774+ pax_force_retaddr
29775 ret
29776
29777 CFI_RESTORE_STATE
29778 .Lnot_same:
29779 popfq_cfi
29780 xor %al,%al
29781+ pax_force_retaddr
29782 ret
29783
29784 CFI_ENDPROC
29785diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29786index 176cca6..e0d658e 100644
29787--- a/arch/x86/lib/copy_page_64.S
29788+++ b/arch/x86/lib/copy_page_64.S
29789@@ -9,6 +9,7 @@ copy_page_rep:
29790 CFI_STARTPROC
29791 movl $4096/8, %ecx
29792 rep movsq
29793+ pax_force_retaddr
29794 ret
29795 CFI_ENDPROC
29796 ENDPROC(copy_page_rep)
29797@@ -24,8 +25,8 @@ ENTRY(copy_page)
29798 CFI_ADJUST_CFA_OFFSET 2*8
29799 movq %rbx, (%rsp)
29800 CFI_REL_OFFSET rbx, 0
29801- movq %r12, 1*8(%rsp)
29802- CFI_REL_OFFSET r12, 1*8
29803+ movq %r13, 1*8(%rsp)
29804+ CFI_REL_OFFSET r13, 1*8
29805
29806 movl $(4096/64)-5, %ecx
29807 .p2align 4
29808@@ -38,7 +39,7 @@ ENTRY(copy_page)
29809 movq 0x8*4(%rsi), %r9
29810 movq 0x8*5(%rsi), %r10
29811 movq 0x8*6(%rsi), %r11
29812- movq 0x8*7(%rsi), %r12
29813+ movq 0x8*7(%rsi), %r13
29814
29815 prefetcht0 5*64(%rsi)
29816
29817@@ -49,7 +50,7 @@ ENTRY(copy_page)
29818 movq %r9, 0x8*4(%rdi)
29819 movq %r10, 0x8*5(%rdi)
29820 movq %r11, 0x8*6(%rdi)
29821- movq %r12, 0x8*7(%rdi)
29822+ movq %r13, 0x8*7(%rdi)
29823
29824 leaq 64 (%rsi), %rsi
29825 leaq 64 (%rdi), %rdi
29826@@ -68,7 +69,7 @@ ENTRY(copy_page)
29827 movq 0x8*4(%rsi), %r9
29828 movq 0x8*5(%rsi), %r10
29829 movq 0x8*6(%rsi), %r11
29830- movq 0x8*7(%rsi), %r12
29831+ movq 0x8*7(%rsi), %r13
29832
29833 movq %rax, 0x8*0(%rdi)
29834 movq %rbx, 0x8*1(%rdi)
29835@@ -77,7 +78,7 @@ ENTRY(copy_page)
29836 movq %r9, 0x8*4(%rdi)
29837 movq %r10, 0x8*5(%rdi)
29838 movq %r11, 0x8*6(%rdi)
29839- movq %r12, 0x8*7(%rdi)
29840+ movq %r13, 0x8*7(%rdi)
29841
29842 leaq 64(%rdi), %rdi
29843 leaq 64(%rsi), %rsi
29844@@ -85,10 +86,11 @@ ENTRY(copy_page)
29845
29846 movq (%rsp), %rbx
29847 CFI_RESTORE rbx
29848- movq 1*8(%rsp), %r12
29849- CFI_RESTORE r12
29850+ movq 1*8(%rsp), %r13
29851+ CFI_RESTORE r13
29852 addq $2*8, %rsp
29853 CFI_ADJUST_CFA_OFFSET -2*8
29854+ pax_force_retaddr
29855 ret
29856 .Lcopy_page_end:
29857 CFI_ENDPROC
29858@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29859
29860 #include <asm/cpufeature.h>
29861
29862- .section .altinstr_replacement,"ax"
29863+ .section .altinstr_replacement,"a"
29864 1: .byte 0xeb /* jmp <disp8> */
29865 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29866 2:
29867diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29868index dee945d..a84067b 100644
29869--- a/arch/x86/lib/copy_user_64.S
29870+++ b/arch/x86/lib/copy_user_64.S
29871@@ -18,31 +18,7 @@
29872 #include <asm/alternative-asm.h>
29873 #include <asm/asm.h>
29874 #include <asm/smap.h>
29875-
29876-/*
29877- * By placing feature2 after feature1 in altinstructions section, we logically
29878- * implement:
29879- * If CPU has feature2, jmp to alt2 is used
29880- * else if CPU has feature1, jmp to alt1 is used
29881- * else jmp to orig is used.
29882- */
29883- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29884-0:
29885- .byte 0xe9 /* 32bit jump */
29886- .long \orig-1f /* by default jump to orig */
29887-1:
29888- .section .altinstr_replacement,"ax"
29889-2: .byte 0xe9 /* near jump with 32bit immediate */
29890- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29891-3: .byte 0xe9 /* near jump with 32bit immediate */
29892- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29893- .previous
29894-
29895- .section .altinstructions,"a"
29896- altinstruction_entry 0b,2b,\feature1,5,5
29897- altinstruction_entry 0b,3b,\feature2,5,5
29898- .previous
29899- .endm
29900+#include <asm/pgtable.h>
29901
29902 .macro ALIGN_DESTINATION
29903 #ifdef FIX_ALIGNMENT
29904@@ -70,52 +46,6 @@
29905 #endif
29906 .endm
29907
29908-/* Standard copy_to_user with segment limit checking */
29909-ENTRY(_copy_to_user)
29910- CFI_STARTPROC
29911- GET_THREAD_INFO(%rax)
29912- movq %rdi,%rcx
29913- addq %rdx,%rcx
29914- jc bad_to_user
29915- cmpq TI_addr_limit(%rax),%rcx
29916- ja bad_to_user
29917- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29918- copy_user_generic_unrolled,copy_user_generic_string, \
29919- copy_user_enhanced_fast_string
29920- CFI_ENDPROC
29921-ENDPROC(_copy_to_user)
29922-
29923-/* Standard copy_from_user with segment limit checking */
29924-ENTRY(_copy_from_user)
29925- CFI_STARTPROC
29926- GET_THREAD_INFO(%rax)
29927- movq %rsi,%rcx
29928- addq %rdx,%rcx
29929- jc bad_from_user
29930- cmpq TI_addr_limit(%rax),%rcx
29931- ja bad_from_user
29932- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29933- copy_user_generic_unrolled,copy_user_generic_string, \
29934- copy_user_enhanced_fast_string
29935- CFI_ENDPROC
29936-ENDPROC(_copy_from_user)
29937-
29938- .section .fixup,"ax"
29939- /* must zero dest */
29940-ENTRY(bad_from_user)
29941-bad_from_user:
29942- CFI_STARTPROC
29943- movl %edx,%ecx
29944- xorl %eax,%eax
29945- rep
29946- stosb
29947-bad_to_user:
29948- movl %edx,%eax
29949- ret
29950- CFI_ENDPROC
29951-ENDPROC(bad_from_user)
29952- .previous
29953-
29954 /*
29955 * copy_user_generic_unrolled - memory copy with exception handling.
29956 * This version is for CPUs like P4 that don't have efficient micro
29957@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29958 */
29959 ENTRY(copy_user_generic_unrolled)
29960 CFI_STARTPROC
29961+ ASM_PAX_OPEN_USERLAND
29962 ASM_STAC
29963 cmpl $8,%edx
29964 jb 20f /* less then 8 bytes, go to byte copy loop */
29965@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29966 jnz 21b
29967 23: xor %eax,%eax
29968 ASM_CLAC
29969+ ASM_PAX_CLOSE_USERLAND
29970+ pax_force_retaddr
29971 ret
29972
29973 .section .fixup,"ax"
29974@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29975 */
29976 ENTRY(copy_user_generic_string)
29977 CFI_STARTPROC
29978+ ASM_PAX_OPEN_USERLAND
29979 ASM_STAC
29980 cmpl $8,%edx
29981 jb 2f /* less than 8 bytes, go to byte copy loop */
29982@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29983 movsb
29984 xorl %eax,%eax
29985 ASM_CLAC
29986+ ASM_PAX_CLOSE_USERLAND
29987+ pax_force_retaddr
29988 ret
29989
29990 .section .fixup,"ax"
29991@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29992 */
29993 ENTRY(copy_user_enhanced_fast_string)
29994 CFI_STARTPROC
29995+ ASM_PAX_OPEN_USERLAND
29996 ASM_STAC
29997 movl %edx,%ecx
29998 1: rep
29999 movsb
30000 xorl %eax,%eax
30001 ASM_CLAC
30002+ ASM_PAX_CLOSE_USERLAND
30003+ pax_force_retaddr
30004 ret
30005
30006 .section .fixup,"ax"
30007diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30008index 6a4f43c..c70fb52 100644
30009--- a/arch/x86/lib/copy_user_nocache_64.S
30010+++ b/arch/x86/lib/copy_user_nocache_64.S
30011@@ -8,6 +8,7 @@
30012
30013 #include <linux/linkage.h>
30014 #include <asm/dwarf2.h>
30015+#include <asm/alternative-asm.h>
30016
30017 #define FIX_ALIGNMENT 1
30018
30019@@ -16,6 +17,7 @@
30020 #include <asm/thread_info.h>
30021 #include <asm/asm.h>
30022 #include <asm/smap.h>
30023+#include <asm/pgtable.h>
30024
30025 .macro ALIGN_DESTINATION
30026 #ifdef FIX_ALIGNMENT
30027@@ -49,6 +51,16 @@
30028 */
30029 ENTRY(__copy_user_nocache)
30030 CFI_STARTPROC
30031+
30032+#ifdef CONFIG_PAX_MEMORY_UDEREF
30033+ mov pax_user_shadow_base,%rcx
30034+ cmp %rcx,%rsi
30035+ jae 1f
30036+ add %rcx,%rsi
30037+1:
30038+#endif
30039+
30040+ ASM_PAX_OPEN_USERLAND
30041 ASM_STAC
30042 cmpl $8,%edx
30043 jb 20f /* less then 8 bytes, go to byte copy loop */
30044@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30045 jnz 21b
30046 23: xorl %eax,%eax
30047 ASM_CLAC
30048+ ASM_PAX_CLOSE_USERLAND
30049 sfence
30050+ pax_force_retaddr
30051 ret
30052
30053 .section .fixup,"ax"
30054diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30055index 2419d5f..fe52d0e 100644
30056--- a/arch/x86/lib/csum-copy_64.S
30057+++ b/arch/x86/lib/csum-copy_64.S
30058@@ -9,6 +9,7 @@
30059 #include <asm/dwarf2.h>
30060 #include <asm/errno.h>
30061 #include <asm/asm.h>
30062+#include <asm/alternative-asm.h>
30063
30064 /*
30065 * Checksum copy with exception handling.
30066@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30067 CFI_ADJUST_CFA_OFFSET 7*8
30068 movq %rbx, 2*8(%rsp)
30069 CFI_REL_OFFSET rbx, 2*8
30070- movq %r12, 3*8(%rsp)
30071- CFI_REL_OFFSET r12, 3*8
30072+ movq %r15, 3*8(%rsp)
30073+ CFI_REL_OFFSET r15, 3*8
30074 movq %r14, 4*8(%rsp)
30075 CFI_REL_OFFSET r14, 4*8
30076 movq %r13, 5*8(%rsp)
30077@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30078 movl %edx, %ecx
30079
30080 xorl %r9d, %r9d
30081- movq %rcx, %r12
30082+ movq %rcx, %r15
30083
30084- shrq $6, %r12
30085+ shrq $6, %r15
30086 jz .Lhandle_tail /* < 64 */
30087
30088 clc
30089
30090 /* main loop. clear in 64 byte blocks */
30091 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30092- /* r11: temp3, rdx: temp4, r12 loopcnt */
30093+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30094 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30095 .p2align 4
30096 .Lloop:
30097@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30098 adcq %r14, %rax
30099 adcq %r13, %rax
30100
30101- decl %r12d
30102+ decl %r15d
30103
30104 dest
30105 movq %rbx, (%rsi)
30106@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30107 .Lende:
30108 movq 2*8(%rsp), %rbx
30109 CFI_RESTORE rbx
30110- movq 3*8(%rsp), %r12
30111- CFI_RESTORE r12
30112+ movq 3*8(%rsp), %r15
30113+ CFI_RESTORE r15
30114 movq 4*8(%rsp), %r14
30115 CFI_RESTORE r14
30116 movq 5*8(%rsp), %r13
30117@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30118 CFI_RESTORE rbp
30119 addq $7*8, %rsp
30120 CFI_ADJUST_CFA_OFFSET -7*8
30121+ pax_force_retaddr
30122 ret
30123 CFI_RESTORE_STATE
30124
30125diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30126index 1318f75..44c30fd 100644
30127--- a/arch/x86/lib/csum-wrappers_64.c
30128+++ b/arch/x86/lib/csum-wrappers_64.c
30129@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30130 len -= 2;
30131 }
30132 }
30133+ pax_open_userland();
30134 stac();
30135- isum = csum_partial_copy_generic((__force const void *)src,
30136+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30137 dst, len, isum, errp, NULL);
30138 clac();
30139+ pax_close_userland();
30140 if (unlikely(*errp))
30141 goto out_err;
30142
30143@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30144 }
30145
30146 *errp = 0;
30147+ pax_open_userland();
30148 stac();
30149- ret = csum_partial_copy_generic(src, (void __force *)dst,
30150+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30151 len, isum, NULL, errp);
30152 clac();
30153+ pax_close_userland();
30154 return ret;
30155 }
30156 EXPORT_SYMBOL(csum_partial_copy_to_user);
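/*
 * Editorial aside: the wrappers above always bracket the raw checksum
 * copy with pax_open_userland()/pax_close_userland() around the SMAP
 * stac()/clac() pair, and rebase the pointer through ____m(). A small
 * helper (mine, not the patch's) makes the strict pairing explicit:
 */
static __wsum csum_copy_from_user_demo(const void __user *src, void *dst,
				       int len, __wsum isum, int *errp)
{
	__wsum ret;

	pax_open_userland();
	stac();
	ret = csum_partial_copy_generic((const void __force_kernel *)____m(src),
					dst, len, isum, errp, NULL);
	clac();
	pax_close_userland();
	return ret;
}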
30157diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30158index a451235..a74bfa3 100644
30159--- a/arch/x86/lib/getuser.S
30160+++ b/arch/x86/lib/getuser.S
30161@@ -33,17 +33,40 @@
30162 #include <asm/thread_info.h>
30163 #include <asm/asm.h>
30164 #include <asm/smap.h>
30165+#include <asm/segment.h>
30166+#include <asm/pgtable.h>
30167+#include <asm/alternative-asm.h>
30168+
30169+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30170+#define __copyuser_seg gs;
30171+#else
30172+#define __copyuser_seg
30173+#endif
30174
30175 .text
30176 ENTRY(__get_user_1)
30177 CFI_STARTPROC
30178+
30179+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30180 GET_THREAD_INFO(%_ASM_DX)
30181 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30182 jae bad_get_user
30183+
30184+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30185+ mov pax_user_shadow_base,%_ASM_DX
30186+ cmp %_ASM_DX,%_ASM_AX
30187+ jae 1234f
30188+ add %_ASM_DX,%_ASM_AX
30189+1234:
30190+#endif
30191+
30192+#endif
30193+
30194 ASM_STAC
30195-1: movzbl (%_ASM_AX),%edx
30196+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30197 xor %eax,%eax
30198 ASM_CLAC
30199+ pax_force_retaddr
30200 ret
30201 CFI_ENDPROC
30202 ENDPROC(__get_user_1)
30203@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30204 ENTRY(__get_user_2)
30205 CFI_STARTPROC
30206 add $1,%_ASM_AX
30207+
30208+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30209 jc bad_get_user
30210 GET_THREAD_INFO(%_ASM_DX)
30211 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30212 jae bad_get_user
30213+
30214+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30215+ mov pax_user_shadow_base,%_ASM_DX
30216+ cmp %_ASM_DX,%_ASM_AX
30217+ jae 1234f
30218+ add %_ASM_DX,%_ASM_AX
30219+1234:
30220+#endif
30221+
30222+#endif
30223+
30224 ASM_STAC
30225-2: movzwl -1(%_ASM_AX),%edx
30226+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30227 xor %eax,%eax
30228 ASM_CLAC
30229+ pax_force_retaddr
30230 ret
30231 CFI_ENDPROC
30232 ENDPROC(__get_user_2)
30233@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30234 ENTRY(__get_user_4)
30235 CFI_STARTPROC
30236 add $3,%_ASM_AX
30237+
30238+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30239 jc bad_get_user
30240 GET_THREAD_INFO(%_ASM_DX)
30241 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30242 jae bad_get_user
30243+
30244+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30245+ mov pax_user_shadow_base,%_ASM_DX
30246+ cmp %_ASM_DX,%_ASM_AX
30247+ jae 1234f
30248+ add %_ASM_DX,%_ASM_AX
30249+1234:
30250+#endif
30251+
30252+#endif
30253+
30254 ASM_STAC
30255-3: movl -3(%_ASM_AX),%edx
30256+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30257 xor %eax,%eax
30258 ASM_CLAC
30259+ pax_force_retaddr
30260 ret
30261 CFI_ENDPROC
30262 ENDPROC(__get_user_4)
30263@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30264 GET_THREAD_INFO(%_ASM_DX)
30265 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30266 jae bad_get_user
30267+
30268+#ifdef CONFIG_PAX_MEMORY_UDEREF
30269+ mov pax_user_shadow_base,%_ASM_DX
30270+ cmp %_ASM_DX,%_ASM_AX
30271+ jae 1234f
30272+ add %_ASM_DX,%_ASM_AX
30273+1234:
30274+#endif
30275+
30276 ASM_STAC
30277 4: movq -7(%_ASM_AX),%rdx
30278 xor %eax,%eax
30279 ASM_CLAC
30280+ pax_force_retaddr
30281 ret
30282 #else
30283 add $7,%_ASM_AX
30284@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30285 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30286 jae bad_get_user_8
30287 ASM_STAC
30288-4: movl -7(%_ASM_AX),%edx
30289-5: movl -3(%_ASM_AX),%ecx
30290+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30291+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30292 xor %eax,%eax
30293 ASM_CLAC
30294+ pax_force_retaddr
30295 ret
30296 #endif
30297 CFI_ENDPROC
30298@@ -113,6 +175,7 @@ bad_get_user:
30299 xor %edx,%edx
30300 mov $(-EFAULT),%_ASM_AX
30301 ASM_CLAC
30302+ pax_force_retaddr
30303 ret
30304 CFI_ENDPROC
30305 END(bad_get_user)
30306@@ -124,6 +187,7 @@ bad_get_user_8:
30307 xor %ecx,%ecx
30308 mov $(-EFAULT),%_ASM_AX
30309 ASM_CLAC
30310+ pax_force_retaddr
30311 ret
30312 CFI_ENDPROC
30313 END(bad_get_user_8)
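
Note on the __get_user_N hunks above: under amd64 UDEREF, userland is only
reachable through a shifted alias that starts at the runtime variable
pax_user_shadow_base, and the added cmp/jae/add sequence moves any address
below that base into the alias before the access. A minimal C restatement of
that remap, assuming the same runtime variable (the stubs do it in assembly
with %_ASM_DX as scratch):

#include <stdint.h>

static uintptr_t pax_user_shadow_base;	/* set up at boot in the real kernel */

/* remap a userland address into the UDEREF shadow alias, as done by the
 * cmp/jae/add sequence in __get_user_1..8 above */
static inline uintptr_t uderef_remap(uintptr_t addr)
{
	if (addr < pax_user_shadow_base)	/* jae 1234f taken otherwise */
		addr += pax_user_shadow_base;
	return addr;
}

On i386 the same configuration instead prefixes the access with a segment
override (__copyuser_seg expands to "gs;"), letting the segment limit do the
check in hardware, which is why the 32-bit UDEREF paths skip the
TI_addr_limit comparison entirely.
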
30314diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30315index 85994f5..9929d7f 100644
30316--- a/arch/x86/lib/insn.c
30317+++ b/arch/x86/lib/insn.c
30318@@ -20,8 +20,10 @@
30319
30320 #ifdef __KERNEL__
30321 #include <linux/string.h>
30322+#include <asm/pgtable_types.h>
30323 #else
30324 #include <string.h>
30325+#define ktla_ktva(addr) addr
30326 #endif
30327 #include <asm/inat.h>
30328 #include <asm/insn.h>
30329@@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30330 buf_len = MAX_INSN_SIZE;
30331
30332 memset(insn, 0, sizeof(*insn));
30333- insn->kaddr = kaddr;
30334- insn->end_kaddr = kaddr + buf_len;
30335- insn->next_byte = kaddr;
30336+ insn->kaddr = ktla_ktva(kaddr);
30337+ insn->end_kaddr = insn->kaddr + buf_len;
30338+ insn->next_byte = insn->kaddr;
30339 insn->x86_64 = x86_64 ? 1 : 0;
30340 insn->opnd_bytes = 4;
30341 if (x86_64)
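
Note on the insn.c hunk: the decoder may be handed a kernel text address, and
under i386 KERNEXEC the text is readable through an alias at a fixed offset,
so insn_init() now funnels kaddr through ktla_ktva() and derives end_kaddr and
next_byte from the already-translated pointer, keeping all three in the same
mapping. A plausible shape of the macro, assuming the PaX i386 layout
(__KERNEL_TEXT_OFFSET is the PaX name for that offset; elsewhere it is the
identity, as the userspace fallback in the hunk shows):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
#else
#define ktla_ktva(addr) (addr)
#endif
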
30342diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30343index 05a95e7..326f2fa 100644
30344--- a/arch/x86/lib/iomap_copy_64.S
30345+++ b/arch/x86/lib/iomap_copy_64.S
30346@@ -17,6 +17,7 @@
30347
30348 #include <linux/linkage.h>
30349 #include <asm/dwarf2.h>
30350+#include <asm/alternative-asm.h>
30351
30352 /*
30353 * override generic version in lib/iomap_copy.c
30354@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30355 CFI_STARTPROC
30356 movl %edx,%ecx
30357 rep movsd
30358+ pax_force_retaddr
30359 ret
30360 CFI_ENDPROC
30361 ENDPROC(__iowrite32_copy)
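
Note on pax_force_retaddr, which this iomap_copy_64.S hunk (and every other
.S hunk throughout this patch) inserts before ret: it is a macro from the PaX
alternative-asm.h that mangles the saved return address so it can no longer
point into userland. A conceptual C model of one published variant, assuming
it sets the top bit so a corrupted return address faults instead of executing
attacker-mapped user memory (helper name hypothetical):

#include <stdint.h>

/* conceptual effect of pax_force_retaddr (bts variant): force the saved
 * return address out of the userland half of the address space */
static inline void force_kernel_retaddr(uint64_t *saved_ret)
{
	*saved_ret |= 1ULL << 63;
}
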
30362diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30363index 89b53c9..97357ca 100644
30364--- a/arch/x86/lib/memcpy_64.S
30365+++ b/arch/x86/lib/memcpy_64.S
30366@@ -24,7 +24,7 @@
30367 * This gets patched over the unrolled variant (below) via the
30368 * alternative instructions framework:
30369 */
30370- .section .altinstr_replacement, "ax", @progbits
30371+ .section .altinstr_replacement, "a", @progbits
30372 .Lmemcpy_c:
30373 movq %rdi, %rax
30374 movq %rdx, %rcx
30375@@ -33,6 +33,7 @@
30376 rep movsq
30377 movl %edx, %ecx
30378 rep movsb
30379+ pax_force_retaddr
30380 ret
30381 .Lmemcpy_e:
30382 .previous
30383@@ -44,11 +45,12 @@
30384 * This gets patched over the unrolled variant (below) via the
30385 * alternative instructions framework:
30386 */
30387- .section .altinstr_replacement, "ax", @progbits
30388+ .section .altinstr_replacement, "a", @progbits
30389 .Lmemcpy_c_e:
30390 movq %rdi, %rax
30391 movq %rdx, %rcx
30392 rep movsb
30393+ pax_force_retaddr
30394 ret
30395 .Lmemcpy_e_e:
30396 .previous
30397@@ -138,6 +140,7 @@ ENTRY(memcpy)
30398 movq %r9, 1*8(%rdi)
30399 movq %r10, -2*8(%rdi, %rdx)
30400 movq %r11, -1*8(%rdi, %rdx)
30401+ pax_force_retaddr
30402 retq
30403 .p2align 4
30404 .Lless_16bytes:
30405@@ -150,6 +153,7 @@ ENTRY(memcpy)
30406 movq -1*8(%rsi, %rdx), %r9
30407 movq %r8, 0*8(%rdi)
30408 movq %r9, -1*8(%rdi, %rdx)
30409+ pax_force_retaddr
30410 retq
30411 .p2align 4
30412 .Lless_8bytes:
30413@@ -163,6 +167,7 @@ ENTRY(memcpy)
30414 movl -4(%rsi, %rdx), %r8d
30415 movl %ecx, (%rdi)
30416 movl %r8d, -4(%rdi, %rdx)
30417+ pax_force_retaddr
30418 retq
30419 .p2align 4
30420 .Lless_3bytes:
30421@@ -181,6 +186,7 @@ ENTRY(memcpy)
30422 movb %cl, (%rdi)
30423
30424 .Lend:
30425+ pax_force_retaddr
30426 retq
30427 CFI_ENDPROC
30428 ENDPROC(memcpy)
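
Note on the memcpy_64.S section-flag change (repeated for memmove_64.S and
memset_64.S below): .altinstr_replacement goes from "ax" to "a" because its
bytes are never executed in place; apply_alternatives() copies them over the
original instructions at boot, so under KERNEXEC the section can stay
non-executable and W^X holds for it. A simplified model of that consumption,
with field names only loosely following struct alt_instr (the real entries
store relative offsets and handle padding):

#include <stdint.h>
#include <string.h>

struct alt_instr_model {
	uint8_t *instr;		/* original instruction location */
	uint8_t *replacement;	/* bytes in .altinstr_replacement */
	uint8_t  len;
};

static void apply_one(const struct alt_instr_model *a, int cpu_has_feature)
{
	if (!cpu_has_feature)
		return;
	/* the replacement bytes are only read here, never jumped to,
	 * which is why the section no longer needs the "x" flag */
	memcpy(a->instr, a->replacement, a->len);
}
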
30429diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30430index 9c4b530..830b77a 100644
30431--- a/arch/x86/lib/memmove_64.S
30432+++ b/arch/x86/lib/memmove_64.S
30433@@ -205,14 +205,16 @@ ENTRY(__memmove)
30434 movb (%rsi), %r11b
30435 movb %r11b, (%rdi)
30436 13:
30437+ pax_force_retaddr
30438 retq
30439 CFI_ENDPROC
30440
30441- .section .altinstr_replacement,"ax"
30442+ .section .altinstr_replacement,"a"
30443 .Lmemmove_begin_forward_efs:
30444 /* Forward moving data. */
30445 movq %rdx, %rcx
30446 rep movsb
30447+ pax_force_retaddr
30448 retq
30449 .Lmemmove_end_forward_efs:
30450 .previous
30451diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30452index 6f44935..fbf5f6d 100644
30453--- a/arch/x86/lib/memset_64.S
30454+++ b/arch/x86/lib/memset_64.S
30455@@ -16,7 +16,7 @@
30456 *
30457 * rax original destination
30458 */
30459- .section .altinstr_replacement, "ax", @progbits
30460+ .section .altinstr_replacement, "a", @progbits
30461 .Lmemset_c:
30462 movq %rdi,%r9
30463 movq %rdx,%rcx
30464@@ -30,6 +30,7 @@
30465 movl %edx,%ecx
30466 rep stosb
30467 movq %r9,%rax
30468+ pax_force_retaddr
30469 ret
30470 .Lmemset_e:
30471 .previous
30472@@ -45,13 +46,14 @@
30473 *
30474 * rax original destination
30475 */
30476- .section .altinstr_replacement, "ax", @progbits
30477+ .section .altinstr_replacement, "a", @progbits
30478 .Lmemset_c_e:
30479 movq %rdi,%r9
30480 movb %sil,%al
30481 movq %rdx,%rcx
30482 rep stosb
30483 movq %r9,%rax
30484+ pax_force_retaddr
30485 ret
30486 .Lmemset_e_e:
30487 .previous
30488@@ -120,6 +122,7 @@ ENTRY(__memset)
30489
30490 .Lende:
30491 movq %r10,%rax
30492+ pax_force_retaddr
30493 ret
30494
30495 CFI_RESTORE_STATE
30496diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30497index c9f2d9b..e7fd2c0 100644
30498--- a/arch/x86/lib/mmx_32.c
30499+++ b/arch/x86/lib/mmx_32.c
30500@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30501 {
30502 void *p;
30503 int i;
30504+ unsigned long cr0;
30505
30506 if (unlikely(in_interrupt()))
30507 return __memcpy(to, from, len);
30508@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30509 kernel_fpu_begin();
30510
30511 __asm__ __volatile__ (
30512- "1: prefetch (%0)\n" /* This set is 28 bytes */
30513- " prefetch 64(%0)\n"
30514- " prefetch 128(%0)\n"
30515- " prefetch 192(%0)\n"
30516- " prefetch 256(%0)\n"
30517+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30518+ " prefetch 64(%1)\n"
30519+ " prefetch 128(%1)\n"
30520+ " prefetch 192(%1)\n"
30521+ " prefetch 256(%1)\n"
30522 "2: \n"
30523 ".section .fixup, \"ax\"\n"
30524- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30525+ "3: \n"
30526+
30527+#ifdef CONFIG_PAX_KERNEXEC
30528+ " movl %%cr0, %0\n"
30529+ " movl %0, %%eax\n"
30530+ " andl $0xFFFEFFFF, %%eax\n"
30531+ " movl %%eax, %%cr0\n"
30532+#endif
30533+
30534+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30535+
30536+#ifdef CONFIG_PAX_KERNEXEC
30537+ " movl %0, %%cr0\n"
30538+#endif
30539+
30540 " jmp 2b\n"
30541 ".previous\n"
30542 _ASM_EXTABLE(1b, 3b)
30543- : : "r" (from));
30544+ : "=&r" (cr0) : "r" (from) : "ax");
30545
30546 for ( ; i > 5; i--) {
30547 __asm__ __volatile__ (
30548- "1: prefetch 320(%0)\n"
30549- "2: movq (%0), %%mm0\n"
30550- " movq 8(%0), %%mm1\n"
30551- " movq 16(%0), %%mm2\n"
30552- " movq 24(%0), %%mm3\n"
30553- " movq %%mm0, (%1)\n"
30554- " movq %%mm1, 8(%1)\n"
30555- " movq %%mm2, 16(%1)\n"
30556- " movq %%mm3, 24(%1)\n"
30557- " movq 32(%0), %%mm0\n"
30558- " movq 40(%0), %%mm1\n"
30559- " movq 48(%0), %%mm2\n"
30560- " movq 56(%0), %%mm3\n"
30561- " movq %%mm0, 32(%1)\n"
30562- " movq %%mm1, 40(%1)\n"
30563- " movq %%mm2, 48(%1)\n"
30564- " movq %%mm3, 56(%1)\n"
30565+ "1: prefetch 320(%1)\n"
30566+ "2: movq (%1), %%mm0\n"
30567+ " movq 8(%1), %%mm1\n"
30568+ " movq 16(%1), %%mm2\n"
30569+ " movq 24(%1), %%mm3\n"
30570+ " movq %%mm0, (%2)\n"
30571+ " movq %%mm1, 8(%2)\n"
30572+ " movq %%mm2, 16(%2)\n"
30573+ " movq %%mm3, 24(%2)\n"
30574+ " movq 32(%1), %%mm0\n"
30575+ " movq 40(%1), %%mm1\n"
30576+ " movq 48(%1), %%mm2\n"
30577+ " movq 56(%1), %%mm3\n"
30578+ " movq %%mm0, 32(%2)\n"
30579+ " movq %%mm1, 40(%2)\n"
30580+ " movq %%mm2, 48(%2)\n"
30581+ " movq %%mm3, 56(%2)\n"
30582 ".section .fixup, \"ax\"\n"
30583- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30584+ "3:\n"
30585+
30586+#ifdef CONFIG_PAX_KERNEXEC
30587+ " movl %%cr0, %0\n"
30588+ " movl %0, %%eax\n"
30589+ " andl $0xFFFEFFFF, %%eax\n"
30590+ " movl %%eax, %%cr0\n"
30591+#endif
30592+
30593+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30594+
30595+#ifdef CONFIG_PAX_KERNEXEC
30596+ " movl %0, %%cr0\n"
30597+#endif
30598+
30599 " jmp 2b\n"
30600 ".previous\n"
30601 _ASM_EXTABLE(1b, 3b)
30602- : : "r" (from), "r" (to) : "memory");
30603+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30604
30605 from += 64;
30606 to += 64;
30607@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30608 static void fast_copy_page(void *to, void *from)
30609 {
30610 int i;
30611+ unsigned long cr0;
30612
30613 kernel_fpu_begin();
30614
30615@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30616 * but that is for later. -AV
30617 */
30618 __asm__ __volatile__(
30619- "1: prefetch (%0)\n"
30620- " prefetch 64(%0)\n"
30621- " prefetch 128(%0)\n"
30622- " prefetch 192(%0)\n"
30623- " prefetch 256(%0)\n"
30624+ "1: prefetch (%1)\n"
30625+ " prefetch 64(%1)\n"
30626+ " prefetch 128(%1)\n"
30627+ " prefetch 192(%1)\n"
30628+ " prefetch 256(%1)\n"
30629 "2: \n"
30630 ".section .fixup, \"ax\"\n"
30631- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30632+ "3: \n"
30633+
30634+#ifdef CONFIG_PAX_KERNEXEC
30635+ " movl %%cr0, %0\n"
30636+ " movl %0, %%eax\n"
30637+ " andl $0xFFFEFFFF, %%eax\n"
30638+ " movl %%eax, %%cr0\n"
30639+#endif
30640+
30641+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30642+
30643+#ifdef CONFIG_PAX_KERNEXEC
30644+ " movl %0, %%cr0\n"
30645+#endif
30646+
30647 " jmp 2b\n"
30648 ".previous\n"
30649- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30650+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30651
30652 for (i = 0; i < (4096-320)/64; i++) {
30653 __asm__ __volatile__ (
30654- "1: prefetch 320(%0)\n"
30655- "2: movq (%0), %%mm0\n"
30656- " movntq %%mm0, (%1)\n"
30657- " movq 8(%0), %%mm1\n"
30658- " movntq %%mm1, 8(%1)\n"
30659- " movq 16(%0), %%mm2\n"
30660- " movntq %%mm2, 16(%1)\n"
30661- " movq 24(%0), %%mm3\n"
30662- " movntq %%mm3, 24(%1)\n"
30663- " movq 32(%0), %%mm4\n"
30664- " movntq %%mm4, 32(%1)\n"
30665- " movq 40(%0), %%mm5\n"
30666- " movntq %%mm5, 40(%1)\n"
30667- " movq 48(%0), %%mm6\n"
30668- " movntq %%mm6, 48(%1)\n"
30669- " movq 56(%0), %%mm7\n"
30670- " movntq %%mm7, 56(%1)\n"
30671+ "1: prefetch 320(%1)\n"
30672+ "2: movq (%1), %%mm0\n"
30673+ " movntq %%mm0, (%2)\n"
30674+ " movq 8(%1), %%mm1\n"
30675+ " movntq %%mm1, 8(%2)\n"
30676+ " movq 16(%1), %%mm2\n"
30677+ " movntq %%mm2, 16(%2)\n"
30678+ " movq 24(%1), %%mm3\n"
30679+ " movntq %%mm3, 24(%2)\n"
30680+ " movq 32(%1), %%mm4\n"
30681+ " movntq %%mm4, 32(%2)\n"
30682+ " movq 40(%1), %%mm5\n"
30683+ " movntq %%mm5, 40(%2)\n"
30684+ " movq 48(%1), %%mm6\n"
30685+ " movntq %%mm6, 48(%2)\n"
30686+ " movq 56(%1), %%mm7\n"
30687+ " movntq %%mm7, 56(%2)\n"
30688 ".section .fixup, \"ax\"\n"
30689- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30690+ "3:\n"
30691+
30692+#ifdef CONFIG_PAX_KERNEXEC
30693+ " movl %%cr0, %0\n"
30694+ " movl %0, %%eax\n"
30695+ " andl $0xFFFEFFFF, %%eax\n"
30696+ " movl %%eax, %%cr0\n"
30697+#endif
30698+
30699+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30700+
30701+#ifdef CONFIG_PAX_KERNEXEC
30702+ " movl %0, %%cr0\n"
30703+#endif
30704+
30705 " jmp 2b\n"
30706 ".previous\n"
30707- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30708+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30709
30710 from += 64;
30711 to += 64;
30712@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30713 static void fast_copy_page(void *to, void *from)
30714 {
30715 int i;
30716+ unsigned long cr0;
30717
30718 kernel_fpu_begin();
30719
30720 __asm__ __volatile__ (
30721- "1: prefetch (%0)\n"
30722- " prefetch 64(%0)\n"
30723- " prefetch 128(%0)\n"
30724- " prefetch 192(%0)\n"
30725- " prefetch 256(%0)\n"
30726+ "1: prefetch (%1)\n"
30727+ " prefetch 64(%1)\n"
30728+ " prefetch 128(%1)\n"
30729+ " prefetch 192(%1)\n"
30730+ " prefetch 256(%1)\n"
30731 "2: \n"
30732 ".section .fixup, \"ax\"\n"
30733- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30734+ "3: \n"
30735+
30736+#ifdef CONFIG_PAX_KERNEXEC
30737+ " movl %%cr0, %0\n"
30738+ " movl %0, %%eax\n"
30739+ " andl $0xFFFEFFFF, %%eax\n"
30740+ " movl %%eax, %%cr0\n"
30741+#endif
30742+
30743+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30744+
30745+#ifdef CONFIG_PAX_KERNEXEC
30746+ " movl %0, %%cr0\n"
30747+#endif
30748+
30749 " jmp 2b\n"
30750 ".previous\n"
30751- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30752+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30753
30754 for (i = 0; i < 4096/64; i++) {
30755 __asm__ __volatile__ (
30756- "1: prefetch 320(%0)\n"
30757- "2: movq (%0), %%mm0\n"
30758- " movq 8(%0), %%mm1\n"
30759- " movq 16(%0), %%mm2\n"
30760- " movq 24(%0), %%mm3\n"
30761- " movq %%mm0, (%1)\n"
30762- " movq %%mm1, 8(%1)\n"
30763- " movq %%mm2, 16(%1)\n"
30764- " movq %%mm3, 24(%1)\n"
30765- " movq 32(%0), %%mm0\n"
30766- " movq 40(%0), %%mm1\n"
30767- " movq 48(%0), %%mm2\n"
30768- " movq 56(%0), %%mm3\n"
30769- " movq %%mm0, 32(%1)\n"
30770- " movq %%mm1, 40(%1)\n"
30771- " movq %%mm2, 48(%1)\n"
30772- " movq %%mm3, 56(%1)\n"
30773+ "1: prefetch 320(%1)\n"
30774+ "2: movq (%1), %%mm0\n"
30775+ " movq 8(%1), %%mm1\n"
30776+ " movq 16(%1), %%mm2\n"
30777+ " movq 24(%1), %%mm3\n"
30778+ " movq %%mm0, (%2)\n"
30779+ " movq %%mm1, 8(%2)\n"
30780+ " movq %%mm2, 16(%2)\n"
30781+ " movq %%mm3, 24(%2)\n"
30782+ " movq 32(%1), %%mm0\n"
30783+ " movq 40(%1), %%mm1\n"
30784+ " movq 48(%1), %%mm2\n"
30785+ " movq 56(%1), %%mm3\n"
30786+ " movq %%mm0, 32(%2)\n"
30787+ " movq %%mm1, 40(%2)\n"
30788+ " movq %%mm2, 48(%2)\n"
30789+ " movq %%mm3, 56(%2)\n"
30790 ".section .fixup, \"ax\"\n"
30791- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30792+ "3:\n"
30793+
30794+#ifdef CONFIG_PAX_KERNEXEC
30795+ " movl %%cr0, %0\n"
30796+ " movl %0, %%eax\n"
30797+ " andl $0xFFFEFFFF, %%eax\n"
30798+ " movl %%eax, %%cr0\n"
30799+#endif
30800+
30801+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30802+
30803+#ifdef CONFIG_PAX_KERNEXEC
30804+ " movl %0, %%cr0\n"
30805+#endif
30806+
30807 " jmp 2b\n"
30808 ".previous\n"
30809 _ASM_EXTABLE(1b, 3b)
30810- : : "r" (from), "r" (to) : "memory");
30811+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30812
30813 from += 64;
30814 to += 64;
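
Note on the mmx_32.c hunks: the fixup paths are self-modifying; "movw
$0x1AEB, 1b" stores the bytes EB 1A (jmp +26) over the prefetch at label 1,
and 0x05EB likewise encodes jmp +5, so a faulting prefetch is patched into a
skip on CPUs that choke on it. Under KERNEXEC that text is write-protected,
hence the new bracket that clears CR0.WP (bit 16, mask 0xFFFEFFFF) around the
store and restores the saved value through the new cr0 output operand, with
%eax added to the clobbers. A standalone sketch of the bracket, assuming
ring-0 execution:

/* clear CR0.WP so supervisor writes to read-only text succeed; returns
 * the old CR0 for wp_restore(). 0xFFFEFFFF clears bit 16 only. */
static inline unsigned long wp_disable(void)
{
	unsigned long cr0;
	asm volatile("movl %%cr0, %0" : "=r" (cr0));
	asm volatile("movl %0, %%cr0" : : "r" (cr0 & 0xFFFEFFFFUL));
	return cr0;
}

static inline void wp_restore(unsigned long cr0)
{
	asm volatile("movl %0, %%cr0" : : "r" (cr0));
}
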
30815diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30816index f6d13ee..d789440 100644
30817--- a/arch/x86/lib/msr-reg.S
30818+++ b/arch/x86/lib/msr-reg.S
30819@@ -3,6 +3,7 @@
30820 #include <asm/dwarf2.h>
30821 #include <asm/asm.h>
30822 #include <asm/msr.h>
30823+#include <asm/alternative-asm.h>
30824
30825 #ifdef CONFIG_X86_64
30826 /*
30827@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30828 movl %edi, 28(%r10)
30829 popq_cfi %rbp
30830 popq_cfi %rbx
30831+ pax_force_retaddr
30832 ret
30833 3:
30834 CFI_RESTORE_STATE
30835diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30836index fc6ba17..14ad9a5 100644
30837--- a/arch/x86/lib/putuser.S
30838+++ b/arch/x86/lib/putuser.S
30839@@ -16,7 +16,9 @@
30840 #include <asm/errno.h>
30841 #include <asm/asm.h>
30842 #include <asm/smap.h>
30843-
30844+#include <asm/segment.h>
30845+#include <asm/pgtable.h>
30846+#include <asm/alternative-asm.h>
30847
30848 /*
30849 * __put_user_X
30850@@ -30,57 +32,125 @@
30851 * as they get called from within inline assembly.
30852 */
30853
30854-#define ENTER CFI_STARTPROC ; \
30855- GET_THREAD_INFO(%_ASM_BX)
30856-#define EXIT ASM_CLAC ; \
30857- ret ; \
30858+#define ENTER CFI_STARTPROC
30859+#define EXIT ASM_CLAC ; \
30860+ pax_force_retaddr ; \
30861+ ret ; \
30862 CFI_ENDPROC
30863
30864+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30865+#define _DEST %_ASM_CX,%_ASM_BX
30866+#else
30867+#define _DEST %_ASM_CX
30868+#endif
30869+
30870+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30871+#define __copyuser_seg gs;
30872+#else
30873+#define __copyuser_seg
30874+#endif
30875+
30876 .text
30877 ENTRY(__put_user_1)
30878 ENTER
30879+
30880+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30881+ GET_THREAD_INFO(%_ASM_BX)
30882 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30883 jae bad_put_user
30884+
30885+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30886+ mov pax_user_shadow_base,%_ASM_BX
30887+ cmp %_ASM_BX,%_ASM_CX
30888+ jb 1234f
30889+ xor %ebx,%ebx
30890+1234:
30891+#endif
30892+
30893+#endif
30894+
30895 ASM_STAC
30896-1: movb %al,(%_ASM_CX)
30897+1: __copyuser_seg movb %al,(_DEST)
30898 xor %eax,%eax
30899 EXIT
30900 ENDPROC(__put_user_1)
30901
30902 ENTRY(__put_user_2)
30903 ENTER
30904+
30905+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30906+ GET_THREAD_INFO(%_ASM_BX)
30907 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30908 sub $1,%_ASM_BX
30909 cmp %_ASM_BX,%_ASM_CX
30910 jae bad_put_user
30911+
30912+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30913+ mov pax_user_shadow_base,%_ASM_BX
30914+ cmp %_ASM_BX,%_ASM_CX
30915+ jb 1234f
30916+ xor %ebx,%ebx
30917+1234:
30918+#endif
30919+
30920+#endif
30921+
30922 ASM_STAC
30923-2: movw %ax,(%_ASM_CX)
30924+2: __copyuser_seg movw %ax,(_DEST)
30925 xor %eax,%eax
30926 EXIT
30927 ENDPROC(__put_user_2)
30928
30929 ENTRY(__put_user_4)
30930 ENTER
30931+
30932+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30933+ GET_THREAD_INFO(%_ASM_BX)
30934 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30935 sub $3,%_ASM_BX
30936 cmp %_ASM_BX,%_ASM_CX
30937 jae bad_put_user
30938+
30939+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30940+ mov pax_user_shadow_base,%_ASM_BX
30941+ cmp %_ASM_BX,%_ASM_CX
30942+ jb 1234f
30943+ xor %ebx,%ebx
30944+1234:
30945+#endif
30946+
30947+#endif
30948+
30949 ASM_STAC
30950-3: movl %eax,(%_ASM_CX)
30951+3: __copyuser_seg movl %eax,(_DEST)
30952 xor %eax,%eax
30953 EXIT
30954 ENDPROC(__put_user_4)
30955
30956 ENTRY(__put_user_8)
30957 ENTER
30958+
30959+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30960+ GET_THREAD_INFO(%_ASM_BX)
30961 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30962 sub $7,%_ASM_BX
30963 cmp %_ASM_BX,%_ASM_CX
30964 jae bad_put_user
30965+
30966+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30967+ mov pax_user_shadow_base,%_ASM_BX
30968+ cmp %_ASM_BX,%_ASM_CX
30969+ jb 1234f
30970+ xor %ebx,%ebx
30971+1234:
30972+#endif
30973+
30974+#endif
30975+
30976 ASM_STAC
30977-4: mov %_ASM_AX,(%_ASM_CX)
30978+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30979 #ifdef CONFIG_X86_32
30980-5: movl %edx,4(%_ASM_CX)
30981+5: __copyuser_seg movl %edx,4(_DEST)
30982 #endif
30983 xor %eax,%eax
30984 EXIT
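
Note on the putuser.S hunks: the same amd64 UDEREF remap as in getuser.S, but
folded into the addressing mode; _DEST becomes (%_ASM_CX,%_ASM_BX) and the
prologue leaves %_ASM_BX holding either pax_user_shadow_base (address below
the base, shift it into the alias) or zero (already high enough). A C
restatement of that operand:

#include <stdint.h>

/* effective destination for the put_user stores above: bx plays the
 * index register in (%_ASM_CX,%_ASM_BX) */
static inline uintptr_t putuser_dest(uintptr_t addr, uintptr_t shadow_base)
{
	uintptr_t bx = (addr < shadow_base) ? shadow_base : 0;	/* jb 1234f */
	return addr + bx;
}
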
30985diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30986index 5dff5f0..cadebf4 100644
30987--- a/arch/x86/lib/rwsem.S
30988+++ b/arch/x86/lib/rwsem.S
30989@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30990 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30991 CFI_RESTORE __ASM_REG(dx)
30992 restore_common_regs
30993+ pax_force_retaddr
30994 ret
30995 CFI_ENDPROC
30996 ENDPROC(call_rwsem_down_read_failed)
30997@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30998 movq %rax,%rdi
30999 call rwsem_down_write_failed
31000 restore_common_regs
31001+ pax_force_retaddr
31002 ret
31003 CFI_ENDPROC
31004 ENDPROC(call_rwsem_down_write_failed)
31005@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31006 movq %rax,%rdi
31007 call rwsem_wake
31008 restore_common_regs
31009-1: ret
31010+1: pax_force_retaddr
31011+ ret
31012 CFI_ENDPROC
31013 ENDPROC(call_rwsem_wake)
31014
31015@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31016 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31017 CFI_RESTORE __ASM_REG(dx)
31018 restore_common_regs
31019+ pax_force_retaddr
31020 ret
31021 CFI_ENDPROC
31022 ENDPROC(call_rwsem_downgrade_wake)
31023diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31024index b30b5eb..2b57052 100644
31025--- a/arch/x86/lib/thunk_64.S
31026+++ b/arch/x86/lib/thunk_64.S
31027@@ -9,6 +9,7 @@
31028 #include <asm/dwarf2.h>
31029 #include <asm/calling.h>
31030 #include <asm/asm.h>
31031+#include <asm/alternative-asm.h>
31032
31033 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31034 .macro THUNK name, func, put_ret_addr_in_rdi=0
31035@@ -16,11 +17,11 @@
31036 \name:
31037 CFI_STARTPROC
31038
31039- /* this one pushes 9 elems, the next one would be %rIP */
31040- SAVE_ARGS
31041+ /* this one pushes 15+1 elems, the next one would be %rIP */
31042+ SAVE_ARGS 8
31043
31044 .if \put_ret_addr_in_rdi
31045- movq_cfi_restore 9*8, rdi
31046+ movq_cfi_restore RIP, rdi
31047 .endif
31048
31049 call \func
31050@@ -47,9 +48,10 @@
31051
31052 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31053 CFI_STARTPROC
31054- SAVE_ARGS
31055+ SAVE_ARGS 8
31056 restore:
31057- RESTORE_ARGS
31058+ RESTORE_ARGS 1,8
31059+ pax_force_retaddr
31060 ret
31061 CFI_ENDPROC
31062 _ASM_NOKPROBE(restore)
31063diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31064index e2f5e21..4b22130 100644
31065--- a/arch/x86/lib/usercopy_32.c
31066+++ b/arch/x86/lib/usercopy_32.c
31067@@ -42,11 +42,13 @@ do { \
31068 int __d0; \
31069 might_fault(); \
31070 __asm__ __volatile__( \
31071+ __COPYUSER_SET_ES \
31072 ASM_STAC "\n" \
31073 "0: rep; stosl\n" \
31074 " movl %2,%0\n" \
31075 "1: rep; stosb\n" \
31076 "2: " ASM_CLAC "\n" \
31077+ __COPYUSER_RESTORE_ES \
31078 ".section .fixup,\"ax\"\n" \
31079 "3: lea 0(%2,%0,4),%0\n" \
31080 " jmp 2b\n" \
31081@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31082
31083 #ifdef CONFIG_X86_INTEL_USERCOPY
31084 static unsigned long
31085-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31086+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31087 {
31088 int d0, d1;
31089 __asm__ __volatile__(
31090@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31091 " .align 2,0x90\n"
31092 "3: movl 0(%4), %%eax\n"
31093 "4: movl 4(%4), %%edx\n"
31094- "5: movl %%eax, 0(%3)\n"
31095- "6: movl %%edx, 4(%3)\n"
31096+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31097+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31098 "7: movl 8(%4), %%eax\n"
31099 "8: movl 12(%4),%%edx\n"
31100- "9: movl %%eax, 8(%3)\n"
31101- "10: movl %%edx, 12(%3)\n"
31102+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31103+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31104 "11: movl 16(%4), %%eax\n"
31105 "12: movl 20(%4), %%edx\n"
31106- "13: movl %%eax, 16(%3)\n"
31107- "14: movl %%edx, 20(%3)\n"
31108+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31109+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31110 "15: movl 24(%4), %%eax\n"
31111 "16: movl 28(%4), %%edx\n"
31112- "17: movl %%eax, 24(%3)\n"
31113- "18: movl %%edx, 28(%3)\n"
31114+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31115+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31116 "19: movl 32(%4), %%eax\n"
31117 "20: movl 36(%4), %%edx\n"
31118- "21: movl %%eax, 32(%3)\n"
31119- "22: movl %%edx, 36(%3)\n"
31120+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31121+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31122 "23: movl 40(%4), %%eax\n"
31123 "24: movl 44(%4), %%edx\n"
31124- "25: movl %%eax, 40(%3)\n"
31125- "26: movl %%edx, 44(%3)\n"
31126+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31127+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31128 "27: movl 48(%4), %%eax\n"
31129 "28: movl 52(%4), %%edx\n"
31130- "29: movl %%eax, 48(%3)\n"
31131- "30: movl %%edx, 52(%3)\n"
31132+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31133+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31134 "31: movl 56(%4), %%eax\n"
31135 "32: movl 60(%4), %%edx\n"
31136- "33: movl %%eax, 56(%3)\n"
31137- "34: movl %%edx, 60(%3)\n"
31138+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31139+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31140 " addl $-64, %0\n"
31141 " addl $64, %4\n"
31142 " addl $64, %3\n"
31143@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31144 " shrl $2, %0\n"
31145 " andl $3, %%eax\n"
31146 " cld\n"
31147+ __COPYUSER_SET_ES
31148 "99: rep; movsl\n"
31149 "36: movl %%eax, %0\n"
31150 "37: rep; movsb\n"
31151 "100:\n"
31152+ __COPYUSER_RESTORE_ES
31153+ ".section .fixup,\"ax\"\n"
31154+ "101: lea 0(%%eax,%0,4),%0\n"
31155+ " jmp 100b\n"
31156+ ".previous\n"
31157+ _ASM_EXTABLE(1b,100b)
31158+ _ASM_EXTABLE(2b,100b)
31159+ _ASM_EXTABLE(3b,100b)
31160+ _ASM_EXTABLE(4b,100b)
31161+ _ASM_EXTABLE(5b,100b)
31162+ _ASM_EXTABLE(6b,100b)
31163+ _ASM_EXTABLE(7b,100b)
31164+ _ASM_EXTABLE(8b,100b)
31165+ _ASM_EXTABLE(9b,100b)
31166+ _ASM_EXTABLE(10b,100b)
31167+ _ASM_EXTABLE(11b,100b)
31168+ _ASM_EXTABLE(12b,100b)
31169+ _ASM_EXTABLE(13b,100b)
31170+ _ASM_EXTABLE(14b,100b)
31171+ _ASM_EXTABLE(15b,100b)
31172+ _ASM_EXTABLE(16b,100b)
31173+ _ASM_EXTABLE(17b,100b)
31174+ _ASM_EXTABLE(18b,100b)
31175+ _ASM_EXTABLE(19b,100b)
31176+ _ASM_EXTABLE(20b,100b)
31177+ _ASM_EXTABLE(21b,100b)
31178+ _ASM_EXTABLE(22b,100b)
31179+ _ASM_EXTABLE(23b,100b)
31180+ _ASM_EXTABLE(24b,100b)
31181+ _ASM_EXTABLE(25b,100b)
31182+ _ASM_EXTABLE(26b,100b)
31183+ _ASM_EXTABLE(27b,100b)
31184+ _ASM_EXTABLE(28b,100b)
31185+ _ASM_EXTABLE(29b,100b)
31186+ _ASM_EXTABLE(30b,100b)
31187+ _ASM_EXTABLE(31b,100b)
31188+ _ASM_EXTABLE(32b,100b)
31189+ _ASM_EXTABLE(33b,100b)
31190+ _ASM_EXTABLE(34b,100b)
31191+ _ASM_EXTABLE(35b,100b)
31192+ _ASM_EXTABLE(36b,100b)
31193+ _ASM_EXTABLE(37b,100b)
31194+ _ASM_EXTABLE(99b,101b)
31195+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31196+ : "1"(to), "2"(from), "0"(size)
31197+ : "eax", "edx", "memory");
31198+ return size;
31199+}
31200+
31201+static unsigned long
31202+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31203+{
31204+ int d0, d1;
31205+ __asm__ __volatile__(
31206+ " .align 2,0x90\n"
31207+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31208+ " cmpl $67, %0\n"
31209+ " jbe 3f\n"
31210+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31211+ " .align 2,0x90\n"
31212+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31213+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31214+ "5: movl %%eax, 0(%3)\n"
31215+ "6: movl %%edx, 4(%3)\n"
31216+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31217+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31218+ "9: movl %%eax, 8(%3)\n"
31219+ "10: movl %%edx, 12(%3)\n"
31220+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31221+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31222+ "13: movl %%eax, 16(%3)\n"
31223+ "14: movl %%edx, 20(%3)\n"
31224+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31225+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31226+ "17: movl %%eax, 24(%3)\n"
31227+ "18: movl %%edx, 28(%3)\n"
31228+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31229+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31230+ "21: movl %%eax, 32(%3)\n"
31231+ "22: movl %%edx, 36(%3)\n"
31232+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31233+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31234+ "25: movl %%eax, 40(%3)\n"
31235+ "26: movl %%edx, 44(%3)\n"
31236+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31237+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31238+ "29: movl %%eax, 48(%3)\n"
31239+ "30: movl %%edx, 52(%3)\n"
31240+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31241+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31242+ "33: movl %%eax, 56(%3)\n"
31243+ "34: movl %%edx, 60(%3)\n"
31244+ " addl $-64, %0\n"
31245+ " addl $64, %4\n"
31246+ " addl $64, %3\n"
31247+ " cmpl $63, %0\n"
31248+ " ja 1b\n"
31249+ "35: movl %0, %%eax\n"
31250+ " shrl $2, %0\n"
31251+ " andl $3, %%eax\n"
31252+ " cld\n"
31253+ "99: rep; "__copyuser_seg" movsl\n"
31254+ "36: movl %%eax, %0\n"
31255+ "37: rep; "__copyuser_seg" movsb\n"
31256+ "100:\n"
31257 ".section .fixup,\"ax\"\n"
31258 "101: lea 0(%%eax,%0,4),%0\n"
31259 " jmp 100b\n"
31260@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31261 int d0, d1;
31262 __asm__ __volatile__(
31263 " .align 2,0x90\n"
31264- "0: movl 32(%4), %%eax\n"
31265+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31266 " cmpl $67, %0\n"
31267 " jbe 2f\n"
31268- "1: movl 64(%4), %%eax\n"
31269+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31270 " .align 2,0x90\n"
31271- "2: movl 0(%4), %%eax\n"
31272- "21: movl 4(%4), %%edx\n"
31273+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31274+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31275 " movl %%eax, 0(%3)\n"
31276 " movl %%edx, 4(%3)\n"
31277- "3: movl 8(%4), %%eax\n"
31278- "31: movl 12(%4),%%edx\n"
31279+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31280+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31281 " movl %%eax, 8(%3)\n"
31282 " movl %%edx, 12(%3)\n"
31283- "4: movl 16(%4), %%eax\n"
31284- "41: movl 20(%4), %%edx\n"
31285+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31286+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31287 " movl %%eax, 16(%3)\n"
31288 " movl %%edx, 20(%3)\n"
31289- "10: movl 24(%4), %%eax\n"
31290- "51: movl 28(%4), %%edx\n"
31291+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31292+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31293 " movl %%eax, 24(%3)\n"
31294 " movl %%edx, 28(%3)\n"
31295- "11: movl 32(%4), %%eax\n"
31296- "61: movl 36(%4), %%edx\n"
31297+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31298+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31299 " movl %%eax, 32(%3)\n"
31300 " movl %%edx, 36(%3)\n"
31301- "12: movl 40(%4), %%eax\n"
31302- "71: movl 44(%4), %%edx\n"
31303+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31304+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31305 " movl %%eax, 40(%3)\n"
31306 " movl %%edx, 44(%3)\n"
31307- "13: movl 48(%4), %%eax\n"
31308- "81: movl 52(%4), %%edx\n"
31309+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31310+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31311 " movl %%eax, 48(%3)\n"
31312 " movl %%edx, 52(%3)\n"
31313- "14: movl 56(%4), %%eax\n"
31314- "91: movl 60(%4), %%edx\n"
31315+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31316+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31317 " movl %%eax, 56(%3)\n"
31318 " movl %%edx, 60(%3)\n"
31319 " addl $-64, %0\n"
31320@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31321 " shrl $2, %0\n"
31322 " andl $3, %%eax\n"
31323 " cld\n"
31324- "6: rep; movsl\n"
31325+ "6: rep; "__copyuser_seg" movsl\n"
31326 " movl %%eax,%0\n"
31327- "7: rep; movsb\n"
31328+ "7: rep; "__copyuser_seg" movsb\n"
31329 "8:\n"
31330 ".section .fixup,\"ax\"\n"
31331 "9: lea 0(%%eax,%0,4),%0\n"
31332@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31333
31334 __asm__ __volatile__(
31335 " .align 2,0x90\n"
31336- "0: movl 32(%4), %%eax\n"
31337+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31338 " cmpl $67, %0\n"
31339 " jbe 2f\n"
31340- "1: movl 64(%4), %%eax\n"
31341+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31342 " .align 2,0x90\n"
31343- "2: movl 0(%4), %%eax\n"
31344- "21: movl 4(%4), %%edx\n"
31345+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31346+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31347 " movnti %%eax, 0(%3)\n"
31348 " movnti %%edx, 4(%3)\n"
31349- "3: movl 8(%4), %%eax\n"
31350- "31: movl 12(%4),%%edx\n"
31351+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31352+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31353 " movnti %%eax, 8(%3)\n"
31354 " movnti %%edx, 12(%3)\n"
31355- "4: movl 16(%4), %%eax\n"
31356- "41: movl 20(%4), %%edx\n"
31357+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31358+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31359 " movnti %%eax, 16(%3)\n"
31360 " movnti %%edx, 20(%3)\n"
31361- "10: movl 24(%4), %%eax\n"
31362- "51: movl 28(%4), %%edx\n"
31363+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31364+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31365 " movnti %%eax, 24(%3)\n"
31366 " movnti %%edx, 28(%3)\n"
31367- "11: movl 32(%4), %%eax\n"
31368- "61: movl 36(%4), %%edx\n"
31369+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31370+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31371 " movnti %%eax, 32(%3)\n"
31372 " movnti %%edx, 36(%3)\n"
31373- "12: movl 40(%4), %%eax\n"
31374- "71: movl 44(%4), %%edx\n"
31375+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31376+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31377 " movnti %%eax, 40(%3)\n"
31378 " movnti %%edx, 44(%3)\n"
31379- "13: movl 48(%4), %%eax\n"
31380- "81: movl 52(%4), %%edx\n"
31381+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31382+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31383 " movnti %%eax, 48(%3)\n"
31384 " movnti %%edx, 52(%3)\n"
31385- "14: movl 56(%4), %%eax\n"
31386- "91: movl 60(%4), %%edx\n"
31387+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31388+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31389 " movnti %%eax, 56(%3)\n"
31390 " movnti %%edx, 60(%3)\n"
31391 " addl $-64, %0\n"
31392@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31393 " shrl $2, %0\n"
31394 " andl $3, %%eax\n"
31395 " cld\n"
31396- "6: rep; movsl\n"
31397+ "6: rep; "__copyuser_seg" movsl\n"
31398 " movl %%eax,%0\n"
31399- "7: rep; movsb\n"
31400+ "7: rep; "__copyuser_seg" movsb\n"
31401 "8:\n"
31402 ".section .fixup,\"ax\"\n"
31403 "9: lea 0(%%eax,%0,4),%0\n"
31404@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31405
31406 __asm__ __volatile__(
31407 " .align 2,0x90\n"
31408- "0: movl 32(%4), %%eax\n"
31409+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31410 " cmpl $67, %0\n"
31411 " jbe 2f\n"
31412- "1: movl 64(%4), %%eax\n"
31413+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31414 " .align 2,0x90\n"
31415- "2: movl 0(%4), %%eax\n"
31416- "21: movl 4(%4), %%edx\n"
31417+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31418+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31419 " movnti %%eax, 0(%3)\n"
31420 " movnti %%edx, 4(%3)\n"
31421- "3: movl 8(%4), %%eax\n"
31422- "31: movl 12(%4),%%edx\n"
31423+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31424+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31425 " movnti %%eax, 8(%3)\n"
31426 " movnti %%edx, 12(%3)\n"
31427- "4: movl 16(%4), %%eax\n"
31428- "41: movl 20(%4), %%edx\n"
31429+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31430+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31431 " movnti %%eax, 16(%3)\n"
31432 " movnti %%edx, 20(%3)\n"
31433- "10: movl 24(%4), %%eax\n"
31434- "51: movl 28(%4), %%edx\n"
31435+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31436+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31437 " movnti %%eax, 24(%3)\n"
31438 " movnti %%edx, 28(%3)\n"
31439- "11: movl 32(%4), %%eax\n"
31440- "61: movl 36(%4), %%edx\n"
31441+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31442+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31443 " movnti %%eax, 32(%3)\n"
31444 " movnti %%edx, 36(%3)\n"
31445- "12: movl 40(%4), %%eax\n"
31446- "71: movl 44(%4), %%edx\n"
31447+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31448+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31449 " movnti %%eax, 40(%3)\n"
31450 " movnti %%edx, 44(%3)\n"
31451- "13: movl 48(%4), %%eax\n"
31452- "81: movl 52(%4), %%edx\n"
31453+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31454+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31455 " movnti %%eax, 48(%3)\n"
31456 " movnti %%edx, 52(%3)\n"
31457- "14: movl 56(%4), %%eax\n"
31458- "91: movl 60(%4), %%edx\n"
31459+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31460+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31461 " movnti %%eax, 56(%3)\n"
31462 " movnti %%edx, 60(%3)\n"
31463 " addl $-64, %0\n"
31464@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31465 " shrl $2, %0\n"
31466 " andl $3, %%eax\n"
31467 " cld\n"
31468- "6: rep; movsl\n"
31469+ "6: rep; "__copyuser_seg" movsl\n"
31470 " movl %%eax,%0\n"
31471- "7: rep; movsb\n"
31472+ "7: rep; "__copyuser_seg" movsb\n"
31473 "8:\n"
31474 ".section .fixup,\"ax\"\n"
31475 "9: lea 0(%%eax,%0,4),%0\n"
31476@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31477 */
31478 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31479 unsigned long size);
31480-unsigned long __copy_user_intel(void __user *to, const void *from,
31481+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31482+ unsigned long size);
31483+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31484 unsigned long size);
31485 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31486 const void __user *from, unsigned long size);
31487 #endif /* CONFIG_X86_INTEL_USERCOPY */
31488
31489 /* Generic arbitrary sized copy. */
31490-#define __copy_user(to, from, size) \
31491+#define __copy_user(to, from, size, prefix, set, restore) \
31492 do { \
31493 int __d0, __d1, __d2; \
31494 __asm__ __volatile__( \
31495+ set \
31496 " cmp $7,%0\n" \
31497 " jbe 1f\n" \
31498 " movl %1,%0\n" \
31499 " negl %0\n" \
31500 " andl $7,%0\n" \
31501 " subl %0,%3\n" \
31502- "4: rep; movsb\n" \
31503+ "4: rep; "prefix"movsb\n" \
31504 " movl %3,%0\n" \
31505 " shrl $2,%0\n" \
31506 " andl $3,%3\n" \
31507 " .align 2,0x90\n" \
31508- "0: rep; movsl\n" \
31509+ "0: rep; "prefix"movsl\n" \
31510 " movl %3,%0\n" \
31511- "1: rep; movsb\n" \
31512+ "1: rep; "prefix"movsb\n" \
31513 "2:\n" \
31514+ restore \
31515 ".section .fixup,\"ax\"\n" \
31516 "5: addl %3,%0\n" \
31517 " jmp 2b\n" \
31518@@ -538,14 +650,14 @@ do { \
31519 " negl %0\n" \
31520 " andl $7,%0\n" \
31521 " subl %0,%3\n" \
31522- "4: rep; movsb\n" \
31523+ "4: rep; "__copyuser_seg"movsb\n" \
31524 " movl %3,%0\n" \
31525 " shrl $2,%0\n" \
31526 " andl $3,%3\n" \
31527 " .align 2,0x90\n" \
31528- "0: rep; movsl\n" \
31529+ "0: rep; "__copyuser_seg"movsl\n" \
31530 " movl %3,%0\n" \
31531- "1: rep; movsb\n" \
31532+ "1: rep; "__copyuser_seg"movsb\n" \
31533 "2:\n" \
31534 ".section .fixup,\"ax\"\n" \
31535 "5: addl %3,%0\n" \
31536@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31537 {
31538 stac();
31539 if (movsl_is_ok(to, from, n))
31540- __copy_user(to, from, n);
31541+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31542 else
31543- n = __copy_user_intel(to, from, n);
31544+ n = __generic_copy_to_user_intel(to, from, n);
31545 clac();
31546 return n;
31547 }
31548@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31549 {
31550 stac();
31551 if (movsl_is_ok(to, from, n))
31552- __copy_user(to, from, n);
31553+ __copy_user(to, from, n, __copyuser_seg, "", "");
31554 else
31555- n = __copy_user_intel((void __user *)to,
31556- (const void *)from, n);
31557+ n = __generic_copy_from_user_intel(to, from, n);
31558 clac();
31559 return n;
31560 }
31561@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31562 if (n > 64 && cpu_has_xmm2)
31563 n = __copy_user_intel_nocache(to, from, n);
31564 else
31565- __copy_user(to, from, n);
31566+ __copy_user(to, from, n, __copyuser_seg, "", "");
31567 #else
31568- __copy_user(to, from, n);
31569+ __copy_user(to, from, n, __copyuser_seg, "", "");
31570 #endif
31571 clac();
31572 return n;
31573 }
31574 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31575
31576-/**
31577- * copy_to_user: - Copy a block of data into user space.
31578- * @to: Destination address, in user space.
31579- * @from: Source address, in kernel space.
31580- * @n: Number of bytes to copy.
31581- *
31582- * Context: User context only. This function may sleep.
31583- *
31584- * Copy data from kernel space to user space.
31585- *
31586- * Returns number of bytes that could not be copied.
31587- * On success, this will be zero.
31588- */
31589-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31590+#ifdef CONFIG_PAX_MEMORY_UDEREF
31591+void __set_fs(mm_segment_t x)
31592 {
31593- if (access_ok(VERIFY_WRITE, to, n))
31594- n = __copy_to_user(to, from, n);
31595- return n;
31596+ switch (x.seg) {
31597+ case 0:
31598+ loadsegment(gs, 0);
31599+ break;
31600+ case TASK_SIZE_MAX:
31601+ loadsegment(gs, __USER_DS);
31602+ break;
31603+ case -1UL:
31604+ loadsegment(gs, __KERNEL_DS);
31605+ break;
31606+ default:
31607+ BUG();
31608+ }
31609 }
31610-EXPORT_SYMBOL(_copy_to_user);
31611+EXPORT_SYMBOL(__set_fs);
31612
31613-/**
31614- * copy_from_user: - Copy a block of data from user space.
31615- * @to: Destination address, in kernel space.
31616- * @from: Source address, in user space.
31617- * @n: Number of bytes to copy.
31618- *
31619- * Context: User context only. This function may sleep.
31620- *
31621- * Copy data from user space to kernel space.
31622- *
31623- * Returns number of bytes that could not be copied.
31624- * On success, this will be zero.
31625- *
31626- * If some data could not be copied, this function will pad the copied
31627- * data to the requested size using zero bytes.
31628- */
31629-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31630+void set_fs(mm_segment_t x)
31631 {
31632- if (access_ok(VERIFY_READ, from, n))
31633- n = __copy_from_user(to, from, n);
31634- else
31635- memset(to, 0, n);
31636- return n;
31637+ current_thread_info()->addr_limit = x;
31638+ __set_fs(x);
31639 }
31640-EXPORT_SYMBOL(_copy_from_user);
31641+EXPORT_SYMBOL(set_fs);
31642+#endif
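
Note on the usercopy_32.c rework: on i386 UDEREF every user access goes
through the %gs-based user segment, so the copy loops gain the __copyuser_seg
prefix on exactly the instructions that touch userland (loads in the
from-user variant, stores in the to-user variant), and the string forms
additionally swap %es via __COPYUSER_SET_ES / __COPYUSER_RESTORE_ES because
rep movs writes through %es. __copy_user_intel is split into
__generic_copy_to_user_intel and __generic_copy_from_user_intel since the two
directions now need the prefix on opposite halves of each load/store pair.
set_fs() becomes a real segment reload: addr_limit values 0, TASK_SIZE_MAX
and -1UL map to the null, user and kernel %gs selectors, so the limit is
enforced by the CPU rather than by comparisons. A sketch of what one prefixed
user load looks like from C, assuming the usual kernel segment-override
inline-asm idiom (helper name hypothetical):

/* read one byte through the UDEREF user segment; a kernel address
 * faults on the segment limit instead of being dereferenced */
static inline unsigned char gs_read_byte(const unsigned char *uaddr)
{
	unsigned char v;
	asm volatile("movb %%gs:%1, %0" : "=q" (v) : "m" (*uaddr));
	return v;
}
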
31643diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31644index 0a42327..7a82465 100644
31645--- a/arch/x86/lib/usercopy_64.c
31646+++ b/arch/x86/lib/usercopy_64.c
31647@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31648 might_fault();
31649 /* no memory constraint because it doesn't change any memory gcc knows
31650 about */
31651+ pax_open_userland();
31652 stac();
31653 asm volatile(
31654 " testq %[size8],%[size8]\n"
31655@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31656 _ASM_EXTABLE(0b,3b)
31657 _ASM_EXTABLE(1b,2b)
31658 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31659- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31660+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31661 [zero] "r" (0UL), [eight] "r" (8UL));
31662 clac();
31663+ pax_close_userland();
31664 return size;
31665 }
31666 EXPORT_SYMBOL(__clear_user);
31667@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31668 }
31669 EXPORT_SYMBOL(clear_user);
31670
31671-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31672+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31673 {
31674- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31675- return copy_user_generic((__force void *)to, (__force void *)from, len);
31676- }
31677- return len;
31678+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31679+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31680+ return len;
31681 }
31682 EXPORT_SYMBOL(copy_in_user);
31683
31684@@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31685 * it is not necessary to optimize tail handling.
31686 */
31687 __visible unsigned long
31688-copy_user_handle_tail(char *to, char *from, unsigned len)
31689+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31690 {
31691+ clac();
31692+ pax_close_userland();
31693 for (; len; --len, to++) {
31694 char c;
31695
31696@@ -79,10 +82,9 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31697 if (__put_user_nocheck(c, to, sizeof(char)))
31698 break;
31699 }
31700- clac();
31701
31702 /* If the destination is a kernel buffer, we always clear the end */
31703- if (!__addr_ok(to))
31704+ if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base)
31705 memset(to, 0, len);
31706 return len;
31707 }
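
Note on the usercopy_64.c hunks: amd64 UDEREF brackets each user access with
pax_open_userland()/pax_close_userland() and routes raw pointers through
____m(), which applies the same shadow-base shift as the getuser.S/putuser.S
stubs. copy_user_handle_tail closes userland (and clac()s) on entry because
it is reached from a faulted copy that left it open, and its trailing memset
now fires only for true kernel buffers, identified as addresses at or above
TASK_SIZE_MAX plus the shadow base. A plausible shape of ____m(), inferred
from those stubs rather than quoted from the patch:

#define ____m(x) ({						\
	unsigned long ____addr = (unsigned long)(x);		\
	if (____addr < pax_user_shadow_base)			\
		____addr += pax_user_shadow_base;		\
	(typeof(x))____addr;					\
})
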
31708diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31709index c4cc740..60a7362 100644
31710--- a/arch/x86/mm/Makefile
31711+++ b/arch/x86/mm/Makefile
31712@@ -35,3 +35,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31713 obj-$(CONFIG_MEMTEST) += memtest.o
31714
31715 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31716+
31717+quote:="
31718+obj-$(CONFIG_X86_64) += uderef_64.o
31719+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31720diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31721index 903ec1e..c4166b2 100644
31722--- a/arch/x86/mm/extable.c
31723+++ b/arch/x86/mm/extable.c
31724@@ -6,12 +6,24 @@
31725 static inline unsigned long
31726 ex_insn_addr(const struct exception_table_entry *x)
31727 {
31728- return (unsigned long)&x->insn + x->insn;
31729+ unsigned long reloc = 0;
31730+
31731+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31732+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31733+#endif
31734+
31735+ return (unsigned long)&x->insn + x->insn + reloc;
31736 }
31737 static inline unsigned long
31738 ex_fixup_addr(const struct exception_table_entry *x)
31739 {
31740- return (unsigned long)&x->fixup + x->fixup;
31741+ unsigned long reloc = 0;
31742+
31743+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31744+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31745+#endif
31746+
31747+ return (unsigned long)&x->fixup + x->fixup + reloc;
31748 }
31749
31750 int fixup_exception(struct pt_regs *regs)
31751@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31752 unsigned long new_ip;
31753
31754 #ifdef CONFIG_PNPBIOS
31755- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31756+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31757 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31758 extern u32 pnp_bios_is_utter_crap;
31759 pnp_bios_is_utter_crap = 1;
31760@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31761 i += 4;
31762 p->fixup -= i;
31763 i += 4;
31764+
31765+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31766+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31767+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31768+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31769+#endif
31770+
31771 }
31772 }
31773
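
Note on the extable.c hunks: x86 exception-table entries are self-relative,
each 32-bit field an offset from its own address, so the decode is
&x->insn + x->insn. i386 KERNEXEC moves the kernel to a different physical
load address than the link-time one, so the decoders add the delta
____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR and the sort path subtracts the
same delta, under a BUILD_BUG_ON that CONFIG_BUILDTIME_EXTABLE_SORT is set.
fixup_exception also refuses the PNPBIOS back door when the fault came from
vm86 mode, since a vm86 %cs value can alias SEGMENT_IS_PNP_CODE. A worked
example of the relative decode:

#include <stdint.h>

struct exentry {
	int32_t insn;	/* offset from &insn to the faulting instruction */
	int32_t fixup;	/* offset from &fixup to the recovery code */
};

static uintptr_t decode_insn_addr(const struct exentry *e, intptr_t reloc)
{
	/* reloc is 0 upstream; under i386 KERNEXEC it carries the
	 * load-address delta, as in the hunk above */
	return (uintptr_t)&e->insn + e->insn + reloc;
}
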
31774diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31775index ede025f..380466b 100644
31776--- a/arch/x86/mm/fault.c
31777+++ b/arch/x86/mm/fault.c
31778@@ -13,12 +13,19 @@
31779 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31780 #include <linux/prefetch.h> /* prefetchw */
31781 #include <linux/context_tracking.h> /* exception_enter(), ... */
31782+#include <linux/unistd.h>
31783+#include <linux/compiler.h>
31784
31785 #include <asm/traps.h> /* dotraplinkage, ... */
31786 #include <asm/pgalloc.h> /* pgd_*(), ... */
31787 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31788 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31789 #include <asm/vsyscall.h> /* emulate_vsyscall */
31790+#include <asm/tlbflush.h>
31791+
31792+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31793+#include <asm/stacktrace.h>
31794+#endif
31795
31796 #define CREATE_TRACE_POINTS
31797 #include <asm/trace/exceptions.h>
31798@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31799 int ret = 0;
31800
31801 /* kprobe_running() needs smp_processor_id() */
31802- if (kprobes_built_in() && !user_mode_vm(regs)) {
31803+ if (kprobes_built_in() && !user_mode(regs)) {
31804 preempt_disable();
31805 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31806 ret = 1;
31807@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31808 return !instr_lo || (instr_lo>>1) == 1;
31809 case 0x00:
31810 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31811- if (probe_kernel_address(instr, opcode))
31812+ if (user_mode(regs)) {
31813+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31814+ return 0;
31815+ } else if (probe_kernel_address(instr, opcode))
31816 return 0;
31817
31818 *prefetch = (instr_lo == 0xF) &&
31819@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31820 while (instr < max_instr) {
31821 unsigned char opcode;
31822
31823- if (probe_kernel_address(instr, opcode))
31824+ if (user_mode(regs)) {
31825+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31826+ break;
31827+ } else if (probe_kernel_address(instr, opcode))
31828 break;
31829
31830 instr++;
31831@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31832 force_sig_info(si_signo, &info, tsk);
31833 }
31834
31835+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31836+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31837+#endif
31838+
31839+#ifdef CONFIG_PAX_EMUTRAMP
31840+static int pax_handle_fetch_fault(struct pt_regs *regs);
31841+#endif
31842+
31843+#ifdef CONFIG_PAX_PAGEEXEC
31844+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31845+{
31846+ pgd_t *pgd;
31847+ pud_t *pud;
31848+ pmd_t *pmd;
31849+
31850+ pgd = pgd_offset(mm, address);
31851+ if (!pgd_present(*pgd))
31852+ return NULL;
31853+ pud = pud_offset(pgd, address);
31854+ if (!pud_present(*pud))
31855+ return NULL;
31856+ pmd = pmd_offset(pud, address);
31857+ if (!pmd_present(*pmd))
31858+ return NULL;
31859+ return pmd;
31860+}
31861+#endif
31862+
31863 DEFINE_SPINLOCK(pgd_lock);
31864 LIST_HEAD(pgd_list);
31865
31866@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31867 for (address = VMALLOC_START & PMD_MASK;
31868 address >= TASK_SIZE && address < FIXADDR_TOP;
31869 address += PMD_SIZE) {
31870+
31871+#ifdef CONFIG_PAX_PER_CPU_PGD
31872+ unsigned long cpu;
31873+#else
31874 struct page *page;
31875+#endif
31876
31877 spin_lock(&pgd_lock);
31878+
31879+#ifdef CONFIG_PAX_PER_CPU_PGD
31880+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31881+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31882+ pmd_t *ret;
31883+
31884+ ret = vmalloc_sync_one(pgd, address);
31885+ if (!ret)
31886+ break;
31887+ pgd = get_cpu_pgd(cpu, kernel);
31888+#else
31889 list_for_each_entry(page, &pgd_list, lru) {
31890+ pgd_t *pgd;
31891 spinlock_t *pgt_lock;
31892 pmd_t *ret;
31893
31894@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31895 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31896
31897 spin_lock(pgt_lock);
31898- ret = vmalloc_sync_one(page_address(page), address);
31899+ pgd = page_address(page);
31900+#endif
31901+
31902+ ret = vmalloc_sync_one(pgd, address);
31903+
31904+#ifndef CONFIG_PAX_PER_CPU_PGD
31905 spin_unlock(pgt_lock);
31906+#endif
31907
31908 if (!ret)
31909 break;
31910@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31911 * an interrupt in the middle of a task switch..
31912 */
31913 pgd_paddr = read_cr3();
31914+
31915+#ifdef CONFIG_PAX_PER_CPU_PGD
31916+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31917+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31918+#endif
31919+
31920 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31921 if (!pmd_k)
31922 return -1;
31923@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31924 * happen within a race in page table update. In the later
31925 * case just flush:
31926 */
31927- pgd = pgd_offset(current->active_mm, address);
31928+
31929 pgd_ref = pgd_offset_k(address);
31930 if (pgd_none(*pgd_ref))
31931 return -1;
31932
31933+#ifdef CONFIG_PAX_PER_CPU_PGD
31934+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31935+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31936+ if (pgd_none(*pgd)) {
31937+ set_pgd(pgd, *pgd_ref);
31938+ arch_flush_lazy_mmu_mode();
31939+ } else {
31940+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31941+ }
31942+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31943+#else
31944+ pgd = pgd_offset(current->active_mm, address);
31945+#endif
31946+
31947 if (pgd_none(*pgd)) {
31948 set_pgd(pgd, *pgd_ref);
31949 arch_flush_lazy_mmu_mode();
31950@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31951 static int is_errata100(struct pt_regs *regs, unsigned long address)
31952 {
31953 #ifdef CONFIG_X86_64
31954- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31955+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31956 return 1;
31957 #endif
31958 return 0;
31959@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31960 }
31961
31962 static const char nx_warning[] = KERN_CRIT
31963-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31964+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31965 static const char smep_warning[] = KERN_CRIT
31966-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31967+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31968
31969 static void
31970 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31971@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31972 if (!oops_may_print())
31973 return;
31974
31975- if (error_code & PF_INSTR) {
31976+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31977 unsigned int level;
31978 pgd_t *pgd;
31979 pte_t *pte;
31980@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31981 pte = lookup_address_in_pgd(pgd, address, &level);
31982
31983 if (pte && pte_present(*pte) && !pte_exec(*pte))
31984- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31985+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31986 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31987 (pgd_flags(*pgd) & _PAGE_USER) &&
31988 (__read_cr4() & X86_CR4_SMEP))
31989- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31990+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31991 }
31992
31993+#ifdef CONFIG_PAX_KERNEXEC
31994+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31995+ if (current->signal->curr_ip)
31996+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31997+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31998+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31999+ else
32000+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32001+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32002+ }
32003+#endif
32004+
32005 printk(KERN_ALERT "BUG: unable to handle kernel ");
32006 if (address < PAGE_SIZE)
32007 printk(KERN_CONT "NULL pointer dereference");
32008@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32009 return;
32010 }
32011 #endif
32012+
32013+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32014+ if (pax_is_fetch_fault(regs, error_code, address)) {
32015+
32016+#ifdef CONFIG_PAX_EMUTRAMP
32017+ switch (pax_handle_fetch_fault(regs)) {
32018+ case 2:
32019+ return;
32020+ }
32021+#endif
32022+
32023+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32024+ do_group_exit(SIGKILL);
32025+ }
32026+#endif
32027+
32028 /* Kernel addresses are always protection faults: */
32029 if (address >= TASK_SIZE)
32030 error_code |= PF_PROT;
32031@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32032 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32033 printk(KERN_ERR
32034 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32035- tsk->comm, tsk->pid, address);
32036+ tsk->comm, task_pid_nr(tsk), address);
32037 code = BUS_MCEERR_AR;
32038 }
32039 #endif
32040@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32041 return 1;
32042 }
32043
32044+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32045+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32046+{
32047+ pte_t *pte;
32048+ pmd_t *pmd;
32049+ spinlock_t *ptl;
32050+ unsigned char pte_mask;
32051+
32052+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32053+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32054+ return 0;
32055+
32056+ /* PaX: it's our fault, let's handle it if we can */
32057+
32058+ /* PaX: take a look at read faults before acquiring any locks */
32059+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32060+ /* instruction fetch attempt from a protected page in user mode */
32061+ up_read(&mm->mmap_sem);
32062+
32063+#ifdef CONFIG_PAX_EMUTRAMP
32064+ switch (pax_handle_fetch_fault(regs)) {
32065+ case 2:
32066+ return 1;
32067+ }
32068+#endif
32069+
32070+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32071+ do_group_exit(SIGKILL);
32072+ }
32073+
32074+ pmd = pax_get_pmd(mm, address);
32075+ if (unlikely(!pmd))
32076+ return 0;
32077+
32078+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32079+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32080+ pte_unmap_unlock(pte, ptl);
32081+ return 0;
32082+ }
32083+
32084+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32085+ /* write attempt to a protected page in user mode */
32086+ pte_unmap_unlock(pte, ptl);
32087+ return 0;
32088+ }
32089+
32090+#ifdef CONFIG_SMP
32091+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32092+#else
32093+ if (likely(address > get_limit(regs->cs)))
32094+#endif
32095+ {
32096+ set_pte(pte, pte_mkread(*pte));
32097+ __flush_tlb_one(address);
32098+ pte_unmap_unlock(pte, ptl);
32099+ up_read(&mm->mmap_sem);
32100+ return 1;
32101+ }
32102+
32103+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32104+
32105+ /*
32106+ * PaX: fill DTLB with user rights and retry
32107+ */
32108+ __asm__ __volatile__ (
32109+ "orb %2,(%1)\n"
32110+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32111+/*
32112+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32113+ * (and AMD's) TLBs: they do not cache PTEs that would raise *any* page
32114+ * fault when examined during a TLB load attempt. This is true not only for
32115+ * PTEs holding a non-present entry but also for present entries that will
32116+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32117+ * mechanism). In effect it means that we do *not* need to flush the TLBs
32118+ * for our target pages since their PTEs are simply not in the TLBs at all.
32119+ *
32120+ * The best part of omitting the flush is that we gain around 15-20% speed
32121+ * in the fast path of the page fault handler and can get rid of tracing
32122+ * since we can no longer flush unintended entries.
32123+ */
32124+ "invlpg (%0)\n"
32125+#endif
32126+ __copyuser_seg"testb $0,(%0)\n"
32127+ "xorb %3,(%1)\n"
32128+ :
32129+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32130+ : "memory", "cc");
32131+ pte_unmap_unlock(pte, ptl);
32132+ up_read(&mm->mmap_sem);
32133+ return 1;
32134+}
32135+#endif
32136+
32137 /*
32138 * Handle a spurious fault caused by a stale TLB entry.
32139 *
32140@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32141 static inline int
32142 access_error(unsigned long error_code, struct vm_area_struct *vma)
32143 {
32144+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32145+ return 1;
32146+
32147 if (error_code & PF_WRITE) {
32148 /* write, present and write, not present: */
32149 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32150@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32151 if (error_code & PF_USER)
32152 return false;
32153
32154- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32155+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32156 return false;
32157
32158 return true;
32159@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32160 tsk = current;
32161 mm = tsk->mm;
32162
32163+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32164+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32165+ if (!search_exception_tables(regs->ip)) {
32166+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32167+ bad_area_nosemaphore(regs, error_code, address);
32168+ return;
32169+ }
32170+ if (address < pax_user_shadow_base) {
32171+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32172+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32173+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32174+ } else
32175+ address -= pax_user_shadow_base;
32176+ }
32177+#endif
32178+
32179 /*
32180 * Detect and handle instructions that would cause a page fault for
32181 * both a tracked kernel page and a userspace page.
32182@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32183 * User-mode registers count as a user access even for any
32184 * potential system fault or CPU buglet:
32185 */
32186- if (user_mode_vm(regs)) {
32187+ if (user_mode(regs)) {
32188 local_irq_enable();
32189 error_code |= PF_USER;
32190 flags |= FAULT_FLAG_USER;
32191@@ -1187,6 +1411,11 @@ retry:
32192 might_sleep();
32193 }
32194
32195+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32196+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32197+ return;
32198+#endif
32199+
32200 vma = find_vma(mm, address);
32201 if (unlikely(!vma)) {
32202 bad_area(regs, error_code, address);
32203@@ -1198,18 +1427,24 @@ retry:
32204 bad_area(regs, error_code, address);
32205 return;
32206 }
32207- if (error_code & PF_USER) {
32208- /*
32209- * Accessing the stack below %sp is always a bug.
32210- * The large cushion allows instructions like enter
32211- * and pusha to work. ("enter $65535, $31" pushes
32212- * 32 pointers and then decrements %sp by 65535.)
32213- */
32214- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32215- bad_area(regs, error_code, address);
32216- return;
32217- }
32218+ /*
32219+ * Accessing the stack below %sp is always a bug.
32220+ * The large cushion allows instructions like enter
32221+ * and pusha to work. ("enter $65535, $31" pushes
32222+ * 32 pointers and then decrements %sp by 65535.)
32223+ */
32224+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32225+ bad_area(regs, error_code, address);
32226+ return;
32227 }
32228+
32229+#ifdef CONFIG_PAX_SEGMEXEC
32230+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32231+ bad_area(regs, error_code, address);
32232+ return;
32233+ }
32234+#endif
32235+
32236 if (unlikely(expand_stack(vma, address))) {
32237 bad_area(regs, error_code, address);
32238 return;
32239@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32240 }
32241 NOKPROBE_SYMBOL(trace_do_page_fault);
32242 #endif /* CONFIG_TRACING */
32243+
32244+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32245+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32246+{
32247+ struct mm_struct *mm = current->mm;
32248+ unsigned long ip = regs->ip;
32249+
32250+ if (v8086_mode(regs))
32251+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32252+
32253+#ifdef CONFIG_PAX_PAGEEXEC
32254+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32255+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32256+ return true;
32257+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32258+ return true;
32259+ return false;
32260+ }
32261+#endif
32262+
32263+#ifdef CONFIG_PAX_SEGMEXEC
32264+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32265+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32266+ return true;
32267+ return false;
32268+ }
32269+#endif
32270+
32271+ return false;
32272+}
32273+#endif
32274+
32275+#ifdef CONFIG_PAX_EMUTRAMP
32276+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32277+{
32278+ int err;
32279+
32280+ do { /* PaX: libffi trampoline emulation */
32281+ unsigned char mov, jmp;
32282+ unsigned int addr1, addr2;
32283+
32284+#ifdef CONFIG_X86_64
32285+ if ((regs->ip + 9) >> 32)
32286+ break;
32287+#endif
32288+
32289+ err = get_user(mov, (unsigned char __user *)regs->ip);
32290+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32291+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32292+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32293+
32294+ if (err)
32295+ break;
32296+
32297+ if (mov == 0xB8 && jmp == 0xE9) {
32298+ regs->ax = addr1;
32299+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32300+ return 2;
32301+ }
32302+ } while (0);
32303+
32304+ do { /* PaX: gcc trampoline emulation #1 */
32305+ unsigned char mov1, mov2;
32306+ unsigned short jmp;
32307+ unsigned int addr1, addr2;
32308+
32309+#ifdef CONFIG_X86_64
32310+ if ((regs->ip + 11) >> 32)
32311+ break;
32312+#endif
32313+
32314+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32315+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32316+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32317+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32318+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32319+
32320+ if (err)
32321+ break;
32322+
32323+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32324+ regs->cx = addr1;
32325+ regs->ax = addr2;
32326+ regs->ip = addr2;
32327+ return 2;
32328+ }
32329+ } while (0);
32330+
32331+ do { /* PaX: gcc trampoline emulation #2 */
32332+ unsigned char mov, jmp;
32333+ unsigned int addr1, addr2;
32334+
32335+#ifdef CONFIG_X86_64
32336+ if ((regs->ip + 9) >> 32)
32337+ break;
32338+#endif
32339+
32340+ err = get_user(mov, (unsigned char __user *)regs->ip);
32341+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32342+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32343+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32344+
32345+ if (err)
32346+ break;
32347+
32348+ if (mov == 0xB9 && jmp == 0xE9) {
32349+ regs->cx = addr1;
32350+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32351+ return 2;
32352+ }
32353+ } while (0);
32354+
32355+ return 1; /* PaX in action */
32356+}
32357+
32358+#ifdef CONFIG_X86_64
32359+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32360+{
32361+ int err;
32362+
32363+ do { /* PaX: libffi trampoline emulation */
32364+ unsigned short mov1, mov2, jmp1;
32365+ unsigned char stcclc, jmp2;
32366+ unsigned long addr1, addr2;
32367+
32368+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32369+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32370+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32371+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32372+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32373+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32374+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32375+
32376+ if (err)
32377+ break;
32378+
32379+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32380+ regs->r11 = addr1;
32381+ regs->r10 = addr2;
32382+ if (stcclc == 0xF8)
32383+ regs->flags &= ~X86_EFLAGS_CF;
32384+ else
32385+ regs->flags |= X86_EFLAGS_CF;
32386+ regs->ip = addr1;
32387+ return 2;
32388+ }
32389+ } while (0);
32390+
32391+ do { /* PaX: gcc trampoline emulation #1 */
32392+ unsigned short mov1, mov2, jmp1;
32393+ unsigned char jmp2;
32394+ unsigned int addr1;
32395+ unsigned long addr2;
32396+
32397+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32398+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32399+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32400+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32401+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32402+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32403+
32404+ if (err)
32405+ break;
32406+
32407+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32408+ regs->r11 = addr1;
32409+ regs->r10 = addr2;
32410+ regs->ip = addr1;
32411+ return 2;
32412+ }
32413+ } while (0);
32414+
32415+ do { /* PaX: gcc trampoline emulation #2 */
32416+ unsigned short mov1, mov2, jmp1;
32417+ unsigned char jmp2;
32418+ unsigned long addr1, addr2;
32419+
32420+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32421+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32422+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32423+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32424+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32425+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32426+
32427+ if (err)
32428+ break;
32429+
32430+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32431+ regs->r11 = addr1;
32432+ regs->r10 = addr2;
32433+ regs->ip = addr1;
32434+ return 2;
32435+ }
32436+ } while (0);
32437+
32438+ return 1; /* PaX in action */
32439+}
32440+#endif
32441+
32442+/*
32443+ * PaX: decide what to do with offenders (regs->ip = fault address)
32444+ *
32445+ * returns 1 when the task should be killed
32446+ * 2 when a gcc trampoline was detected
32447+ */
32448+static int pax_handle_fetch_fault(struct pt_regs *regs)
32449+{
32450+ if (v8086_mode(regs))
32451+ return 1;
32452+
32453+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32454+ return 1;
32455+
32456+#ifdef CONFIG_X86_32
32457+ return pax_handle_fetch_fault_32(regs);
32458+#else
32459+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32460+ return pax_handle_fetch_fault_32(regs);
32461+ else
32462+ return pax_handle_fetch_fault_64(regs);
32463+#endif
32464+}
32465+#endif
32466+
32467+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32468+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32469+{
32470+ long i;
32471+
32472+ printk(KERN_ERR "PAX: bytes at PC: ");
32473+ for (i = 0; i < 20; i++) {
32474+ unsigned char c;
32475+ if (get_user(c, (unsigned char __force_user *)pc+i))
32476+ printk(KERN_CONT "?? ");
32477+ else
32478+ printk(KERN_CONT "%02x ", c);
32479+ }
32480+ printk("\n");
32481+
32482+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32483+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32484+ unsigned long c;
32485+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32486+#ifdef CONFIG_X86_32
32487+ printk(KERN_CONT "???????? ");
32488+#else
32489+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32490+ printk(KERN_CONT "???????? ???????? ");
32491+ else
32492+ printk(KERN_CONT "???????????????? ");
32493+#endif
32494+ } else {
32495+#ifdef CONFIG_X86_64
32496+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32497+ printk(KERN_CONT "%08x ", (unsigned int)c);
32498+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32499+ } else
32500+#endif
32501+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32502+ }
32503+ }
32504+ printk("\n");
32505+}
32506+#endif
32507+
32508+/**
32509+ * probe_kernel_write(): safely attempt to write to a location
32510+ * @dst: address to write to
32511+ * @src: pointer to the data that shall be written
32512+ * @size: size of the data chunk
32513+ *
32514+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32515+ * happens, handle that and return -EFAULT.
32516+ */
32517+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32518+{
32519+ long ret;
32520+ mm_segment_t old_fs = get_fs();
32521+
32522+ set_fs(KERNEL_DS);
32523+ pagefault_disable();
32524+ pax_open_kernel();
32525+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32526+ pax_close_kernel();
32527+ pagefault_enable();
32528+ set_fs(old_fs);
32529+
32530+ return ret ? -EFAULT : 0;
32531+}
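
For reference, the byte patterns matched by the EMUTRAMP handlers above decode as plain x86 instruction encodings: 0xB8/0xB9 is "mov $imm32,%eax"/"mov $imm32,%ecx", 0xE9 is "jmp rel32" (target = ip + rel32 + 10, since the matched sequence is 10 bytes), and the little-endian short 0xE0FF is the byte pair FF E0, i.e. "jmp *%eax". In the 64-bit handler, 0xBB49/0xBA49 are 49 BB / 49 BA ("movabs $imm64,%r11"/"movabs $imm64,%r10"), 49 FF E3 is "jmp *%r11", and F8/F9 are clc/stc, which is how the libffi trampoline passes a flag. The following minimal user-space sketch (not part of the patch; the buffer contents are hypothetical) shows the same match as gcc trampoline emulation #2:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Match "mov $imm32,%ecx ; jmp rel32" the way the handler above does and
 * compute the register/ip updates it would apply. */
static int match_trampoline(const uint8_t *ip, uint32_t *cx, uint32_t *new_ip)
{
	uint32_t addr1, addr2;

	if (ip[0] != 0xB9 || ip[5] != 0xE9)
		return 0;
	memcpy(&addr1, ip + 1, 4);	/* imm32 destined for %ecx */
	memcpy(&addr2, ip + 6, 4);	/* rel32 operand of the jmp */
	*cx = addr1;
	*new_ip = (uint32_t)((uintptr_t)ip + addr2 + 10); /* ip + rel32 + sequence length */
	return 1;
}

int main(void)
{
	/* mov $0xdeadbeef,%ecx ; jmp .+0 (rel32 == 0) */
	uint8_t buf[10] = { 0xB9, 0xEF, 0xBE, 0xAD, 0xDE, 0xE9, 0, 0, 0, 0 };
	uint32_t cx, nip;

	if (match_trampoline(buf, &cx, &nip))
		printf("ecx=%#x new_ip=%#x\n", (unsigned)cx, (unsigned)nip);
	return 0;
}
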
32532diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32533index 81bf3d2..7ef25c2 100644
32534--- a/arch/x86/mm/gup.c
32535+++ b/arch/x86/mm/gup.c
32536@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32537 addr = start;
32538 len = (unsigned long) nr_pages << PAGE_SHIFT;
32539 end = start + len;
32540- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32541+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32542 (void __user *)start, len)))
32543 return 0;
32544
32545@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32546 goto slow_irqon;
32547 #endif
32548
32549+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32550+ (void __user *)start, len)))
32551+ return 0;
32552+
32553 /*
32554 * XXX: batch / limit 'nr', to avoid large irq off latency
32555 * needs some instrumenting to determine the common sizes used by
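
The access_ok_noprefault() checks added to the fast GUP paths above must, like access_ok(), reject ranges that wrap around the top of the user address space. A minimal sketch of the overflow-safe form of that range test, with a made-up limit standing in for the per-arch user boundary:

#include <stdbool.h>
#include <stdint.h>

#define TASK_LIMIT 0x00007ffffffff000ULL /* hypothetical x86-64 user limit */

/* Written so that addr + len cannot wrap: compare len against the
 * remaining room below the limit instead of computing addr + len. */
static bool range_ok(uint64_t addr, uint64_t len)
{
	return addr <= TASK_LIMIT && len <= TASK_LIMIT - addr;
}

int main(void)
{
	return range_ok(0x1000, 0x2000) ? 0 : 1;
}
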
32556diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32557index 4500142..53a363c 100644
32558--- a/arch/x86/mm/highmem_32.c
32559+++ b/arch/x86/mm/highmem_32.c
32560@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32561 idx = type + KM_TYPE_NR*smp_processor_id();
32562 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32563 BUG_ON(!pte_none(*(kmap_pte-idx)));
32564+
32565+ pax_open_kernel();
32566 set_pte(kmap_pte-idx, mk_pte(page, prot));
32567+ pax_close_kernel();
32568+
32569 arch_flush_lazy_mmu_mode();
32570
32571 return (void *)vaddr;
32572diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32573index 42982b2..7168fc3 100644
32574--- a/arch/x86/mm/hugetlbpage.c
32575+++ b/arch/x86/mm/hugetlbpage.c
32576@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32577 #ifdef CONFIG_HUGETLB_PAGE
32578 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32579 unsigned long addr, unsigned long len,
32580- unsigned long pgoff, unsigned long flags)
32581+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32582 {
32583 struct hstate *h = hstate_file(file);
32584 struct vm_unmapped_area_info info;
32585-
32586+
32587 info.flags = 0;
32588 info.length = len;
32589 info.low_limit = current->mm->mmap_legacy_base;
32590 info.high_limit = TASK_SIZE;
32591 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32592 info.align_offset = 0;
32593+ info.threadstack_offset = offset;
32594 return vm_unmapped_area(&info);
32595 }
32596
32597 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32598 unsigned long addr0, unsigned long len,
32599- unsigned long pgoff, unsigned long flags)
32600+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32601 {
32602 struct hstate *h = hstate_file(file);
32603 struct vm_unmapped_area_info info;
32604@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32605 info.high_limit = current->mm->mmap_base;
32606 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32607 info.align_offset = 0;
32608+ info.threadstack_offset = offset;
32609 addr = vm_unmapped_area(&info);
32610
32611 /*
32612@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32613 VM_BUG_ON(addr != -ENOMEM);
32614 info.flags = 0;
32615 info.low_limit = TASK_UNMAPPED_BASE;
32616+
32617+#ifdef CONFIG_PAX_RANDMMAP
32618+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32619+ info.low_limit += current->mm->delta_mmap;
32620+#endif
32621+
32622 info.high_limit = TASK_SIZE;
32623 addr = vm_unmapped_area(&info);
32624 }
32625@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32626 struct hstate *h = hstate_file(file);
32627 struct mm_struct *mm = current->mm;
32628 struct vm_area_struct *vma;
32629+ unsigned long pax_task_size = TASK_SIZE;
32630+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32631
32632 if (len & ~huge_page_mask(h))
32633 return -EINVAL;
32634- if (len > TASK_SIZE)
32635+
32636+#ifdef CONFIG_PAX_SEGMEXEC
32637+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32638+ pax_task_size = SEGMEXEC_TASK_SIZE;
32639+#endif
32640+
32641+ pax_task_size -= PAGE_SIZE;
32642+
32643+ if (len > pax_task_size)
32644 return -ENOMEM;
32645
32646 if (flags & MAP_FIXED) {
32647@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32648 return addr;
32649 }
32650
32651+#ifdef CONFIG_PAX_RANDMMAP
32652+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32653+#endif
32654+
32655 if (addr) {
32656 addr = ALIGN(addr, huge_page_size(h));
32657 vma = find_vma(mm, addr);
32658- if (TASK_SIZE - len >= addr &&
32659- (!vma || addr + len <= vma->vm_start))
32660+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32661 return addr;
32662 }
32663 if (mm->get_unmapped_area == arch_get_unmapped_area)
32664 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32665- pgoff, flags);
32666+ pgoff, flags, offset);
32667 else
32668 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32669- pgoff, flags);
32670+ pgoff, flags, offset);
32671 }
32672 #endif /* CONFIG_HUGETLB_PAGE */
32673
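
A worked example of the align_mask arithmetic used in the hugetlb hunks above: PAGE_MASK & ~huge_page_mask(h) sets exactly the bits between the base-page and huge-page boundaries, and vm_unmapped_area() chooses an address with (addr & align_mask) == (align_offset & align_mask). The constants below are illustrative (4 KiB base pages, 2 MiB huge pages), not taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  0x1000UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define HPAGE_SIZE 0x200000UL
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long align_mask = PAGE_MASK & ~HPAGE_MASK;	/* 0x1ff000 */
	unsigned long addr = 0x7f1234567000UL;
	/* round up until the masked bits are clear, i.e. 2 MiB aligned */
	unsigned long aligned = (addr + ~HPAGE_MASK) & HPAGE_MASK;

	printf("align_mask=%#lx aligned=%#lx\n", align_mask, aligned);
	return 0;
}
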
32674diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32675index a110efc..a31a18f 100644
32676--- a/arch/x86/mm/init.c
32677+++ b/arch/x86/mm/init.c
32678@@ -4,6 +4,7 @@
32679 #include <linux/swap.h>
32680 #include <linux/memblock.h>
32681 #include <linux/bootmem.h> /* for max_low_pfn */
32682+#include <linux/tboot.h>
32683
32684 #include <asm/cacheflush.h>
32685 #include <asm/e820.h>
32686@@ -17,6 +18,8 @@
32687 #include <asm/proto.h>
32688 #include <asm/dma.h> /* for MAX_DMA_PFN */
32689 #include <asm/microcode.h>
32690+#include <asm/desc.h>
32691+#include <asm/bios_ebda.h>
32692
32693 /*
32694 * We need to define the tracepoints somewhere, and tlb.c
32695@@ -620,7 +623,18 @@ void __init init_mem_mapping(void)
32696 early_ioremap_page_table_range_init();
32697 #endif
32698
32699+#ifdef CONFIG_PAX_PER_CPU_PGD
32700+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32701+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32702+ KERNEL_PGD_PTRS);
32703+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32704+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32705+ KERNEL_PGD_PTRS);
32706+ load_cr3(get_cpu_pgd(0, kernel));
32707+#else
32708 load_cr3(swapper_pg_dir);
32709+#endif
32710+
32711 __flush_tlb_all();
32712
32713 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32714@@ -636,10 +650,40 @@ void __init init_mem_mapping(void)
32715 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32716 * mmio resources as well as potential bios/acpi data regions.
32717 */
32718+
32719+#ifdef CONFIG_GRKERNSEC_KMEM
32720+static unsigned int ebda_start __read_only;
32721+static unsigned int ebda_end __read_only;
32722+#endif
32723+
32724 int devmem_is_allowed(unsigned long pagenr)
32725 {
32726- if (pagenr < 256)
32727+#ifdef CONFIG_GRKERNSEC_KMEM
32728+ /* allow BDA */
32729+ if (!pagenr)
32730 return 1;
32731+ /* allow EBDA */
32732+ if (pagenr >= ebda_start && pagenr < ebda_end)
32733+ return 1;
32734+ /* if tboot is in use, allow access to its hardcoded serial log range */
32735+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32736+ return 1;
32737+#else
32738+ if (!pagenr)
32739+ return 1;
32740+#ifdef CONFIG_VM86
32741+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32742+ return 1;
32743+#endif
32744+#endif
32745+
32746+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32747+ return 1;
32748+#ifdef CONFIG_GRKERNSEC_KMEM
32749+ /* throw out everything else below 1MB */
32750+ if (pagenr <= 256)
32751+ return 0;
32752+#endif
32753 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32754 return 0;
32755 if (!page_is_ram(pagenr))
32756@@ -685,8 +729,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32757 #endif
32758 }
32759
32760+#ifdef CONFIG_GRKERNSEC_KMEM
32761+static inline void gr_init_ebda(void)
32762+{
32763+ unsigned int ebda_addr;
32764+ unsigned int ebda_size = 0;
32765+
32766+ ebda_addr = get_bios_ebda();
32767+ if (ebda_addr) {
32768+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32769+ ebda_size <<= 10;
32770+ }
32771+ if (ebda_addr && ebda_size) {
32772+ ebda_start = ebda_addr >> PAGE_SHIFT;
32773+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32774+ } else {
32775+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32776+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32777+ }
32778+}
32779+#else
32780+static inline void gr_init_ebda(void) { }
32781+#endif
32782+
32783 void free_initmem(void)
32784 {
32785+#ifdef CONFIG_PAX_KERNEXEC
32786+#ifdef CONFIG_X86_32
32787+ /* PaX: limit KERNEL_CS to actual size */
32788+ unsigned long addr, limit;
32789+ struct desc_struct d;
32790+ int cpu;
32791+#else
32792+ pgd_t *pgd;
32793+ pud_t *pud;
32794+ pmd_t *pmd;
32795+ unsigned long addr, end;
32796+#endif
32797+#endif
32798+
32799+ gr_init_ebda();
32800+
32801+#ifdef CONFIG_PAX_KERNEXEC
32802+#ifdef CONFIG_X86_32
32803+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32804+ limit = (limit - 1UL) >> PAGE_SHIFT;
32805+
32806+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32807+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32808+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32809+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32810+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32811+ }
32812+
32813+ /* PaX: make KERNEL_CS read-only */
32814+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32815+ if (!paravirt_enabled())
32816+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32817+/*
32818+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32819+ pgd = pgd_offset_k(addr);
32820+ pud = pud_offset(pgd, addr);
32821+ pmd = pmd_offset(pud, addr);
32822+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32823+ }
32824+*/
32825+#ifdef CONFIG_X86_PAE
32826+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32827+/*
32828+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32829+ pgd = pgd_offset_k(addr);
32830+ pud = pud_offset(pgd, addr);
32831+ pmd = pmd_offset(pud, addr);
32832+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32833+ }
32834+*/
32835+#endif
32836+
32837+#ifdef CONFIG_MODULES
32838+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32839+#endif
32840+
32841+#else
32842+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32843+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32844+ pgd = pgd_offset_k(addr);
32845+ pud = pud_offset(pgd, addr);
32846+ pmd = pmd_offset(pud, addr);
32847+ if (!pmd_present(*pmd))
32848+ continue;
32849+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32850+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32851+ else
32852+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32853+ }
32854+
32855+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32856+ end = addr + KERNEL_IMAGE_SIZE;
32857+ for (; addr < end; addr += PMD_SIZE) {
32858+ pgd = pgd_offset_k(addr);
32859+ pud = pud_offset(pgd, addr);
32860+ pmd = pmd_offset(pud, addr);
32861+ if (!pmd_present(*pmd))
32862+ continue;
32863+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32864+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32865+ }
32866+#endif
32867+
32868+ flush_tlb_all();
32869+#endif
32870+
32871 free_init_pages("unused kernel",
32872 (unsigned long)(&__init_begin),
32873 (unsigned long)(&__init_end));
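
The GRKERNSEC_KMEM policy in devmem_is_allowed() above whitelists page 0 (the BDA), the detected EBDA range, and the ISA hole, and refuses everything else below 1 MiB before falling through to the usual iomem/RAM checks. A stand-alone sketch of just that low-memory policy; the EBDA bounds here are the fallback values from the patch, whereas the kernel normally derives them from the BIOS:

#include <stdbool.h>

#define PAGE_SHIFT 12
#define ISA_START  (0xa0000 >> PAGE_SHIFT)
#define ISA_END    (0x100000 >> PAGE_SHIFT)

static unsigned ebda_start = 0x9f000 >> PAGE_SHIFT;
static unsigned ebda_end   = 0xa0000 >> PAGE_SHIFT;

static bool devmem_page_allowed(unsigned long pagenr)
{
	if (!pagenr)					/* BDA */
		return true;
	if (pagenr >= ebda_start && pagenr < ebda_end)	/* EBDA */
		return true;
	if (pagenr >= ISA_START && pagenr < ISA_END)	/* ISA hole */
		return true;
	return pagenr > 256;	/* everything else below 1 MiB is refused */
}

int main(void)
{
	/* BDA readable, an arbitrary low RAM page refused */
	return devmem_page_allowed(0) && !devmem_page_allowed(2) ? 0 : 1;
}
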
32874diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32875index c8140e1..59257fc 100644
32876--- a/arch/x86/mm/init_32.c
32877+++ b/arch/x86/mm/init_32.c
32878@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32879 bool __read_mostly __vmalloc_start_set = false;
32880
32881 /*
32882- * Creates a middle page table and puts a pointer to it in the
32883- * given global directory entry. This only returns the gd entry
32884- * in non-PAE compilation mode, since the middle layer is folded.
32885- */
32886-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32887-{
32888- pud_t *pud;
32889- pmd_t *pmd_table;
32890-
32891-#ifdef CONFIG_X86_PAE
32892- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32893- pmd_table = (pmd_t *)alloc_low_page();
32894- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32895- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32896- pud = pud_offset(pgd, 0);
32897- BUG_ON(pmd_table != pmd_offset(pud, 0));
32898-
32899- return pmd_table;
32900- }
32901-#endif
32902- pud = pud_offset(pgd, 0);
32903- pmd_table = pmd_offset(pud, 0);
32904-
32905- return pmd_table;
32906-}
32907-
32908-/*
32909 * Create a page table and place a pointer to it in a middle page
32910 * directory entry:
32911 */
32912@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32913 pte_t *page_table = (pte_t *)alloc_low_page();
32914
32915 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32916+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32917+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32918+#else
32919 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32920+#endif
32921 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32922 }
32923
32924 return pte_offset_kernel(pmd, 0);
32925 }
32926
32927+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32928+{
32929+ pud_t *pud;
32930+ pmd_t *pmd_table;
32931+
32932+ pud = pud_offset(pgd, 0);
32933+ pmd_table = pmd_offset(pud, 0);
32934+
32935+ return pmd_table;
32936+}
32937+
32938 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32939 {
32940 int pgd_idx = pgd_index(vaddr);
32941@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32942 int pgd_idx, pmd_idx;
32943 unsigned long vaddr;
32944 pgd_t *pgd;
32945+ pud_t *pud;
32946 pmd_t *pmd;
32947 pte_t *pte = NULL;
32948 unsigned long count = page_table_range_init_count(start, end);
32949@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32950 pgd = pgd_base + pgd_idx;
32951
32952 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32953- pmd = one_md_table_init(pgd);
32954- pmd = pmd + pmd_index(vaddr);
32955+ pud = pud_offset(pgd, vaddr);
32956+ pmd = pmd_offset(pud, vaddr);
32957+
32958+#ifdef CONFIG_X86_PAE
32959+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32960+#endif
32961+
32962 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32963 pmd++, pmd_idx++) {
32964 pte = page_table_kmap_check(one_page_table_init(pmd),
32965@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32966 }
32967 }
32968
32969-static inline int is_kernel_text(unsigned long addr)
32970+static inline int is_kernel_text(unsigned long start, unsigned long end)
32971 {
32972- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32973- return 1;
32974- return 0;
32975+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32976+ end <= ktla_ktva((unsigned long)_stext)) &&
32977+ (start >= ktla_ktva((unsigned long)_einittext) ||
32978+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32979+
32980+#ifdef CONFIG_ACPI_SLEEP
32981+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32982+#endif
32983+
32984+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32985+ return 0;
32986+ return 1;
32987 }
32988
32989 /*
32990@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32991 unsigned long last_map_addr = end;
32992 unsigned long start_pfn, end_pfn;
32993 pgd_t *pgd_base = swapper_pg_dir;
32994- int pgd_idx, pmd_idx, pte_ofs;
32995+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32996 unsigned long pfn;
32997 pgd_t *pgd;
32998+ pud_t *pud;
32999 pmd_t *pmd;
33000 pte_t *pte;
33001 unsigned pages_2m, pages_4k;
33002@@ -291,8 +295,13 @@ repeat:
33003 pfn = start_pfn;
33004 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33005 pgd = pgd_base + pgd_idx;
33006- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33007- pmd = one_md_table_init(pgd);
33008+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33009+ pud = pud_offset(pgd, 0);
33010+ pmd = pmd_offset(pud, 0);
33011+
33012+#ifdef CONFIG_X86_PAE
33013+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33014+#endif
33015
33016 if (pfn >= end_pfn)
33017 continue;
33018@@ -304,14 +313,13 @@ repeat:
33019 #endif
33020 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33021 pmd++, pmd_idx++) {
33022- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33023+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33024
33025 /*
33026 * Map with big pages if possible, otherwise
33027 * create normal page tables:
33028 */
33029 if (use_pse) {
33030- unsigned int addr2;
33031 pgprot_t prot = PAGE_KERNEL_LARGE;
33032 /*
33033 * first pass will use the same initial
33034@@ -322,11 +330,7 @@ repeat:
33035 _PAGE_PSE);
33036
33037 pfn &= PMD_MASK >> PAGE_SHIFT;
33038- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33039- PAGE_OFFSET + PAGE_SIZE-1;
33040-
33041- if (is_kernel_text(addr) ||
33042- is_kernel_text(addr2))
33043+ if (is_kernel_text(address, address + PMD_SIZE))
33044 prot = PAGE_KERNEL_LARGE_EXEC;
33045
33046 pages_2m++;
33047@@ -343,7 +347,7 @@ repeat:
33048 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33049 pte += pte_ofs;
33050 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33051- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33052+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33053 pgprot_t prot = PAGE_KERNEL;
33054 /*
33055 * first pass will use the same initial
33056@@ -351,7 +355,7 @@ repeat:
33057 */
33058 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33059
33060- if (is_kernel_text(addr))
33061+ if (is_kernel_text(address, address + PAGE_SIZE))
33062 prot = PAGE_KERNEL_EXEC;
33063
33064 pages_4k++;
33065@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33066
33067 pud = pud_offset(pgd, va);
33068 pmd = pmd_offset(pud, va);
33069- if (!pmd_present(*pmd))
33070+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33071 break;
33072
33073 /* should not be large page here */
33074@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33075
33076 static void __init pagetable_init(void)
33077 {
33078- pgd_t *pgd_base = swapper_pg_dir;
33079-
33080- permanent_kmaps_init(pgd_base);
33081+ permanent_kmaps_init(swapper_pg_dir);
33082 }
33083
33084-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33085+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33086 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33087
33088 /* user-defined highmem size */
33089@@ -787,10 +789,10 @@ void __init mem_init(void)
33090 ((unsigned long)&__init_end -
33091 (unsigned long)&__init_begin) >> 10,
33092
33093- (unsigned long)&_etext, (unsigned long)&_edata,
33094- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33095+ (unsigned long)&_sdata, (unsigned long)&_edata,
33096+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33097
33098- (unsigned long)&_text, (unsigned long)&_etext,
33099+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33100 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33101
33102 /*
33103@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33104 if (!kernel_set_to_readonly)
33105 return;
33106
33107+ start = ktla_ktva(start);
33108 pr_debug("Set kernel text: %lx - %lx for read write\n",
33109 start, start+size);
33110
33111@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33112 if (!kernel_set_to_readonly)
33113 return;
33114
33115+ start = ktla_ktva(start);
33116 pr_debug("Set kernel text: %lx - %lx for read only\n",
33117 start, start+size);
33118
33119@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33120 unsigned long start = PFN_ALIGN(_text);
33121 unsigned long size = PFN_ALIGN(_etext) - start;
33122
33123+ start = ktla_ktva(start);
33124 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33125 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33126 size >> 10);
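
The rewritten is_kernel_text(start, end) above is a chain of interval-disjointness tests: each clause of the form (start >= hi || end <= lo) says "[start, end) misses this protected range", so the function returns 1 as soon as any range overlaps. A generic sketch of the same predicate, with a hypothetical text range for illustration:

#include <stdbool.h>

struct range { unsigned long lo, hi; };	/* [lo, hi) */

/* negation of (start >= r->hi || end <= r->lo) from the patch */
static bool overlaps(unsigned long start, unsigned long end,
		     const struct range *r)
{
	return start < r->hi && end > r->lo;
}

static bool is_text(unsigned long start, unsigned long end,
		    const struct range *protected_ranges, int n)
{
	for (int i = 0; i < n; i++)
		if (overlaps(start, end, &protected_ranges[i]))
			return true;
	return false;
}

int main(void)
{
	struct range text = { 0xc1000000, 0xc1400000 };	/* illustrative */
	return is_text(0xc13ff000, 0xc1400000, &text, 1) ? 0 : 1;
}
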
33127diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33128index 30eb05a..ae671ac 100644
33129--- a/arch/x86/mm/init_64.c
33130+++ b/arch/x86/mm/init_64.c
33131@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33132 * around without checking the pgd every time.
33133 */
33134
33135-pteval_t __supported_pte_mask __read_mostly = ~0;
33136+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33137 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33138
33139 int force_personality32;
33140@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33141
33142 for (address = start; address <= end; address += PGDIR_SIZE) {
33143 const pgd_t *pgd_ref = pgd_offset_k(address);
33144+
33145+#ifdef CONFIG_PAX_PER_CPU_PGD
33146+ unsigned long cpu;
33147+#else
33148 struct page *page;
33149+#endif
33150
33151 /*
33152 * When it is called after memory hot remove, pgd_none()
33153@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33154 continue;
33155
33156 spin_lock(&pgd_lock);
33157+
33158+#ifdef CONFIG_PAX_PER_CPU_PGD
33159+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33160+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33161+
33162+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33163+ BUG_ON(pgd_page_vaddr(*pgd)
33164+ != pgd_page_vaddr(*pgd_ref));
33165+
33166+ if (removed) {
33167+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33168+ pgd_clear(pgd);
33169+ } else {
33170+ if (pgd_none(*pgd))
33171+ set_pgd(pgd, *pgd_ref);
33172+ }
33173+
33174+ pgd = pgd_offset_cpu(cpu, kernel, address);
33175+#else
33176 list_for_each_entry(page, &pgd_list, lru) {
33177 pgd_t *pgd;
33178 spinlock_t *pgt_lock;
33179@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33180 /* the pgt_lock only for Xen */
33181 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33182 spin_lock(pgt_lock);
33183+#endif
33184
33185 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33186 BUG_ON(pgd_page_vaddr(*pgd)
33187@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33188 set_pgd(pgd, *pgd_ref);
33189 }
33190
33191+#ifndef CONFIG_PAX_PER_CPU_PGD
33192 spin_unlock(pgt_lock);
33193+#endif
33194+
33195 }
33196 spin_unlock(&pgd_lock);
33197 }
33198@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33199 {
33200 if (pgd_none(*pgd)) {
33201 pud_t *pud = (pud_t *)spp_getpage();
33202- pgd_populate(&init_mm, pgd, pud);
33203+ pgd_populate_kernel(&init_mm, pgd, pud);
33204 if (pud != pud_offset(pgd, 0))
33205 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33206 pud, pud_offset(pgd, 0));
33207@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33208 {
33209 if (pud_none(*pud)) {
33210 pmd_t *pmd = (pmd_t *) spp_getpage();
33211- pud_populate(&init_mm, pud, pmd);
33212+ pud_populate_kernel(&init_mm, pud, pmd);
33213 if (pmd != pmd_offset(pud, 0))
33214 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33215 pmd, pmd_offset(pud, 0));
33216@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33217 pmd = fill_pmd(pud, vaddr);
33218 pte = fill_pte(pmd, vaddr);
33219
33220+ pax_open_kernel();
33221 set_pte(pte, new_pte);
33222+ pax_close_kernel();
33223
33224 /*
33225 * It's enough to flush this one mapping.
33226@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33227 pgd = pgd_offset_k((unsigned long)__va(phys));
33228 if (pgd_none(*pgd)) {
33229 pud = (pud_t *) spp_getpage();
33230- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33231- _PAGE_USER));
33232+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33233 }
33234 pud = pud_offset(pgd, (unsigned long)__va(phys));
33235 if (pud_none(*pud)) {
33236 pmd = (pmd_t *) spp_getpage();
33237- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33238- _PAGE_USER));
33239+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33240 }
33241 pmd = pmd_offset(pud, phys);
33242 BUG_ON(!pmd_none(*pmd));
33243@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33244 prot);
33245
33246 spin_lock(&init_mm.page_table_lock);
33247- pud_populate(&init_mm, pud, pmd);
33248+ pud_populate_kernel(&init_mm, pud, pmd);
33249 spin_unlock(&init_mm.page_table_lock);
33250 }
33251 __flush_tlb_all();
33252@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33253 page_size_mask);
33254
33255 spin_lock(&init_mm.page_table_lock);
33256- pgd_populate(&init_mm, pgd, pud);
33257+ pgd_populate_kernel(&init_mm, pgd, pud);
33258 spin_unlock(&init_mm.page_table_lock);
33259 pgd_changed = true;
33260 }
33261diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33262index 9ca35fc..4b2b7b7 100644
33263--- a/arch/x86/mm/iomap_32.c
33264+++ b/arch/x86/mm/iomap_32.c
33265@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33266 type = kmap_atomic_idx_push();
33267 idx = type + KM_TYPE_NR * smp_processor_id();
33268 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33269+
33270+ pax_open_kernel();
33271 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33272+ pax_close_kernel();
33273+
33274 arch_flush_lazy_mmu_mode();
33275
33276 return (void *)vaddr;
33277diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33278index fdf617c..b9e85bc 100644
33279--- a/arch/x86/mm/ioremap.c
33280+++ b/arch/x86/mm/ioremap.c
33281@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33282 unsigned long i;
33283
33284 for (i = 0; i < nr_pages; ++i)
33285- if (pfn_valid(start_pfn + i) &&
33286- !PageReserved(pfn_to_page(start_pfn + i)))
33287+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33288+ !PageReserved(pfn_to_page(start_pfn + i))))
33289 return 1;
33290
33291 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33292@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33293 *
33294 * Caller must ensure there is only one unmapping for the same pointer.
33295 */
33296-void iounmap(volatile void __iomem *addr)
33297+void iounmap(const volatile void __iomem *addr)
33298 {
33299 struct vm_struct *p, *o;
33300
33301@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33302 */
33303 void *xlate_dev_mem_ptr(phys_addr_t phys)
33304 {
33305- void *addr;
33306- unsigned long start = phys & PAGE_MASK;
33307-
33308 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33309- if (page_is_ram(start >> PAGE_SHIFT))
33310+ if (page_is_ram(phys >> PAGE_SHIFT))
33311+#ifdef CONFIG_HIGHMEM
33312+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33313+#endif
33314 return __va(phys);
33315
33316- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33317- if (addr)
33318- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33319-
33320- return addr;
33321+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33322 }
33323
33324 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33325 {
33326 if (page_is_ram(phys >> PAGE_SHIFT))
33327+#ifdef CONFIG_HIGHMEM
33328+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33329+#endif
33330 return;
33331
33332 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33333 return;
33334 }
33335
33336-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33337+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33338
33339 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33340 {
33341@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33342 early_ioremap_setup();
33343
33344 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33345- memset(bm_pte, 0, sizeof(bm_pte));
33346- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33347+ pmd_populate_user(&init_mm, pmd, bm_pte);
33348
33349 /*
33350 * The boot-ioremap range spans multiple pmds, for which
33351diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33352index b4f2e7e..96c9c3e 100644
33353--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33354+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33355@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33356 * memory (e.g. tracked pages)? For now, we need this to avoid
33357 * invoking kmemcheck for PnP BIOS calls.
33358 */
33359- if (regs->flags & X86_VM_MASK)
33360+ if (v8086_mode(regs))
33361 return false;
33362- if (regs->cs != __KERNEL_CS)
33363+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33364 return false;
33365
33366 pte = kmemcheck_pte_lookup(address);
33367diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33368index df4552b..12c129c 100644
33369--- a/arch/x86/mm/mmap.c
33370+++ b/arch/x86/mm/mmap.c
33371@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33372 * Leave an at least ~128 MB hole with possible stack randomization.
33373 */
33374 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33375-#define MAX_GAP (TASK_SIZE/6*5)
33376+#define MAX_GAP (pax_task_size/6*5)
33377
33378 static int mmap_is_legacy(void)
33379 {
33380@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33381 return rnd << PAGE_SHIFT;
33382 }
33383
33384-static unsigned long mmap_base(void)
33385+static unsigned long mmap_base(struct mm_struct *mm)
33386 {
33387 unsigned long gap = rlimit(RLIMIT_STACK);
33388+ unsigned long pax_task_size = TASK_SIZE;
33389+
33390+#ifdef CONFIG_PAX_SEGMEXEC
33391+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33392+ pax_task_size = SEGMEXEC_TASK_SIZE;
33393+#endif
33394
33395 if (gap < MIN_GAP)
33396 gap = MIN_GAP;
33397 else if (gap > MAX_GAP)
33398 gap = MAX_GAP;
33399
33400- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33401+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33402 }
33403
33404 /*
33405 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33406 * does, but not when emulating X86_32
33407 */
33408-static unsigned long mmap_legacy_base(void)
33409+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33410 {
33411- if (mmap_is_ia32())
33412+ if (mmap_is_ia32()) {
33413+
33414+#ifdef CONFIG_PAX_SEGMEXEC
33415+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33416+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33417+ else
33418+#endif
33419+
33420 return TASK_UNMAPPED_BASE;
33421- else
33422+ } else
33423 return TASK_UNMAPPED_BASE + mmap_rnd();
33424 }
33425
33426@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33427 */
33428 void arch_pick_mmap_layout(struct mm_struct *mm)
33429 {
33430- mm->mmap_legacy_base = mmap_legacy_base();
33431- mm->mmap_base = mmap_base();
33432+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33433+ mm->mmap_base = mmap_base(mm);
33434+
33435+#ifdef CONFIG_PAX_RANDMMAP
33436+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33437+ mm->mmap_legacy_base += mm->delta_mmap;
33438+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33439+ }
33440+#endif
33441
33442 if (mmap_is_legacy()) {
33443 mm->mmap_base = mm->mmap_legacy_base;
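
Arithmetic sketch of the mmap_base() computation above: the stack gap is clamped between MIN_GAP and MAX_GAP and subtracted, together with the randomization, from the task size (the real MIN_GAP also adds stack_maxrandom_size(), and the PaX RANDMMAP deltas are applied afterwards in arch_pick_mmap_layout()). Constants and the fixed "random" value here are illustrative:

#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE 0xc0000000UL			/* 32-bit split, illustrative */
#define MIN_GAP   (128 * 1024 * 1024UL)
#define MAX_GAP   (TASK_SIZE / 6 * 5)

static unsigned long mmap_base(unsigned long stack_rlimit, unsigned long rnd)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
	/* 8 MiB stack rlimit clamps up to MIN_GAP, fixed 4 MiB "random" */
	printf("base=%#lx\n", mmap_base(8 * 1024 * 1024UL, 0x400000UL));
	return 0;
}
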
33444diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33445index 0057a7a..95c7edd 100644
33446--- a/arch/x86/mm/mmio-mod.c
33447+++ b/arch/x86/mm/mmio-mod.c
33448@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33449 break;
33450 default:
33451 {
33452- unsigned char *ip = (unsigned char *)instptr;
33453+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33454 my_trace->opcode = MMIO_UNKNOWN_OP;
33455 my_trace->width = 0;
33456 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33457@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33458 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33459 void __iomem *addr)
33460 {
33461- static atomic_t next_id;
33462+ static atomic_unchecked_t next_id;
33463 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33464 /* These are page-unaligned. */
33465 struct mmiotrace_map map = {
33466@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33467 .private = trace
33468 },
33469 .phys = offset,
33470- .id = atomic_inc_return(&next_id)
33471+ .id = atomic_inc_return_unchecked(&next_id)
33472 };
33473 map.map_id = trace->id;
33474
33475@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33476 ioremap_trace_core(offset, size, addr);
33477 }
33478
33479-static void iounmap_trace_core(volatile void __iomem *addr)
33480+static void iounmap_trace_core(const volatile void __iomem *addr)
33481 {
33482 struct mmiotrace_map map = {
33483 .phys = 0,
33484@@ -328,7 +328,7 @@ not_enabled:
33485 }
33486 }
33487
33488-void mmiotrace_iounmap(volatile void __iomem *addr)
33489+void mmiotrace_iounmap(const volatile void __iomem *addr)
33490 {
33491 might_sleep();
33492 if (is_enabled()) /* recheck and proper locking in *_core() */
33493diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33494index cd4785b..25188b6 100644
33495--- a/arch/x86/mm/numa.c
33496+++ b/arch/x86/mm/numa.c
33497@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33498 }
33499 }
33500
33501-static int __init numa_register_memblks(struct numa_meminfo *mi)
33502+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33503 {
33504 unsigned long uninitialized_var(pfn_align);
33505 int i, nid;
33506diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33507index 536ea2f..f42c293 100644
33508--- a/arch/x86/mm/pageattr.c
33509+++ b/arch/x86/mm/pageattr.c
33510@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33511 */
33512 #ifdef CONFIG_PCI_BIOS
33513 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33514- pgprot_val(forbidden) |= _PAGE_NX;
33515+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33516 #endif
33517
33518 /*
33519@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33520 * Does not cover __inittext since that is gone later on. On
33521 * 64bit we do not enforce !NX on the low mapping
33522 */
33523- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33524- pgprot_val(forbidden) |= _PAGE_NX;
33525+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33526+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33527
33528+#ifdef CONFIG_DEBUG_RODATA
33529 /*
33530 * The .rodata section needs to be read-only. Using the pfn
33531 * catches all aliases.
33532@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33533 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33534 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33535 pgprot_val(forbidden) |= _PAGE_RW;
33536+#endif
33537
33538 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33539 /*
33540@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33541 }
33542 #endif
33543
33544+#ifdef CONFIG_PAX_KERNEXEC
33545+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33546+ pgprot_val(forbidden) |= _PAGE_RW;
33547+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33548+ }
33549+#endif
33550+
33551 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33552
33553 return prot;
33554@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33555 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33556 {
33557 /* change init_mm */
33558+ pax_open_kernel();
33559 set_pte_atomic(kpte, pte);
33560+
33561 #ifdef CONFIG_X86_32
33562 if (!SHARED_KERNEL_PMD) {
33563+
33564+#ifdef CONFIG_PAX_PER_CPU_PGD
33565+ unsigned long cpu;
33566+#else
33567 struct page *page;
33568+#endif
33569
33570+#ifdef CONFIG_PAX_PER_CPU_PGD
33571+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33572+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33573+#else
33574 list_for_each_entry(page, &pgd_list, lru) {
33575- pgd_t *pgd;
33576+ pgd_t *pgd = (pgd_t *)page_address(page);
33577+#endif
33578+
33579 pud_t *pud;
33580 pmd_t *pmd;
33581
33582- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33583+ pgd += pgd_index(address);
33584 pud = pud_offset(pgd, address);
33585 pmd = pmd_offset(pud, address);
33586 set_pte_atomic((pte_t *)pmd, pte);
33587 }
33588 }
33589 #endif
33590+ pax_close_kernel();
33591 }
33592
33593 static int
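
The static_protections() changes above follow the function's accumulate-and-clear pattern: forbidden bits (RW for text and, under KERNEXEC, RW plus NX-masked-by-__supported_pte_mask so the region stays read-only and executable) are collected, then stripped from the requested protection with prot & ~forbidden. A sketch of that pattern with illustrative flag values, not the kernel's:

#include <stdio.h>

#define PROT_RW  0x2ULL
#define PROT_NX  0x8000000000000000ULL
static unsigned long long supported_pte_mask = ~0ULL; /* assume NX supported */

static unsigned long long filter_prot(unsigned long long prot, int in_kernel_text)
{
	unsigned long long forbidden = 0;

	if (in_kernel_text) {
		forbidden |= PROT_RW;				/* text stays RO  */
		forbidden |= PROT_NX & supported_pte_mask;	/* and executable */
	}
	return prot & ~forbidden;
}

int main(void)
{
	printf("%#llx\n", filter_prot(PROT_RW | PROT_NX, 1));	/* -> 0 */
	return 0;
}
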
33594diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33595index 7ac6869..c0ba541 100644
33596--- a/arch/x86/mm/pat.c
33597+++ b/arch/x86/mm/pat.c
33598@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33599 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33600
33601 if (pg_flags == _PGMT_DEFAULT)
33602- return -1;
33603+ return _PAGE_CACHE_MODE_NUM;
33604 else if (pg_flags == _PGMT_WC)
33605 return _PAGE_CACHE_MODE_WC;
33606 else if (pg_flags == _PGMT_UC_MINUS)
33607@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33608
33609 page = pfn_to_page(pfn);
33610 type = get_page_memtype(page);
33611- if (type != -1) {
33612+ if (type != _PAGE_CACHE_MODE_NUM) {
33613 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33614 start, end - 1, type, req_type);
33615 if (new_type)
33616@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33617
33618 if (!entry) {
33619 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33620- current->comm, current->pid, start, end - 1);
33621+ current->comm, task_pid_nr(current), start, end - 1);
33622 return -EINVAL;
33623 }
33624
33625@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33626 page = pfn_to_page(paddr >> PAGE_SHIFT);
33627 rettype = get_page_memtype(page);
33628 /*
33629- * -1 from get_page_memtype() implies RAM page is in its
33630+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33631 * default state and not reserved, and hence of type WB
33632 */
33633- if (rettype == -1)
33634+ if (rettype == _PAGE_CACHE_MODE_NUM)
33635 rettype = _PAGE_CACHE_MODE_WB;
33636
33637 return rettype;
33638@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33639
33640 while (cursor < to) {
33641 if (!devmem_is_allowed(pfn)) {
33642- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33643- current->comm, from, to - 1);
33644+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33645+ current->comm, from, to - 1, cursor);
33646 return 0;
33647 }
33648 cursor += PAGE_SIZE;
33649@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33650 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33651 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33652 "for [mem %#010Lx-%#010Lx]\n",
33653- current->comm, current->pid,
33654+ current->comm, task_pid_nr(current),
33655 cattr_name(pcm),
33656 base, (unsigned long long)(base + size-1));
33657 return -EINVAL;
33658@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33659 pcm = lookup_memtype(paddr);
33660 if (want_pcm != pcm) {
33661 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33662- current->comm, current->pid,
33663+ current->comm, task_pid_nr(current),
33664 cattr_name(want_pcm),
33665 (unsigned long long)paddr,
33666 (unsigned long long)(paddr + size - 1),
33667@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33668 free_memtype(paddr, paddr + size);
33669 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33670 " for [mem %#010Lx-%#010Lx], got %s\n",
33671- current->comm, current->pid,
33672+ current->comm, task_pid_nr(current),
33673 cattr_name(want_pcm),
33674 (unsigned long long)paddr,
33675 (unsigned long long)(paddr + size - 1),
33676diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33677index 6582adc..fcc5d0b 100644
33678--- a/arch/x86/mm/pat_rbtree.c
33679+++ b/arch/x86/mm/pat_rbtree.c
33680@@ -161,7 +161,7 @@ success:
33681
33682 failure:
33683 printk(KERN_INFO "%s:%d conflicting memory types "
33684- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33685+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33686 end, cattr_name(found_type), cattr_name(match->type));
33687 return -EBUSY;
33688 }
33689diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33690index 9f0614d..92ae64a 100644
33691--- a/arch/x86/mm/pf_in.c
33692+++ b/arch/x86/mm/pf_in.c
33693@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33694 int i;
33695 enum reason_type rv = OTHERS;
33696
33697- p = (unsigned char *)ins_addr;
33698+ p = (unsigned char *)ktla_ktva(ins_addr);
33699 p += skip_prefix(p, &prf);
33700 p += get_opcode(p, &opcode);
33701
33702@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33703 struct prefix_bits prf;
33704 int i;
33705
33706- p = (unsigned char *)ins_addr;
33707+ p = (unsigned char *)ktla_ktva(ins_addr);
33708 p += skip_prefix(p, &prf);
33709 p += get_opcode(p, &opcode);
33710
33711@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33712 struct prefix_bits prf;
33713 int i;
33714
33715- p = (unsigned char *)ins_addr;
33716+ p = (unsigned char *)ktla_ktva(ins_addr);
33717 p += skip_prefix(p, &prf);
33718 p += get_opcode(p, &opcode);
33719
33720@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33721 struct prefix_bits prf;
33722 int i;
33723
33724- p = (unsigned char *)ins_addr;
33725+ p = (unsigned char *)ktla_ktva(ins_addr);
33726 p += skip_prefix(p, &prf);
33727 p += get_opcode(p, &opcode);
33728 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33729@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33730 struct prefix_bits prf;
33731 int i;
33732
33733- p = (unsigned char *)ins_addr;
33734+ p = (unsigned char *)ktla_ktva(ins_addr);
33735 p += skip_prefix(p, &prf);
33736 p += get_opcode(p, &opcode);
33737 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
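
Every reader of instruction bytes in pf_in.c is rebased through ktla_ktva() first. Under PaX KERNEXEC on i386 the kernel text is reachable at two addresses a configuration-dependent constant apart, and the helper moves a pointer from the execute-side alias to the one the fault decoder may safely read (on other builds it is an identity). A toy model of that rebasing; the delta value is made up:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical constant distance between the two kernel-text aliases. */
    #define KTLA_KTVA_DELTA 0x01000000UL

    static inline uintptr_t ktla_ktva_demo(uintptr_t addr)
    {
        return addr + KTLA_KTVA_DELTA;  /* rebase before dereferencing */
    }

    int main(void)
    {
        uintptr_t ins_addr = 0xc1234567UL;

        printf("decode %#lx via alias %#lx\n",
               (unsigned long)ins_addr,
               (unsigned long)ktla_ktva_demo(ins_addr));
        return 0;
    }
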
33738diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33739index 7b22ada..b11e66f 100644
33740--- a/arch/x86/mm/pgtable.c
33741+++ b/arch/x86/mm/pgtable.c
33742@@ -97,10 +97,75 @@ static inline void pgd_list_del(pgd_t *pgd)
33743 list_del(&page->lru);
33744 }
33745
33746-#define UNSHARED_PTRS_PER_PGD \
33747- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33748+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33749+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33750
33751+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33752+{
33753+ unsigned int count = USER_PGD_PTRS;
33754
33755+ if (!pax_user_shadow_base)
33756+ return;
33757+
33758+ while (count--)
33759+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33760+}
33761+#endif
33762+
33763+#ifdef CONFIG_PAX_PER_CPU_PGD
33764+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33765+{
33766+ unsigned int count = USER_PGD_PTRS;
33767+
33768+ while (count--) {
33769+ pgd_t pgd;
33770+
33771+#ifdef CONFIG_X86_64
33772+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33773+#else
33774+ pgd = *src++;
33775+#endif
33776+
33777+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33778+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33779+#endif
33780+
33781+ *dst++ = pgd;
33782+ }
33783+
33784+}
33785+#endif
33786+
33787+#ifdef CONFIG_X86_64
33788+#define pxd_t pud_t
33789+#define pyd_t pgd_t
33790+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33791+#define pgtable_pxd_page_ctor(page) true
33792+#define pgtable_pxd_page_dtor(page) do {} while (0)
33793+#define pxd_free(mm, pud) pud_free((mm), (pud))
33794+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33795+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33796+#define PYD_SIZE PGDIR_SIZE
33797+#define mm_inc_nr_pxds(mm) do {} while (0)
33798+#define mm_dec_nr_pxds(mm) do {} while (0)
33799+#else
33800+#define pxd_t pmd_t
33801+#define pyd_t pud_t
33802+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33803+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33804+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33805+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33806+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33807+#define pyd_offset(mm, address) pud_offset((mm), (address))
33808+#define PYD_SIZE PUD_SIZE
33809+#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
33810+#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
33811+#endif
33812+
33813+#ifdef CONFIG_PAX_PER_CPU_PGD
33814+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33815+static inline void pgd_dtor(pgd_t *pgd) {}
33816+#else
33817 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33818 {
33819 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33820@@ -141,6 +206,7 @@ static void pgd_dtor(pgd_t *pgd)
33821 pgd_list_del(pgd);
33822 spin_unlock(&pgd_lock);
33823 }
33824+#endif
33825
33826 /*
33827 * List of all pgd's needed for non-PAE so it can invalidate entries
33828@@ -153,7 +219,7 @@ static void pgd_dtor(pgd_t *pgd)
33829 * -- nyc
33830 */
33831
33832-#ifdef CONFIG_X86_PAE
33833+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33834 /*
33835 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33836 * updating the top-level pagetable entries to guarantee the
33837@@ -165,7 +231,7 @@ static void pgd_dtor(pgd_t *pgd)
33838 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33839 * and initialize the kernel pmds here.
33840 */
33841-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33842+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33843
33844 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33845 {
33846@@ -183,46 +249,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33847 */
33848 flush_tlb_mm(mm);
33849 }
33850+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33851+#define PREALLOCATED_PXDS USER_PGD_PTRS
33852 #else /* !CONFIG_X86_PAE */
33853
33854 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33855-#define PREALLOCATED_PMDS 0
33856+#define PREALLOCATED_PXDS 0
33857
33858 #endif /* CONFIG_X86_PAE */
33859
33860-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
33861+static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
33862 {
33863 int i;
33864
33865- for(i = 0; i < PREALLOCATED_PMDS; i++)
33866- if (pmds[i]) {
33867- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33868- free_page((unsigned long)pmds[i]);
33869- mm_dec_nr_pmds(mm);
33870+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33871+ if (pxds[i]) {
33872+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33873+ free_page((unsigned long)pxds[i]);
33874+ mm_dec_nr_pxds(mm);
33875 }
33876 }
33877
33878-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33879+static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
33880 {
33881 int i;
33882 bool failed = false;
33883
33884- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33885- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33886- if (!pmd)
33887+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33888+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33889+ if (!pxd)
33890 failed = true;
33891- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33892- free_page((unsigned long)pmd);
33893- pmd = NULL;
33894+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33895+ free_page((unsigned long)pxd);
33896+ pxd = NULL;
33897 failed = true;
33898 }
33899- if (pmd)
33900- mm_inc_nr_pmds(mm);
33901- pmds[i] = pmd;
33902+ if (pxd)
33903+ mm_inc_nr_pxds(mm);
33904+ pxds[i] = pxd;
33905 }
33906
33907 if (failed) {
33908- free_pmds(mm, pmds);
33909+ free_pxds(mm, pxds);
33910 return -ENOMEM;
33911 }
33912
33913@@ -235,50 +303,54 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33914 * preallocate which never got a corresponding vma will need to be
33915 * freed manually.
33916 */
33917-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33918+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33919 {
33920 int i;
33921
33922- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33923+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33924 pgd_t pgd = pgdp[i];
33925
33926 if (pgd_val(pgd) != 0) {
33927- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33928+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33929
33930- pgdp[i] = native_make_pgd(0);
33931+ set_pgd(pgdp + i, native_make_pgd(0));
33932
33933- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33934- pmd_free(mm, pmd);
33935- mm_dec_nr_pmds(mm);
33936+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33937+ pxd_free(mm, pxd);
33938+ mm_dec_nr_pxds(mm);
33939 }
33940 }
33941 }
33942
33943-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33944+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33945 {
33946- pud_t *pud;
33947+ pyd_t *pyd;
33948 int i;
33949
33950- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33951+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33952 return;
33953
33954- pud = pud_offset(pgd, 0);
33955+#ifdef CONFIG_X86_64
33956+ pyd = pyd_offset(mm, 0L);
33957+#else
33958+ pyd = pyd_offset(pgd, 0L);
33959+#endif
33960
33961- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33962- pmd_t *pmd = pmds[i];
33963+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33964+ pxd_t *pxd = pxds[i];
33965
33966 if (i >= KERNEL_PGD_BOUNDARY)
33967- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33968- sizeof(pmd_t) * PTRS_PER_PMD);
33969+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33970+ sizeof(pxd_t) * PTRS_PER_PMD);
33971
33972- pud_populate(mm, pud, pmd);
33973+ pyd_populate(mm, pyd, pxd);
33974 }
33975 }
33976
33977 pgd_t *pgd_alloc(struct mm_struct *mm)
33978 {
33979 pgd_t *pgd;
33980- pmd_t *pmds[PREALLOCATED_PMDS];
33981+ pxd_t *pxds[PREALLOCATED_PXDS];
33982
33983 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33984
33985@@ -287,11 +359,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33986
33987 mm->pgd = pgd;
33988
33989- if (preallocate_pmds(mm, pmds) != 0)
33990+ if (preallocate_pxds(mm, pxds) != 0)
33991 goto out_free_pgd;
33992
33993 if (paravirt_pgd_alloc(mm) != 0)
33994- goto out_free_pmds;
33995+ goto out_free_pxds;
33996
33997 /*
33998 * Make sure that pre-populating the pmds is atomic with
33999@@ -301,14 +373,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34000 spin_lock(&pgd_lock);
34001
34002 pgd_ctor(mm, pgd);
34003- pgd_prepopulate_pmd(mm, pgd, pmds);
34004+ pgd_prepopulate_pxd(mm, pgd, pxds);
34005
34006 spin_unlock(&pgd_lock);
34007
34008 return pgd;
34009
34010-out_free_pmds:
34011- free_pmds(mm, pmds);
34012+out_free_pxds:
34013+ free_pxds(mm, pxds);
34014 out_free_pgd:
34015 free_page((unsigned long)pgd);
34016 out:
34017@@ -317,7 +389,7 @@ out:
34018
34019 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34020 {
34021- pgd_mop_up_pmds(mm, pgd);
34022+ pgd_mop_up_pxds(mm, pgd);
34023 pgd_dtor(pgd);
34024 paravirt_pgd_free(mm, pgd);
34025 free_page((unsigned long)pgd);
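
The pgtable.c rewrite renames the pmd-preallocation machinery into a level-neutral pxd/pyd vocabulary: pxd_t is whichever level gets preallocated (pud under pgd on x86_64, pmd under pud on 32-bit) and pyd_t is its parent, so one function body serves both configurations. A compile-time sketch of the same trick with stand-in types (DEMO_X86_64 plays the role of the config option):

    #include <stdio.h>

    typedef struct { unsigned long val; } pmd_demo_t;
    typedef struct { unsigned long val; } pud_demo_t;
    typedef struct { unsigned long val; } pgd_demo_t;

    /* Pick the (child, parent) pair once; the code below is level-agnostic. */
    #ifdef DEMO_X86_64
    #define pxd_t pud_demo_t    /* preallocated level */
    #define pyd_t pgd_demo_t    /* its parent level   */
    #else
    #define pxd_t pmd_demo_t
    #define pyd_t pud_demo_t
    #endif

    static void pyd_populate_demo(pyd_t *parent, pxd_t *child)
    {
        parent->val = (unsigned long)child; /* stand-in for p?d_populate() */
    }

    int main(void)
    {
        pxd_t child = { 0 };
        pyd_t parent = { 0 };

        pyd_populate_demo(&parent, &child);
        printf("parent slot holds %#lx\n", parent.val);
        return 0;
    }
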
34026diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34027index 75cc097..79a097f 100644
34028--- a/arch/x86/mm/pgtable_32.c
34029+++ b/arch/x86/mm/pgtable_32.c
34030@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34031 return;
34032 }
34033 pte = pte_offset_kernel(pmd, vaddr);
34034+
34035+ pax_open_kernel();
34036 if (pte_val(pteval))
34037 set_pte_at(&init_mm, vaddr, pte, pteval);
34038 else
34039 pte_clear(&init_mm, vaddr, pte);
34040+ pax_close_kernel();
34041
34042 /*
34043 * It's enough to flush this one mapping.
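
set_pte_vaddr() now wraps its PTE store in pax_open_kernel()/pax_close_kernel(), the bracket PaX uses throughout this patch to lift write protection for exactly one sanctioned store. The same discipline, modeled in userspace with mprotect() (error handling elided for brevity):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        strcpy(p, "pte: old");
        mprotect(p, pagesz, PROT_READ);              /* steady state: RO   */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
        memcpy(p + 5, "new", 3);                     /* the one store      */
        mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

        puts(p);        /* prints "pte: new" */
        return 0;
    }
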
34044diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34045index e666cbb..61788c45 100644
34046--- a/arch/x86/mm/physaddr.c
34047+++ b/arch/x86/mm/physaddr.c
34048@@ -10,7 +10,7 @@
34049 #ifdef CONFIG_X86_64
34050
34051 #ifdef CONFIG_DEBUG_VIRTUAL
34052-unsigned long __phys_addr(unsigned long x)
34053+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34054 {
34055 unsigned long y = x - __START_KERNEL_map;
34056
34057@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34058 #else
34059
34060 #ifdef CONFIG_DEBUG_VIRTUAL
34061-unsigned long __phys_addr(unsigned long x)
34062+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34063 {
34064 unsigned long phys_addr = x - PAGE_OFFSET;
34065 /* VMALLOC_* aren't constants */
34066diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34067index 90555bf..f5f1828 100644
34068--- a/arch/x86/mm/setup_nx.c
34069+++ b/arch/x86/mm/setup_nx.c
34070@@ -5,8 +5,10 @@
34071 #include <asm/pgtable.h>
34072 #include <asm/proto.h>
34073
34074+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34075 static int disable_nx;
34076
34077+#ifndef CONFIG_PAX_PAGEEXEC
34078 /*
34079 * noexec = on|off
34080 *
34081@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34082 return 0;
34083 }
34084 early_param("noexec", noexec_setup);
34085+#endif
34086+
34087+#endif
34088
34089 void x86_configure_nx(void)
34090 {
34091+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34092 if (cpu_has_nx && !disable_nx)
34093 __supported_pte_mask |= _PAGE_NX;
34094 else
34095+#endif
34096 __supported_pte_mask &= ~_PAGE_NX;
34097 }
34098
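
After this hunk, only builds that can have an NX bit at all (64-bit or 32-bit PAE) compile the code that sets it; every other configuration unconditionally masks it out, and PAX_PAGEEXEC kernels additionally drop the noexec= boot parameter. The resulting control flow, reduced to a sketch in which the DEMO_* macros stand in for the config options:

    #include <stdio.h>

    #define DEMO_PAGE_NX (1ULL << 63)

    static unsigned long long configure_nx_demo(unsigned long long pte_mask,
                                                int cpu_has_nx, int disable_nx)
    {
    #if defined(DEMO_X86_64) || defined(DEMO_X86_PAE)
        if (cpu_has_nx && !disable_nx)
            return pte_mask | DEMO_PAGE_NX;
    #endif
        return pte_mask & ~DEMO_PAGE_NX;  /* no NX support: always clear */
    }

    int main(void)
    {
        printf("pte mask: %#llx\n", configure_nx_demo(0x7fffULL, 1, 0));
        return 0;
    }
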
34099diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34100index 3250f23..7a97ba2 100644
34101--- a/arch/x86/mm/tlb.c
34102+++ b/arch/x86/mm/tlb.c
34103@@ -45,7 +45,11 @@ void leave_mm(int cpu)
34104 BUG();
34105 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34106 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34107+
34108+#ifndef CONFIG_PAX_PER_CPU_PGD
34109 load_cr3(swapper_pg_dir);
34110+#endif
34111+
34112 /*
34113 * This gets called in the idle path where RCU
34114 * functions differently. Tracing normally
34115diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34116new file mode 100644
34117index 0000000..dace51c
34118--- /dev/null
34119+++ b/arch/x86/mm/uderef_64.c
34120@@ -0,0 +1,37 @@
34121+#include <linux/mm.h>
34122+#include <asm/pgtable.h>
34123+#include <asm/uaccess.h>
34124+
34125+#ifdef CONFIG_PAX_MEMORY_UDEREF
34126+/* PaX: due to the special call convention these functions must
34127+ * - remain leaf functions under all configurations,
34128+ * - never be called directly, only dereferenced from the wrappers.
34129+ */
34130+void __pax_open_userland(void)
34131+{
34132+ unsigned int cpu;
34133+
34134+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34135+ return;
34136+
34137+ cpu = raw_get_cpu();
34138+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34139+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34140+ raw_put_cpu_no_resched();
34141+}
34142+EXPORT_SYMBOL(__pax_open_userland);
34143+
34144+void __pax_close_userland(void)
34145+{
34146+ unsigned int cpu;
34147+
34148+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34149+ return;
34150+
34151+ cpu = raw_get_cpu();
34152+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34153+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34154+ raw_put_cpu_no_resched();
34155+}
34156+EXPORT_SYMBOL(__pax_close_userland);
34157+#endif
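
The new uderef_64.c supplies the UDEREF open/close primitives for x86_64 with PCID: each CPU keeps a kernel PGD and a shadow user PGD, and entering "userland access" mode is a single CR3 write that swaps PGD and PCID with PCID_NOFLUSH set, so no TLB flush is paid. The BUG_ON()s enforce that the transitions stay strictly paired. A userspace sketch of that two-state machine; the constants and helpers below are stand-ins:

    #include <assert.h>
    #include <stdio.h>

    #define PCID_KERNEL 0
    #define PCID_USER   1

    static unsigned long cr3_demo = PCID_KERNEL;  /* per-CPU in the kernel */

    static void pax_open_userland_demo(void)
    {
        assert((cr3_demo & 0xfff) == PCID_KERNEL);  /* BUG_ON analogue */
        cr3_demo = PCID_USER;  /* write_cr3(user pgd | PCID_USER | NOFLUSH) */
    }

    static void pax_close_userland_demo(void)
    {
        assert((cr3_demo & 0xfff) == PCID_USER);
        cr3_demo = PCID_KERNEL;
    }

    int main(void)
    {
        pax_open_userland_demo();
        /* ...copy_from_user()-style access happens here... */
        pax_close_userland_demo();
        puts("open/close stayed balanced");
        return 0;
    }
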
34158diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34159index 6440221..f84b5c7 100644
34160--- a/arch/x86/net/bpf_jit.S
34161+++ b/arch/x86/net/bpf_jit.S
34162@@ -9,6 +9,7 @@
34163 */
34164 #include <linux/linkage.h>
34165 #include <asm/dwarf2.h>
34166+#include <asm/alternative-asm.h>
34167
34168 /*
34169 * Calling convention :
34170@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34171 jle bpf_slow_path_word
34172 mov (SKBDATA,%rsi),%eax
34173 bswap %eax /* ntohl() */
34174+ pax_force_retaddr
34175 ret
34176
34177 sk_load_half:
34178@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34179 jle bpf_slow_path_half
34180 movzwl (SKBDATA,%rsi),%eax
34181 rol $8,%ax # ntohs()
34182+ pax_force_retaddr
34183 ret
34184
34185 sk_load_byte:
34186@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34187 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34188 jle bpf_slow_path_byte
34189 movzbl (SKBDATA,%rsi),%eax
34190+ pax_force_retaddr
34191 ret
34192
34193 /* rsi contains offset and can be scratched */
34194@@ -90,6 +94,7 @@ bpf_slow_path_word:
34195 js bpf_error
34196 mov - MAX_BPF_STACK + 32(%rbp),%eax
34197 bswap %eax
34198+ pax_force_retaddr
34199 ret
34200
34201 bpf_slow_path_half:
34202@@ -98,12 +103,14 @@ bpf_slow_path_half:
34203 mov - MAX_BPF_STACK + 32(%rbp),%ax
34204 rol $8,%ax
34205 movzwl %ax,%eax
34206+ pax_force_retaddr
34207 ret
34208
34209 bpf_slow_path_byte:
34210 bpf_slow_path_common(1)
34211 js bpf_error
34212 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34213+ pax_force_retaddr
34214 ret
34215
34216 #define sk_negative_common(SIZE) \
34217@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34218 sk_negative_common(4)
34219 mov (%rax), %eax
34220 bswap %eax
34221+ pax_force_retaddr
34222 ret
34223
34224 bpf_slow_path_half_neg:
34225@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34226 mov (%rax),%ax
34227 rol $8,%ax
34228 movzwl %ax,%eax
34229+ pax_force_retaddr
34230 ret
34231
34232 bpf_slow_path_byte_neg:
34233@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34234 .globl sk_load_byte_negative_offset
34235 sk_negative_common(1)
34236 movzbl (%rax), %eax
34237+ pax_force_retaddr
34238 ret
34239
34240 bpf_error:
34241@@ -156,4 +166,5 @@ bpf_error:
34242 mov - MAX_BPF_STACK + 16(%rbp),%r14
34243 mov - MAX_BPF_STACK + 24(%rbp),%r15
34244 leaveq
34245+ pax_force_retaddr
34246 ret
34247diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34248index 9875143..36776ae 100644
34249--- a/arch/x86/net/bpf_jit_comp.c
34250+++ b/arch/x86/net/bpf_jit_comp.c
34251@@ -13,7 +13,11 @@
34252 #include <linux/if_vlan.h>
34253 #include <asm/cacheflush.h>
34254
34255+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34256+int bpf_jit_enable __read_only;
34257+#else
34258 int bpf_jit_enable __read_mostly;
34259+#endif
34260
34261 /*
34262 * assembly code in arch/x86/net/bpf_jit.S
34263@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34264 static void jit_fill_hole(void *area, unsigned int size)
34265 {
34266 /* fill whole space with int3 instructions */
34267+ pax_open_kernel();
34268 memset(area, 0xcc, size);
34269+ pax_close_kernel();
34270 }
34271
34272 struct jit_context {
34273@@ -559,6 +565,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34274 if (is_ereg(dst_reg))
34275 EMIT1(0x41);
34276 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
34277+
34278+ /* emit 'movzwl eax, ax' */
34279+ if (is_ereg(dst_reg))
34280+ EMIT3(0x45, 0x0F, 0xB7);
34281+ else
34282+ EMIT2(0x0F, 0xB7);
34283+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34284 break;
34285 case 32:
34286 /* emit 'bswap eax' to swap lower 4 bytes */
34287@@ -577,6 +590,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34288 break;
34289
34290 case BPF_ALU | BPF_END | BPF_FROM_LE:
34291+ switch (imm32) {
34292+ case 16:
34293+ /* emit 'movzwl eax, ax' to zero extend 16-bit
34294+ * into 64 bit
34295+ */
34296+ if (is_ereg(dst_reg))
34297+ EMIT3(0x45, 0x0F, 0xB7);
34298+ else
34299+ EMIT2(0x0F, 0xB7);
34300+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34301+ break;
34302+ case 32:
34303+ /* emit 'mov eax, eax' to clear upper 32-bits */
34304+ if (is_ereg(dst_reg))
34305+ EMIT1(0x45);
34306+ EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
34307+ break;
34308+ case 64:
34309+ /* nop */
34310+ break;
34311+ }
34312 break;
34313
34314 /* ST: *(u8*)(dst_reg + off) = imm */
34315@@ -896,7 +930,9 @@ common_load:
34316 pr_err("bpf_jit_compile fatal error\n");
34317 return -EFAULT;
34318 }
34319+ pax_open_kernel();
34320 memcpy(image + proglen, temp, ilen);
34321+ pax_close_kernel();
34322 }
34323 proglen += ilen;
34324 addrs[i] = proglen;
34325@@ -968,7 +1004,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34326
34327 if (image) {
34328 bpf_flush_icache(header, image + proglen);
34329- set_memory_ro((unsigned long)header, header->pages);
34330 prog->bpf_func = (void *)image;
34331 prog->jited = true;
34332 }
34333@@ -981,12 +1016,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34334 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34335 struct bpf_binary_header *header = (void *)addr;
34336
34337- if (!fp->jited)
34338- goto free_filter;
34339+ if (fp->jited)
34340+ bpf_jit_binary_free(header);
34341
34342- set_memory_rw(addr, header->pages);
34343- bpf_jit_binary_free(header);
34344-
34345-free_filter:
34346 bpf_prog_unlock_free(fp);
34347 }
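
Three related changes land in the JIT: bpf_jit_enable becomes __read_only under GRKERNSEC_BPF_HARDEN, every write into the JIT image (the int3 fill and the instruction copy) is bracketed by pax_open_kernel()/pax_close_kernel(), and the set_memory_ro()/set_memory_rw() calls disappear because the image is never writable outside those brackets. A userspace model of such a W^X JIT buffer (illustrative; real code must check mmap() for MAP_FAILED):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *image = mmap(NULL, pagesz, PROT_READ | PROT_EXEC,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* jit_fill_hole(): poison the image while it is briefly writable */
        mprotect(image, pagesz, PROT_READ | PROT_WRITE);
        memset(image, 0xcc, pagesz);       /* int3 instructions */
        /* the memcpy(image + proglen, temp, ilen) step fits here too */
        mprotect(image, pagesz, PROT_READ | PROT_EXEC);

        printf("first byte: %#x (int3)\n", image[0]);
        return 0;
    }
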
34348diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34349index 5d04be5..2beeaa2 100644
34350--- a/arch/x86/oprofile/backtrace.c
34351+++ b/arch/x86/oprofile/backtrace.c
34352@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34353 struct stack_frame_ia32 *fp;
34354 unsigned long bytes;
34355
34356- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34357+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34358 if (bytes != 0)
34359 return NULL;
34360
34361- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34362+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34363
34364 oprofile_add_trace(bufhead[0].return_address);
34365
34366@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34367 struct stack_frame bufhead[2];
34368 unsigned long bytes;
34369
34370- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34371+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34372 if (bytes != 0)
34373 return NULL;
34374
34375@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34376 {
34377 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34378
34379- if (!user_mode_vm(regs)) {
34380+ if (!user_mode(regs)) {
34381 unsigned long stack = kernel_stack_pointer(regs);
34382 if (depth)
34383 dump_trace(NULL, regs, (unsigned long *)stack, 0,
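
The backtrace changes are mostly annotation: copy_from_user_nmi() is handed pointers explicitly cast into the user address space (__force_user) and the result cast back to kernel space (__force_kernel), so a sparse-checked build can verify that no unchecked crossing remains. A reduced example of address-space annotations as sparse sees them; the attribute macros expand to nothing in a normal compile, and the function names are demo stand-ins:

    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__
    #define __user   __attribute__((noderef, address_space(1)))
    #define __force  __attribute__((force))
    #else
    #define __user
    #define __force
    #endif

    /* Toy copy_from_user: only this function may bridge the two spaces. */
    static long copy_from_user_demo(void *dst, const void __user *src, long n)
    {
        memcpy(dst, (const void __force *)src, n);  /* sanctioned crossing */
        return 0;
    }

    int main(void)
    {
        char buf[8];
        const char __user *uptr = (const char __force __user *)"hi";

        copy_from_user_demo(buf, uptr, 3);
        printf("%s\n", buf);
        return 0;
    }
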
34384diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34385index 1d2e639..f6ef82a 100644
34386--- a/arch/x86/oprofile/nmi_int.c
34387+++ b/arch/x86/oprofile/nmi_int.c
34388@@ -23,6 +23,7 @@
34389 #include <asm/nmi.h>
34390 #include <asm/msr.h>
34391 #include <asm/apic.h>
34392+#include <asm/pgtable.h>
34393
34394 #include "op_counter.h"
34395 #include "op_x86_model.h"
34396@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34397 if (ret)
34398 return ret;
34399
34400- if (!model->num_virt_counters)
34401- model->num_virt_counters = model->num_counters;
34402+ if (!model->num_virt_counters) {
34403+ pax_open_kernel();
34404+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34405+ pax_close_kernel();
34406+ }
34407
34408 mux_init(ops);
34409
34410diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34411index 50d86c0..7985318 100644
34412--- a/arch/x86/oprofile/op_model_amd.c
34413+++ b/arch/x86/oprofile/op_model_amd.c
34414@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34415 num_counters = AMD64_NUM_COUNTERS;
34416 }
34417
34418- op_amd_spec.num_counters = num_counters;
34419- op_amd_spec.num_controls = num_counters;
34420- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34421+ pax_open_kernel();
34422+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34423+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34424+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34425+ pax_close_kernel();
34426
34427 return 0;
34428 }
34429diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34430index d90528e..0127e2b 100644
34431--- a/arch/x86/oprofile/op_model_ppro.c
34432+++ b/arch/x86/oprofile/op_model_ppro.c
34433@@ -19,6 +19,7 @@
34434 #include <asm/msr.h>
34435 #include <asm/apic.h>
34436 #include <asm/nmi.h>
34437+#include <asm/pgtable.h>
34438
34439 #include "op_x86_model.h"
34440 #include "op_counter.h"
34441@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34442
34443 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34444
34445- op_arch_perfmon_spec.num_counters = num_counters;
34446- op_arch_perfmon_spec.num_controls = num_counters;
34447+ pax_open_kernel();
34448+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34449+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34450+ pax_close_kernel();
34451 }
34452
34453 static int arch_perfmon_init(struct oprofile_operations *ignore)
34454diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34455index 71e8a67..6a313bb 100644
34456--- a/arch/x86/oprofile/op_x86_model.h
34457+++ b/arch/x86/oprofile/op_x86_model.h
34458@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34459 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34460 struct op_msrs const * const msrs);
34461 #endif
34462-};
34463+} __do_const;
34464
34465 struct op_counter_config;
34466
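
Marking op_x86_model_spec with __do_const is the constify pattern used throughout this patch: a compiler plugin turns every instance of the struct into const, read-only data, and the few boot-time writers (as in op_model_amd.c and nmi_int.c above) must go through pax_open_kernel() plus a cast that strips the qualifier. A plain-C approximation of writing one field through a const view at init time; here the storage itself stays writable, since userspace has no pax_open_kernel() to make a read-only page writable:

    #include <stdio.h>

    struct ops_spec {
        unsigned int num_counters;
        unsigned int num_virt_counters;
    };

    /* In the kernel the object itself sits in read-only memory; in this
     * sketch only the view is const, so the cast below stays well-defined. */
    static struct ops_spec op_demo_storage = { .num_counters = 4 };
    static const struct ops_spec *const op_demo_spec = &op_demo_storage;

    static void op_init_demo(unsigned int detected)
    {
        /* pax_open_kernel() would lift write protection here */
        *(unsigned int *)&op_demo_spec->num_virt_counters = detected;
        /* pax_close_kernel() */
    }

    int main(void)
    {
        op_init_demo(8);
        printf("virt counters: %u\n", op_demo_spec->num_virt_counters);
        return 0;
    }
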
34467diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34468index 852aa4c..71613f2 100644
34469--- a/arch/x86/pci/intel_mid_pci.c
34470+++ b/arch/x86/pci/intel_mid_pci.c
34471@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34472 pci_mmcfg_late_init();
34473 pcibios_enable_irq = intel_mid_pci_irq_enable;
34474 pcibios_disable_irq = intel_mid_pci_irq_disable;
34475- pci_root_ops = intel_mid_pci_ops;
34476+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34477 pci_soc_mode = 1;
34478 /* Continue with standard init */
34479 return 1;
34480diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34481index 5dc6ca5..25c03f5 100644
34482--- a/arch/x86/pci/irq.c
34483+++ b/arch/x86/pci/irq.c
34484@@ -51,7 +51,7 @@ struct irq_router {
34485 struct irq_router_handler {
34486 u16 vendor;
34487 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34488-};
34489+} __do_const;
34490
34491 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34492 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34493@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34494 return 0;
34495 }
34496
34497-static __initdata struct irq_router_handler pirq_routers[] = {
34498+static __initconst const struct irq_router_handler pirq_routers[] = {
34499 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34500 { PCI_VENDOR_ID_AL, ali_router_probe },
34501 { PCI_VENDOR_ID_ITE, ite_router_probe },
34502@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34503 static void __init pirq_find_router(struct irq_router *r)
34504 {
34505 struct irq_routing_table *rt = pirq_table;
34506- struct irq_router_handler *h;
34507+ const struct irq_router_handler *h;
34508
34509 #ifdef CONFIG_PCI_BIOS
34510 if (!rt->signature) {
34511@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34512 return 0;
34513 }
34514
34515-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34516+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34517 {
34518 .callback = fix_broken_hp_bios_irq9,
34519 .ident = "HP Pavilion N5400 Series Laptop",
34520diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34521index 9b83b90..4112152 100644
34522--- a/arch/x86/pci/pcbios.c
34523+++ b/arch/x86/pci/pcbios.c
34524@@ -79,7 +79,7 @@ union bios32 {
34525 static struct {
34526 unsigned long address;
34527 unsigned short segment;
34528-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34529+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34530
34531 /*
34532 * Returns the entry point for the given service, NULL on error
34533@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34534 unsigned long length; /* %ecx */
34535 unsigned long entry; /* %edx */
34536 unsigned long flags;
34537+ struct desc_struct d, *gdt;
34538
34539 local_irq_save(flags);
34540- __asm__("lcall *(%%edi); cld"
34541+
34542+ gdt = get_cpu_gdt_table(smp_processor_id());
34543+
34544+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34545+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34546+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34547+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34548+
34549+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34550 : "=a" (return_code),
34551 "=b" (address),
34552 "=c" (length),
34553 "=d" (entry)
34554 : "0" (service),
34555 "1" (0),
34556- "D" (&bios32_indirect));
34557+ "D" (&bios32_indirect),
34558+ "r"(__PCIBIOS_DS)
34559+ : "memory");
34560+
34561+ pax_open_kernel();
34562+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34563+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34564+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34565+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34566+ pax_close_kernel();
34567+
34568 local_irq_restore(flags);
34569
34570 switch (return_code) {
34571- case 0:
34572- return address + entry;
34573- case 0x80: /* Not present */
34574- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34575- return 0;
34576- default: /* Shouldn't happen */
34577- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34578- service, return_code);
34579+ case 0: {
34580+ int cpu;
34581+ unsigned char flags;
34582+
34583+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34584+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34585+ printk(KERN_WARNING "bios32_service: not valid\n");
34586 return 0;
34587+ }
34588+ address = address + PAGE_OFFSET;
34589+ length += 16UL; /* some BIOSs underreport this... */
34590+ flags = 4;
34591+ if (length >= 64*1024*1024) {
34592+ length >>= PAGE_SHIFT;
34593+ flags |= 8;
34594+ }
34595+
34596+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34597+ gdt = get_cpu_gdt_table(cpu);
34598+ pack_descriptor(&d, address, length, 0x9b, flags);
34599+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34600+ pack_descriptor(&d, address, length, 0x93, flags);
34601+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34602+ }
34603+ return entry;
34604+ }
34605+ case 0x80: /* Not present */
34606+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34607+ return 0;
34608+ default: /* Shouldn't happen */
34609+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34610+ service, return_code);
34611+ return 0;
34612 }
34613 }
34614
34615 static struct {
34616 unsigned long address;
34617 unsigned short segment;
34618-} pci_indirect = { 0, __KERNEL_CS };
34619+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34620
34621-static int pci_bios_present;
34622+static int pci_bios_present __read_only;
34623
34624 static int __init check_pcibios(void)
34625 {
34626@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34627 unsigned long flags, pcibios_entry;
34628
34629 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34630- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34631+ pci_indirect.address = pcibios_entry;
34632
34633 local_irq_save(flags);
34634- __asm__(
34635- "lcall *(%%edi); cld\n\t"
34636+ __asm__("movw %w6, %%ds\n\t"
34637+ "lcall *%%ss:(%%edi); cld\n\t"
34638+ "push %%ss\n\t"
34639+ "pop %%ds\n\t"
34640 "jc 1f\n\t"
34641 "xor %%ah, %%ah\n"
34642 "1:"
34643@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34644 "=b" (ebx),
34645 "=c" (ecx)
34646 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34647- "D" (&pci_indirect)
34648+ "D" (&pci_indirect),
34649+ "r" (__PCIBIOS_DS)
34650 : "memory");
34651 local_irq_restore(flags);
34652
34653@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34654
34655 switch (len) {
34656 case 1:
34657- __asm__("lcall *(%%esi); cld\n\t"
34658+ __asm__("movw %w6, %%ds\n\t"
34659+ "lcall *%%ss:(%%esi); cld\n\t"
34660+ "push %%ss\n\t"
34661+ "pop %%ds\n\t"
34662 "jc 1f\n\t"
34663 "xor %%ah, %%ah\n"
34664 "1:"
34665@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34666 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34667 "b" (bx),
34668 "D" ((long)reg),
34669- "S" (&pci_indirect));
34670+ "S" (&pci_indirect),
34671+ "r" (__PCIBIOS_DS));
34672 /*
34673 * Zero-extend the result beyond 8 bits, do not trust the
34674 * BIOS having done it:
34675@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34676 *value &= 0xff;
34677 break;
34678 case 2:
34679- __asm__("lcall *(%%esi); cld\n\t"
34680+ __asm__("movw %w6, %%ds\n\t"
34681+ "lcall *%%ss:(%%esi); cld\n\t"
34682+ "push %%ss\n\t"
34683+ "pop %%ds\n\t"
34684 "jc 1f\n\t"
34685 "xor %%ah, %%ah\n"
34686 "1:"
34687@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34688 : "1" (PCIBIOS_READ_CONFIG_WORD),
34689 "b" (bx),
34690 "D" ((long)reg),
34691- "S" (&pci_indirect));
34692+ "S" (&pci_indirect),
34693+ "r" (__PCIBIOS_DS));
34694 /*
34695 * Zero-extend the result beyond 16 bits, do not trust the
34696 * BIOS having done it:
34697@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34698 *value &= 0xffff;
34699 break;
34700 case 4:
34701- __asm__("lcall *(%%esi); cld\n\t"
34702+ __asm__("movw %w6, %%ds\n\t"
34703+ "lcall *%%ss:(%%esi); cld\n\t"
34704+ "push %%ss\n\t"
34705+ "pop %%ds\n\t"
34706 "jc 1f\n\t"
34707 "xor %%ah, %%ah\n"
34708 "1:"
34709@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34710 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34711 "b" (bx),
34712 "D" ((long)reg),
34713- "S" (&pci_indirect));
34714+ "S" (&pci_indirect),
34715+ "r" (__PCIBIOS_DS));
34716 break;
34717 }
34718
34719@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34720
34721 switch (len) {
34722 case 1:
34723- __asm__("lcall *(%%esi); cld\n\t"
34724+ __asm__("movw %w6, %%ds\n\t"
34725+ "lcall *%%ss:(%%esi); cld\n\t"
34726+ "push %%ss\n\t"
34727+ "pop %%ds\n\t"
34728 "jc 1f\n\t"
34729 "xor %%ah, %%ah\n"
34730 "1:"
34731@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34732 "c" (value),
34733 "b" (bx),
34734 "D" ((long)reg),
34735- "S" (&pci_indirect));
34736+ "S" (&pci_indirect),
34737+ "r" (__PCIBIOS_DS));
34738 break;
34739 case 2:
34740- __asm__("lcall *(%%esi); cld\n\t"
34741+ __asm__("movw %w6, %%ds\n\t"
34742+ "lcall *%%ss:(%%esi); cld\n\t"
34743+ "push %%ss\n\t"
34744+ "pop %%ds\n\t"
34745 "jc 1f\n\t"
34746 "xor %%ah, %%ah\n"
34747 "1:"
34748@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34749 "c" (value),
34750 "b" (bx),
34751 "D" ((long)reg),
34752- "S" (&pci_indirect));
34753+ "S" (&pci_indirect),
34754+ "r" (__PCIBIOS_DS));
34755 break;
34756 case 4:
34757- __asm__("lcall *(%%esi); cld\n\t"
34758+ __asm__("movw %w6, %%ds\n\t"
34759+ "lcall *%%ss:(%%esi); cld\n\t"
34760+ "push %%ss\n\t"
34761+ "pop %%ds\n\t"
34762 "jc 1f\n\t"
34763 "xor %%ah, %%ah\n"
34764 "1:"
34765@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34766 "c" (value),
34767 "b" (bx),
34768 "D" ((long)reg),
34769- "S" (&pci_indirect));
34770+ "S" (&pci_indirect),
34771+ "r" (__PCIBIOS_DS));
34772 break;
34773 }
34774
34775@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34776
34777 DBG("PCI: Fetching IRQ routing table... ");
34778 __asm__("push %%es\n\t"
34779+ "movw %w8, %%ds\n\t"
34780 "push %%ds\n\t"
34781 "pop %%es\n\t"
34782- "lcall *(%%esi); cld\n\t"
34783+ "lcall *%%ss:(%%esi); cld\n\t"
34784 "pop %%es\n\t"
34785+ "push %%ss\n\t"
34786+ "pop %%ds\n"
34787 "jc 1f\n\t"
34788 "xor %%ah, %%ah\n"
34789 "1:"
34790@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34791 "1" (0),
34792 "D" ((long) &opt),
34793 "S" (&pci_indirect),
34794- "m" (opt)
34795+ "m" (opt),
34796+ "r" (__PCIBIOS_DS)
34797 : "memory");
34798 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34799 if (ret & 0xff00)
34800@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34801 {
34802 int ret;
34803
34804- __asm__("lcall *(%%esi); cld\n\t"
34805+ __asm__("movw %w5, %%ds\n\t"
34806+ "lcall *%%ss:(%%esi); cld\n\t"
34807+ "push %%ss\n\t"
34808+ "pop %%ds\n"
34809 "jc 1f\n\t"
34810 "xor %%ah, %%ah\n"
34811 "1:"
34812@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34813 : "0" (PCIBIOS_SET_PCI_HW_INT),
34814 "b" ((dev->bus->number << 8) | dev->devfn),
34815 "c" ((irq << 8) | (pin + 10)),
34816- "S" (&pci_indirect));
34817+ "S" (&pci_indirect),
34818+ "r" (__PCIBIOS_DS));
34819 return !(ret & 0xff00);
34820 }
34821 EXPORT_SYMBOL(pcibios_set_irq_routing);
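
The pcbios.c rewrite stops calling the PCI BIOS through flat __KERNEL_CS: it builds dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors sized to the BIOS32 service range, loads %ds with the restricted segment around each lcall, and restores the kernel segments afterwards via push %ss; pop %ds, so a buggy BIOS can neither execute nor scribble outside its window. It also stores the raw entry point in pci_indirect, since the descriptor base now supplies the offset that PAGE_OFFSET used to. Userspace cannot load segment descriptors, but the far-pointer bookkeeping looks roughly like this (selector values and the entry address are made up; the struct models the i386 4-byte offset + 2-byte selector layout):

    #include <stdio.h>

    /* Far pointer consumed by the lcall *(%esi) pattern. */
    struct far_ptr {
        unsigned long offset;
        unsigned short segment;
    } __attribute__((packed));

    #define DEMO_PCIBIOS_CS 0xc8   /* hypothetical selectors */
    #define DEMO_PCIBIOS_DS 0xd0

    static struct far_ptr pci_indirect = { 0, DEMO_PCIBIOS_CS };

    int main(void)
    {
        unsigned long bios32_entry = 0x000fd000UL;  /* hypothetical */

        /* Raw entry, not entry + PAGE_OFFSET: the segment base covers it. */
        pci_indirect.offset = bios32_entry;
        printf("lcall -> %04x:%08lx with ds=%#x\n",
               pci_indirect.segment, pci_indirect.offset, DEMO_PCIBIOS_DS);
        return 0;
    }
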
34822diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34823index 40e7cda..c7e6672 100644
34824--- a/arch/x86/platform/efi/efi_32.c
34825+++ b/arch/x86/platform/efi/efi_32.c
34826@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34827 {
34828 struct desc_ptr gdt_descr;
34829
34830+#ifdef CONFIG_PAX_KERNEXEC
34831+ struct desc_struct d;
34832+#endif
34833+
34834 local_irq_save(efi_rt_eflags);
34835
34836 load_cr3(initial_page_table);
34837 __flush_tlb_all();
34838
34839+#ifdef CONFIG_PAX_KERNEXEC
34840+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34841+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34842+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34843+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34844+#endif
34845+
34846 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34847 gdt_descr.size = GDT_SIZE - 1;
34848 load_gdt(&gdt_descr);
34849@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34850 {
34851 struct desc_ptr gdt_descr;
34852
34853+#ifdef CONFIG_PAX_KERNEXEC
34854+ struct desc_struct d;
34855+
34856+ memset(&d, 0, sizeof d);
34857+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34858+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34859+#endif
34860+
34861 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34862 gdt_descr.size = GDT_SIZE - 1;
34863 load_gdt(&gdt_descr);
34864
34865+#ifdef CONFIG_PAX_PER_CPU_PGD
34866+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34867+#else
34868 load_cr3(swapper_pg_dir);
34869+#endif
34870+
34871 __flush_tlb_all();
34872
34873 local_irq_restore(efi_rt_eflags);
34874diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34875index 17e80d8..9fa6e41 100644
34876--- a/arch/x86/platform/efi/efi_64.c
34877+++ b/arch/x86/platform/efi/efi_64.c
34878@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34879 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34880 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34881 }
34882+
34883+#ifdef CONFIG_PAX_PER_CPU_PGD
34884+ load_cr3(swapper_pg_dir);
34885+#endif
34886+
34887 __flush_tlb_all();
34888 }
34889
34890@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34891 for (pgd = 0; pgd < n_pgds; pgd++)
34892 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34893 kfree(save_pgd);
34894+
34895+#ifdef CONFIG_PAX_PER_CPU_PGD
34896+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34897+#endif
34898+
34899 __flush_tlb_all();
34900 local_irq_restore(efi_flags);
34901 early_code_mapping_set_exec(0);
34902@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34903 unsigned npages;
34904 pgd_t *pgd;
34905
34906- if (efi_enabled(EFI_OLD_MEMMAP))
34907+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34908+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34909+ * able to execute the EFI services.
34910+ */
34911+ if (__supported_pte_mask & _PAGE_NX) {
34912+ unsigned long addr = (unsigned long) __va(0);
34913+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34914+
34915+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34916+#ifdef CONFIG_PAX_PER_CPU_PGD
34917+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34918+#endif
34919+ set_pgd(pgd_offset_k(addr), pe);
34920+ }
34921+
34922 return 0;
34923+ }
34924
34925 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34926 pgd = __va(efi_scratch.efi_pgt);
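
For the old-style EFI memmap the patch carves one exception out of KERNEXEC: the low 1:1 mapping must be executable again, so the NX bit is cleared in the PGD entry covering __va(0), and in the per-CPU kernel copy as well when PAX_PER_CPU_PGD is on. The bit manipulation itself is just this, shown with stand-in types and a stand-in constant:

    #include <stdio.h>

    #define DEMO_PAGE_NX (1ULL << 63)

    typedef struct { unsigned long long pgd; } pgd_demo_t;

    static pgd_demo_t clear_nx_demo(pgd_demo_t e)
    {
        e.pgd &= ~DEMO_PAGE_NX;  /* allow execution through this entry */
        return e;
    }

    int main(void)
    {
        pgd_demo_t e = { 0x8000000000000067ULL };  /* NX | present | ... */

        e = clear_nx_demo(e);
        printf("pgd entry: %#llx\n", e.pgd);
        return 0;
    }
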
34927diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34928index 040192b..7d3300f 100644
34929--- a/arch/x86/platform/efi/efi_stub_32.S
34930+++ b/arch/x86/platform/efi/efi_stub_32.S
34931@@ -6,7 +6,9 @@
34932 */
34933
34934 #include <linux/linkage.h>
34935+#include <linux/init.h>
34936 #include <asm/page_types.h>
34937+#include <asm/segment.h>
34938
34939 /*
34940 * efi_call_phys(void *, ...) is a function with variable parameters.
34941@@ -20,7 +22,7 @@
34942 * service functions will comply with gcc calling convention, too.
34943 */
34944
34945-.text
34946+__INIT
34947 ENTRY(efi_call_phys)
34948 /*
34949 * 0. The function can only be called in Linux kernel. So CS has been
34950@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34951 * The mapping of lower virtual memory has been created in prolog and
34952 * epilog.
34953 */
34954- movl $1f, %edx
34955- subl $__PAGE_OFFSET, %edx
34956- jmp *%edx
34957+#ifdef CONFIG_PAX_KERNEXEC
34958+ movl $(__KERNEXEC_EFI_DS), %edx
34959+ mov %edx, %ds
34960+ mov %edx, %es
34961+ mov %edx, %ss
34962+ addl $2f,(1f)
34963+ ljmp *(1f)
34964+
34965+__INITDATA
34966+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34967+.previous
34968+
34969+2:
34970+ subl $2b,(1b)
34971+#else
34972+ jmp 1f-__PAGE_OFFSET
34973 1:
34974+#endif
34975
34976 /*
34977 * 2. Now on the top of stack is the return
34978@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34979 * parameter 2, ..., param n. To make things easy, we save the return
34980 * address of efi_call_phys in a global variable.
34981 */
34982- popl %edx
34983- movl %edx, saved_return_addr
34984- /* get the function pointer into ECX*/
34985- popl %ecx
34986- movl %ecx, efi_rt_function_ptr
34987- movl $2f, %edx
34988- subl $__PAGE_OFFSET, %edx
34989- pushl %edx
34990+ popl (saved_return_addr)
34991+ popl (efi_rt_function_ptr)
34992
34993 /*
34994 * 3. Clear PG bit in %CR0.
34995@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34996 /*
34997 * 5. Call the physical function.
34998 */
34999- jmp *%ecx
35000+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35001
35002-2:
35003 /*
35004 * 6. After EFI runtime service returns, control will return to
35005 * following instruction. We'd better readjust stack pointer first.
35006@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35007 movl %cr0, %edx
35008 orl $0x80000000, %edx
35009 movl %edx, %cr0
35010- jmp 1f
35011-1:
35012+
35013 /*
35014 * 8. Now restore the virtual mode from flat mode by
35015 * adding EIP with PAGE_OFFSET.
35016 */
35017- movl $1f, %edx
35018- jmp *%edx
35019+#ifdef CONFIG_PAX_KERNEXEC
35020+ movl $(__KERNEL_DS), %edx
35021+ mov %edx, %ds
35022+ mov %edx, %es
35023+ mov %edx, %ss
35024+ ljmp $(__KERNEL_CS),$1f
35025+#else
35026+ jmp 1f+__PAGE_OFFSET
35027+#endif
35028 1:
35029
35030 /*
35031 * 9. Balance the stack. And because EAX contain the return value,
35032 * we'd better not clobber it.
35033 */
35034- leal efi_rt_function_ptr, %edx
35035- movl (%edx), %ecx
35036- pushl %ecx
35037+ pushl (efi_rt_function_ptr)
35038
35039 /*
35040- * 10. Push the saved return address onto the stack and return.
35041+ * 10. Return to the saved return address.
35042 */
35043- leal saved_return_addr, %edx
35044- movl (%edx), %ecx
35045- pushl %ecx
35046- ret
35047+ jmpl *(saved_return_addr)
35048 ENDPROC(efi_call_phys)
35049 .previous
35050
35051-.data
35052+__INITDATA
35053 saved_return_addr:
35054 .long 0
35055 efi_rt_function_ptr:
35056diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35057index 86d0f9e..6d499f4 100644
35058--- a/arch/x86/platform/efi/efi_stub_64.S
35059+++ b/arch/x86/platform/efi/efi_stub_64.S
35060@@ -11,6 +11,7 @@
35061 #include <asm/msr.h>
35062 #include <asm/processor-flags.h>
35063 #include <asm/page_types.h>
35064+#include <asm/alternative-asm.h>
35065
35066 #define SAVE_XMM \
35067 mov %rsp, %rax; \
35068@@ -88,6 +89,7 @@ ENTRY(efi_call)
35069 RESTORE_PGT
35070 addq $48, %rsp
35071 RESTORE_XMM
35072+ pax_force_retaddr 0, 1
35073 ret
35074 ENDPROC(efi_call)
35075
35076diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35077index 3005f0c..d06aeb0 100644
35078--- a/arch/x86/platform/intel-mid/intel-mid.c
35079+++ b/arch/x86/platform/intel-mid/intel-mid.c
35080@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35081 /* intel_mid_ops to store sub arch ops */
35082 struct intel_mid_ops *intel_mid_ops;
35083 /* getter function for sub arch ops*/
35084-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35085+static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35086 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35087 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35088
35089@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35090 {
35091 };
35092
35093-static void intel_mid_reboot(void)
35094+static void __noreturn intel_mid_reboot(void)
35095 {
35096 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35097+ BUG();
35098 }
35099
35100 static unsigned long __init intel_mid_calibrate_tsc(void)
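
intel_mid_reboot() gains __noreturn plus a trailing BUG(): the IPC command is supposed to power-cycle the SoC, and if control ever comes back the kernel should trap rather than fall through into a caller that assumed it never would. The annotation matters both to callers and to the compiler, as this sketch shows (the helper name is a stand-in; abort() plays the role of BUG()):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0). */
    static int send_cold_boot_ipc(void)
    {
        return 0;  /* pretend the firmware call returned anyway */
    }

    static void __attribute__((noreturn)) machine_reboot_demo(void)
    {
        send_cold_boot_ipc();
        abort();   /* BUG() analogue: never silently return to the caller */
    }

    int main(void)
    {
        puts("rebooting...");
        machine_reboot_demo();
        /* unreachable: the compiler can now prove nothing follows */
    }
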
35101diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35102index 3c1c386..59a68ed 100644
35103--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35104+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35105@@ -13,6 +13,6 @@
35106 /* For every CPU addition a new get_<cpuname>_ops interface needs
35107 * to be added.
35108 */
35109-extern void *get_penwell_ops(void);
35110-extern void *get_cloverview_ops(void);
35111-extern void *get_tangier_ops(void);
35112+extern const void *get_penwell_ops(void);
35113+extern const void *get_cloverview_ops(void);
35114+extern const void *get_tangier_ops(void);
35115diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35116index 23381d2..8ddc10e 100644
35117--- a/arch/x86/platform/intel-mid/mfld.c
35118+++ b/arch/x86/platform/intel-mid/mfld.c
35119@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35120 pm_power_off = mfld_power_off;
35121 }
35122
35123-void *get_penwell_ops(void)
35124+const void *get_penwell_ops(void)
35125 {
35126 return &penwell_ops;
35127 }
35128
35129-void *get_cloverview_ops(void)
35130+const void *get_cloverview_ops(void)
35131 {
35132 return &penwell_ops;
35133 }
35134diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35135index aaca917..66eadbc 100644
35136--- a/arch/x86/platform/intel-mid/mrfl.c
35137+++ b/arch/x86/platform/intel-mid/mrfl.c
35138@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35139 .arch_setup = tangier_arch_setup,
35140 };
35141
35142-void *get_tangier_ops(void)
35143+const void *get_tangier_ops(void)
35144 {
35145 return &tangier_ops;
35146 }
35147diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35148index c9a0838..fae0977 100644
35149--- a/arch/x86/platform/intel-quark/imr_selftest.c
35150+++ b/arch/x86/platform/intel-quark/imr_selftest.c
35151@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35152 */
35153 static void __init imr_self_test(void)
35154 {
35155- phys_addr_t base = virt_to_phys(&_text);
35156+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35157 size_t size = virt_to_phys(&__end_rodata) - base;
35158 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35159 int ret;
35160diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35161index d6ee929..3637cb5 100644
35162--- a/arch/x86/platform/olpc/olpc_dt.c
35163+++ b/arch/x86/platform/olpc/olpc_dt.c
35164@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35165 return res;
35166 }
35167
35168-static struct of_pdt_ops prom_olpc_ops __initdata = {
35169+static struct of_pdt_ops prom_olpc_ops __initconst = {
35170 .nextprop = olpc_dt_nextprop,
35171 .getproplen = olpc_dt_getproplen,
35172 .getproperty = olpc_dt_getproperty,
35173diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35174index 3e32ed5..cc0adc5 100644
35175--- a/arch/x86/power/cpu.c
35176+++ b/arch/x86/power/cpu.c
35177@@ -134,11 +134,8 @@ static void do_fpu_end(void)
35178 static void fix_processor_context(void)
35179 {
35180 int cpu = smp_processor_id();
35181- struct tss_struct *t = &per_cpu(init_tss, cpu);
35182-#ifdef CONFIG_X86_64
35183- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35184- tss_desc tss;
35185-#endif
35186+ struct tss_struct *t = init_tss + cpu;
35187+
35188 set_tss_desc(cpu, t); /*
35189 * This just modifies memory; should not be
35190 * necessary. But... This is necessary, because
35191@@ -147,10 +144,6 @@ static void fix_processor_context(void)
35192 */
35193
35194 #ifdef CONFIG_X86_64
35195- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35196- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35197- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35198-
35199 syscall_init(); /* This sets MSR_*STAR and related */
35200 #endif
35201 load_TR_desc(); /* This does ltr */
35202diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35203index 0b7a63d..0d0f2c2 100644
35204--- a/arch/x86/realmode/init.c
35205+++ b/arch/x86/realmode/init.c
35206@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35207 __va(real_mode_header->trampoline_header);
35208
35209 #ifdef CONFIG_X86_32
35210- trampoline_header->start = __pa_symbol(startup_32_smp);
35211+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35212+
35213+#ifdef CONFIG_PAX_KERNEXEC
35214+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35215+#endif
35216+
35217+ trampoline_header->boot_cs = __BOOT_CS;
35218 trampoline_header->gdt_limit = __BOOT_DS + 7;
35219 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35220 #else
35221@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35222 *trampoline_cr4_features = __read_cr4();
35223
35224 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35225- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35226+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35227 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35228 #endif
35229 }
35230diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35231index 2730d77..2e4cd19 100644
35232--- a/arch/x86/realmode/rm/Makefile
35233+++ b/arch/x86/realmode/rm/Makefile
35234@@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35235
35236 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35237 -I$(srctree)/arch/x86/boot
35238+ifdef CONSTIFY_PLUGIN
35239+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35240+endif
35241 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35242 GCOV_PROFILE := n
35243diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35244index a28221d..93c40f1 100644
35245--- a/arch/x86/realmode/rm/header.S
35246+++ b/arch/x86/realmode/rm/header.S
35247@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35248 #endif
35249 /* APM/BIOS reboot */
35250 .long pa_machine_real_restart_asm
35251-#ifdef CONFIG_X86_64
35252+#ifdef CONFIG_X86_32
35253+ .long __KERNEL_CS
35254+#else
35255 .long __KERNEL32_CS
35256 #endif
35257 END(real_mode_header)
35258diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
35259index d66c607..3def845 100644
35260--- a/arch/x86/realmode/rm/reboot.S
35261+++ b/arch/x86/realmode/rm/reboot.S
35262@@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
35263 lgdtl pa_tr_gdt
35264
35265 /* Disable paging to drop us out of long mode */
35266+ movl %cr4, %eax
35267+ andl $~X86_CR4_PCIDE, %eax
35268+ movl %eax, %cr4
35269+
35270 movl %cr0, %eax
35271 andl $~X86_CR0_PG, %eax
35272 movl %eax, %cr0
35273diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35274index 48ddd76..c26749f 100644
35275--- a/arch/x86/realmode/rm/trampoline_32.S
35276+++ b/arch/x86/realmode/rm/trampoline_32.S
35277@@ -24,6 +24,12 @@
35278 #include <asm/page_types.h>
35279 #include "realmode.h"
35280
35281+#ifdef CONFIG_PAX_KERNEXEC
35282+#define ta(X) (X)
35283+#else
35284+#define ta(X) (pa_ ## X)
35285+#endif
35286+
35287 .text
35288 .code16
35289
35290@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35291
35292 cli # We should be safe anyway
35293
35294- movl tr_start, %eax # where we need to go
35295-
35296 movl $0xA5A5A5A5, trampoline_status
35297 # write marker for master knows we're running
35298
35299@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35300 movw $1, %dx # protected mode (PE) bit
35301 lmsw %dx # into protected mode
35302
35303- ljmpl $__BOOT_CS, $pa_startup_32
35304+ ljmpl *(trampoline_header)
35305
35306 .section ".text32","ax"
35307 .code32
35308@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35309 .balign 8
35310 GLOBAL(trampoline_header)
35311 tr_start: .space 4
35312- tr_gdt_pad: .space 2
35313+ tr_boot_cs: .space 2
35314 tr_gdt: .space 6
35315 END(trampoline_header)
35316
35317diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35318index dac7b20..72dbaca 100644
35319--- a/arch/x86/realmode/rm/trampoline_64.S
35320+++ b/arch/x86/realmode/rm/trampoline_64.S
35321@@ -93,6 +93,7 @@ ENTRY(startup_32)
35322 movl %edx, %gs
35323
35324 movl pa_tr_cr4, %eax
35325+ andl $~X86_CR4_PCIDE, %eax
35326 movl %eax, %cr4 # Enable PAE mode
35327
35328 # Setup trampoline 4 level pagetables
35329@@ -106,7 +107,7 @@ ENTRY(startup_32)
35330 wrmsr
35331
35332 # Enable paging and in turn activate Long Mode
35333- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35334+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35335 movl %eax, %cr0
35336
35337 /*
35338diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35339index 9e7e147..25a4158 100644
35340--- a/arch/x86/realmode/rm/wakeup_asm.S
35341+++ b/arch/x86/realmode/rm/wakeup_asm.S
35342@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35343 lgdtl pmode_gdt
35344
35345 /* This really couldn't... */
35346- movl pmode_entry, %eax
35347 movl pmode_cr0, %ecx
35348 movl %ecx, %cr0
35349- ljmpl $__KERNEL_CS, $pa_startup_32
35350- /* -> jmp *%eax in trampoline_32.S */
35351+
35352+ ljmpl *pmode_entry
35353 #else
35354 jmp trampoline_start
35355 #endif
35356diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35357index 604a37e..e49702a 100644
35358--- a/arch/x86/tools/Makefile
35359+++ b/arch/x86/tools/Makefile
35360@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35361
35362 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35363
35364-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35365+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35366 hostprogs-y += relocs
35367 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35368 PHONY += relocs
35369diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35370index 0c2fae8..88036b7 100644
35371--- a/arch/x86/tools/relocs.c
35372+++ b/arch/x86/tools/relocs.c
35373@@ -1,5 +1,7 @@
35374 /* This is included from relocs_32/64.c */
35375
35376+#include "../../../include/generated/autoconf.h"
35377+
35378 #define ElfW(type) _ElfW(ELF_BITS, type)
35379 #define _ElfW(bits, type) __ElfW(bits, type)
35380 #define __ElfW(bits, type) Elf##bits##_##type
35381@@ -11,6 +13,7 @@
35382 #define Elf_Sym ElfW(Sym)
35383
35384 static Elf_Ehdr ehdr;
35385+static Elf_Phdr *phdr;
35386
35387 struct relocs {
35388 uint32_t *offset;
35389@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35390 }
35391 }
35392
35393+static void read_phdrs(FILE *fp)
35394+{
35395+ unsigned int i;
35396+
35397+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35398+ if (!phdr) {
35399+ die("Unable to allocate %d program headers\n",
35400+ ehdr.e_phnum);
35401+ }
35402+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35403+ die("Seek to %d failed: %s\n",
35404+ ehdr.e_phoff, strerror(errno));
35405+ }
35406+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35407+ die("Cannot read ELF program headers: %s\n",
35408+ strerror(errno));
35409+ }
35410+ for(i = 0; i < ehdr.e_phnum; i++) {
35411+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35412+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35413+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35414+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35415+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35416+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35417+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35418+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35419+ }
35420+
35421+}
35422+
35423 static void read_shdrs(FILE *fp)
35424 {
35425- int i;
35426+ unsigned int i;
35427 Elf_Shdr shdr;
35428
35429 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35430@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35431
35432 static void read_strtabs(FILE *fp)
35433 {
35434- int i;
35435+ unsigned int i;
35436 for (i = 0; i < ehdr.e_shnum; i++) {
35437 struct section *sec = &secs[i];
35438 if (sec->shdr.sh_type != SHT_STRTAB) {
35439@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35440
35441 static void read_symtabs(FILE *fp)
35442 {
35443- int i,j;
35444+ unsigned int i,j;
35445 for (i = 0; i < ehdr.e_shnum; i++) {
35446 struct section *sec = &secs[i];
35447 if (sec->shdr.sh_type != SHT_SYMTAB) {
35448@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35449 }
35450
35451
35452-static void read_relocs(FILE *fp)
35453+static void read_relocs(FILE *fp, int use_real_mode)
35454 {
35455- int i,j;
35456+ unsigned int i,j;
35457+ uint32_t base;
35458+
35459 for (i = 0; i < ehdr.e_shnum; i++) {
35460 struct section *sec = &secs[i];
35461 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35462@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35463 die("Cannot read symbol table: %s\n",
35464 strerror(errno));
35465 }
35466+ base = 0;
35467+
35468+#ifdef CONFIG_X86_32
35469+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35470+ if (phdr[j].p_type != PT_LOAD )
35471+ continue;
35472+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35473+ continue;
35474+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35475+ break;
35476+ }
35477+#endif
35478+
35479 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35480 Elf_Rel *rel = &sec->reltab[j];
35481- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35482+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35483 rel->r_info = elf_xword_to_cpu(rel->r_info);
35484 #if (SHT_REL_TYPE == SHT_RELA)
35485 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35486@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35487
35488 static void print_absolute_symbols(void)
35489 {
35490- int i;
35491+ unsigned int i;
35492 const char *format;
35493
35494 if (ELF_BITS == 64)
35495@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35496 for (i = 0; i < ehdr.e_shnum; i++) {
35497 struct section *sec = &secs[i];
35498 char *sym_strtab;
35499- int j;
35500+ unsigned int j;
35501
35502 if (sec->shdr.sh_type != SHT_SYMTAB) {
35503 continue;
35504@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35505
35506 static void print_absolute_relocs(void)
35507 {
35508- int i, printed = 0;
35509+ unsigned int i, printed = 0;
35510 const char *format;
35511
35512 if (ELF_BITS == 64)
35513@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35514 struct section *sec_applies, *sec_symtab;
35515 char *sym_strtab;
35516 Elf_Sym *sh_symtab;
35517- int j;
35518+ unsigned int j;
35519 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35520 continue;
35521 }
35522@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35523 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35524 Elf_Sym *sym, const char *symname))
35525 {
35526- int i;
35527+ unsigned int i;
35528 /* Walk through the relocations */
35529 for (i = 0; i < ehdr.e_shnum; i++) {
35530 char *sym_strtab;
35531 Elf_Sym *sh_symtab;
35532 struct section *sec_applies, *sec_symtab;
35533- int j;
35534+ unsigned int j;
35535 struct section *sec = &secs[i];
35536
35537 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35538@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35539 {
35540 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35541 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35542+ char *sym_strtab = sec->link->link->strtab;
35543+
35544+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35545+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35546+ return 0;
35547+
35548+#ifdef CONFIG_PAX_KERNEXEC
35549+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35550+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35551+ return 0;
35552+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35553+ return 0;
35554+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35555+ return 0;
35556+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35557+ return 0;
35558+#endif
35559
35560 switch (r_type) {
35561 case R_386_NONE:
35562@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35563
35564 static void emit_relocs(int as_text, int use_real_mode)
35565 {
35566- int i;
35567+ unsigned int i;
35568 int (*write_reloc)(uint32_t, FILE *) = write32;
35569 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35570 const char *symname);
35571@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35572 {
35573 regex_init(use_real_mode);
35574 read_ehdr(fp);
35575+ read_phdrs(fp);
35576 read_shdrs(fp);
35577 read_strtabs(fp);
35578 read_symtabs(fp);
35579- read_relocs(fp);
35580+ read_relocs(fp, use_real_mode);
35581 if (ELF_BITS == 64)
35582 percpu_init();
35583 if (show_absolute_syms) {
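[Annotation] The relocs changes teach the host tool about ELF program headers so that, on i386 KERNEXEC builds where sections are linked at one address but loaded at another, every relocation offset inside a PT_LOAD segment can be rebased by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr; per-cpu symbols and KERNEXEC-managed text are skipped entirely. A simplified host-side sketch of the added read_phdrs() (ELF32 only; byte-swapping elided, whereas the real tool converts every field through elf_*_to_cpu()):

    #include <elf.h>
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static Elf32_Phdr *read_phdrs32(FILE *fp, const Elf32_Ehdr *ehdr)
    {
        Elf32_Phdr *phdr = calloc(ehdr->e_phnum, sizeof(*phdr));

        if (!phdr)
            return NULL;
        if (fseek(fp, ehdr->e_phoff, SEEK_SET) < 0 ||
            fread(phdr, sizeof(*phdr), ehdr->e_phnum, fp) != ehdr->e_phnum) {
            fprintf(stderr, "reading program headers: %s\n", strerror(errno));
            free(phdr);
            return NULL;
        }
        return phdr;
    }

    /* The per-segment bias added to each r_offset that falls inside a
     * PT_LOAD segment (PAGE_OFFSET is a build-time constant): */
    static uint32_t reloc_base(const Elf32_Phdr *p, uint32_t page_offset)
    {
        return page_offset + p->p_paddr - p->p_vaddr;
    }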
35584diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35585index f40281e..92728c9 100644
35586--- a/arch/x86/um/mem_32.c
35587+++ b/arch/x86/um/mem_32.c
35588@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35589 gate_vma.vm_start = FIXADDR_USER_START;
35590 gate_vma.vm_end = FIXADDR_USER_END;
35591 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35592- gate_vma.vm_page_prot = __P101;
35593+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35594
35595 return 0;
35596 }
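[Annotation] Replacing the hard-coded __P101 with vm_get_page_prot(gate_vma.vm_flags) derives the protection from the VMA flags through the normal lookup, so any adjustments PaX makes to the protection maps apply to the gate VMA as well. A toy illustration of the lookup idea; protection_map and the helper name mirror the kernel, while the string encoding is invented for the demo:

    #include <stdio.h>

    #define VM_READ  0x1UL
    #define VM_WRITE 0x2UL
    #define VM_EXEC  0x4UL

    static const char *protection_map[8] = {
        "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx",
    };

    /* __P101 named the "read + execute" entry directly; indexing by the
     * flags keeps a single source of truth: */
    static const char *vm_get_page_prot_demo(unsigned long vm_flags)
    {
        return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC)];
    }

    int main(void)
    {
        puts(vm_get_page_prot_demo(VM_READ | VM_EXEC)); /* "r-x" */
        return 0;
    }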
35597diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35598index 80ffa5b..a33bd15 100644
35599--- a/arch/x86/um/tls_32.c
35600+++ b/arch/x86/um/tls_32.c
35601@@ -260,7 +260,7 @@ out:
35602 if (unlikely(task == current &&
35603 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35604 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35605- "without flushed TLS.", current->pid);
35606+ "without flushed TLS.", task_pid_nr(current));
35607 }
35608
35609 return 0;
35610diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35611index 7b9be98..39bb57f 100644
35612--- a/arch/x86/vdso/Makefile
35613+++ b/arch/x86/vdso/Makefile
35614@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35615 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35616 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35617
35618-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35619+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35620 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35621 GCOV_PROFILE := n
35622
35623diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35624index 0224987..c7d65a5 100644
35625--- a/arch/x86/vdso/vdso2c.h
35626+++ b/arch/x86/vdso/vdso2c.h
35627@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35628 unsigned long load_size = -1; /* Work around bogus warning */
35629 unsigned long mapping_size;
35630 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35631- int i;
35632+ unsigned int i;
35633 unsigned long j;
35634 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35635 *alt_sec = NULL;
35636diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35637index e904c27..b9eaa03 100644
35638--- a/arch/x86/vdso/vdso32-setup.c
35639+++ b/arch/x86/vdso/vdso32-setup.c
35640@@ -14,6 +14,7 @@
35641 #include <asm/cpufeature.h>
35642 #include <asm/processor.h>
35643 #include <asm/vdso.h>
35644+#include <asm/mman.h>
35645
35646 #ifdef CONFIG_COMPAT_VDSO
35647 #define VDSO_DEFAULT 0
35648diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35649index 1c9f750..cfddb1a 100644
35650--- a/arch/x86/vdso/vma.c
35651+++ b/arch/x86/vdso/vma.c
35652@@ -19,10 +19,7 @@
35653 #include <asm/page.h>
35654 #include <asm/hpet.h>
35655 #include <asm/desc.h>
35656-
35657-#if defined(CONFIG_X86_64)
35658-unsigned int __read_mostly vdso64_enabled = 1;
35659-#endif
35660+#include <asm/mman.h>
35661
35662 void __init init_vdso_image(const struct vdso_image *image)
35663 {
35664@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35665 .pages = no_pages,
35666 };
35667
35668+#ifdef CONFIG_PAX_RANDMMAP
35669+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35670+ calculate_addr = false;
35671+#endif
35672+
35673 if (calculate_addr) {
35674 addr = vdso_addr(current->mm->start_stack,
35675 image->size - image->sym_vvar_start);
35676@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35677 down_write(&mm->mmap_sem);
35678
35679 addr = get_unmapped_area(NULL, addr,
35680- image->size - image->sym_vvar_start, 0, 0);
35681+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35682 if (IS_ERR_VALUE(addr)) {
35683 ret = addr;
35684 goto up_fail;
35685 }
35686
35687 text_start = addr - image->sym_vvar_start;
35688- current->mm->context.vdso = (void __user *)text_start;
35689+ mm->context.vdso = text_start;
35690
35691 /*
35692 * MAYWRITE to allow gdb to COW and set breakpoints
35693@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35694 hpet_address >> PAGE_SHIFT,
35695 PAGE_SIZE,
35696 pgprot_noncached(PAGE_READONLY));
35697-
35698- if (ret)
35699- goto up_fail;
35700 }
35701 #endif
35702
35703 up_fail:
35704 if (ret)
35705- current->mm->context.vdso = NULL;
35706+ current->mm->context.vdso = 0;
35707
35708 up_write(&mm->mmap_sem);
35709 return ret;
35710@@ -191,8 +190,8 @@ static int load_vdso32(void)
35711
35712 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35713 current_thread_info()->sysenter_return =
35714- current->mm->context.vdso +
35715- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35716+ (void __force_user *)(current->mm->context.vdso +
35717+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35718
35719 return 0;
35720 }
35721@@ -201,9 +200,6 @@ static int load_vdso32(void)
35722 #ifdef CONFIG_X86_64
35723 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35724 {
35725- if (!vdso64_enabled)
35726- return 0;
35727-
35728 return map_vdso(&vdso_image_64, true);
35729 }
35730
35731@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35732 int uses_interp)
35733 {
35734 #ifdef CONFIG_X86_X32_ABI
35735- if (test_thread_flag(TIF_X32)) {
35736- if (!vdso64_enabled)
35737- return 0;
35738-
35739+ if (test_thread_flag(TIF_X32))
35740 return map_vdso(&vdso_image_x32, true);
35741- }
35742 #endif
35743
35744 return load_vdso32();
35745@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35746 #endif
35747
35748 #ifdef CONFIG_X86_64
35749-static __init int vdso_setup(char *s)
35750-{
35751- vdso64_enabled = simple_strtoul(s, NULL, 0);
35752- return 0;
35753-}
35754-__setup("vdso=", vdso_setup);
35755-#endif
35756-
35757-#ifdef CONFIG_X86_64
35758 static void vgetcpu_cpu_init(void *arg)
35759 {
35760 int cpu = smp_processor_id();
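[Annotation] In map_vdso(), tasks flagged MF_PAX_RANDMMAP now ignore the conventional stack-relative vDSO placement: calculate_addr is forced off so get_unmapped_area() (called with MAP_EXECUTABLE so exec-aware policies see the mapping) picks a slot from the randomized mmap layout. A sketch of the hint selection with simplified types; vdso_addr_stub() stands in for the kernel helper:

    #include <stdbool.h>

    static unsigned long vdso_addr_stub(unsigned long stack_top, unsigned long len)
    {
        return stack_top - len;          /* placeholder for the real heuristic */
    }

    /* hint == 0 tells the allocator "anywhere", i.e. a randomized address */
    static unsigned long choose_vdso_hint(bool pax_randmmap,
                                          unsigned long stack_top,
                                          unsigned long image_size)
    {
        if (pax_randmmap)
            return 0;                    /* calculate_addr = false */
        return vdso_addr_stub(stack_top, image_size);
    }

    int main(void)
    {
        return choose_vdso_hint(true, 0xc0000000UL, 0x2000) == 0 ? 0 : 1;
    }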
35761diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35762index e88fda8..76ce7ce 100644
35763--- a/arch/x86/xen/Kconfig
35764+++ b/arch/x86/xen/Kconfig
35765@@ -9,6 +9,7 @@ config XEN
35766 select XEN_HAVE_PVMMU
35767 depends on X86_64 || (X86_32 && X86_PAE)
35768 depends on X86_TSC
35769+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35770 help
35771 This is the Linux Xen port. Enabling this will allow the
35772 kernel to boot in a paravirtualized environment under the
35773diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35774index 5240f56..0c12163 100644
35775--- a/arch/x86/xen/enlighten.c
35776+++ b/arch/x86/xen/enlighten.c
35777@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35778
35779 struct shared_info xen_dummy_shared_info;
35780
35781-void *xen_initial_gdt;
35782-
35783 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35784 __read_mostly int xen_have_vector_callback;
35785 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35786@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35787 {
35788 unsigned long va = dtr->address;
35789 unsigned int size = dtr->size + 1;
35790- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35791- unsigned long frames[pages];
35792+ unsigned long frames[65536 / PAGE_SIZE];
35793 int f;
35794
35795 /*
35796@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35797 {
35798 unsigned long va = dtr->address;
35799 unsigned int size = dtr->size + 1;
35800- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35801- unsigned long frames[pages];
35802+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35803 int f;
35804
35805 /*
35806@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35807 * 8-byte entries, or 16 4k pages..
35808 */
35809
35810- BUG_ON(size > 65536);
35811+ BUG_ON(size > GDT_SIZE);
35812 BUG_ON(va & ~PAGE_MASK);
35813
35814 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35815@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35816 return 0;
35817 }
35818
35819-static void set_xen_basic_apic_ops(void)
35820+static void __init set_xen_basic_apic_ops(void)
35821 {
35822 apic->read = xen_apic_read;
35823 apic->write = xen_apic_write;
35824@@ -1308,30 +1304,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35825 #endif
35826 };
35827
35828-static void xen_reboot(int reason)
35829+static __noreturn void xen_reboot(int reason)
35830 {
35831 struct sched_shutdown r = { .reason = reason };
35832
35833- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35834- BUG();
35835+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35836+ BUG();
35837 }
35838
35839-static void xen_restart(char *msg)
35840+static __noreturn void xen_restart(char *msg)
35841 {
35842 xen_reboot(SHUTDOWN_reboot);
35843 }
35844
35845-static void xen_emergency_restart(void)
35846+static __noreturn void xen_emergency_restart(void)
35847 {
35848 xen_reboot(SHUTDOWN_reboot);
35849 }
35850
35851-static void xen_machine_halt(void)
35852+static __noreturn void xen_machine_halt(void)
35853 {
35854 xen_reboot(SHUTDOWN_poweroff);
35855 }
35856
35857-static void xen_machine_power_off(void)
35858+static __noreturn void xen_machine_power_off(void)
35859 {
35860 if (pm_power_off)
35861 pm_power_off();
35862@@ -1484,8 +1480,11 @@ static void __ref xen_setup_gdt(int cpu)
35863 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35864 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35865
35866- setup_stack_canary_segment(0);
35867- switch_to_new_gdt(0);
35868+ setup_stack_canary_segment(cpu);
35869+#ifdef CONFIG_X86_64
35870+ load_percpu_segment(cpu);
35871+#endif
35872+ switch_to_new_gdt(cpu);
35873
35874 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35875 pv_cpu_ops.load_gdt = xen_load_gdt;
35876@@ -1600,7 +1599,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35877 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35878
35879 /* Work out if we support NX */
35880- x86_configure_nx();
35881+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35882+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35883+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35884+ unsigned l, h;
35885+
35886+ __supported_pte_mask |= _PAGE_NX;
35887+ rdmsr(MSR_EFER, l, h);
35888+ l |= EFER_NX;
35889+ wrmsr(MSR_EFER, l, h);
35890+ }
35891+#endif
35892
35893 /* Get mfn list */
35894 xen_build_dynamic_phys_to_machine();
35895@@ -1628,13 +1637,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35896
35897 machine_ops = xen_machine_ops;
35898
35899- /*
35900- * The only reliable way to retain the initial address of the
35901- * percpu gdt_page is to remember it here, so we can go and
35902- * mark it RW later, when the initial percpu area is freed.
35903- */
35904- xen_initial_gdt = &per_cpu(gdt_page, 0);
35905-
35906 xen_smp_init();
35907
35908 #ifdef CONFIG_ACPI_NUMA
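[Annotation] Among the enlighten.c changes, the xen_load_gdt()/xen_load_gdt_boot() hunks show a recurring pattern in this patch: a stack VLA whose length comes from a register image is replaced by a fixed array sized from the compile-time maximum, with the runtime check tightened (65536 becomes GDT_SIZE) to match. A userspace sketch under illustrative constants (GDT_ENTRIES is arch-dependent in the kernel):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE   4096UL
    #define GDT_ENTRIES 32                  /* illustrative */
    #define GDT_SIZE    (GDT_ENTRIES * 8)

    static void collect_gdt_frames(uintptr_t va, unsigned int size)
    {
        /* was: unsigned long frames[(size + PAGE_SIZE - 1) / PAGE_SIZE]; */
        unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
        unsigned int f = 0;

        assert(size <= GDT_SIZE);            /* was: BUG_ON(size > 65536) */
        assert((va & (PAGE_SIZE - 1)) == 0);

        for (uintptr_t p = va; p < va + size; p += PAGE_SIZE)
            frames[f++] = p >> 12;           /* stand-in for the mfn lookup */
        (void)frames;
    }

    int main(void)
    {
        collect_gdt_frames(0x100000, GDT_SIZE);
        return 0;
    }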
35909diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35910index adca9e2..35d6a98 100644
35911--- a/arch/x86/xen/mmu.c
35912+++ b/arch/x86/xen/mmu.c
35913@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35914 return val;
35915 }
35916
35917-static pteval_t pte_pfn_to_mfn(pteval_t val)
35918+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35919 {
35920 if (val & _PAGE_PRESENT) {
35921 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35922@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35923 * L3_k[511] -> level2_fixmap_pgt */
35924 convert_pfn_mfn(level3_kernel_pgt);
35925
35926+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35927+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35928+ convert_pfn_mfn(level3_vmemmap_pgt);
35929 /* L3_k[511][506] -> level1_fixmap_pgt */
35930+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35931 convert_pfn_mfn(level2_fixmap_pgt);
35932 }
35933 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35934@@ -1860,11 +1864,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35935 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35936 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35937 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35938+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35939+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35940+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35941 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35942 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35943+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35944 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35945 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35946 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35947+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35948
35949 /* Pin down new L4 */
35950 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35951@@ -2048,6 +2057,7 @@ static void __init xen_post_allocator_init(void)
35952 pv_mmu_ops.set_pud = xen_set_pud;
35953 #if PAGETABLE_LEVELS == 4
35954 pv_mmu_ops.set_pgd = xen_set_pgd;
35955+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35956 #endif
35957
35958 /* This will work as long as patching hasn't happened yet
35959@@ -2126,6 +2136,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35960 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35961 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35962 .set_pgd = xen_set_pgd_hyper,
35963+ .set_pgd_batched = xen_set_pgd_hyper,
35964
35965 .alloc_pud = xen_alloc_pmd_init,
35966 .release_pud = xen_release_pmd_init,
35967diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35968index 08e8489..b1e182f 100644
35969--- a/arch/x86/xen/smp.c
35970+++ b/arch/x86/xen/smp.c
35971@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35972
35973 if (xen_pv_domain()) {
35974 if (!xen_feature(XENFEAT_writable_page_tables))
35975- /* We've switched to the "real" per-cpu gdt, so make
35976- * sure the old memory can be recycled. */
35977- make_lowmem_page_readwrite(xen_initial_gdt);
35978-
35979 #ifdef CONFIG_X86_32
35980 /*
35981 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35982 * expects __USER_DS
35983 */
35984- loadsegment(ds, __USER_DS);
35985- loadsegment(es, __USER_DS);
35986+ loadsegment(ds, __KERNEL_DS);
35987+ loadsegment(es, __KERNEL_DS);
35988 #endif
35989
35990 xen_filter_cpu_maps();
35991@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35992 #ifdef CONFIG_X86_32
35993 /* Note: PVH is not yet supported on x86_32. */
35994 ctxt->user_regs.fs = __KERNEL_PERCPU;
35995- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35996+ savesegment(gs, ctxt->user_regs.gs);
35997 #endif
35998 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35999
36000@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36001 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36002 ctxt->flags = VGCF_IN_KERNEL;
36003 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36004- ctxt->user_regs.ds = __USER_DS;
36005- ctxt->user_regs.es = __USER_DS;
36006+ ctxt->user_regs.ds = __KERNEL_DS;
36007+ ctxt->user_regs.es = __KERNEL_DS;
36008 ctxt->user_regs.ss = __KERNEL_DS;
36009
36010 xen_copy_trap_info(ctxt->trap_ctxt);
36011@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36012 int rc;
36013
36014 per_cpu(current_task, cpu) = idle;
36015+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36016 #ifdef CONFIG_X86_32
36017 irq_ctx_init(cpu);
36018 #else
36019 clear_tsk_thread_flag(idle, TIF_FORK);
36020 #endif
36021- per_cpu(kernel_stack, cpu) =
36022- (unsigned long)task_stack_page(idle) -
36023- KERNEL_STACK_OFFSET + THREAD_SIZE;
36024+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36025
36026 xen_setup_runstate_info(cpu);
36027 xen_setup_timer(cpu);
36028@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36029
36030 void __init xen_smp_init(void)
36031 {
36032- smp_ops = xen_smp_ops;
36033+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36034 xen_fill_possible_map();
36035 }
36036
36037diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36038index fd92a64..1f72641 100644
36039--- a/arch/x86/xen/xen-asm_32.S
36040+++ b/arch/x86/xen/xen-asm_32.S
36041@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36042 pushw %fs
36043 movl $(__KERNEL_PERCPU), %eax
36044 movl %eax, %fs
36045- movl %fs:xen_vcpu, %eax
36046+ mov PER_CPU_VAR(xen_vcpu), %eax
36047 POP_FS
36048 #else
36049 movl %ss:xen_vcpu, %eax
36050diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36051index 674b2225..f1f5dc1 100644
36052--- a/arch/x86/xen/xen-head.S
36053+++ b/arch/x86/xen/xen-head.S
36054@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36055 #ifdef CONFIG_X86_32
36056 mov %esi,xen_start_info
36057 mov $init_thread_union+THREAD_SIZE,%esp
36058+#ifdef CONFIG_SMP
36059+ movl $cpu_gdt_table,%edi
36060+ movl $__per_cpu_load,%eax
36061+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36062+ rorl $16,%eax
36063+ movb %al,__KERNEL_PERCPU + 4(%edi)
36064+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36065+ movl $__per_cpu_end - 1,%eax
36066+ subl $__per_cpu_start,%eax
36067+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36068+#endif
36069 #else
36070 mov %rsi,xen_start_info
36071 mov $init_thread_union+THREAD_SIZE,%rsp
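[Annotation] The added xen-head.S instructions point the __KERNEL_PERCPU GDT descriptor at the per-cpu area before any C code runs, since this patch's i386 per-cpu scheme addresses per-cpu data through a segment with a non-zero base. The byte shuffling follows the x86 descriptor layout; a C sketch of the same encoding (the limit-high nibble at byte 6 is elided, as the assembly also leaves it untouched):

    #include <stdint.h>
    #include <stdio.h>

    /* x86 segment descriptor: limit 15:0 at bytes 0-1, base 15:0 at
     * bytes 2-3, base 23:16 at byte 4, base 31:24 at byte 7. */
    static void set_desc_base_limit(uint8_t desc[8], uint32_t base, uint32_t limit)
    {
        desc[0] = limit & 0xff;
        desc[1] = (limit >> 8) & 0xff;
        desc[2] = base & 0xff;
        desc[3] = (base >> 8) & 0xff;
        desc[4] = (base >> 16) & 0xff;
        desc[7] = (base >> 24) & 0xff;
    }

    int main(void)
    {
        uint8_t desc[8] = { 0 };

        /* base = __per_cpu_load, limit = per-cpu size - 1 (example values) */
        set_desc_base_limit(desc, 0xc1a00000u, 0x7fffu);
        for (int i = 0; i < 8; i++)
            printf("%02x ", desc[i]);
        printf("\n");
        return 0;
    }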
36072diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36073index 9e195c6..523ed36 100644
36074--- a/arch/x86/xen/xen-ops.h
36075+++ b/arch/x86/xen/xen-ops.h
36076@@ -16,8 +16,6 @@ void xen_syscall_target(void);
36077 void xen_syscall32_target(void);
36078 #endif
36079
36080-extern void *xen_initial_gdt;
36081-
36082 struct trap_info;
36083 void xen_copy_trap_info(struct trap_info *traps);
36084
36085diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36086index 525bd3d..ef888b1 100644
36087--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36088+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36089@@ -119,9 +119,9 @@
36090 ----------------------------------------------------------------------*/
36091
36092 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36093-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36094 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36095 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36096+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36097
36098 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36099 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36100diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36101index 2f33760..835e50a 100644
36102--- a/arch/xtensa/variants/fsf/include/variant/core.h
36103+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36104@@ -11,6 +11,7 @@
36105 #ifndef _XTENSA_CORE_H
36106 #define _XTENSA_CORE_H
36107
36108+#include <linux/const.h>
36109
36110 /****************************************************************************
36111 Parameters Useful for Any Code, USER or PRIVILEGED
36112@@ -112,9 +113,9 @@
36113 ----------------------------------------------------------------------*/
36114
36115 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36116-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36117 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36118 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36119+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36120
36121 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36122 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
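[Annotation] Both xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE is re-derived from XCHAL_DCACHE_LINEWIDTH via _AC(1,UL), which keeps the two constants consistent by construction and gives the value a UL suffix in C while remaining usable from assembly (hence the added <linux/const.h> include). A minimal re-creation of the _AC() mechanism, abridged from include/uapi/linux/const.h:

    #include <stdio.h>

    /* The suffix is pasted on in C but dropped in assembly, where "1UL"
     * would not parse. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define XCHAL_DCACHE_LINEWIDTH 5
    #define XCHAL_DCACHE_LINESIZE  (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)

    int main(void)
    {
        printf("%lu\n", XCHAL_DCACHE_LINESIZE); /* 32 */
        return 0;
    }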
36123diff --git a/block/bio.c b/block/bio.c
36124index f66a4ea..73ddf55 100644
36125--- a/block/bio.c
36126+++ b/block/bio.c
36127@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36128 /*
36129 * Overflow, abort
36130 */
36131- if (end < start)
36132+ if (end < start || end - start > INT_MAX - nr_pages)
36133 return ERR_PTR(-EINVAL);
36134
36135 nr_pages += end - start;
36136@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36137 /*
36138 * Overflow, abort
36139 */
36140- if (end < start)
36141+ if (end < start || end - start > INT_MAX - nr_pages)
36142 return ERR_PTR(-EINVAL);
36143
36144 nr_pages += end - start;
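[Annotation] The bio changes tighten the page-count sanity check: `end < start` only catches a wrapped range, while nr_pages accumulates across iov segments as an int, so a crafted iov could still push the sum past INT_MAX. The added condition rejects any segment that would overflow the running total. The check in isolation:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* start/end are page indices; nr_pages is the running total so far. */
    static bool segment_ok(unsigned long start, unsigned long end, int nr_pages)
    {
        if (end < start)                                 /* wrapped range  */
            return false;
        if (end - start > (unsigned long)(INT_MAX - nr_pages))
            return false;                                /* would overflow */
        return true;
    }

    int main(void)
    {
        printf("%d\n", segment_ok(0, (unsigned long)INT_MAX + 10, 0)); /* 0 */
        printf("%d\n", segment_ok(0, 16, INT_MAX - 8));                /* 0 */
        printf("%d\n", segment_ok(0, 16, 0));                          /* 1 */
        return 0;
    }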
36145diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36146index 0736729..2ec3b48 100644
36147--- a/block/blk-iopoll.c
36148+++ b/block/blk-iopoll.c
36149@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36150 }
36151 EXPORT_SYMBOL(blk_iopoll_complete);
36152
36153-static void blk_iopoll_softirq(struct softirq_action *h)
36154+static __latent_entropy void blk_iopoll_softirq(void)
36155 {
36156 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36157 int rearm = 0, budget = blk_iopoll_budget;
36158diff --git a/block/blk-map.c b/block/blk-map.c
36159index b8d2725..08c52b0 100644
36160--- a/block/blk-map.c
36161+++ b/block/blk-map.c
36162@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36163 if (!len || !kbuf)
36164 return -EINVAL;
36165
36166- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36167+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36168 if (do_copy)
36169 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36170 else
36171diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36172index 53b1737..08177d2e 100644
36173--- a/block/blk-softirq.c
36174+++ b/block/blk-softirq.c
36175@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36176 * Softirq action handler - move entries to local list and loop over them
36177 * while passing them to the queue registered handler.
36178 */
36179-static void blk_done_softirq(struct softirq_action *h)
36180+static __latent_entropy void blk_done_softirq(void)
36181 {
36182 struct list_head *cpu_list, local_list;
36183
36184diff --git a/block/bsg.c b/block/bsg.c
36185index d214e92..9649863 100644
36186--- a/block/bsg.c
36187+++ b/block/bsg.c
36188@@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36189 struct sg_io_v4 *hdr, struct bsg_device *bd,
36190 fmode_t has_write_perm)
36191 {
36192+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36193+ unsigned char *cmdptr;
36194+
36195 if (hdr->request_len > BLK_MAX_CDB) {
36196 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36197 if (!rq->cmd)
36198 return -ENOMEM;
36199- }
36200+ cmdptr = rq->cmd;
36201+ } else
36202+ cmdptr = tmpcmd;
36203
36204- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36205+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36206 hdr->request_len))
36207 return -EFAULT;
36208
36209+ if (cmdptr != rq->cmd)
36210+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36211+
36212 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36213 if (blk_verify_command(rq->cmd, has_write_perm))
36214 return -EPERM;
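[Annotation] blk_fill_sgv4_hdr_rq() gains the staging-buffer pattern this patch applies to SCSI CDB copies throughout the block layer: copy_from_user() always targets either the heap buffer or a stack array of exactly sizeof(rq->__cmd), never the request's inline array through an aliasing pointer, keeping the destination's bounds visible to PaX's USERCOPY checking; the result is then memcpy'd into place. A userspace sketch where memcpy stands in for copy_from_user() and sizes follow the block layer's 16-byte inline CDB:

    #include <string.h>

    #define BLK_MAX_CDB 16

    struct req {
        unsigned char __cmd[BLK_MAX_CDB];
        unsigned char *cmd;              /* == __cmd, or a heap buffer */
    };

    static int fill_cmd(struct req *rq, const unsigned char *src, size_t len)
    {
        unsigned char tmpcmd[sizeof(rq->__cmd)];
        unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

        if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
            return -1;                   /* inline array cannot hold it */

        memcpy(cmdptr, src, len);        /* copy_from_user() in the kernel */
        if (cmdptr != rq->cmd)
            memcpy(rq->cmd, cmdptr, len);
        return 0;
    }

    int main(void)
    {
        struct req rq;
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 }; /* INQUIRY */

        rq.cmd = rq.__cmd;
        return fill_cmd(&rq, cdb, sizeof(cdb));
    }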
36215diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36216index f678c73..f35aa18 100644
36217--- a/block/compat_ioctl.c
36218+++ b/block/compat_ioctl.c
36219@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36220 cgc = compat_alloc_user_space(sizeof(*cgc));
36221 cgc32 = compat_ptr(arg);
36222
36223- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36224+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36225 get_user(data, &cgc32->buffer) ||
36226 put_user(compat_ptr(data), &cgc->buffer) ||
36227 copy_in_user(&cgc->buflen, &cgc32->buflen,
36228@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36229 err |= __get_user(f->spec1, &uf->spec1);
36230 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36231 err |= __get_user(name, &uf->name);
36232- f->name = compat_ptr(name);
36233+ f->name = (void __force_kernel *)compat_ptr(name);
36234 if (err) {
36235 err = -EFAULT;
36236 goto out;
36237diff --git a/block/genhd.c b/block/genhd.c
36238index 0a536dc..b8f7aca 100644
36239--- a/block/genhd.c
36240+++ b/block/genhd.c
36241@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36242
36243 /*
36244 * Register device numbers dev..(dev+range-1)
36245- * range must be nonzero
36246+ * Noop if @range is zero.
36247 * The hash chain is sorted on range, so that subranges can override.
36248 */
36249 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36250 struct kobject *(*probe)(dev_t, int *, void *),
36251 int (*lock)(dev_t, void *), void *data)
36252 {
36253- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36254+ if (range)
36255+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36256 }
36257
36258 EXPORT_SYMBOL(blk_register_region);
36259
36260+/* undo blk_register_region(), noop if @range is zero */
36261 void blk_unregister_region(dev_t devt, unsigned long range)
36262 {
36263- kobj_unmap(bdev_map, devt, range);
36264+ if (range)
36265+ kobj_unmap(bdev_map, devt, range);
36266 }
36267
36268 EXPORT_SYMBOL(blk_unregister_region);
36269diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36270index 26cb624..a49c3a5 100644
36271--- a/block/partitions/efi.c
36272+++ b/block/partitions/efi.c
36273@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36274 if (!gpt)
36275 return NULL;
36276
36277+ if (!le32_to_cpu(gpt->num_partition_entries))
36278+ return NULL;
36279+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36280+ if (!pte)
36281+ return NULL;
36282+
36283 count = le32_to_cpu(gpt->num_partition_entries) *
36284 le32_to_cpu(gpt->sizeof_partition_entry);
36285- if (!count)
36286- return NULL;
36287- pte = kmalloc(count, GFP_KERNEL);
36288- if (!pte)
36289- return NULL;
36290-
36291 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36292 (u8 *) pte, count) < count) {
36293 kfree(pte);
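[Annotation] In alloc_read_gpt_entries() both num_partition_entries and sizeof_partition_entry come straight from the on-disk GPT header, so `kmalloc(a * b)` could be made to wrap and under-allocate. kcalloc() performs the multiplication with an overflow check, and the zero-entries case is now rejected up front. The equivalent guard in portable C:

    #include <stdint.h>
    #include <stdlib.h>

    /* What kcalloc(n, size, ...) guarantees that kmalloc(n * size, ...)
     * does not: the product is checked before allocating. */
    static void *calloc_checked(size_t n, size_t size)
    {
        if (n == 0 || size == 0)
            return NULL;                 /* mirrors the added early return */
        if (n > SIZE_MAX / size)
            return NULL;                 /* multiplication would wrap */
        return calloc(n, size);
    }

    int main(void)
    {
        void *p = calloc_checked(128, 128);  /* a typical GPT entry array */

        free(p);
        return calloc_checked(SIZE_MAX / 2, 4) != NULL; /* rejected -> 0 */
    }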
36294diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36295index e1f71c3..02d295a 100644
36296--- a/block/scsi_ioctl.c
36297+++ b/block/scsi_ioctl.c
36298@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36299 return put_user(0, p);
36300 }
36301
36302-static int sg_get_timeout(struct request_queue *q)
36303+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36304 {
36305 return jiffies_to_clock_t(q->sg_timeout);
36306 }
36307@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36308 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36309 struct sg_io_hdr *hdr, fmode_t mode)
36310 {
36311- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36312+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36313+ unsigned char *cmdptr;
36314+
36315+ if (rq->cmd != rq->__cmd)
36316+ cmdptr = rq->cmd;
36317+ else
36318+ cmdptr = tmpcmd;
36319+
36320+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36321 return -EFAULT;
36322+
36323+ if (cmdptr != rq->cmd)
36324+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36325+
36326 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36327 return -EPERM;
36328
36329@@ -422,6 +434,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36330 int err;
36331 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36332 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36333+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36334+ unsigned char *cmdptr;
36335
36336 if (!sic)
36337 return -EINVAL;
36338@@ -460,9 +474,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36339 */
36340 err = -EFAULT;
36341 rq->cmd_len = cmdlen;
36342- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36343+
36344+ if (rq->cmd != rq->__cmd)
36345+ cmdptr = rq->cmd;
36346+ else
36347+ cmdptr = tmpcmd;
36348+
36349+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36350 goto error;
36351
36352+ if (rq->cmd != cmdptr)
36353+ memcpy(rq->cmd, cmdptr, cmdlen);
36354+
36355 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36356 goto error;
36357
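[Annotation] Besides repeating the CDB staging-buffer pattern, this file marks sg_get_timeout() with __intentional_overflow(-1). That annotation belongs to the size_overflow GCC plugin shipped with this patch: it exempts the function's arithmetic (a jiffies conversion whose truncation is deliberate) from the plugin's overflow instrumentation. Without the plugin the marker can simply expand to nothing, along these lines (a sketch, not the plugin's actual definition):

    #ifndef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...)  /* no-op without the plugin */
    #endif

    /* Truncating the jiffies value to int is deliberate here, so the
     * plugin is told not to instrument it. */
    static int __intentional_overflow(-1) sg_get_timeout_demo(unsigned long j)
    {
        return j / 100;                  /* pretend HZ/USER_HZ conversion */
    }

    int main(void)
    {
        return sg_get_timeout_demo(3000) == 30 ? 0 : 1;
    }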
36358diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36359index 650afac1..f3307de 100644
36360--- a/crypto/cryptd.c
36361+++ b/crypto/cryptd.c
36362@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36363
36364 struct cryptd_blkcipher_request_ctx {
36365 crypto_completion_t complete;
36366-};
36367+} __no_const;
36368
36369 struct cryptd_hash_ctx {
36370 struct crypto_shash *child;
36371@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36372
36373 struct cryptd_aead_request_ctx {
36374 crypto_completion_t complete;
36375-};
36376+} __no_const;
36377
36378 static void cryptd_queue_worker(struct work_struct *work);
36379
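[Annotation] The __no_const marker on the request-context structs is the flip side of this patch's constify plugin: structures consisting only of function pointers are made const by default, but cryptd rewrites .complete at runtime, so these two opt out. Illustrated in plain C, with const applied by hand to stand in for the plugin-provided attribute:

    struct ops {
        void (*complete)(int err);
    };

    static void done(int err) { (void)err; }
    static void redo(int err) { (void)err; }

    /* Constified by default: any write is a compile error. */
    static const struct ops fixed_ops = { .complete = done };

    /* __no_const equivalent: stays writable because the completion
     * callback really is swapped at runtime, as cryptd does. */
    static struct ops mutable_ops = { .complete = done };

    int main(void)
    {
        mutable_ops.complete = redo;     /* fine */
        /* fixed_ops.complete = redo;       would not compile */
        fixed_ops.complete(0);
        mutable_ops.complete(0);
        return 0;
    }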
36380diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36381index c305d41..a96de79 100644
36382--- a/crypto/pcrypt.c
36383+++ b/crypto/pcrypt.c
36384@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36385 int ret;
36386
36387 pinst->kobj.kset = pcrypt_kset;
36388- ret = kobject_add(&pinst->kobj, NULL, name);
36389+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36390 if (!ret)
36391 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36392
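[Annotation] kobject_add() takes a printf-style format, so passing the caller-supplied name directly lets any '%' in it act as a conversion specifier; the fix pins the format to "%s". The classic pattern in miniature:

    #include <stdio.h>

    static void label_bad(const char *name)
    {
        printf(name);                    /* name = "%s%n..." is interpreted */
    }

    static void label_good(const char *name)
    {
        printf("%s", name);              /* name is pure data */
    }

    int main(void)
    {
        label_good("pcrypt%d");          /* printed literally */
        label_bad("safe-name");          /* only safe because the string is fixed */
        printf("\n");
        return 0;
    }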
36393diff --git a/crypto/zlib.c b/crypto/zlib.c
36394index 0eefa9d..0fa3d29 100644
36395--- a/crypto/zlib.c
36396+++ b/crypto/zlib.c
36397@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36398 zlib_comp_exit(ctx);
36399
36400 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36401- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36402+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36403 : MAX_WBITS;
36404 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36405- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36406+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36407 : DEF_MEM_LEVEL;
36408
36409 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
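[Annotation] zlib's windowBits is legitimately negative (negative values select raw deflate), so fetching the netlink attribute with nla_get_u32() and storing it into a signed int relied on the unsigned-to-signed conversion; nla_get_s32() makes the signedness explicit, which presumably also helps the size_overflow plugin reason about the value's range. The difference in isolation:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int32_t get_s32(const unsigned char *payload)
    {
        int32_t v;
        memcpy(&v, payload, sizeof(v));
        return v;
    }

    static uint32_t get_u32(const unsigned char *payload)
    {
        uint32_t v;
        memcpy(&v, payload, sizeof(v));
        return v;
    }

    int main(void)
    {
        unsigned char raw[4];
        int32_t wbits = -13;             /* raw deflate, 8K window */

        memcpy(raw, &wbits, sizeof(raw));
        printf("s32: %d  u32: %u\n", get_s32(raw), get_u32(raw));
        return 0;
    }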
36410diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36411index 3b37676..898edfa 100644
36412--- a/drivers/acpi/acpica/hwxfsleep.c
36413+++ b/drivers/acpi/acpica/hwxfsleep.c
36414@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36415 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36416
36417 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36418- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36419- acpi_hw_extended_sleep},
36420- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36421- acpi_hw_extended_wake_prep},
36422- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36423+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36424+ .extended_function = acpi_hw_extended_sleep},
36425+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36426+ .extended_function = acpi_hw_extended_wake_prep},
36427+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36428+ .extended_function = acpi_hw_extended_wake}
36429 };
36430
36431 /*
36432diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36433index 16129c7..8b675cd 100644
36434--- a/drivers/acpi/apei/apei-internal.h
36435+++ b/drivers/acpi/apei/apei-internal.h
36436@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36437 struct apei_exec_ins_type {
36438 u32 flags;
36439 apei_exec_ins_func_t run;
36440-};
36441+} __do_const;
36442
36443 struct apei_exec_context {
36444 u32 ip;
36445diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36446index e82d097..0c855c1 100644
36447--- a/drivers/acpi/apei/ghes.c
36448+++ b/drivers/acpi/apei/ghes.c
36449@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36450 const struct acpi_hest_generic *generic,
36451 const struct acpi_hest_generic_status *estatus)
36452 {
36453- static atomic_t seqno;
36454+ static atomic_unchecked_t seqno;
36455 unsigned int curr_seqno;
36456 char pfx_seq[64];
36457
36458@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36459 else
36460 pfx = KERN_ERR;
36461 }
36462- curr_seqno = atomic_inc_return(&seqno);
36463+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36464 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36465 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36466 pfx_seq, generic->header.source_id);
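[Annotation] The seqno switch to atomic_unchecked_t is this patch's REFCOUNT feature at work: ordinary atomic_t increments are instrumented to trap on overflow (closing refcount-overflow bugs), so counters that are allowed to wrap, like this error-record sequence number, must use the unchecked variant. A userspace C11 sketch of the unchecked side; names mirror the patch, and the trapping checked variant is not shown:

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_int counter; } atomic_unchecked_t;

    /* Plain wrapping increment-and-return, no overflow trap: */
    static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
        return atomic_fetch_add_explicit(&v->counter, 1,
                                         memory_order_relaxed) + 1;
    }

    int main(void)
    {
        static atomic_unchecked_t seqno; /* like __ghes_print_estatus() */
        int a = atomic_inc_return_unchecked(&seqno);
        int b = atomic_inc_return_unchecked(&seqno);

        printf("%d %d\n", a, b);         /* 1 2 */
        return 0;
    }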
36467diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36468index a83e3c6..c3d617f 100644
36469--- a/drivers/acpi/bgrt.c
36470+++ b/drivers/acpi/bgrt.c
36471@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36472 if (!bgrt_image)
36473 return -ENODEV;
36474
36475- bin_attr_image.private = bgrt_image;
36476- bin_attr_image.size = bgrt_image_size;
36477+ pax_open_kernel();
36478+ *(void **)&bin_attr_image.private = bgrt_image;
36479+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36480+ pax_close_kernel();
36481
36482 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36483 if (!bgrt_kobj)
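[Annotation] bgrt writes through casts like *(void **)&bin_attr_image.private between pax_open_kernel()/pax_close_kernel() because under KERNEXEC/constify such objects live in read-only memory; the pair temporarily permits the one sanctioned write. A userspace analogy using mprotect() — the kernel primitives toggle CR0.WP or the page tables instead:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "boot-time value");
        mprotect(p, pagesz, PROT_READ);              /* normal state: RO  */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        strcpy(p, "updated once");                   /* sanctioned write  */
        mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel()*/

        puts(p);
        return 0;
    }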
36484diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36485index 9b693d5..8953d54 100644
36486--- a/drivers/acpi/blacklist.c
36487+++ b/drivers/acpi/blacklist.c
36488@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36489 u32 is_critical_error;
36490 };
36491
36492-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36493+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36494
36495 /*
36496 * POLICY: If *anything* doesn't work, put it on the blacklist.
36497@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36498 return 0;
36499 }
36500
36501-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36502+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36503 {
36504 .callback = dmi_disable_osi_vista,
36505 .ident = "Fujitsu Siemens",
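[Annotation] From here the patch constifies one DMI quirk table after another: the tables are consulted only at boot and never written, so they move from __initdata (.init.data) to const plus __initconst (.init.rodata). The const qualifier and the section annotation must change together, because gcc rejects mixing const and non-const objects in one named section. The two annotations in miniature; section names match the kernel's, the rest is illustrative:

    #define __initdata  __attribute__((__section__(".init.data")))
    #define __initconst __attribute__((__section__(".init.rodata")))

    struct dmi_system_id {
        const char *ident;
    };

    static const struct dmi_system_id quirks[] __initconst = {
        { .ident = "Example Machine" },
        { /* terminator */ }
    };

    int main(void)
    {
        return quirks[0].ident[0] == 'E' ? 0 : 1;
    }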
36506diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36507index 8b67bd0..b59593e 100644
36508--- a/drivers/acpi/bus.c
36509+++ b/drivers/acpi/bus.c
36510@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36511 }
36512 #endif
36513
36514-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36515+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36516 /*
36517 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36518 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36519@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36520 {}
36521 };
36522 #else
36523-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36524+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36525 {}
36526 };
36527 #endif
36528diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36529index c68e724..e863008 100644
36530--- a/drivers/acpi/custom_method.c
36531+++ b/drivers/acpi/custom_method.c
36532@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36533 struct acpi_table_header table;
36534 acpi_status status;
36535
36536+#ifdef CONFIG_GRKERNSEC_KMEM
36537+ return -EPERM;
36538+#endif
36539+
36540 if (!(*ppos)) {
36541 /* parse the table header to get the table length */
36542 if (count <= sizeof(struct acpi_table_header))
36543diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36544index 735db11..91e07ff 100644
36545--- a/drivers/acpi/device_pm.c
36546+++ b/drivers/acpi/device_pm.c
36547@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36548
36549 #endif /* CONFIG_PM_SLEEP */
36550
36551+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36552+
36553 static struct dev_pm_domain acpi_general_pm_domain = {
36554 .ops = {
36555 .runtime_suspend = acpi_subsys_runtime_suspend,
36556@@ -1041,6 +1043,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36557 .restore_early = acpi_subsys_resume_early,
36558 #endif
36559 },
36560+ .detach = acpi_dev_pm_detach
36561 };
36562
36563 /**
36564@@ -1110,7 +1113,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36565 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36566 }
36567
36568- dev->pm_domain->detach = acpi_dev_pm_detach;
36569 return 0;
36570 }
36571 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36572diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36573index a8dd2f7..e15950e 100644
36574--- a/drivers/acpi/ec.c
36575+++ b/drivers/acpi/ec.c
36576@@ -1242,7 +1242,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36577 return 0;
36578 }
36579
36580-static struct dmi_system_id ec_dmi_table[] __initdata = {
36581+static const struct dmi_system_id ec_dmi_table[] __initconst = {
36582 {
36583 ec_skip_dsdt_scan, "Compal JFL92", {
36584 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36585diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36586index 139d9e4..9a9d799 100644
36587--- a/drivers/acpi/pci_slot.c
36588+++ b/drivers/acpi/pci_slot.c
36589@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36590 return 0;
36591 }
36592
36593-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36594+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36595 /*
36596 * Fujitsu Primequest machines will return 1023 to indicate an
36597 * error if the _SUN method is evaluated on SxFy objects that
36598diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36599index d9f7158..168e742 100644
36600--- a/drivers/acpi/processor_driver.c
36601+++ b/drivers/acpi/processor_driver.c
36602@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36603 return NOTIFY_OK;
36604 }
36605
36606-static struct notifier_block __refdata acpi_cpu_notifier = {
36607+static struct notifier_block __refconst acpi_cpu_notifier = {
36608 .notifier_call = acpi_cpu_soft_notify,
36609 };
36610
36611diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36612index f98db0b..8309c83 100644
36613--- a/drivers/acpi/processor_idle.c
36614+++ b/drivers/acpi/processor_idle.c
36615@@ -912,7 +912,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36616 {
36617 int i, count = CPUIDLE_DRIVER_STATE_START;
36618 struct acpi_processor_cx *cx;
36619- struct cpuidle_state *state;
36620+ cpuidle_state_no_const *state;
36621 struct cpuidle_driver *drv = &acpi_idle_driver;
36622
36623 if (!pr->flags.power_setup_done)
36624diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36625index e5dd808..1eceed1 100644
36626--- a/drivers/acpi/processor_pdc.c
36627+++ b/drivers/acpi/processor_pdc.c
36628@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36629 return 0;
36630 }
36631
36632-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36633+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36634 {
36635 set_no_mwait, "Extensa 5220", {
36636 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36637diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36638index 7f251dd..47b262c 100644
36639--- a/drivers/acpi/sleep.c
36640+++ b/drivers/acpi/sleep.c
36641@@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36642 return 0;
36643 }
36644
36645-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36646+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36647 {
36648 .callback = init_old_suspend_ordering,
36649 .ident = "Abit KN9 (nForce4 variant)",
36650diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36651index 13e577c..cef11ee 100644
36652--- a/drivers/acpi/sysfs.c
36653+++ b/drivers/acpi/sysfs.c
36654@@ -423,11 +423,11 @@ static u32 num_counters;
36655 static struct attribute **all_attrs;
36656 static u32 acpi_gpe_count;
36657
36658-static struct attribute_group interrupt_stats_attr_group = {
36659+static attribute_group_no_const interrupt_stats_attr_group = {
36660 .name = "interrupts",
36661 };
36662
36663-static struct kobj_attribute *counter_attrs;
36664+static kobj_attribute_no_const *counter_attrs;
36665
36666 static void delete_gpe_attr_array(void)
36667 {
36668diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36669index d24fa19..782f1e6 100644
36670--- a/drivers/acpi/thermal.c
36671+++ b/drivers/acpi/thermal.c
36672@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36673 return 0;
36674 }
36675
36676-static struct dmi_system_id thermal_dmi_table[] __initdata = {
36677+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36678 /*
36679 * Award BIOS on this AOpen makes thermal control almost worthless.
36680 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36681diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36682index 26eb70c..4d66ddf 100644
36683--- a/drivers/acpi/video.c
36684+++ b/drivers/acpi/video.c
36685@@ -418,7 +418,7 @@ static int __init video_disable_native_backlight(const struct dmi_system_id *d)
36686 return 0;
36687 }
36688
36689-static struct dmi_system_id video_dmi_table[] __initdata = {
36690+static const struct dmi_system_id video_dmi_table[] __initconst = {
36691 /*
36692 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36693 */
36694diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36695index 61a9c07..ea98fa1 100644
36696--- a/drivers/ata/libahci.c
36697+++ b/drivers/ata/libahci.c
36698@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36699 }
36700 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36701
36702-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36703+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36704 struct ata_taskfile *tf, int is_cmd, u16 flags,
36705 unsigned long timeout_msec)
36706 {
36707diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36708index 23dac3b..89ada44 100644
36709--- a/drivers/ata/libata-core.c
36710+++ b/drivers/ata/libata-core.c
36711@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36712 static void ata_dev_xfermask(struct ata_device *dev);
36713 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36714
36715-atomic_t ata_print_id = ATOMIC_INIT(0);
36716+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36717
36718 struct ata_force_param {
36719 const char *name;
36720@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36721 struct ata_port *ap;
36722 unsigned int tag;
36723
36724- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36725+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36726 ap = qc->ap;
36727
36728 qc->flags = 0;
36729@@ -4797,7 +4797,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36730 struct ata_port *ap;
36731 struct ata_link *link;
36732
36733- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36734+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36735 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36736 ap = qc->ap;
36737 link = qc->dev->link;
36738@@ -5901,6 +5901,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36739 return;
36740
36741 spin_lock(&lock);
36742+ pax_open_kernel();
36743
36744 for (cur = ops->inherits; cur; cur = cur->inherits) {
36745 void **inherit = (void **)cur;
36746@@ -5914,8 +5915,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36747 if (IS_ERR(*pp))
36748 *pp = NULL;
36749
36750- ops->inherits = NULL;
36751+ *(struct ata_port_operations **)&ops->inherits = NULL;
36752
36753+ pax_close_kernel();
36754 spin_unlock(&lock);
36755 }
36756
36757@@ -6111,7 +6113,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36758
36759 /* give ports names and add SCSI hosts */
36760 for (i = 0; i < host->n_ports; i++) {
36761- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36762+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36763 host->ports[i]->local_port_no = i + 1;
36764 }
36765
36766diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36767index b061ba2..fdcd85f 100644
36768--- a/drivers/ata/libata-scsi.c
36769+++ b/drivers/ata/libata-scsi.c
36770@@ -4172,7 +4172,7 @@ int ata_sas_port_init(struct ata_port *ap)
36771
36772 if (rc)
36773 return rc;
36774- ap->print_id = atomic_inc_return(&ata_print_id);
36775+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36776 return 0;
36777 }
36778 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36779diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36780index f840ca1..edd6ef3 100644
36781--- a/drivers/ata/libata.h
36782+++ b/drivers/ata/libata.h
36783@@ -53,7 +53,7 @@ enum {
36784 ATA_DNXFER_QUIET = (1 << 31),
36785 };
36786
36787-extern atomic_t ata_print_id;
36788+extern atomic_unchecked_t ata_print_id;
36789 extern int atapi_passthru16;
36790 extern int libata_fua;
36791 extern int libata_noacpi;
36792diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36793index a9b0c82..207d97d 100644
36794--- a/drivers/ata/pata_arasan_cf.c
36795+++ b/drivers/ata/pata_arasan_cf.c
36796@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36797 /* Handle platform specific quirks */
36798 if (quirk) {
36799 if (quirk & CF_BROKEN_PIO) {
36800- ap->ops->set_piomode = NULL;
36801+ pax_open_kernel();
36802+ *(void **)&ap->ops->set_piomode = NULL;
36803+ pax_close_kernel();
36804 ap->pio_mask = 0;
36805 }
36806 if (quirk & CF_BROKEN_MWDMA)
36807diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36808index f9b983a..887b9d8 100644
36809--- a/drivers/atm/adummy.c
36810+++ b/drivers/atm/adummy.c
36811@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36812 vcc->pop(vcc, skb);
36813 else
36814 dev_kfree_skb_any(skb);
36815- atomic_inc(&vcc->stats->tx);
36816+ atomic_inc_unchecked(&vcc->stats->tx);
36817
36818 return 0;
36819 }
36820diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36821index f1a9198..f466a4a 100644
36822--- a/drivers/atm/ambassador.c
36823+++ b/drivers/atm/ambassador.c
36824@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36825 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36826
36827 // VC layer stats
36828- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36829+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36830
36831 // free the descriptor
36832 kfree (tx_descr);
36833@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36834 dump_skb ("<<<", vc, skb);
36835
36836 // VC layer stats
36837- atomic_inc(&atm_vcc->stats->rx);
36838+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36839 __net_timestamp(skb);
36840 // end of our responsibility
36841 atm_vcc->push (atm_vcc, skb);
36842@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36843 } else {
36844 PRINTK (KERN_INFO, "dropped over-size frame");
36845 // should we count this?
36846- atomic_inc(&atm_vcc->stats->rx_drop);
36847+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36848 }
36849
36850 } else {
36851@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36852 }
36853
36854 if (check_area (skb->data, skb->len)) {
36855- atomic_inc(&atm_vcc->stats->tx_err);
36856+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36857 return -ENOMEM; // ?
36858 }
36859
36860diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36861index 480fa6f..947067c 100644
36862--- a/drivers/atm/atmtcp.c
36863+++ b/drivers/atm/atmtcp.c
36864@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36865 if (vcc->pop) vcc->pop(vcc,skb);
36866 else dev_kfree_skb(skb);
36867 if (dev_data) return 0;
36868- atomic_inc(&vcc->stats->tx_err);
36869+ atomic_inc_unchecked(&vcc->stats->tx_err);
36870 return -ENOLINK;
36871 }
36872 size = skb->len+sizeof(struct atmtcp_hdr);
36873@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36874 if (!new_skb) {
36875 if (vcc->pop) vcc->pop(vcc,skb);
36876 else dev_kfree_skb(skb);
36877- atomic_inc(&vcc->stats->tx_err);
36878+ atomic_inc_unchecked(&vcc->stats->tx_err);
36879 return -ENOBUFS;
36880 }
36881 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36882@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36883 if (vcc->pop) vcc->pop(vcc,skb);
36884 else dev_kfree_skb(skb);
36885 out_vcc->push(out_vcc,new_skb);
36886- atomic_inc(&vcc->stats->tx);
36887- atomic_inc(&out_vcc->stats->rx);
36888+ atomic_inc_unchecked(&vcc->stats->tx);
36889+ atomic_inc_unchecked(&out_vcc->stats->rx);
36890 return 0;
36891 }
36892
36893@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36894 read_unlock(&vcc_sklist_lock);
36895 if (!out_vcc) {
36896 result = -EUNATCH;
36897- atomic_inc(&vcc->stats->tx_err);
36898+ atomic_inc_unchecked(&vcc->stats->tx_err);
36899 goto done;
36900 }
36901 skb_pull(skb,sizeof(struct atmtcp_hdr));
36902@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36903 __net_timestamp(new_skb);
36904 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36905 out_vcc->push(out_vcc,new_skb);
36906- atomic_inc(&vcc->stats->tx);
36907- atomic_inc(&out_vcc->stats->rx);
36908+ atomic_inc_unchecked(&vcc->stats->tx);
36909+ atomic_inc_unchecked(&out_vcc->stats->rx);
36910 done:
36911 if (vcc->pop) vcc->pop(vcc,skb);
36912 else dev_kfree_skb(skb);
36913diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36914index 6339efd..2b441d5 100644
36915--- a/drivers/atm/eni.c
36916+++ b/drivers/atm/eni.c
36917@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36918 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36919 vcc->dev->number);
36920 length = 0;
36921- atomic_inc(&vcc->stats->rx_err);
36922+ atomic_inc_unchecked(&vcc->stats->rx_err);
36923 }
36924 else {
36925 length = ATM_CELL_SIZE-1; /* no HEC */
36926@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36927 size);
36928 }
36929 eff = length = 0;
36930- atomic_inc(&vcc->stats->rx_err);
36931+ atomic_inc_unchecked(&vcc->stats->rx_err);
36932 }
36933 else {
36934 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36935@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36936 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36937 vcc->dev->number,vcc->vci,length,size << 2,descr);
36938 length = eff = 0;
36939- atomic_inc(&vcc->stats->rx_err);
36940+ atomic_inc_unchecked(&vcc->stats->rx_err);
36941 }
36942 }
36943 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36944@@ -770,7 +770,7 @@ rx_dequeued++;
36945 vcc->push(vcc,skb);
36946 pushed++;
36947 }
36948- atomic_inc(&vcc->stats->rx);
36949+ atomic_inc_unchecked(&vcc->stats->rx);
36950 }
36951 wake_up(&eni_dev->rx_wait);
36952 }
36953@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36954 DMA_TO_DEVICE);
36955 if (vcc->pop) vcc->pop(vcc,skb);
36956 else dev_kfree_skb_irq(skb);
36957- atomic_inc(&vcc->stats->tx);
36958+ atomic_inc_unchecked(&vcc->stats->tx);
36959 wake_up(&eni_dev->tx_wait);
36960 dma_complete++;
36961 }
36962diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36963index 82f2ae0..f205c02 100644
36964--- a/drivers/atm/firestream.c
36965+++ b/drivers/atm/firestream.c
36966@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36967 }
36968 }
36969
36970- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36971+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36972
36973 fs_dprintk (FS_DEBUG_TXMEM, "i");
36974 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36975@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36976 #endif
36977 skb_put (skb, qe->p1 & 0xffff);
36978 ATM_SKB(skb)->vcc = atm_vcc;
36979- atomic_inc(&atm_vcc->stats->rx);
36980+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36981 __net_timestamp(skb);
36982 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36983 atm_vcc->push (atm_vcc, skb);
36984@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36985 kfree (pe);
36986 }
36987 if (atm_vcc)
36988- atomic_inc(&atm_vcc->stats->rx_drop);
36989+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36990 break;
36991 case 0x1f: /* Reassembly abort: no buffers. */
36992 /* Silently increment error counter. */
36993 if (atm_vcc)
36994- atomic_inc(&atm_vcc->stats->rx_drop);
36995+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36996 break;
36997 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36998 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36999diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37000index 75dde90..4309ead 100644
37001--- a/drivers/atm/fore200e.c
37002+++ b/drivers/atm/fore200e.c
37003@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37004 #endif
37005 /* check error condition */
37006 if (*entry->status & STATUS_ERROR)
37007- atomic_inc(&vcc->stats->tx_err);
37008+ atomic_inc_unchecked(&vcc->stats->tx_err);
37009 else
37010- atomic_inc(&vcc->stats->tx);
37011+ atomic_inc_unchecked(&vcc->stats->tx);
37012 }
37013 }
37014
37015@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37016 if (skb == NULL) {
37017 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37018
37019- atomic_inc(&vcc->stats->rx_drop);
37020+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37021 return -ENOMEM;
37022 }
37023
37024@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37025
37026 dev_kfree_skb_any(skb);
37027
37028- atomic_inc(&vcc->stats->rx_drop);
37029+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37030 return -ENOMEM;
37031 }
37032
37033 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37034
37035 vcc->push(vcc, skb);
37036- atomic_inc(&vcc->stats->rx);
37037+ atomic_inc_unchecked(&vcc->stats->rx);
37038
37039 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37040
37041@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37042 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37043 fore200e->atm_dev->number,
37044 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37045- atomic_inc(&vcc->stats->rx_err);
37046+ atomic_inc_unchecked(&vcc->stats->rx_err);
37047 }
37048 }
37049
37050@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37051 goto retry_here;
37052 }
37053
37054- atomic_inc(&vcc->stats->tx_err);
37055+ atomic_inc_unchecked(&vcc->stats->tx_err);
37056
37057 fore200e->tx_sat++;
37058 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37059diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37060index 93dca2e..c5daa69 100644
37061--- a/drivers/atm/he.c
37062+++ b/drivers/atm/he.c
37063@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37064
37065 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37066 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37067- atomic_inc(&vcc->stats->rx_drop);
37068+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37069 goto return_host_buffers;
37070 }
37071
37072@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37073 RBRQ_LEN_ERR(he_dev->rbrq_head)
37074 ? "LEN_ERR" : "",
37075 vcc->vpi, vcc->vci);
37076- atomic_inc(&vcc->stats->rx_err);
37077+ atomic_inc_unchecked(&vcc->stats->rx_err);
37078 goto return_host_buffers;
37079 }
37080
37081@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37082 vcc->push(vcc, skb);
37083 spin_lock(&he_dev->global_lock);
37084
37085- atomic_inc(&vcc->stats->rx);
37086+ atomic_inc_unchecked(&vcc->stats->rx);
37087
37088 return_host_buffers:
37089 ++pdus_assembled;
37090@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37091 tpd->vcc->pop(tpd->vcc, tpd->skb);
37092 else
37093 dev_kfree_skb_any(tpd->skb);
37094- atomic_inc(&tpd->vcc->stats->tx_err);
37095+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37096 }
37097 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37098 return;
37099@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37100 vcc->pop(vcc, skb);
37101 else
37102 dev_kfree_skb_any(skb);
37103- atomic_inc(&vcc->stats->tx_err);
37104+ atomic_inc_unchecked(&vcc->stats->tx_err);
37105 return -EINVAL;
37106 }
37107
37108@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37109 vcc->pop(vcc, skb);
37110 else
37111 dev_kfree_skb_any(skb);
37112- atomic_inc(&vcc->stats->tx_err);
37113+ atomic_inc_unchecked(&vcc->stats->tx_err);
37114 return -EINVAL;
37115 }
37116 #endif
37117@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37118 vcc->pop(vcc, skb);
37119 else
37120 dev_kfree_skb_any(skb);
37121- atomic_inc(&vcc->stats->tx_err);
37122+ atomic_inc_unchecked(&vcc->stats->tx_err);
37123 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37124 return -ENOMEM;
37125 }
37126@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37127 vcc->pop(vcc, skb);
37128 else
37129 dev_kfree_skb_any(skb);
37130- atomic_inc(&vcc->stats->tx_err);
37131+ atomic_inc_unchecked(&vcc->stats->tx_err);
37132 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37133 return -ENOMEM;
37134 }
37135@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37136 __enqueue_tpd(he_dev, tpd, cid);
37137 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37138
37139- atomic_inc(&vcc->stats->tx);
37140+ atomic_inc_unchecked(&vcc->stats->tx);
37141
37142 return 0;
37143 }
37144diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37145index 527bbd5..96570c8 100644
37146--- a/drivers/atm/horizon.c
37147+++ b/drivers/atm/horizon.c
37148@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37149 {
37150 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37151 // VC layer stats
37152- atomic_inc(&vcc->stats->rx);
37153+ atomic_inc_unchecked(&vcc->stats->rx);
37154 __net_timestamp(skb);
37155 // end of our responsibility
37156 vcc->push (vcc, skb);
37157@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37158 dev->tx_iovec = NULL;
37159
37160 // VC layer stats
37161- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37162+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37163
37164 // free the skb
37165 hrz_kfree_skb (skb);
37166diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37167index 074616b..d6b3d5f 100644
37168--- a/drivers/atm/idt77252.c
37169+++ b/drivers/atm/idt77252.c
37170@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37171 else
37172 dev_kfree_skb(skb);
37173
37174- atomic_inc(&vcc->stats->tx);
37175+ atomic_inc_unchecked(&vcc->stats->tx);
37176 }
37177
37178 atomic_dec(&scq->used);
37179@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37180 if ((sb = dev_alloc_skb(64)) == NULL) {
37181 printk("%s: Can't allocate buffers for aal0.\n",
37182 card->name);
37183- atomic_add(i, &vcc->stats->rx_drop);
37184+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37185 break;
37186 }
37187 if (!atm_charge(vcc, sb->truesize)) {
37188 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37189 card->name);
37190- atomic_add(i - 1, &vcc->stats->rx_drop);
37191+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37192 dev_kfree_skb(sb);
37193 break;
37194 }
37195@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37196 ATM_SKB(sb)->vcc = vcc;
37197 __net_timestamp(sb);
37198 vcc->push(vcc, sb);
37199- atomic_inc(&vcc->stats->rx);
37200+ atomic_inc_unchecked(&vcc->stats->rx);
37201
37202 cell += ATM_CELL_PAYLOAD;
37203 }
37204@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37205 "(CDC: %08x)\n",
37206 card->name, len, rpp->len, readl(SAR_REG_CDC));
37207 recycle_rx_pool_skb(card, rpp);
37208- atomic_inc(&vcc->stats->rx_err);
37209+ atomic_inc_unchecked(&vcc->stats->rx_err);
37210 return;
37211 }
37212 if (stat & SAR_RSQE_CRC) {
37213 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37214 recycle_rx_pool_skb(card, rpp);
37215- atomic_inc(&vcc->stats->rx_err);
37216+ atomic_inc_unchecked(&vcc->stats->rx_err);
37217 return;
37218 }
37219 if (skb_queue_len(&rpp->queue) > 1) {
37220@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37221 RXPRINTK("%s: Can't alloc RX skb.\n",
37222 card->name);
37223 recycle_rx_pool_skb(card, rpp);
37224- atomic_inc(&vcc->stats->rx_err);
37225+ atomic_inc_unchecked(&vcc->stats->rx_err);
37226 return;
37227 }
37228 if (!atm_charge(vcc, skb->truesize)) {
37229@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37230 __net_timestamp(skb);
37231
37232 vcc->push(vcc, skb);
37233- atomic_inc(&vcc->stats->rx);
37234+ atomic_inc_unchecked(&vcc->stats->rx);
37235
37236 return;
37237 }
37238@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37239 __net_timestamp(skb);
37240
37241 vcc->push(vcc, skb);
37242- atomic_inc(&vcc->stats->rx);
37243+ atomic_inc_unchecked(&vcc->stats->rx);
37244
37245 if (skb->truesize > SAR_FB_SIZE_3)
37246 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37247@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37248 if (vcc->qos.aal != ATM_AAL0) {
37249 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37250 card->name, vpi, vci);
37251- atomic_inc(&vcc->stats->rx_drop);
37252+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37253 goto drop;
37254 }
37255
37256 if ((sb = dev_alloc_skb(64)) == NULL) {
37257 printk("%s: Can't allocate buffers for AAL0.\n",
37258 card->name);
37259- atomic_inc(&vcc->stats->rx_err);
37260+ atomic_inc_unchecked(&vcc->stats->rx_err);
37261 goto drop;
37262 }
37263
37264@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37265 ATM_SKB(sb)->vcc = vcc;
37266 __net_timestamp(sb);
37267 vcc->push(vcc, sb);
37268- atomic_inc(&vcc->stats->rx);
37269+ atomic_inc_unchecked(&vcc->stats->rx);
37270
37271 drop:
37272 skb_pull(queue, 64);
37273@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37274
37275 if (vc == NULL) {
37276 printk("%s: NULL connection in send().\n", card->name);
37277- atomic_inc(&vcc->stats->tx_err);
37278+ atomic_inc_unchecked(&vcc->stats->tx_err);
37279 dev_kfree_skb(skb);
37280 return -EINVAL;
37281 }
37282 if (!test_bit(VCF_TX, &vc->flags)) {
37283 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37284- atomic_inc(&vcc->stats->tx_err);
37285+ atomic_inc_unchecked(&vcc->stats->tx_err);
37286 dev_kfree_skb(skb);
37287 return -EINVAL;
37288 }
37289@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37290 break;
37291 default:
37292 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37293- atomic_inc(&vcc->stats->tx_err);
37294+ atomic_inc_unchecked(&vcc->stats->tx_err);
37295 dev_kfree_skb(skb);
37296 return -EINVAL;
37297 }
37298
37299 if (skb_shinfo(skb)->nr_frags != 0) {
37300 printk("%s: No scatter-gather yet.\n", card->name);
37301- atomic_inc(&vcc->stats->tx_err);
37302+ atomic_inc_unchecked(&vcc->stats->tx_err);
37303 dev_kfree_skb(skb);
37304 return -EINVAL;
37305 }
37306@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37307
37308 err = queue_skb(card, vc, skb, oam);
37309 if (err) {
37310- atomic_inc(&vcc->stats->tx_err);
37311+ atomic_inc_unchecked(&vcc->stats->tx_err);
37312 dev_kfree_skb(skb);
37313 return err;
37314 }
37315@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37316 skb = dev_alloc_skb(64);
37317 if (!skb) {
37318 printk("%s: Out of memory in send_oam().\n", card->name);
37319- atomic_inc(&vcc->stats->tx_err);
37320+ atomic_inc_unchecked(&vcc->stats->tx_err);
37321 return -ENOMEM;
37322 }
37323 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37324diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37325index 924f8e2..3375a3e 100644
37326--- a/drivers/atm/iphase.c
37327+++ b/drivers/atm/iphase.c
37328@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37329 status = (u_short) (buf_desc_ptr->desc_mode);
37330 if (status & (RX_CER | RX_PTE | RX_OFL))
37331 {
37332- atomic_inc(&vcc->stats->rx_err);
37333+ atomic_inc_unchecked(&vcc->stats->rx_err);
37334 IF_ERR(printk("IA: bad packet, dropping it");)
37335 if (status & RX_CER) {
37336 IF_ERR(printk(" cause: packet CRC error\n");)
37337@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37338 len = dma_addr - buf_addr;
37339 if (len > iadev->rx_buf_sz) {
37340 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37341- atomic_inc(&vcc->stats->rx_err);
37342+ atomic_inc_unchecked(&vcc->stats->rx_err);
37343 goto out_free_desc;
37344 }
37345
37346@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37347 ia_vcc = INPH_IA_VCC(vcc);
37348 if (ia_vcc == NULL)
37349 {
37350- atomic_inc(&vcc->stats->rx_err);
37351+ atomic_inc_unchecked(&vcc->stats->rx_err);
37352 atm_return(vcc, skb->truesize);
37353 dev_kfree_skb_any(skb);
37354 goto INCR_DLE;
37355@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37356 if ((length > iadev->rx_buf_sz) || (length >
37357 (skb->len - sizeof(struct cpcs_trailer))))
37358 {
37359- atomic_inc(&vcc->stats->rx_err);
37360+ atomic_inc_unchecked(&vcc->stats->rx_err);
37361 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37362 length, skb->len);)
37363 atm_return(vcc, skb->truesize);
37364@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37365
37366 IF_RX(printk("rx_dle_intr: skb push");)
37367 vcc->push(vcc,skb);
37368- atomic_inc(&vcc->stats->rx);
37369+ atomic_inc_unchecked(&vcc->stats->rx);
37370 iadev->rx_pkt_cnt++;
37371 }
37372 INCR_DLE:
37373@@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37374 {
37375 struct k_sonet_stats *stats;
37376 stats = &PRIV(_ia_dev[board])->sonet_stats;
37377- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37378- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37379- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37380- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37381- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37382- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37383- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37384- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37385- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37386+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37387+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37388+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37389+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37390+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37391+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37392+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37393+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37394+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37395 }
37396 ia_cmds.status = 0;
37397 break;
37398@@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37399 if ((desc == 0) || (desc > iadev->num_tx_desc))
37400 {
37401 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37402- atomic_inc(&vcc->stats->tx);
37403+ atomic_inc_unchecked(&vcc->stats->tx);
37404 if (vcc->pop)
37405 vcc->pop(vcc, skb);
37406 else
37407@@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37408 ATM_DESC(skb) = vcc->vci;
37409 skb_queue_tail(&iadev->tx_dma_q, skb);
37410
37411- atomic_inc(&vcc->stats->tx);
37412+ atomic_inc_unchecked(&vcc->stats->tx);
37413 iadev->tx_pkt_cnt++;
37414 /* Increment transaction counter */
37415 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37416
37417 #if 0
37418 /* add flow control logic */
37419- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37420+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37421 if (iavcc->vc_desc_cnt > 10) {
37422 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37423 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37424diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37425index ce43ae3..969de38 100644
37426--- a/drivers/atm/lanai.c
37427+++ b/drivers/atm/lanai.c
37428@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37429 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37430 lanai_endtx(lanai, lvcc);
37431 lanai_free_skb(lvcc->tx.atmvcc, skb);
37432- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37433+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37434 }
37435
37436 /* Try to fill the buffer - don't call unless there is backlog */
37437@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37438 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37439 __net_timestamp(skb);
37440 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37441- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37442+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37443 out:
37444 lvcc->rx.buf.ptr = end;
37445 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37446@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37447 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37448 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37449 lanai->stats.service_rxnotaal5++;
37450- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37451+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37452 return 0;
37453 }
37454 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37455@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37456 int bytes;
37457 read_unlock(&vcc_sklist_lock);
37458 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37459- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37460+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37461 lvcc->stats.x.aal5.service_trash++;
37462 bytes = (SERVICE_GET_END(s) * 16) -
37463 (((unsigned long) lvcc->rx.buf.ptr) -
37464@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37465 }
37466 if (s & SERVICE_STREAM) {
37467 read_unlock(&vcc_sklist_lock);
37468- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37469+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37470 lvcc->stats.x.aal5.service_stream++;
37471 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37472 "PDU on VCI %d!\n", lanai->number, vci);
37473@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37474 return 0;
37475 }
37476 DPRINTK("got rx crc error on vci %d\n", vci);
37477- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37478+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37479 lvcc->stats.x.aal5.service_rxcrc++;
37480 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37481 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37482diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37483index b7e1cc0..eb336bfe 100644
37484--- a/drivers/atm/nicstar.c
37485+++ b/drivers/atm/nicstar.c
37486@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37487 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37488 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37489 card->index);
37490- atomic_inc(&vcc->stats->tx_err);
37491+ atomic_inc_unchecked(&vcc->stats->tx_err);
37492 dev_kfree_skb_any(skb);
37493 return -EINVAL;
37494 }
37495@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37496 if (!vc->tx) {
37497 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37498 card->index);
37499- atomic_inc(&vcc->stats->tx_err);
37500+ atomic_inc_unchecked(&vcc->stats->tx_err);
37501 dev_kfree_skb_any(skb);
37502 return -EINVAL;
37503 }
37504@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37505 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37506 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37507 card->index);
37508- atomic_inc(&vcc->stats->tx_err);
37509+ atomic_inc_unchecked(&vcc->stats->tx_err);
37510 dev_kfree_skb_any(skb);
37511 return -EINVAL;
37512 }
37513
37514 if (skb_shinfo(skb)->nr_frags != 0) {
37515 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37516- atomic_inc(&vcc->stats->tx_err);
37517+ atomic_inc_unchecked(&vcc->stats->tx_err);
37518 dev_kfree_skb_any(skb);
37519 return -EINVAL;
37520 }
37521@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37522 }
37523
37524 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37525- atomic_inc(&vcc->stats->tx_err);
37526+ atomic_inc_unchecked(&vcc->stats->tx_err);
37527 dev_kfree_skb_any(skb);
37528 return -EIO;
37529 }
37530- atomic_inc(&vcc->stats->tx);
37531+ atomic_inc_unchecked(&vcc->stats->tx);
37532
37533 return 0;
37534 }
37535@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37536 printk
37537 ("nicstar%d: Can't allocate buffers for aal0.\n",
37538 card->index);
37539- atomic_add(i, &vcc->stats->rx_drop);
37540+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37541 break;
37542 }
37543 if (!atm_charge(vcc, sb->truesize)) {
37544 RXPRINTK
37545 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37546 card->index);
37547- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37548+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37549 dev_kfree_skb_any(sb);
37550 break;
37551 }
37552@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37553 ATM_SKB(sb)->vcc = vcc;
37554 __net_timestamp(sb);
37555 vcc->push(vcc, sb);
37556- atomic_inc(&vcc->stats->rx);
37557+ atomic_inc_unchecked(&vcc->stats->rx);
37558 cell += ATM_CELL_PAYLOAD;
37559 }
37560
37561@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37562 if (iovb == NULL) {
37563 printk("nicstar%d: Out of iovec buffers.\n",
37564 card->index);
37565- atomic_inc(&vcc->stats->rx_drop);
37566+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37567 recycle_rx_buf(card, skb);
37568 return;
37569 }
37570@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37571 small or large buffer itself. */
37572 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37573 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37574- atomic_inc(&vcc->stats->rx_err);
37575+ atomic_inc_unchecked(&vcc->stats->rx_err);
37576 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37577 NS_MAX_IOVECS);
37578 NS_PRV_IOVCNT(iovb) = 0;
37579@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37580 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37581 card->index);
37582 which_list(card, skb);
37583- atomic_inc(&vcc->stats->rx_err);
37584+ atomic_inc_unchecked(&vcc->stats->rx_err);
37585 recycle_rx_buf(card, skb);
37586 vc->rx_iov = NULL;
37587 recycle_iov_buf(card, iovb);
37588@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37589 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37590 card->index);
37591 which_list(card, skb);
37592- atomic_inc(&vcc->stats->rx_err);
37593+ atomic_inc_unchecked(&vcc->stats->rx_err);
37594 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37595 NS_PRV_IOVCNT(iovb));
37596 vc->rx_iov = NULL;
37597@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37598 printk(" - PDU size mismatch.\n");
37599 else
37600 printk(".\n");
37601- atomic_inc(&vcc->stats->rx_err);
37602+ atomic_inc_unchecked(&vcc->stats->rx_err);
37603 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37604 NS_PRV_IOVCNT(iovb));
37605 vc->rx_iov = NULL;
37606@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37607 /* skb points to a small buffer */
37608 if (!atm_charge(vcc, skb->truesize)) {
37609 push_rxbufs(card, skb);
37610- atomic_inc(&vcc->stats->rx_drop);
37611+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37612 } else {
37613 skb_put(skb, len);
37614 dequeue_sm_buf(card, skb);
37615@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37616 ATM_SKB(skb)->vcc = vcc;
37617 __net_timestamp(skb);
37618 vcc->push(vcc, skb);
37619- atomic_inc(&vcc->stats->rx);
37620+ atomic_inc_unchecked(&vcc->stats->rx);
37621 }
37622 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37623 struct sk_buff *sb;
37624@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37625 if (len <= NS_SMBUFSIZE) {
37626 if (!atm_charge(vcc, sb->truesize)) {
37627 push_rxbufs(card, sb);
37628- atomic_inc(&vcc->stats->rx_drop);
37629+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37630 } else {
37631 skb_put(sb, len);
37632 dequeue_sm_buf(card, sb);
37633@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37634 ATM_SKB(sb)->vcc = vcc;
37635 __net_timestamp(sb);
37636 vcc->push(vcc, sb);
37637- atomic_inc(&vcc->stats->rx);
37638+ atomic_inc_unchecked(&vcc->stats->rx);
37639 }
37640
37641 push_rxbufs(card, skb);
37642@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37643
37644 if (!atm_charge(vcc, skb->truesize)) {
37645 push_rxbufs(card, skb);
37646- atomic_inc(&vcc->stats->rx_drop);
37647+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37648 } else {
37649 dequeue_lg_buf(card, skb);
37650 #ifdef NS_USE_DESTRUCTORS
37651@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37652 ATM_SKB(skb)->vcc = vcc;
37653 __net_timestamp(skb);
37654 vcc->push(vcc, skb);
37655- atomic_inc(&vcc->stats->rx);
37656+ atomic_inc_unchecked(&vcc->stats->rx);
37657 }
37658
37659 push_rxbufs(card, sb);
37660@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37661 printk
37662 ("nicstar%d: Out of huge buffers.\n",
37663 card->index);
37664- atomic_inc(&vcc->stats->rx_drop);
37665+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37666 recycle_iovec_rx_bufs(card,
37667 (struct iovec *)
37668 iovb->data,
37669@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37670 card->hbpool.count++;
37671 } else
37672 dev_kfree_skb_any(hb);
37673- atomic_inc(&vcc->stats->rx_drop);
37674+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37675 } else {
37676 /* Copy the small buffer to the huge buffer */
37677 sb = (struct sk_buff *)iov->iov_base;
37678@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37679 #endif /* NS_USE_DESTRUCTORS */
37680 __net_timestamp(hb);
37681 vcc->push(vcc, hb);
37682- atomic_inc(&vcc->stats->rx);
37683+ atomic_inc_unchecked(&vcc->stats->rx);
37684 }
37685 }
37686
37687diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37688index 74e18b0..f16afa0 100644
37689--- a/drivers/atm/solos-pci.c
37690+++ b/drivers/atm/solos-pci.c
37691@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37692 }
37693 atm_charge(vcc, skb->truesize);
37694 vcc->push(vcc, skb);
37695- atomic_inc(&vcc->stats->rx);
37696+ atomic_inc_unchecked(&vcc->stats->rx);
37697 break;
37698
37699 case PKT_STATUS:
37700@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37701 vcc = SKB_CB(oldskb)->vcc;
37702
37703 if (vcc) {
37704- atomic_inc(&vcc->stats->tx);
37705+ atomic_inc_unchecked(&vcc->stats->tx);
37706 solos_pop(vcc, oldskb);
37707 } else {
37708 dev_kfree_skb_irq(oldskb);
37709diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37710index 0215934..ce9f5b1 100644
37711--- a/drivers/atm/suni.c
37712+++ b/drivers/atm/suni.c
37713@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37714
37715
37716 #define ADD_LIMITED(s,v) \
37717- atomic_add((v),&stats->s); \
37718- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37719+ atomic_add_unchecked((v),&stats->s); \
37720+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37721
37722
37723 static void suni_hz(unsigned long from_timer)
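
ADD_LIMITED is a saturating accumulator: once the counter would wrap past INT_MAX and go negative, it is pinned at INT_MAX. The hunk only swaps in the _unchecked accessors; the saturation logic is unchanged. The same clamp restated on a plain int, as an illustration (the kernel builds with wrapping signed arithmetic; standalone C would need -fwrapv for the overflow test to be well defined):

	#include <limits.h>

	/* Illustration of the ADD_LIMITED clamp; the macro above does
	 * the same thing through atomic_*_unchecked accessors. */
	static void add_limited(int *s, int v)
	{
		*s += v;
		if (*s < 0)		/* wrapped past INT_MAX */
			*s = INT_MAX;	/* saturate, don't go negative */
	}
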
37724diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37725index 5120a96..e2572bd 100644
37726--- a/drivers/atm/uPD98402.c
37727+++ b/drivers/atm/uPD98402.c
37728@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37729 struct sonet_stats tmp;
37730 int error = 0;
37731
37732- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37733+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37734 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37735 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37736 if (zero && !error) {
37737@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37738
37739
37740 #define ADD_LIMITED(s,v) \
37741- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37742- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37743- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37744+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37745+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37746+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37747
37748
37749 static void stat_event(struct atm_dev *dev)
37750@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37751 if (reason & uPD98402_INT_PFM) stat_event(dev);
37752 if (reason & uPD98402_INT_PCO) {
37753 (void) GET(PCOCR); /* clear interrupt cause */
37754- atomic_add(GET(HECCT),
37755+ atomic_add_unchecked(GET(HECCT),
37756 &PRIV(dev)->sonet_stats.uncorr_hcs);
37757 }
37758 if ((reason & uPD98402_INT_RFO) &&
37759@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37760 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37761 uPD98402_INT_LOS),PIMR); /* enable them */
37762 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37763- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37764- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37765- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37766+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37767+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37768+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37769 return 0;
37770 }
37771
37772diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37773index cecfb94..87009ec 100644
37774--- a/drivers/atm/zatm.c
37775+++ b/drivers/atm/zatm.c
37776@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37777 }
37778 if (!size) {
37779 dev_kfree_skb_irq(skb);
37780- if (vcc) atomic_inc(&vcc->stats->rx_err);
37781+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37782 continue;
37783 }
37784 if (!atm_charge(vcc,skb->truesize)) {
37785@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37786 skb->len = size;
37787 ATM_SKB(skb)->vcc = vcc;
37788 vcc->push(vcc,skb);
37789- atomic_inc(&vcc->stats->rx);
37790+ atomic_inc_unchecked(&vcc->stats->rx);
37791 }
37792 zout(pos & 0xffff,MTA(mbx));
37793 #if 0 /* probably a stupid idea */
37794@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37795 skb_queue_head(&zatm_vcc->backlog,skb);
37796 break;
37797 }
37798- atomic_inc(&vcc->stats->tx);
37799+ atomic_inc_unchecked(&vcc->stats->tx);
37800 wake_up(&zatm_vcc->tx_wait);
37801 }
37802
37803diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37804index 79bc203..fa3945b 100644
37805--- a/drivers/base/bus.c
37806+++ b/drivers/base/bus.c
37807@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37808 return -EINVAL;
37809
37810 mutex_lock(&subsys->p->mutex);
37811- list_add_tail(&sif->node, &subsys->p->interfaces);
37812+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37813 if (sif->add_dev) {
37814 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37815 while ((dev = subsys_dev_iter_next(&iter)))
37816@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37817 subsys = sif->subsys;
37818
37819 mutex_lock(&subsys->p->mutex);
37820- list_del_init(&sif->node);
37821+ pax_list_del_init((struct list_head *)&sif->node);
37822 if (sif->remove_dev) {
37823 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37824 while ((dev = subsys_dev_iter_next(&iter)))
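
The list nodes touched here hang off structures that grsecurity constifies, so a plain list_add_tail() would fault on the read-only mapping. The pax_list_* helpers wrap the ordinary list primitives in a write-enable window. A simplified sketch of that wrapper shape, assuming the pax_open_kernel()/pax_close_kernel() primitives that appear elsewhere in this patch:

	#include <linux/list.h>

	/* Sketch (simplified): operate on a list node living in
	 * write-protected memory by briefly lifting write protection. */
	static inline void pax_list_add_tail(struct list_head *new,
					     struct list_head *head)
	{
		pax_open_kernel();	/* e.g. clear CR0.WP on x86 */
		list_add_tail(new, head);
		pax_close_kernel();	/* restore write protection */
	}
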
37825diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37826index 25798db..15f130e 100644
37827--- a/drivers/base/devtmpfs.c
37828+++ b/drivers/base/devtmpfs.c
37829@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37830 if (!thread)
37831 return 0;
37832
37833- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37834+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37835 if (err)
37836 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37837 else
37838@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37839 *err = sys_unshare(CLONE_NEWNS);
37840 if (*err)
37841 goto out;
37842- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37843+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37844 if (*err)
37845 goto out;
37846- sys_chdir("/.."); /* will traverse into overmounted root */
37847- sys_chroot(".");
37848+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37849+ sys_chroot((char __force_user *)".");
37850 complete(&setup_done);
37851 while (1) {
37852 spin_lock(&req_lock);
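
devtmpfs invokes the mount/chdir/chroot syscalls with kernel-space strings; under PaX's user/kernel address-space separation those entry points expect __user pointers, so the patch force-casts with __force_user. A sketch of how such an annotation is commonly composed for sparse checking — the exact definition used by this patch is an assumption here:

	/* Sketch: combine sparse's __force (suppress the address-space
	 * warning) with the __user space annotation. */
	#ifdef __CHECKER__
	# define __force_user	__force __user
	#else
	# define __force_user
	#endif

	/* usage, as in the hunk above: */
	err = sys_mount((char __force_user *)"devtmpfs",
			(char __force_user *)mntdir,
			(char __force_user *)"devtmpfs", MS_SILENT, NULL);
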
37853diff --git a/drivers/base/node.c b/drivers/base/node.c
37854index 36fabe43..8cfc112 100644
37855--- a/drivers/base/node.c
37856+++ b/drivers/base/node.c
37857@@ -615,7 +615,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37858 struct node_attr {
37859 struct device_attribute attr;
37860 enum node_states state;
37861-};
37862+} __do_const;
37863
37864 static ssize_t show_node_state(struct device *dev,
37865 struct device_attribute *attr, char *buf)
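
__do_const marks a function-pointer-bearing structure so the grsecurity constify GCC plugin places its instances in read-only memory even without an explicit const qualifier. The spelling below is a sketch; the attribute name is an assumption:

	/* Sketch: with the constify plugin active, the attribute forces
	 * instances into a write-protected section; otherwise no-op. */
	#ifdef CONSTIFY_PLUGIN
	# define __do_const	__attribute__((do_const))
	#else
	# define __do_const
	#endif

	struct node_attr {
		struct device_attribute attr;	/* holds show/store fn ptrs */
		enum node_states state;
	} __do_const;	/* the function pointers can no longer be overwritten */
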
37866diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37867index 45937f8..b9a342e 100644
37868--- a/drivers/base/power/domain.c
37869+++ b/drivers/base/power/domain.c
37870@@ -1698,7 +1698,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37871 {
37872 struct cpuidle_driver *cpuidle_drv;
37873 struct gpd_cpuidle_data *cpuidle_data;
37874- struct cpuidle_state *idle_state;
37875+ cpuidle_state_no_const *idle_state;
37876 int ret = 0;
37877
37878 if (IS_ERR_OR_NULL(genpd) || state < 0)
37879@@ -1766,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37880 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37881 {
37882 struct gpd_cpuidle_data *cpuidle_data;
37883- struct cpuidle_state *idle_state;
37884+ cpuidle_state_no_const *idle_state;
37885 int ret = 0;
37886
37887 if (IS_ERR_OR_NULL(genpd))
37888@@ -2195,7 +2195,10 @@ int genpd_dev_pm_attach(struct device *dev)
37889 return ret;
37890 }
37891
37892- dev->pm_domain->detach = genpd_dev_pm_detach;
37893+ pax_open_kernel();
37894+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37895+ pax_close_kernel();
37896+
37897 pm_genpd_poweron(pd);
37898
37899 return 0;
37900diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37901index d2be3f9..0a3167a 100644
37902--- a/drivers/base/power/sysfs.c
37903+++ b/drivers/base/power/sysfs.c
37904@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37905 return -EIO;
37906 }
37907 }
37908- return sprintf(buf, p);
37909+ return sprintf(buf, "%s", p);
37910 }
37911
37912 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
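
The sysfs change is a straight format-string hardening fix: sprintf(buf, p) interprets any '%' in the status string as a conversion specifier and reads varargs that were never passed, while sprintf(buf, "%s", p) copies the string verbatim. A self-contained demonstration with a hypothetical status text:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical status text containing a '%' */
		const char *p = "suspended (95% capacity)";
		char buf[64];

		/* BAD: the '%' in p would be parsed as a conversion and
		 * sprintf would read a nonexistent vararg:
		 *	sprintf(buf, p);
		 */

		/* GOOD: p is copied literally. */
		snprintf(buf, sizeof(buf), "%s", p);
		puts(buf);
		return 0;
	}
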
37913diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37914index aab7158..b172db2 100644
37915--- a/drivers/base/power/wakeup.c
37916+++ b/drivers/base/power/wakeup.c
37917@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37918 * They need to be modified together atomically, so it's better to use one
37919 * atomic variable to hold them both.
37920 */
37921-static atomic_t combined_event_count = ATOMIC_INIT(0);
37922+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37923
37924 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37925 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37926
37927 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37928 {
37929- unsigned int comb = atomic_read(&combined_event_count);
37930+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37931
37932 *cnt = (comb >> IN_PROGRESS_BITS);
37933 *inpr = comb & MAX_IN_PROGRESS;
37934@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37935 ws->start_prevent_time = ws->last_time;
37936
37937 /* Increment the counter of events in progress. */
37938- cec = atomic_inc_return(&combined_event_count);
37939+ cec = atomic_inc_return_unchecked(&combined_event_count);
37940
37941 trace_wakeup_source_activate(ws->name, cec);
37942 }
37943@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37944 * Increment the counter of registered wakeup events and decrement the
37945 * couter of wakeup events in progress simultaneously.
37946 */
37947- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37948+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37949 trace_wakeup_source_deactivate(ws->name, cec);
37950
37951 split_counters(&cnt, &inpr);
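
combined_event_count deliberately packs two counters into one word — total registered wakeup events in the high half, events in progress in the low half — so the pair always updates atomically; wraparound of the packed word is expected, which is why it becomes atomic_unchecked_t. The packing arithmetic, restated from the hunks above:

	/* Field layout of combined_event_count (per the source above):
	 * high half = total events, low half = events in progress. */
	#define IN_PROGRESS_BITS  (sizeof(int) * 4)	/* half the word */
	#define MAX_IN_PROGRESS   ((1 << IN_PROGRESS_BITS) - 1)

	static void split(unsigned int comb,
			  unsigned int *cnt, unsigned int *inpr)
	{
		*cnt  = comb >> IN_PROGRESS_BITS;
		*inpr = comb & MAX_IN_PROGRESS;
	}

	/* Finishing an event adds MAX_IN_PROGRESS + 0: adding 2^16 - 1 is
	 * +1 to the total in the high half and -1 (with borrow) to the
	 * in-progress count in the low half, in a single atomic add. */
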
37952diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37953index 8d98a32..61d3165 100644
37954--- a/drivers/base/syscore.c
37955+++ b/drivers/base/syscore.c
37956@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37957 void register_syscore_ops(struct syscore_ops *ops)
37958 {
37959 mutex_lock(&syscore_ops_lock);
37960- list_add_tail(&ops->node, &syscore_ops_list);
37961+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37962 mutex_unlock(&syscore_ops_lock);
37963 }
37964 EXPORT_SYMBOL_GPL(register_syscore_ops);
37965@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37966 void unregister_syscore_ops(struct syscore_ops *ops)
37967 {
37968 mutex_lock(&syscore_ops_lock);
37969- list_del(&ops->node);
37970+ pax_list_del((struct list_head *)&ops->node);
37971 mutex_unlock(&syscore_ops_lock);
37972 }
37973 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37974diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37975index ff20f19..018f1da 100644
37976--- a/drivers/block/cciss.c
37977+++ b/drivers/block/cciss.c
37978@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37979 while (!list_empty(&h->reqQ)) {
37980 c = list_entry(h->reqQ.next, CommandList_struct, list);
37981 /* can't do anything if fifo is full */
37982- if ((h->access.fifo_full(h))) {
37983+ if ((h->access->fifo_full(h))) {
37984 dev_warn(&h->pdev->dev, "fifo full\n");
37985 break;
37986 }
37987@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37988 h->Qdepth--;
37989
37990 /* Tell the controller execute command */
37991- h->access.submit_command(h, c);
37992+ h->access->submit_command(h, c);
37993
37994 /* Put job onto the completed Q */
37995 addQ(&h->cmpQ, c);
37996@@ -3444,17 +3444,17 @@ startio:
37997
37998 static inline unsigned long get_next_completion(ctlr_info_t *h)
37999 {
38000- return h->access.command_completed(h);
38001+ return h->access->command_completed(h);
38002 }
38003
38004 static inline int interrupt_pending(ctlr_info_t *h)
38005 {
38006- return h->access.intr_pending(h);
38007+ return h->access->intr_pending(h);
38008 }
38009
38010 static inline long interrupt_not_for_us(ctlr_info_t *h)
38011 {
38012- return ((h->access.intr_pending(h) == 0) ||
38013+ return ((h->access->intr_pending(h) == 0) ||
38014 (h->interrupts_enabled == 0));
38015 }
38016
38017@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38018 u32 a;
38019
38020 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38021- return h->access.command_completed(h);
38022+ return h->access->command_completed(h);
38023
38024 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38025 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38026@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38027 trans_support & CFGTBL_Trans_use_short_tags);
38028
38029 /* Change the access methods to the performant access methods */
38030- h->access = SA5_performant_access;
38031+ h->access = &SA5_performant_access;
38032 h->transMethod = CFGTBL_Trans_Performant;
38033
38034 return;
38035@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38036 if (prod_index < 0)
38037 return -ENODEV;
38038 h->product_name = products[prod_index].product_name;
38039- h->access = *(products[prod_index].access);
38040+ h->access = products[prod_index].access;
38041
38042 if (cciss_board_disabled(h)) {
38043 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38044@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38045 }
38046
38047 /* make sure the board interrupts are off */
38048- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38049+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38050 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38051 if (rc)
38052 goto clean2;
38053@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38054 * fake ones to scoop up any residual completions.
38055 */
38056 spin_lock_irqsave(&h->lock, flags);
38057- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38058+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38059 spin_unlock_irqrestore(&h->lock, flags);
38060 free_irq(h->intr[h->intr_mode], h);
38061 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38062@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38063 dev_info(&h->pdev->dev, "Board READY.\n");
38064 dev_info(&h->pdev->dev,
38065 "Waiting for stale completions to drain.\n");
38066- h->access.set_intr_mask(h, CCISS_INTR_ON);
38067+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38068 msleep(10000);
38069- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38070+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38071
38072 rc = controller_reset_failed(h->cfgtable);
38073 if (rc)
38074@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38075 cciss_scsi_setup(h);
38076
38077 /* Turn the interrupts on so we can service requests */
38078- h->access.set_intr_mask(h, CCISS_INTR_ON);
38079+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38080
38081 /* Get the firmware version */
38082 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38083@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38084 kfree(flush_buf);
38085 if (return_code != IO_OK)
38086 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38087- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38088+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38089 free_irq(h->intr[h->intr_mode], h);
38090 }
38091
38092diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38093index 7fda30e..2f27946 100644
38094--- a/drivers/block/cciss.h
38095+++ b/drivers/block/cciss.h
38096@@ -101,7 +101,7 @@ struct ctlr_info
38097 /* information about each logical volume */
38098 drive_info_struct *drv[CISS_MAX_LUN];
38099
38100- struct access_method access;
38101+ struct access_method *access;
38102
38103 /* queue and queue Info */
38104 struct list_head reqQ;
38105@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38106 }
38107
38108 static struct access_method SA5_access = {
38109- SA5_submit_command,
38110- SA5_intr_mask,
38111- SA5_fifo_full,
38112- SA5_intr_pending,
38113- SA5_completed,
38114+ .submit_command = SA5_submit_command,
38115+ .set_intr_mask = SA5_intr_mask,
38116+ .fifo_full = SA5_fifo_full,
38117+ .intr_pending = SA5_intr_pending,
38118+ .command_completed = SA5_completed,
38119 };
38120
38121 static struct access_method SA5B_access = {
38122- SA5_submit_command,
38123- SA5B_intr_mask,
38124- SA5_fifo_full,
38125- SA5B_intr_pending,
38126- SA5_completed,
38127+ .submit_command = SA5_submit_command,
38128+ .set_intr_mask = SA5B_intr_mask,
38129+ .fifo_full = SA5_fifo_full,
38130+ .intr_pending = SA5B_intr_pending,
38131+ .command_completed = SA5_completed,
38132 };
38133
38134 static struct access_method SA5_performant_access = {
38135- SA5_submit_command,
38136- SA5_performant_intr_mask,
38137- SA5_fifo_full,
38138- SA5_performant_intr_pending,
38139- SA5_performant_completed,
38140+ .submit_command = SA5_submit_command,
38141+ .set_intr_mask = SA5_performant_intr_mask,
38142+ .fifo_full = SA5_fifo_full,
38143+ .intr_pending = SA5_performant_intr_pending,
38144+ .command_completed = SA5_performant_completed,
38145 };
38146
38147 struct board_type {
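
The cciss conversion (mirrored in cpqarray below) is the standard constification recipe: the per-controller copy of the ops struct becomes a pointer into the static, now effectively read-only tables, every h->access.fn() call becomes h->access->fn(), and the tables switch to designated initializers so member order can never silently desynchronize. The shape of the change as a self-contained sketch with simplified names:

	/* Sketch of the constified-ops recipe used in these hunks. */
	struct ops {
		void (*submit)(void *ctx);
		int  (*pending)(void *ctx);
	};

	static void sa5_submit(void *ctx) { /* ... */ }
	static int  sa5_pending(void *ctx) { return 0; }

	static const struct ops sa5_ops = {
		.submit  = sa5_submit,	/* designated: immune to reordering */
		.pending = sa5_pending,
	};

	struct ctlr {
		const struct ops *access;  /* was: struct ops access; (a copy) */
	};

	/* probe:  c->access = &sa5_ops;   (was: c->access = sa5_ops;)   */
	/* usage:  c->access->submit(c);   (was: c->access.submit(c);)   */
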
38148diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38149index 2b94403..fd6ad1f 100644
38150--- a/drivers/block/cpqarray.c
38151+++ b/drivers/block/cpqarray.c
38152@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38153 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38154 goto Enomem4;
38155 }
38156- hba[i]->access.set_intr_mask(hba[i], 0);
38157+ hba[i]->access->set_intr_mask(hba[i], 0);
38158 if (request_irq(hba[i]->intr, do_ida_intr,
38159 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38160 {
38161@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38162 add_timer(&hba[i]->timer);
38163
38164 /* Enable IRQ now that spinlock and rate limit timer are set up */
38165- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38166+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38167
38168 for(j=0; j<NWD; j++) {
38169 struct gendisk *disk = ida_gendisk[i][j];
38170@@ -694,7 +694,7 @@ DBGINFO(
38171 for(i=0; i<NR_PRODUCTS; i++) {
38172 if (board_id == products[i].board_id) {
38173 c->product_name = products[i].product_name;
38174- c->access = *(products[i].access);
38175+ c->access = products[i].access;
38176 break;
38177 }
38178 }
38179@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38180 hba[ctlr]->intr = intr;
38181 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38182 hba[ctlr]->product_name = products[j].product_name;
38183- hba[ctlr]->access = *(products[j].access);
38184+ hba[ctlr]->access = products[j].access;
38185 hba[ctlr]->ctlr = ctlr;
38186 hba[ctlr]->board_id = board_id;
38187 hba[ctlr]->pci_dev = NULL; /* not PCI */
38188@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38189
38190 while((c = h->reqQ) != NULL) {
38191 /* Can't do anything if we're busy */
38192- if (h->access.fifo_full(h) == 0)
38193+ if (h->access->fifo_full(h) == 0)
38194 return;
38195
38196 /* Get the first entry from the request Q */
38197@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38198 h->Qdepth--;
38199
38200 /* Tell the controller to do our bidding */
38201- h->access.submit_command(h, c);
38202+ h->access->submit_command(h, c);
38203
38204 /* Get onto the completion Q */
38205 addQ(&h->cmpQ, c);
38206@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38207 unsigned long flags;
38208 __u32 a,a1;
38209
38210- istat = h->access.intr_pending(h);
38211+ istat = h->access->intr_pending(h);
38212 /* Is this interrupt for us? */
38213 if (istat == 0)
38214 return IRQ_NONE;
38215@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38216 */
38217 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38218 if (istat & FIFO_NOT_EMPTY) {
38219- while((a = h->access.command_completed(h))) {
38220+ while((a = h->access->command_completed(h))) {
38221 a1 = a; a &= ~3;
38222 if ((c = h->cmpQ) == NULL)
38223 {
38224@@ -1448,11 +1448,11 @@ static int sendcmd(
38225 /*
38226 * Disable interrupt
38227 */
38228- info_p->access.set_intr_mask(info_p, 0);
38229+ info_p->access->set_intr_mask(info_p, 0);
38230 /* Make sure there is room in the command FIFO */
38231 /* Actually it should be completely empty at this time. */
38232 for (i = 200000; i > 0; i--) {
38233- temp = info_p->access.fifo_full(info_p);
38234+ temp = info_p->access->fifo_full(info_p);
38235 if (temp != 0) {
38236 break;
38237 }
38238@@ -1465,7 +1465,7 @@ DBG(
38239 /*
38240 * Send the cmd
38241 */
38242- info_p->access.submit_command(info_p, c);
38243+ info_p->access->submit_command(info_p, c);
38244 complete = pollcomplete(ctlr);
38245
38246 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38247@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38248 * we check the new geometry. Then turn interrupts back on when
38249 * we're done.
38250 */
38251- host->access.set_intr_mask(host, 0);
38252+ host->access->set_intr_mask(host, 0);
38253 getgeometry(ctlr);
38254- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38255+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38256
38257 for(i=0; i<NWD; i++) {
38258 struct gendisk *disk = ida_gendisk[ctlr][i];
38259@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38260 /* Wait (up to 2 seconds) for a command to complete */
38261
38262 for (i = 200000; i > 0; i--) {
38263- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38264+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38265 if (done == 0) {
38266 udelay(10); /* a short fixed delay */
38267 } else
38268diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38269index be73e9d..7fbf140 100644
38270--- a/drivers/block/cpqarray.h
38271+++ b/drivers/block/cpqarray.h
38272@@ -99,7 +99,7 @@ struct ctlr_info {
38273 drv_info_t drv[NWD];
38274 struct proc_dir_entry *proc;
38275
38276- struct access_method access;
38277+ struct access_method *access;
38278
38279 cmdlist_t *reqQ;
38280 cmdlist_t *cmpQ;
38281diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38282index 434c77d..6d3219a 100644
38283--- a/drivers/block/drbd/drbd_bitmap.c
38284+++ b/drivers/block/drbd/drbd_bitmap.c
38285@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38286 submit_bio(rw, bio);
38287 /* this should not count as user activity and cause the
38288 * resync to throttle -- see drbd_rs_should_slow_down(). */
38289- atomic_add(len >> 9, &device->rs_sect_ev);
38290+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38291 }
38292 }
38293
38294diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38295index b905e98..0812ed8 100644
38296--- a/drivers/block/drbd/drbd_int.h
38297+++ b/drivers/block/drbd/drbd_int.h
38298@@ -385,7 +385,7 @@ struct drbd_epoch {
38299 struct drbd_connection *connection;
38300 struct list_head list;
38301 unsigned int barrier_nr;
38302- atomic_t epoch_size; /* increased on every request added. */
38303+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38304 atomic_t active; /* increased on every req. added, and dec on every finished. */
38305 unsigned long flags;
38306 };
38307@@ -946,7 +946,7 @@ struct drbd_device {
38308 unsigned int al_tr_number;
38309 int al_tr_cycle;
38310 wait_queue_head_t seq_wait;
38311- atomic_t packet_seq;
38312+ atomic_unchecked_t packet_seq;
38313 unsigned int peer_seq;
38314 spinlock_t peer_seq_lock;
38315 unsigned long comm_bm_set; /* communicated number of set bits. */
38316@@ -955,8 +955,8 @@ struct drbd_device {
38317 struct mutex own_state_mutex;
38318 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38319 char congestion_reason; /* Why we where congested... */
38320- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38321- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38322+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38323+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38324 int rs_last_sect_ev; /* counter to compare with */
38325 int rs_last_events; /* counter of read or write "events" (unit sectors)
38326 * on the lower level device when we last looked. */
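
Note the line these drbd hunks draw: epoch_size, packet_seq and the resync-rate counters become atomic_unchecked_t because they are bookkeeping whose wrap is benign, while epoch->active, which gates object lifetime like a refcount, stays a checked atomic_t. In sketch form:

	/* Sketch of the checked/unchecked split applied in drbd_int.h. */
	struct epoch_sketch {
		atomic_unchecked_t size;	/* statistics: wrap is harmless */
		atomic_t           active;	/* lifetime count: keep the
						 * PaX overflow check */
	};
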
38327diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38328index 1fc8342..7e7742b 100644
38329--- a/drivers/block/drbd/drbd_main.c
38330+++ b/drivers/block/drbd/drbd_main.c
38331@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38332 p->sector = sector;
38333 p->block_id = block_id;
38334 p->blksize = blksize;
38335- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38336+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38337 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38338 }
38339
38340@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38341 return -EIO;
38342 p->sector = cpu_to_be64(req->i.sector);
38343 p->block_id = (unsigned long)req;
38344- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38345+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38346 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38347 if (device->state.conn >= C_SYNC_SOURCE &&
38348 device->state.conn <= C_PAUSED_SYNC_T)
38349@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38350 atomic_set(&device->unacked_cnt, 0);
38351 atomic_set(&device->local_cnt, 0);
38352 atomic_set(&device->pp_in_use_by_net, 0);
38353- atomic_set(&device->rs_sect_in, 0);
38354- atomic_set(&device->rs_sect_ev, 0);
38355+ atomic_set_unchecked(&device->rs_sect_in, 0);
38356+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38357 atomic_set(&device->ap_in_flight, 0);
38358 atomic_set(&device->md_io.in_use, 0);
38359
38360@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38361 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38362 struct drbd_resource *resource = connection->resource;
38363
38364- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38365- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38366+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38367+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38368 kfree(connection->current_epoch);
38369
38370 idr_destroy(&connection->peer_devices);
38371diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38372index 74df8cf..e41fc24 100644
38373--- a/drivers/block/drbd/drbd_nl.c
38374+++ b/drivers/block/drbd/drbd_nl.c
38375@@ -3637,13 +3637,13 @@ finish:
38376
38377 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38378 {
38379- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38380+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38381 struct sk_buff *msg;
38382 struct drbd_genlmsghdr *d_out;
38383 unsigned seq;
38384 int err = -ENOMEM;
38385
38386- seq = atomic_inc_return(&drbd_genl_seq);
38387+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38388 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38389 if (!msg)
38390 goto failed;
38391diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38392index cee2035..22f66bd 100644
38393--- a/drivers/block/drbd/drbd_receiver.c
38394+++ b/drivers/block/drbd/drbd_receiver.c
38395@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38396 struct drbd_device *device = peer_device->device;
38397 int err;
38398
38399- atomic_set(&device->packet_seq, 0);
38400+ atomic_set_unchecked(&device->packet_seq, 0);
38401 device->peer_seq = 0;
38402
38403 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38404@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38405 do {
38406 next_epoch = NULL;
38407
38408- epoch_size = atomic_read(&epoch->epoch_size);
38409+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38410
38411 switch (ev & ~EV_CLEANUP) {
38412 case EV_PUT:
38413@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38414 rv = FE_DESTROYED;
38415 } else {
38416 epoch->flags = 0;
38417- atomic_set(&epoch->epoch_size, 0);
38418+ atomic_set_unchecked(&epoch->epoch_size, 0);
38419 /* atomic_set(&epoch->active, 0); is already zero */
38420 if (rv == FE_STILL_LIVE)
38421 rv = FE_RECYCLED;
38422@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38423 conn_wait_active_ee_empty(connection);
38424 drbd_flush(connection);
38425
38426- if (atomic_read(&connection->current_epoch->epoch_size)) {
38427+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38428 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38429 if (epoch)
38430 break;
38431@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38432 }
38433
38434 epoch->flags = 0;
38435- atomic_set(&epoch->epoch_size, 0);
38436+ atomic_set_unchecked(&epoch->epoch_size, 0);
38437 atomic_set(&epoch->active, 0);
38438
38439 spin_lock(&connection->epoch_lock);
38440- if (atomic_read(&connection->current_epoch->epoch_size)) {
38441+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38442 list_add(&epoch->list, &connection->current_epoch->list);
38443 connection->current_epoch = epoch;
38444 connection->epochs++;
38445@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38446 list_add_tail(&peer_req->w.list, &device->sync_ee);
38447 spin_unlock_irq(&device->resource->req_lock);
38448
38449- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38450+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38451 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38452 return 0;
38453
38454@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38455 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38456 }
38457
38458- atomic_add(pi->size >> 9, &device->rs_sect_in);
38459+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38460
38461 return err;
38462 }
38463@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38464
38465 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38466 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38467- atomic_inc(&connection->current_epoch->epoch_size);
38468+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38469 err2 = drbd_drain_block(peer_device, pi->size);
38470 if (!err)
38471 err = err2;
38472@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38473
38474 spin_lock(&connection->epoch_lock);
38475 peer_req->epoch = connection->current_epoch;
38476- atomic_inc(&peer_req->epoch->epoch_size);
38477+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38478 atomic_inc(&peer_req->epoch->active);
38479 spin_unlock(&connection->epoch_lock);
38480
38481@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38482
38483 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38484 (int)part_stat_read(&disk->part0, sectors[1]) -
38485- atomic_read(&device->rs_sect_ev);
38486+ atomic_read_unchecked(&device->rs_sect_ev);
38487
38488 if (atomic_read(&device->ap_actlog_cnt)
38489 || curr_events - device->rs_last_events > 64) {
38490@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38491 device->use_csums = true;
38492 } else if (pi->cmd == P_OV_REPLY) {
38493 /* track progress, we may need to throttle */
38494- atomic_add(size >> 9, &device->rs_sect_in);
38495+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38496 peer_req->w.cb = w_e_end_ov_reply;
38497 dec_rs_pending(device);
38498 /* drbd_rs_begin_io done when we sent this request,
38499@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38500 goto out_free_e;
38501
38502 submit_for_resync:
38503- atomic_add(size >> 9, &device->rs_sect_ev);
38504+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38505
38506 submit:
38507 update_receiver_timing_details(connection, drbd_submit_peer_request);
38508@@ -4564,7 +4564,7 @@ struct data_cmd {
38509 int expect_payload;
38510 size_t pkt_size;
38511 int (*fn)(struct drbd_connection *, struct packet_info *);
38512-};
38513+} __do_const;
38514
38515 static struct data_cmd drbd_cmd_handler[] = {
38516 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38517@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38518 if (!list_empty(&connection->current_epoch->list))
38519 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38520 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38521- atomic_set(&connection->current_epoch->epoch_size, 0);
38522+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38523 connection->send.seen_any_write_yet = false;
38524
38525 drbd_info(connection, "Connection closed\n");
38526@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38527 put_ldev(device);
38528 }
38529 dec_rs_pending(device);
38530- atomic_add(blksize >> 9, &device->rs_sect_in);
38531+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38532
38533 return 0;
38534 }
38535@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38536 struct asender_cmd {
38537 size_t pkt_size;
38538 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38539-};
38540+} __do_const;
38541
38542 static struct asender_cmd asender_tbl[] = {
38543 [P_PING] = { 0, got_Ping },
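
Tagging struct data_cmd and struct asender_cmd with __do_const hands them to grsecurity's constify GCC plugin, which forces every instance of these function-pointer tables into read-only memory so a kernel write primitive cannot retarget the ->fn handlers. Without the plugin, the equivalent hardening for a single dispatch table is simply to declare it const; a self-contained sketch (handler names illustrative):

#include <stddef.h>

struct handler {
	size_t pkt_size;
	int (*fn)(void *pkt);
};

static int got_ping(void *pkt) { (void)pkt; return 0; }

/* const places the function-pointer table in .rodata */
static const struct handler tbl[] = {
	{ 0, got_ping },
};

The plugin approach scales better in the kernel because annotating the type once constifies every table of that type, including ones added later.
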
38544diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38545index d0fae55..4469096 100644
38546--- a/drivers/block/drbd/drbd_worker.c
38547+++ b/drivers/block/drbd/drbd_worker.c
38548@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38549 list_add_tail(&peer_req->w.list, &device->read_ee);
38550 spin_unlock_irq(&device->resource->req_lock);
38551
38552- atomic_add(size >> 9, &device->rs_sect_ev);
38553+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38554 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38555 return 0;
38556
38557@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38558 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38559 int number, mxb;
38560
38561- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38562+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38563 device->rs_in_flight -= sect_in;
38564
38565 rcu_read_lock();
38566@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38567 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38568 struct fifo_buffer *plan;
38569
38570- atomic_set(&device->rs_sect_in, 0);
38571- atomic_set(&device->rs_sect_ev, 0);
38572+ atomic_set_unchecked(&device->rs_sect_in, 0);
38573+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38574 device->rs_in_flight = 0;
38575 device->rs_last_events =
38576 (int)part_stat_read(&disk->part0, sectors[0]) +
38577diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38578index 773e964..e85af00 100644
38579--- a/drivers/block/loop.c
38580+++ b/drivers/block/loop.c
38581@@ -234,7 +234,7 @@ static int __do_lo_send_write(struct file *file,
38582
38583 file_start_write(file);
38584 set_fs(get_ds());
38585- bw = file->f_op->write(file, buf, len, &pos);
38586+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38587 set_fs(old_fs);
38588 file_end_write(file);
38589 if (likely(bw == len))
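
loop.c invokes the backing file's ->write() under set_fs(get_ds()), handing a kernel buffer to an interface typed const char __user *. Under sparse (and PaX's UDEREF separation of user and kernel address spaces) that needs an explicit cast; __force_user is the grsecurity spelling of __force __user. A sketch of the annotations, assuming the usual sparse macro scheme:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user	/* grsecurity composes the two */

/* after set_fs(KERNEL_DS), passing a kernel pointer where a __user
 * pointer is expected is legitimate; the cast records that this is
 * deliberate rather than a missing copy_from_user() */
static long call_write(long (*write)(const char __user *, long),
		       const char *kbuf, long len)
{
	return write((const char __force_user *)kbuf, len);
}
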
38590diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38591index 09e628da..7607aaa 100644
38592--- a/drivers/block/pktcdvd.c
38593+++ b/drivers/block/pktcdvd.c
38594@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38595
38596 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38597 {
38598- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38599+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38600 }
38601
38602 /*
38603@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38604 return -EROFS;
38605 }
38606 pd->settings.fp = ti.fp;
38607- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38608+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38609
38610 if (ti.nwa_v) {
38611 pd->nwa = be32_to_cpu(ti.next_writable);
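
The pktcdvd change looks like a functional no-op: pd->settings.size is a 32-bit field, and both expressions yield the same mask whether the subtraction happens in 32 or 64 bits. Writing 1UL promotes the arithmetic to unsigned long up front, which plausibly exists to keep grsecurity's size_overflow instrumentation from flagging the narrow subtraction; a quick check that the two forms agree:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 2048;			/* like pd->settings.size */
	uint64_t m32 = ~(uint64_t)(size - 1);	/* subtract in 32 bits */
	uint64_t m64 = ~(uint64_t)(size - 1UL);	/* subtract in UL width */
	printf("%016llx\n%016llx\n",
	       (unsigned long long)m32, (unsigned long long)m64);
	return 0;				/* both print ...fffff800 */
}
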
38612diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38613index b67066d..515b7f4 100644
38614--- a/drivers/block/rbd.c
38615+++ b/drivers/block/rbd.c
38616@@ -64,7 +64,7 @@
38617 * If the counter is already at its maximum value returns
38618 * -EINVAL without updating it.
38619 */
38620-static int atomic_inc_return_safe(atomic_t *v)
38621+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38622 {
38623 unsigned int counter;
38624
38625diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38626index e5565fb..71be10b4 100644
38627--- a/drivers/block/smart1,2.h
38628+++ b/drivers/block/smart1,2.h
38629@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38630 }
38631
38632 static struct access_method smart4_access = {
38633- smart4_submit_command,
38634- smart4_intr_mask,
38635- smart4_fifo_full,
38636- smart4_intr_pending,
38637- smart4_completed,
38638+ .submit_command = smart4_submit_command,
38639+ .set_intr_mask = smart4_intr_mask,
38640+ .fifo_full = smart4_fifo_full,
38641+ .intr_pending = smart4_intr_pending,
38642+ .command_completed = smart4_completed,
38643 };
38644
38645 /*
38646@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38647 }
38648
38649 static struct access_method smart2_access = {
38650- smart2_submit_command,
38651- smart2_intr_mask,
38652- smart2_fifo_full,
38653- smart2_intr_pending,
38654- smart2_completed,
38655+ .submit_command = smart2_submit_command,
38656+ .set_intr_mask = smart2_intr_mask,
38657+ .fifo_full = smart2_fifo_full,
38658+ .intr_pending = smart2_intr_pending,
38659+ .command_completed = smart2_completed,
38660 };
38661
38662 /*
38663@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38664 }
38665
38666 static struct access_method smart2e_access = {
38667- smart2e_submit_command,
38668- smart2e_intr_mask,
38669- smart2e_fifo_full,
38670- smart2e_intr_pending,
38671- smart2e_completed,
38672+ .submit_command = smart2e_submit_command,
38673+ .set_intr_mask = smart2e_intr_mask,
38674+ .fifo_full = smart2e_fifo_full,
38675+ .intr_pending = smart2e_intr_pending,
38676+ .command_completed = smart2e_completed,
38677 };
38678
38679 /*
38680@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38681 }
38682
38683 static struct access_method smart1_access = {
38684- smart1_submit_command,
38685- smart1_intr_mask,
38686- smart1_fifo_full,
38687- smart1_intr_pending,
38688- smart1_completed,
38689+ .submit_command = smart1_submit_command,
38690+ .set_intr_mask = smart1_intr_mask,
38691+ .fifo_full = smart1_fifo_full,
38692+ .intr_pending = smart1_intr_pending,
38693+ .command_completed = smart1_completed,
38694 };
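
The smart1,2.h conversions from positional to C99 designated initializers change no behavior, but they keep the access_method tables correct if struct members are ever reordered (grsecurity's structure-layout randomization makes that a live concern) and they document which hook is which. Minimal illustration (names illustrative):

struct access_method {
	void (*submit_command)(void);
	void (*set_intr_mask)(void);
};

static void submit(void) { }
static void mask(void) { }

/* still initializes the right members even if the struct
 * definition is later reordered */
static const struct access_method smart_access = {
	.submit_command	= submit,
	.set_intr_mask	= mask,
};
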
38695diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38696index 55c135b..9f8d60c 100644
38697--- a/drivers/bluetooth/btwilink.c
38698+++ b/drivers/bluetooth/btwilink.c
38699@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38700
38701 static int bt_ti_probe(struct platform_device *pdev)
38702 {
38703- static struct ti_st *hst;
38704+ struct ti_st *hst;
38705 struct hci_dev *hdev;
38706 int err;
38707
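
The btwilink change is a plain bug fix rather than hardening: a function-local static struct ti_st *hst is a single slot shared by every invocation of bt_ti_probe(), so probing a second device would overwrite the first device's state, and the stale pointer would survive remove/re-probe cycles. An automatic variable gives each probe its own pointer, which the function then stores in per-device driver data. The anti-pattern in miniature:

struct dev_state { int id; };

static struct dev_state *probe_buggy(struct dev_state *fresh)
{
	static struct dev_state *slot;	/* one slot shared by ALL probes */
	slot = fresh;			/* second probe clobbers the first */
	return slot;
}

static struct dev_state *probe_fixed(struct dev_state *fresh)
{
	struct dev_state *slot = fresh;	/* per-call, as intended */
	return slot;
}
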
38708diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38709index 5d28a45..a538f90 100644
38710--- a/drivers/cdrom/cdrom.c
38711+++ b/drivers/cdrom/cdrom.c
38712@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38713 ENSURE(reset, CDC_RESET);
38714 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38715 cdi->mc_flags = 0;
38716- cdo->n_minors = 0;
38717 cdi->options = CDO_USE_FFLAGS;
38718
38719 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38720@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38721 else
38722 cdi->cdda_method = CDDA_OLD;
38723
38724- if (!cdo->generic_packet)
38725- cdo->generic_packet = cdrom_dummy_generic_packet;
38726+ if (!cdo->generic_packet) {
38727+ pax_open_kernel();
38728+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38729+ pax_close_kernel();
38730+ }
38731
38732 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38733 mutex_lock(&cdrom_mutex);
38734@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38735 if (cdi->exit)
38736 cdi->exit(cdi);
38737
38738- cdi->ops->n_minors--;
38739 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38740 }
38741
38742@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38743 */
38744 nr = nframes;
38745 do {
38746- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38747+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38748 if (cgc.buffer)
38749 break;
38750
38751@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38752 struct cdrom_device_info *cdi;
38753 int ret;
38754
38755- ret = scnprintf(info + *pos, max_size - *pos, header);
38756+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38757 if (!ret)
38758 return 1;
38759
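
Two distinct fixes in cdrom.c: the kmalloc-to-kzalloc switch zeroes the CD_FRAMESIZE_RAW staging buffer so a short device read cannot leak stale heap contents to userspace, and the scnprintf() change stops passing caller-provided text as the format string. The latter is the classic CWE-134 shape: if header ever contained % conversions, scnprintf would walk nonexistent varargs. The fixed-format idiom in isolation:

#include <stdio.h>

static void emit(const char *header)
{
	/* BAD:  printf(header);  -- any '%' in header is interpreted */
	printf("%s", header);	/* data stays data */
}

int main(void)
{
	emit("throughput: 100%s %n of target\n");	/* printed literally */
	return 0;
}

(The pax_open_kernel()/pax_close_kernel() pair around the generic_packet assignment briefly lifts write protection so the otherwise read-only, constified ops structure can be patched in place.)
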
38760diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38761index 584bc31..e64a12c 100644
38762--- a/drivers/cdrom/gdrom.c
38763+++ b/drivers/cdrom/gdrom.c
38764@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38765 .audio_ioctl = gdrom_audio_ioctl,
38766 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38767 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38768- .n_minors = 1,
38769 };
38770
38771 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38772diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38773index a4af822..ed58cd1 100644
38774--- a/drivers/char/Kconfig
38775+++ b/drivers/char/Kconfig
38776@@ -17,7 +17,8 @@ config DEVMEM
38777
38778 config DEVKMEM
38779 bool "/dev/kmem virtual device support"
38780- default y
38781+ default n
38782+ depends on !GRKERNSEC_KMEM
38783 help
38784 Say Y here if you want to support the /dev/kmem device. The
38785 /dev/kmem device is rarely used, but can be used for certain
38786@@ -586,6 +587,7 @@ config DEVPORT
38787 bool
38788 depends on !M68K
38789 depends on ISA || PCI
38790+ depends on !GRKERNSEC_KMEM
38791 default y
38792
38793 source "drivers/s390/char/Kconfig"
38794diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38795index a48e05b..6bac831 100644
38796--- a/drivers/char/agp/compat_ioctl.c
38797+++ b/drivers/char/agp/compat_ioctl.c
38798@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38799 return -ENOMEM;
38800 }
38801
38802- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38803+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38804 sizeof(*usegment) * ureserve.seg_count)) {
38805 kfree(usegment);
38806 kfree(ksegment);
38807diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38808index 09f17eb..8531d2f 100644
38809--- a/drivers/char/agp/frontend.c
38810+++ b/drivers/char/agp/frontend.c
38811@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38812 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38813 return -EFAULT;
38814
38815- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38816+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38817 return -EFAULT;
38818
38819 client = agp_find_client_by_pid(reserve.pid);
38820@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38821 if (segment == NULL)
38822 return -ENOMEM;
38823
38824- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38825+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38826 sizeof(struct agp_segment) * reserve.seg_count)) {
38827 kfree(segment);
38828 return -EFAULT;
38829diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38830index 4f94375..413694e 100644
38831--- a/drivers/char/genrtc.c
38832+++ b/drivers/char/genrtc.c
38833@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38834 switch (cmd) {
38835
38836 case RTC_PLL_GET:
38837+ memset(&pll, 0, sizeof(pll));
38838 if (get_rtc_pll(&pll))
38839 return -EINVAL;
38840 else
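
gen_rtc_ioctl() copies pll back to userspace after filling it; pll lives on the kernel stack, so any field or padding byte that get_rtc_pll() leaves untouched would leak prior stack contents. Zeroing the struct first is the standard infoleak fix. A user-space analog of the pattern (struct and names illustrative):

#include <stdio.h>
#include <string.h>

struct pll_info {
	int min;
	char flag;	/* compiler inserts padding after this member */
	int max;
};

static void fill(struct pll_info *p) { p->min = 1; p->max = 2; }

int main(void)
{
	struct pll_info pll;

	memset(&pll, 0, sizeof(pll));	/* clears fields AND padding */
	fill(&pll);			/* note: never sets .flag */
	fwrite(&pll, sizeof(pll), 1, stdout);	/* copy_to_user stand-in */
	return 0;
}
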
38841diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38842index 5c0baa9..44011b1 100644
38843--- a/drivers/char/hpet.c
38844+++ b/drivers/char/hpet.c
38845@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38846 }
38847
38848 static int
38849-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38850+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38851 struct hpet_info *info)
38852 {
38853 struct hpet_timer __iomem *timer;
38854diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
38855index 24cc4ed..f9807cf 100644
38856--- a/drivers/char/i8k.c
38857+++ b/drivers/char/i8k.c
38858@@ -788,7 +788,7 @@ static const struct i8k_config_data i8k_config_data[] = {
38859 },
38860 };
38861
38862-static struct dmi_system_id i8k_dmi_table[] __initdata = {
38863+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
38864 {
38865 .ident = "Dell Inspiron",
38866 .matches = {
38867diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38868index 9bb5928..57a7801 100644
38869--- a/drivers/char/ipmi/ipmi_msghandler.c
38870+++ b/drivers/char/ipmi/ipmi_msghandler.c
38871@@ -436,7 +436,7 @@ struct ipmi_smi {
38872 struct proc_dir_entry *proc_dir;
38873 char proc_dir_name[10];
38874
38875- atomic_t stats[IPMI_NUM_STATS];
38876+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38877
38878 /*
38879 * run_to_completion duplicate of smb_info, smi_info
38880@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38881 static DEFINE_MUTEX(smi_watchers_mutex);
38882
38883 #define ipmi_inc_stat(intf, stat) \
38884- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38885+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38886 #define ipmi_get_stat(intf, stat) \
38887- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38888+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38889
38890 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38891 "ACPI", "SMBIOS", "PCI",
38892@@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38893 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38894 init_waitqueue_head(&intf->waitq);
38895 for (i = 0; i < IPMI_NUM_STATS; i++)
38896- atomic_set(&intf->stats[i], 0);
38897+ atomic_set_unchecked(&intf->stats[i], 0);
38898
38899 intf->proc_dir = NULL;
38900
38901diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38902index 518585c..6c985cef 100644
38903--- a/drivers/char/ipmi/ipmi_si_intf.c
38904+++ b/drivers/char/ipmi/ipmi_si_intf.c
38905@@ -289,7 +289,7 @@ struct smi_info {
38906 unsigned char slave_addr;
38907
38908 /* Counters and things for the proc filesystem. */
38909- atomic_t stats[SI_NUM_STATS];
38910+ atomic_unchecked_t stats[SI_NUM_STATS];
38911
38912 struct task_struct *thread;
38913
38914@@ -298,9 +298,9 @@ struct smi_info {
38915 };
38916
38917 #define smi_inc_stat(smi, stat) \
38918- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38919+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38920 #define smi_get_stat(smi, stat) \
38921- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38922+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38923
38924 #define SI_MAX_PARMS 4
38925
38926@@ -3498,7 +3498,7 @@ static int try_smi_init(struct smi_info *new_smi)
38927 atomic_set(&new_smi->req_events, 0);
38928 new_smi->run_to_completion = false;
38929 for (i = 0; i < SI_NUM_STATS; i++)
38930- atomic_set(&new_smi->stats[i], 0);
38931+ atomic_set_unchecked(&new_smi->stats[i], 0);
38932
38933 new_smi->interrupt_disabled = true;
38934 atomic_set(&new_smi->need_watch, 0);
38935diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38936index 297110c..3f69b43 100644
38937--- a/drivers/char/mem.c
38938+++ b/drivers/char/mem.c
38939@@ -18,6 +18,7 @@
38940 #include <linux/raw.h>
38941 #include <linux/tty.h>
38942 #include <linux/capability.h>
38943+#include <linux/security.h>
38944 #include <linux/ptrace.h>
38945 #include <linux/device.h>
38946 #include <linux/highmem.h>
38947@@ -36,6 +37,10 @@
38948
38949 #define DEVPORT_MINOR 4
38950
38951+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38952+extern const struct file_operations grsec_fops;
38953+#endif
38954+
38955 static inline unsigned long size_inside_page(unsigned long start,
38956 unsigned long size)
38957 {
38958@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38959
38960 while (cursor < to) {
38961 if (!devmem_is_allowed(pfn)) {
38962+#ifdef CONFIG_GRKERNSEC_KMEM
38963+ gr_handle_mem_readwrite(from, to);
38964+#else
38965 printk(KERN_INFO
38966 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38967 current->comm, from, to);
38968+#endif
38969 return 0;
38970 }
38971 cursor += PAGE_SIZE;
38972@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38973 }
38974 return 1;
38975 }
38976+#elif defined(CONFIG_GRKERNSEC_KMEM)
38977+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38978+{
38979+ return 0;
38980+}
38981 #else
38982 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38983 {
38984@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38985 #endif
38986
38987 while (count > 0) {
38988- unsigned long remaining;
38989+ unsigned long remaining = 0;
38990+ char *temp;
38991
38992 sz = size_inside_page(p, count);
38993
38994@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38995 if (!ptr)
38996 return -EFAULT;
38997
38998- remaining = copy_to_user(buf, ptr, sz);
38999+#ifdef CONFIG_PAX_USERCOPY
39000+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39001+ if (!temp) {
39002+ unxlate_dev_mem_ptr(p, ptr);
39003+ return -ENOMEM;
39004+ }
39005+ remaining = probe_kernel_read(temp, ptr, sz);
39006+#else
39007+ temp = ptr;
39008+#endif
39009+
39010+ if (!remaining)
39011+ remaining = copy_to_user(buf, temp, sz);
39012+
39013+#ifdef CONFIG_PAX_USERCOPY
39014+ kfree(temp);
39015+#endif
39016+
39017 unxlate_dev_mem_ptr(p, ptr);
39018 if (remaining)
39019 return -EFAULT;
39020@@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39021 size_t count, loff_t *ppos)
39022 {
39023 unsigned long p = *ppos;
39024- ssize_t low_count, read, sz;
39025+ ssize_t low_count, read, sz, err = 0;
39026 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39027- int err = 0;
39028
39029 read = 0;
39030 if (p < (unsigned long) high_memory) {
39031@@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39032 }
39033 #endif
39034 while (low_count > 0) {
39035+ char *temp;
39036+
39037 sz = size_inside_page(p, low_count);
39038
39039 /*
39040@@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39041 */
39042 kbuf = xlate_dev_kmem_ptr((void *)p);
39043
39044- if (copy_to_user(buf, kbuf, sz))
39045+#ifdef CONFIG_PAX_USERCOPY
39046+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39047+ if (!temp)
39048+ return -ENOMEM;
39049+ err = probe_kernel_read(temp, kbuf, sz);
39050+#else
39051+ temp = kbuf;
39052+#endif
39053+
39054+ if (!err)
39055+ err = copy_to_user(buf, temp, sz);
39056+
39057+#ifdef CONFIG_PAX_USERCOPY
39058+ kfree(temp);
39059+#endif
39060+
39061+ if (err)
39062 return -EFAULT;
39063 buf += sz;
39064 p += sz;
39065@@ -804,6 +853,9 @@ static const struct memdev {
39066 #ifdef CONFIG_PRINTK
39067 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
39068 #endif
39069+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39070+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
39071+#endif
39072 };
39073
39074 static int memory_open(struct inode *inode, struct file *filp)
39075@@ -865,7 +917,7 @@ static int __init chr_dev_init(void)
39076 continue;
39077
39078 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39079- NULL, devlist[minor].name);
39080+ NULL, "%s", devlist[minor].name);
39081 }
39082
39083 return tty_init();
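
The read_mem()/read_kmem() rework is the PAX_USERCOPY bounce-buffer idiom: instead of handing an arbitrary kernel address straight to copy_to_user(), the bytes are first pulled into a freshly allocated slab object with probe_kernel_read() (which tolerates faulting source addresses), and only that correctly-sized object is copied out, so the USERCOPY slab-bounds checks have something meaningful to verify. A kernel-context sketch of the pattern distilled from the hunks above (GFP_USERCOPY is PaX-specific):

static ssize_t bounce_to_user(char __user *buf, const void *src, size_t sz)
{
	char *temp;
	ssize_t rc = 0;

	temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);
	if (!temp)
		return -ENOMEM;

	if (probe_kernel_read(temp, src, sz))	/* safe wrt bad src */
		rc = -EFAULT;
	else if (copy_to_user(buf, temp, sz))
		rc = -EFAULT;

	kfree(temp);
	return rc;
}
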
39084diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39085index 9df78e2..01ba9ae 100644
39086--- a/drivers/char/nvram.c
39087+++ b/drivers/char/nvram.c
39088@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39089
39090 spin_unlock_irq(&rtc_lock);
39091
39092- if (copy_to_user(buf, contents, tmp - contents))
39093+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39094 return -EFAULT;
39095
39096 *ppos = i;
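
The nvram read already bounds tmp by construction, so the added tmp - contents > sizeof(contents) test is defense in depth: if a future miscount ever walked tmp past the on-stack contents[] buffer, the unchecked copy_to_user() length would ship adjacent stack memory to userspace. Validating the derived length against the buffer's true size before copying closes that off. The guard in isolation:

#include <string.h>

static long copy_out(char *dst, const char *buf, const char *end,
		     size_t bufsz)
{
	size_t len = (size_t)(end - buf);

	if (len > bufsz)	/* never reveal bytes past the buffer */
		return -1;
	memcpy(dst, buf, len);	/* stand-in for copy_to_user */
	return (long)len;
}
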
39097diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39098index 0ea9986..e7b07e4 100644
39099--- a/drivers/char/pcmcia/synclink_cs.c
39100+++ b/drivers/char/pcmcia/synclink_cs.c
39101@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39102
39103 if (debug_level >= DEBUG_LEVEL_INFO)
39104 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39105- __FILE__, __LINE__, info->device_name, port->count);
39106+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39107
39108 if (tty_port_close_start(port, tty, filp) == 0)
39109 goto cleanup;
39110@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39111 cleanup:
39112 if (debug_level >= DEBUG_LEVEL_INFO)
39113 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39114- tty->driver->name, port->count);
39115+ tty->driver->name, atomic_read(&port->count));
39116 }
39117
39118 /* Wait until the transmitter is empty.
39119@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39120
39121 if (debug_level >= DEBUG_LEVEL_INFO)
39122 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39123- __FILE__, __LINE__, tty->driver->name, port->count);
39124+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39125
39126 /* If port is closing, signal caller to try again */
39127 if (port->flags & ASYNC_CLOSING){
39128@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39129 goto cleanup;
39130 }
39131 spin_lock(&port->lock);
39132- port->count++;
39133+ atomic_inc(&port->count);
39134 spin_unlock(&port->lock);
39135 spin_unlock_irqrestore(&info->netlock, flags);
39136
39137- if (port->count == 1) {
39138+ if (atomic_read(&port->count) == 1) {
39139 /* 1st open on this device, init hardware */
39140 retval = startup(info, tty);
39141 if (retval < 0)
39142@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39143 unsigned short new_crctype;
39144
39145 /* return error if TTY interface open */
39146- if (info->port.count)
39147+ if (atomic_read(&info->port.count))
39148 return -EBUSY;
39149
39150 switch (encoding)
39151@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39152
39153 /* arbitrate between network and tty opens */
39154 spin_lock_irqsave(&info->netlock, flags);
39155- if (info->port.count != 0 || info->netcount != 0) {
39156+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39157 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39158 spin_unlock_irqrestore(&info->netlock, flags);
39159 return -EBUSY;
39160@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39161 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39162
39163 /* return error if TTY interface open */
39164- if (info->port.count)
39165+ if (atomic_read(&info->port.count))
39166 return -EBUSY;
39167
39168 if (cmd != SIOCWANDEV)
39169diff --git a/drivers/char/random.c b/drivers/char/random.c
39170index 9cd6968..6416f00 100644
39171--- a/drivers/char/random.c
39172+++ b/drivers/char/random.c
39173@@ -289,9 +289,6 @@
39174 /*
39175 * To allow fractional bits to be tracked, the entropy_count field is
39176 * denominated in units of 1/8th bits.
39177- *
39178- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39179- * credit_entropy_bits() needs to be 64 bits wide.
39180 */
39181 #define ENTROPY_SHIFT 3
39182 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39183@@ -439,9 +436,9 @@ struct entropy_store {
39184 };
39185
39186 static void push_to_pool(struct work_struct *work);
39187-static __u32 input_pool_data[INPUT_POOL_WORDS];
39188-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39189-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39190+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39191+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39192+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39193
39194 static struct entropy_store input_pool = {
39195 .poolinfo = &poolinfo_table[0],
39196@@ -635,7 +632,7 @@ retry:
39197 /* The +2 corresponds to the /4 in the denominator */
39198
39199 do {
39200- unsigned int anfrac = min(pnfrac, pool_size/2);
39201+ u64 anfrac = min(pnfrac, pool_size/2);
39202 unsigned int add =
39203 ((pool_size - entropy_count)*anfrac*3) >> s;
39204
39205@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39206
39207 extract_buf(r, tmp);
39208 i = min_t(int, nbytes, EXTRACT_SIZE);
39209- if (copy_to_user(buf, tmp, i)) {
39210+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39211 ret = -EFAULT;
39212 break;
39213 }
39214@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39215 static int proc_do_uuid(struct ctl_table *table, int write,
39216 void __user *buffer, size_t *lenp, loff_t *ppos)
39217 {
39218- struct ctl_table fake_table;
39219+ ctl_table_no_const fake_table;
39220 unsigned char buf[64], tmp_uuid[16], *uuid;
39221
39222 uuid = table->data;
39223@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39224 static int proc_do_entropy(struct ctl_table *table, int write,
39225 void __user *buffer, size_t *lenp, loff_t *ppos)
39226 {
39227- struct ctl_table fake_table;
39228+ ctl_table_no_const fake_table;
39229 int entropy_count;
39230
39231 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
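
The random.c hunk widens anfrac to u64, which makes the whole product in (pool_size - entropy_count) * anfrac * 3 a 64-bit multiply; the deleted comment documented the old constraint (2*(ENTROPY_SHIFT + log2(poolbits)) <= 31) that a 32-bit product imposed, and the widening lifts it. The failure mode in miniature (values chosen to overflow, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gap = 4096u << 3;	/* a pool gap in 1/8th-bit units */
	uint32_t frac32 = 1u << 19;
	uint64_t frac64 = frac32;

	printf("32-bit: %u\n", gap * frac32 * 3);	/* wraps to 0 */
	printf("64-bit: %llu\n",
	       (unsigned long long)(gap * frac64 * 3));	/* 51539607552 */
	return 0;
}
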
39232diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39233index e496dae..3db53b6 100644
39234--- a/drivers/char/sonypi.c
39235+++ b/drivers/char/sonypi.c
39236@@ -54,6 +54,7 @@
39237
39238 #include <asm/uaccess.h>
39239 #include <asm/io.h>
39240+#include <asm/local.h>
39241
39242 #include <linux/sonypi.h>
39243
39244@@ -490,7 +491,7 @@ static struct sonypi_device {
39245 spinlock_t fifo_lock;
39246 wait_queue_head_t fifo_proc_list;
39247 struct fasync_struct *fifo_async;
39248- int open_count;
39249+ local_t open_count;
39250 int model;
39251 struct input_dev *input_jog_dev;
39252 struct input_dev *input_key_dev;
39253@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39254 static int sonypi_misc_release(struct inode *inode, struct file *file)
39255 {
39256 mutex_lock(&sonypi_device.lock);
39257- sonypi_device.open_count--;
39258+ local_dec(&sonypi_device.open_count);
39259 mutex_unlock(&sonypi_device.lock);
39260 return 0;
39261 }
39262@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39263 {
39264 mutex_lock(&sonypi_device.lock);
39265 /* Flush input queue on first open */
39266- if (!sonypi_device.open_count)
39267+ if (!local_read(&sonypi_device.open_count))
39268 kfifo_reset(&sonypi_device.fifo);
39269- sonypi_device.open_count++;
39270+ local_inc(&sonypi_device.open_count);
39271 mutex_unlock(&sonypi_device.lock);
39272
39273 return 0;
39274@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39275
39276 static struct platform_device *sonypi_platform_device;
39277
39278-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39279+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39280 {
39281 .ident = "Sony Vaio",
39282 .matches = {
39283diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39284index 565a947..dcdc06e 100644
39285--- a/drivers/char/tpm/tpm_acpi.c
39286+++ b/drivers/char/tpm/tpm_acpi.c
39287@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39288 virt = acpi_os_map_iomem(start, len);
39289 if (!virt) {
39290 kfree(log->bios_event_log);
39291+ log->bios_event_log = NULL;
39292 printk("%s: ERROR - Unable to map memory\n", __func__);
39293 return -EIO;
39294 }
39295
39296- memcpy_fromio(log->bios_event_log, virt, len);
39297+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39298
39299 acpi_os_unmap_iomem(virt, len);
39300 return 0;
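
read_log() already freed log->bios_event_log on the acpi_os_map_iomem() failure path; the fix additionally NULLs the pointer so callers that later clean up or retry cannot double-free or dereference the stale value. Since kfree(NULL) is a no-op, nulling after free makes teardown idempotent; the user-space analog:

#include <stdlib.h>

struct bios_log { void *event_log; };

static void drop_log(struct bios_log *log)
{
	free(log->event_log);
	log->event_log = NULL;	/* a later free() or check is now harmless */
}

(The __force_kernel cast on the memcpy_fromio() source is the sparse-annotation counterpart of the __force_user casts elsewhere in this patch.)
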
39301diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39302index 3a56a13..f8cbd25 100644
39303--- a/drivers/char/tpm/tpm_eventlog.c
39304+++ b/drivers/char/tpm/tpm_eventlog.c
39305@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39306 event = addr;
39307
39308 if ((event->event_type == 0 && event->event_size == 0) ||
39309- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39310+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39311 return NULL;
39312
39313 return addr;
39314@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39315 return NULL;
39316
39317 if ((event->event_type == 0 && event->event_size == 0) ||
39318- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39319+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39320 return NULL;
39321
39322 (*pos)++;
39323@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39324 int i;
39325
39326 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39327- seq_putc(m, data[i]);
39328+ if (!seq_putc(m, data[i]))
39329+ return -EFAULT;
39330
39331 return 0;
39332 }
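
The tpm_eventlog bounds checks are rewritten to dodge pointer-arithmetic overflow: with an attacker-influenced event->event_size, addr + sizeof(struct tcpa_event) + event->event_size can wrap past the top of the address space, land numerically below limit, and let the old >= limit test pass. Moving the variable term to the left-hand side (event_size >= limit - addr - sizeof(...)) keeps every intermediate value in range, since addr is already known to be below limit at this point. An integer model of the bypass:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t addr = UINTPTR_MAX - 64, limit = UINTPTR_MAX - 16;
	uintptr_t hdr = 32, evsz = 1024;	/* hostile size */

	int old_rejects = (addr + hdr + evsz >= limit);	/* wraps: 0 */
	int new_rejects = (evsz >= limit - addr - hdr);	/* 1024>=16: 1 */

	printf("old check rejects: %d, new check rejects: %d\n",
	       old_rejects, new_rejects);
	return 0;
}
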
39333diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39334index 72d7028..1586601 100644
39335--- a/drivers/char/virtio_console.c
39336+++ b/drivers/char/virtio_console.c
39337@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39338 if (to_user) {
39339 ssize_t ret;
39340
39341- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39342+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39343 if (ret)
39344 return -EFAULT;
39345 } else {
39346@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39347 if (!port_has_data(port) && !port->host_connected)
39348 return 0;
39349
39350- return fill_readbuf(port, ubuf, count, true);
39351+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39352 }
39353
39354 static int wait_port_writable(struct port *port, bool nonblock)
39355diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39356index 956b7e5..b655045 100644
39357--- a/drivers/clk/clk-composite.c
39358+++ b/drivers/clk/clk-composite.c
39359@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39360 struct clk *clk;
39361 struct clk_init_data init;
39362 struct clk_composite *composite;
39363- struct clk_ops *clk_composite_ops;
39364+ clk_ops_no_const *clk_composite_ops;
39365
39366 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39367 if (!composite) {
39368diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39369index dd3a78c..386d49c 100644
39370--- a/drivers/clk/socfpga/clk-gate.c
39371+++ b/drivers/clk/socfpga/clk-gate.c
39372@@ -22,6 +22,7 @@
39373 #include <linux/mfd/syscon.h>
39374 #include <linux/of.h>
39375 #include <linux/regmap.h>
39376+#include <asm/pgtable.h>
39377
39378 #include "clk.h"
39379
39380@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39381 return 0;
39382 }
39383
39384-static struct clk_ops gateclk_ops = {
39385+static clk_ops_no_const gateclk_ops __read_only = {
39386 .prepare = socfpga_clk_prepare,
39387 .recalc_rate = socfpga_clk_recalc_rate,
39388 .get_parent = socfpga_clk_get_parent,
39389@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39390 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39391 socfpga_clk->hw.bit_idx = clk_gate[1];
39392
39393- gateclk_ops.enable = clk_gate_ops.enable;
39394- gateclk_ops.disable = clk_gate_ops.disable;
39395+ pax_open_kernel();
39396+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39397+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39398+ pax_close_kernel();
39399 }
39400
39401 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39402diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39403index de6da95..c98278b 100644
39404--- a/drivers/clk/socfpga/clk-pll.c
39405+++ b/drivers/clk/socfpga/clk-pll.c
39406@@ -21,6 +21,7 @@
39407 #include <linux/io.h>
39408 #include <linux/of.h>
39409 #include <linux/of_address.h>
39410+#include <asm/pgtable.h>
39411
39412 #include "clk.h"
39413
39414@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39415 CLK_MGR_PLL_CLK_SRC_MASK;
39416 }
39417
39418-static struct clk_ops clk_pll_ops = {
39419+static clk_ops_no_const clk_pll_ops __read_only = {
39420 .recalc_rate = clk_pll_recalc_rate,
39421 .get_parent = clk_pll_get_parent,
39422 };
39423@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39424 pll_clk->hw.hw.init = &init;
39425
39426 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39427- clk_pll_ops.enable = clk_gate_ops.enable;
39428- clk_pll_ops.disable = clk_gate_ops.disable;
39429+ pax_open_kernel();
39430+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39431+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39432+ pax_close_kernel();
39433
39434 clk = clk_register(NULL, &pll_clk->hw.hw);
39435 if (WARN_ON(IS_ERR(clk))) {
39436diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39437index b0c18ed..1713a80 100644
39438--- a/drivers/cpufreq/acpi-cpufreq.c
39439+++ b/drivers/cpufreq/acpi-cpufreq.c
39440@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39441 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39442 per_cpu(acfreq_data, cpu) = data;
39443
39444- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39445- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39446+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39447+ pax_open_kernel();
39448+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39449+ pax_close_kernel();
39450+ }
39451
39452 result = acpi_processor_register_performance(data->acpi_data, cpu);
39453 if (result)
39454@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39455 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39456 break;
39457 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39458- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39459+ pax_open_kernel();
39460+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39461+ pax_close_kernel();
39462 break;
39463 default:
39464 break;
39465@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39466 if (!msrs)
39467 return;
39468
39469- acpi_cpufreq_driver.boost_supported = true;
39470- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39471+ pax_open_kernel();
39472+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39473+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39474+ pax_close_kernel();
39475
39476 cpu_notifier_register_begin();
39477
39478diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39479index bab67db..91af7e3 100644
39480--- a/drivers/cpufreq/cpufreq-dt.c
39481+++ b/drivers/cpufreq/cpufreq-dt.c
39482@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39483 if (!IS_ERR(cpu_reg))
39484 regulator_put(cpu_reg);
39485
39486- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39487+ pax_open_kernel();
39488+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39489+ pax_close_kernel();
39490
39491 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39492 if (ret)
39493diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39494index 8ae655c..3141442 100644
39495--- a/drivers/cpufreq/cpufreq.c
39496+++ b/drivers/cpufreq/cpufreq.c
39497@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39498 }
39499
39500 mutex_lock(&cpufreq_governor_mutex);
39501- list_del(&governor->governor_list);
39502+ pax_list_del(&governor->governor_list);
39503 mutex_unlock(&cpufreq_governor_mutex);
39504 return;
39505 }
39506@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39507 return NOTIFY_OK;
39508 }
39509
39510-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39511+static struct notifier_block cpufreq_cpu_notifier = {
39512 .notifier_call = cpufreq_cpu_callback,
39513 };
39514
39515@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39516 return 0;
39517
39518 write_lock_irqsave(&cpufreq_driver_lock, flags);
39519- cpufreq_driver->boost_enabled = state;
39520+ pax_open_kernel();
39521+ *(bool *)&cpufreq_driver->boost_enabled = state;
39522+ pax_close_kernel();
39523 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39524
39525 ret = cpufreq_driver->set_boost(state);
39526 if (ret) {
39527 write_lock_irqsave(&cpufreq_driver_lock, flags);
39528- cpufreq_driver->boost_enabled = !state;
39529+ pax_open_kernel();
39530+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39531+ pax_close_kernel();
39532 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39533
39534 pr_err("%s: Cannot %s BOOST\n",
39535@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39536 cpufreq_driver = driver_data;
39537 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39538
39539- if (driver_data->setpolicy)
39540- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39541+ if (driver_data->setpolicy) {
39542+ pax_open_kernel();
39543+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39544+ pax_close_kernel();
39545+ }
39546
39547 if (cpufreq_boost_supported()) {
39548 /*
39549 * Check if driver provides function to enable boost -
39550 * if not, use cpufreq_boost_set_sw as default
39551 */
39552- if (!cpufreq_driver->set_boost)
39553- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39554+ if (!cpufreq_driver->set_boost) {
39555+ pax_open_kernel();
39556+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39557+ pax_close_kernel();
39558+ }
39559
39560 ret = cpufreq_sysfs_create_file(&boost.attr);
39561 if (ret) {
39562diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39563index 1b44496..b80ff5e 100644
39564--- a/drivers/cpufreq/cpufreq_governor.c
39565+++ b/drivers/cpufreq/cpufreq_governor.c
39566@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39567 struct dbs_data *dbs_data;
39568 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39569 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39570- struct od_ops *od_ops = NULL;
39571+ const struct od_ops *od_ops = NULL;
39572 struct od_dbs_tuners *od_tuners = NULL;
39573 struct cs_dbs_tuners *cs_tuners = NULL;
39574 struct cpu_dbs_common_info *cpu_cdbs;
39575@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39576
39577 if ((cdata->governor == GOV_CONSERVATIVE) &&
39578 (!policy->governor->initialized)) {
39579- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39580+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39581
39582 cpufreq_register_notifier(cs_ops->notifier_block,
39583 CPUFREQ_TRANSITION_NOTIFIER);
39584@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39585
39586 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39587 (policy->governor->initialized == 1)) {
39588- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39589+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39590
39591 cpufreq_unregister_notifier(cs_ops->notifier_block,
39592 CPUFREQ_TRANSITION_NOTIFIER);
39593diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39594index cc401d1..8197340 100644
39595--- a/drivers/cpufreq/cpufreq_governor.h
39596+++ b/drivers/cpufreq/cpufreq_governor.h
39597@@ -212,7 +212,7 @@ struct common_dbs_data {
39598 void (*exit)(struct dbs_data *dbs_data);
39599
39600 /* Governor specific ops, see below */
39601- void *gov_ops;
39602+ const void *gov_ops;
39603 };
39604
39605 /* Governor Per policy data */
39606@@ -232,7 +232,7 @@ struct od_ops {
39607 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39608 unsigned int freq_next, unsigned int relation);
39609 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39610-};
39611+} __no_const;
39612
39613 struct cs_ops {
39614 struct notifier_block *notifier_block;
39615diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39616index ad3f38f..8f086cd 100644
39617--- a/drivers/cpufreq/cpufreq_ondemand.c
39618+++ b/drivers/cpufreq/cpufreq_ondemand.c
39619@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39620
39621 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39622
39623-static struct od_ops od_ops = {
39624+static struct od_ops od_ops __read_only = {
39625 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39626 .powersave_bias_target = generic_powersave_bias_target,
39627 .freq_increase = dbs_freq_increase,
39628@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39629 (struct cpufreq_policy *, unsigned int, unsigned int),
39630 unsigned int powersave_bias)
39631 {
39632- od_ops.powersave_bias_target = f;
39633+ pax_open_kernel();
39634+ *(void **)&od_ops.powersave_bias_target = f;
39635+ pax_close_kernel();
39636 od_set_powersave_bias(powersave_bias);
39637 }
39638 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39639
39640 void od_unregister_powersave_bias_handler(void)
39641 {
39642- od_ops.powersave_bias_target = generic_powersave_bias_target;
39643+ pax_open_kernel();
39644+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39645+ pax_close_kernel();
39646 od_set_powersave_bias(0);
39647 }
39648 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39649diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39650index 872c577..5fb3c20 100644
39651--- a/drivers/cpufreq/intel_pstate.c
39652+++ b/drivers/cpufreq/intel_pstate.c
39653@@ -133,10 +133,10 @@ struct pstate_funcs {
39654 struct cpu_defaults {
39655 struct pstate_adjust_policy pid_policy;
39656 struct pstate_funcs funcs;
39657-};
39658+} __do_const;
39659
39660 static struct pstate_adjust_policy pid_params;
39661-static struct pstate_funcs pstate_funcs;
39662+static struct pstate_funcs *pstate_funcs;
39663 static int hwp_active;
39664
39665 struct perf_limits {
39666@@ -690,18 +690,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39667
39668 cpu->pstate.current_pstate = pstate;
39669
39670- pstate_funcs.set(cpu, pstate);
39671+ pstate_funcs->set(cpu, pstate);
39672 }
39673
39674 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39675 {
39676- cpu->pstate.min_pstate = pstate_funcs.get_min();
39677- cpu->pstate.max_pstate = pstate_funcs.get_max();
39678- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39679- cpu->pstate.scaling = pstate_funcs.get_scaling();
39680+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39681+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39682+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39683+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39684
39685- if (pstate_funcs.get_vid)
39686- pstate_funcs.get_vid(cpu);
39687+ if (pstate_funcs->get_vid)
39688+ pstate_funcs->get_vid(cpu);
39689 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39690 }
39691
39692@@ -1030,9 +1030,9 @@ static int intel_pstate_msrs_not_valid(void)
39693 rdmsrl(MSR_IA32_APERF, aperf);
39694 rdmsrl(MSR_IA32_MPERF, mperf);
39695
39696- if (!pstate_funcs.get_max() ||
39697- !pstate_funcs.get_min() ||
39698- !pstate_funcs.get_turbo())
39699+ if (!pstate_funcs->get_max() ||
39700+ !pstate_funcs->get_min() ||
39701+ !pstate_funcs->get_turbo())
39702 return -ENODEV;
39703
39704 rdmsrl(MSR_IA32_APERF, tmp);
39705@@ -1046,7 +1046,7 @@ static int intel_pstate_msrs_not_valid(void)
39706 return 0;
39707 }
39708
39709-static void copy_pid_params(struct pstate_adjust_policy *policy)
39710+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39711 {
39712 pid_params.sample_rate_ms = policy->sample_rate_ms;
39713 pid_params.p_gain_pct = policy->p_gain_pct;
39714@@ -1058,12 +1058,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39715
39716 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39717 {
39718- pstate_funcs.get_max = funcs->get_max;
39719- pstate_funcs.get_min = funcs->get_min;
39720- pstate_funcs.get_turbo = funcs->get_turbo;
39721- pstate_funcs.get_scaling = funcs->get_scaling;
39722- pstate_funcs.set = funcs->set;
39723- pstate_funcs.get_vid = funcs->get_vid;
39724+ pstate_funcs = funcs;
39725 }
39726
39727 #if IS_ENABLED(CONFIG_ACPI)
39728diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39729index 529cfd9..0e28fff 100644
39730--- a/drivers/cpufreq/p4-clockmod.c
39731+++ b/drivers/cpufreq/p4-clockmod.c
39732@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39733 case 0x0F: /* Core Duo */
39734 case 0x16: /* Celeron Core */
39735 case 0x1C: /* Atom */
39736- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39737+ pax_open_kernel();
39738+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39739+ pax_close_kernel();
39740 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39741 case 0x0D: /* Pentium M (Dothan) */
39742- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39743+ pax_open_kernel();
39744+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39745+ pax_close_kernel();
39746 /* fall through */
39747 case 0x09: /* Pentium M (Banias) */
39748 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39749@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39750
39751 /* on P-4s, the TSC runs with constant frequency independent whether
39752 * throttling is active or not. */
39753- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39754+ pax_open_kernel();
39755+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39756+ pax_close_kernel();
39757
39758 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39759 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39760diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39761index 9bb42ba..b01b4a2 100644
39762--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39763+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39764@@ -18,14 +18,12 @@
39765 #include <asm/head.h>
39766 #include <asm/timer.h>
39767
39768-static struct cpufreq_driver *cpufreq_us3_driver;
39769-
39770 struct us3_freq_percpu_info {
39771 struct cpufreq_frequency_table table[4];
39772 };
39773
39774 /* Indexed by cpu number. */
39775-static struct us3_freq_percpu_info *us3_freq_table;
39776+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39777
39778 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39779 * in the Safari config register.
39780@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39781
39782 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39783 {
39784- if (cpufreq_us3_driver)
39785- us3_freq_target(policy, 0);
39786+ us3_freq_target(policy, 0);
39787
39788 return 0;
39789 }
39790
39791+static int __init us3_freq_init(void);
39792+static void __exit us3_freq_exit(void);
39793+
39794+static struct cpufreq_driver cpufreq_us3_driver = {
39795+ .init = us3_freq_cpu_init,
39796+ .verify = cpufreq_generic_frequency_table_verify,
39797+ .target_index = us3_freq_target,
39798+ .get = us3_freq_get,
39799+ .exit = us3_freq_cpu_exit,
39800+ .name = "UltraSPARC-III",
39801+
39802+};
39803+
39804 static int __init us3_freq_init(void)
39805 {
39806 unsigned long manuf, impl, ver;
39807- int ret;
39808
39809 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39810 return -ENODEV;
39811@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39812 (impl == CHEETAH_IMPL ||
39813 impl == CHEETAH_PLUS_IMPL ||
39814 impl == JAGUAR_IMPL ||
39815- impl == PANTHER_IMPL)) {
39816- struct cpufreq_driver *driver;
39817-
39818- ret = -ENOMEM;
39819- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39820- if (!driver)
39821- goto err_out;
39822-
39823- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39824- GFP_KERNEL);
39825- if (!us3_freq_table)
39826- goto err_out;
39827-
39828- driver->init = us3_freq_cpu_init;
39829- driver->verify = cpufreq_generic_frequency_table_verify;
39830- driver->target_index = us3_freq_target;
39831- driver->get = us3_freq_get;
39832- driver->exit = us3_freq_cpu_exit;
39833- strcpy(driver->name, "UltraSPARC-III");
39834-
39835- cpufreq_us3_driver = driver;
39836- ret = cpufreq_register_driver(driver);
39837- if (ret)
39838- goto err_out;
39839-
39840- return 0;
39841-
39842-err_out:
39843- if (driver) {
39844- kfree(driver);
39845- cpufreq_us3_driver = NULL;
39846- }
39847- kfree(us3_freq_table);
39848- us3_freq_table = NULL;
39849- return ret;
39850- }
39851+ impl == PANTHER_IMPL))
39852+ return cpufreq_register_driver(&cpufreq_us3_driver);
39853
39854 return -ENODEV;
39855 }
39856
39857 static void __exit us3_freq_exit(void)
39858 {
39859- if (cpufreq_us3_driver) {
39860- cpufreq_unregister_driver(cpufreq_us3_driver);
39861- kfree(cpufreq_us3_driver);
39862- cpufreq_us3_driver = NULL;
39863- kfree(us3_freq_table);
39864- us3_freq_table = NULL;
39865- }
39866+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39867 }
39868
39869 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
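
The sparc-us3 rework replaces a runtime-kzalloc'ed cpufreq_driver whose hooks were assigned one by one with a single static definition, plus a fixed NR_CPUS-sized per-cpu table instead of a kmalloc'ed one. That removes the error-prone allocate/fill/unwind dance in us3_freq_init() and gives the constify machinery a concrete object it can place in read-only memory, at the cost of a fixed-size table. Shape of the change (fields illustrative):

struct freq_driver {
	int (*init)(void);
	const char *name;
};

static int us3_init(void) { return 0; }

/* one static, fully-initialized object instead of
 * drv = kzalloc(...); drv->init = ...; strcpy(drv->name, ...); */
static struct freq_driver cpufreq_us3 = {
	.init = us3_init,
	.name = "UltraSPARC-III",
};
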
39870diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39871index 7d4a315..21bb886 100644
39872--- a/drivers/cpufreq/speedstep-centrino.c
39873+++ b/drivers/cpufreq/speedstep-centrino.c
39874@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39875 !cpu_has(cpu, X86_FEATURE_EST))
39876 return -ENODEV;
39877
39878- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39879- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39880+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39881+ pax_open_kernel();
39882+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39883+ pax_close_kernel();
39884+ }
39885
39886 if (policy->cpu != 0)
39887 return -ENODEV;
39888diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39889index 2697e87..c32476c 100644
39890--- a/drivers/cpuidle/driver.c
39891+++ b/drivers/cpuidle/driver.c
39892@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39893
39894 static void poll_idle_init(struct cpuidle_driver *drv)
39895 {
39896- struct cpuidle_state *state = &drv->states[0];
39897+ cpuidle_state_no_const *state = &drv->states[0];
39898
39899 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39900 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39901diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39902index fb9f511..213e6cc 100644
39903--- a/drivers/cpuidle/governor.c
39904+++ b/drivers/cpuidle/governor.c
39905@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39906 mutex_lock(&cpuidle_lock);
39907 if (__cpuidle_find_governor(gov->name) == NULL) {
39908 ret = 0;
39909- list_add_tail(&gov->governor_list, &cpuidle_governors);
39910+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39911 if (!cpuidle_curr_governor ||
39912 cpuidle_curr_governor->rating < gov->rating)
39913 cpuidle_switch_governor(gov);
39914diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39915index 832a2c3..1794080 100644
39916--- a/drivers/cpuidle/sysfs.c
39917+++ b/drivers/cpuidle/sysfs.c
39918@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39919 NULL
39920 };
39921
39922-static struct attribute_group cpuidle_attr_group = {
39923+static attribute_group_no_const cpuidle_attr_group = {
39924 .attrs = cpuidle_default_attrs,
39925 .name = "cpuidle",
39926 };
39927diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39928index 8d2a772..33826c9 100644
39929--- a/drivers/crypto/hifn_795x.c
39930+++ b/drivers/crypto/hifn_795x.c
39931@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39932 MODULE_PARM_DESC(hifn_pll_ref,
39933 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39934
39935-static atomic_t hifn_dev_number;
39936+static atomic_unchecked_t hifn_dev_number;
39937
39938 #define ACRYPTO_OP_DECRYPT 0
39939 #define ACRYPTO_OP_ENCRYPT 1
39940@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39941 goto err_out_disable_pci_device;
39942
39943 snprintf(name, sizeof(name), "hifn%d",
39944- atomic_inc_return(&hifn_dev_number)-1);
39945+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39946
39947 err = pci_request_regions(pdev, name);
39948 if (err)
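hifn_dev_number only mints device names, so wrapping it is harmless; atomic_unchecked_t opts such counters out of PaX's REFCOUNT overflow trap, keeping the checked atomic_t for true reference counts. A short sketch of the split (the *_unchecked API is a PaX addition):

#include <linux/atomic.h>

/* Checked: an overflow here would indicate a refcount bug and traps. */
static atomic_t demo_refs = ATOMIC_INIT(0);

/* Unchecked: a plain ID counter where wraparound is benign. */
static atomic_unchecked_t demo_ids = ATOMIC_INIT(0);

static int demo_next_id(void)
{
	return atomic_inc_return_unchecked(&demo_ids) - 1;
}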
39949diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39950index 30b538d8..1610d75 100644
39951--- a/drivers/devfreq/devfreq.c
39952+++ b/drivers/devfreq/devfreq.c
39953@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39954 goto err_out;
39955 }
39956
39957- list_add(&governor->node, &devfreq_governor_list);
39958+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39959
39960 list_for_each_entry(devfreq, &devfreq_list, node) {
39961 int ret = 0;
39962@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39963 }
39964 }
39965
39966- list_del(&governor->node);
39967+ pax_list_del((struct list_head *)&governor->node);
39968 err_out:
39969 mutex_unlock(&devfreq_list_lock);
39970
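Because governor objects are constified, their embedded list_head can no longer be spliced with plain list_add(); pax_list_add()/pax_list_del() perform the identical pointer surgery inside an open-kernel window. A sketch, assuming the PaX list helpers and an illustrative governor type:

#include <linux/list.h>

struct demo_governor {
	const char *name;
	struct list_head node;	/* embedded in a read-only object */
};

static LIST_HEAD(demo_governor_list);

static void demo_add_governor(struct demo_governor *gov)
{
	/* Same semantics as list_add(); the cast drops the implied const
	 * and the helper does its writes with protection lifted. */
	pax_list_add((struct list_head *)&gov->node, &demo_governor_list);
}

static void demo_del_governor(struct demo_governor *gov)
{
	pax_list_del((struct list_head *)&gov->node);
}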
39971diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39972index 8ee383d..736b5de 100644
39973--- a/drivers/dma/sh/shdma-base.c
39974+++ b/drivers/dma/sh/shdma-base.c
39975@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39976 schan->slave_id = -EINVAL;
39977 }
39978
39979- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39980- sdev->desc_size, GFP_KERNEL);
39981+ schan->desc = kcalloc(sdev->desc_size,
39982+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39983 if (!schan->desc) {
39984 ret = -ENOMEM;
39985 goto edescalloc;
39986diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39987index 9f1d4c7..fceff78 100644
39988--- a/drivers/dma/sh/shdmac.c
39989+++ b/drivers/dma/sh/shdmac.c
39990@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39991 return ret;
39992 }
39993
39994-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39995+static struct notifier_block sh_dmae_nmi_notifier = {
39996 .notifier_call = sh_dmae_nmi_handler,
39997
39998 /* Run before NMI debug handler and KGDB */
39999diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40000index 592af5f..bb1d583 100644
40001--- a/drivers/edac/edac_device.c
40002+++ b/drivers/edac/edac_device.c
40003@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40004 */
40005 int edac_device_alloc_index(void)
40006 {
40007- static atomic_t device_indexes = ATOMIC_INIT(0);
40008+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40009
40010- return atomic_inc_return(&device_indexes) - 1;
40011+ return atomic_inc_return_unchecked(&device_indexes) - 1;
40012 }
40013 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40014
40015diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40016index c84eecb..4d7381d 100644
40017--- a/drivers/edac/edac_mc_sysfs.c
40018+++ b/drivers/edac/edac_mc_sysfs.c
40019@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40020 struct dev_ch_attribute {
40021 struct device_attribute attr;
40022 int channel;
40023-};
40024+} __do_const;
40025
40026 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40027 static struct dev_ch_attribute dev_attr_legacy_##_name = \
40028@@ -1009,15 +1009,17 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
40029 }
40030
40031 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
40032+ pax_open_kernel();
40033 if (mci->get_sdram_scrub_rate) {
40034- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40035- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40036+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40037+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40038 }
40039
40040 if (mci->set_sdram_scrub_rate) {
40041- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40042- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40043+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40044+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40045 }
40046+ pax_close_kernel();
40047
40048 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
40049 if (err) {
40050diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40051index 2cf44b4d..6dd2dc7 100644
40052--- a/drivers/edac/edac_pci.c
40053+++ b/drivers/edac/edac_pci.c
40054@@ -29,7 +29,7 @@
40055
40056 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40057 static LIST_HEAD(edac_pci_list);
40058-static atomic_t pci_indexes = ATOMIC_INIT(0);
40059+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40060
40061 /*
40062 * edac_pci_alloc_ctl_info
40063@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40064 */
40065 int edac_pci_alloc_index(void)
40066 {
40067- return atomic_inc_return(&pci_indexes) - 1;
40068+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40069 }
40070 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40071
40072diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40073index 24d877f..4e30133 100644
40074--- a/drivers/edac/edac_pci_sysfs.c
40075+++ b/drivers/edac/edac_pci_sysfs.c
40076@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40077 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40078 static int edac_pci_poll_msec = 1000; /* one second workq period */
40079
40080-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40081-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40082+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40083+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40084
40085 static struct kobject *edac_pci_top_main_kobj;
40086 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40087@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40088 void *value;
40089 ssize_t(*show) (void *, char *);
40090 ssize_t(*store) (void *, const char *, size_t);
40091-};
40092+} __do_const;
40093
40094 /* Set of show/store abstract level functions for PCI Parity object */
40095 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40096@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40097 edac_printk(KERN_CRIT, EDAC_PCI,
40098 "Signaled System Error on %s\n",
40099 pci_name(dev));
40100- atomic_inc(&pci_nonparity_count);
40101+ atomic_inc_unchecked(&pci_nonparity_count);
40102 }
40103
40104 if (status & (PCI_STATUS_PARITY)) {
40105@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40106 "Master Data Parity Error on %s\n",
40107 pci_name(dev));
40108
40109- atomic_inc(&pci_parity_count);
40110+ atomic_inc_unchecked(&pci_parity_count);
40111 }
40112
40113 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40114@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40115 "Detected Parity Error on %s\n",
40116 pci_name(dev));
40117
40118- atomic_inc(&pci_parity_count);
40119+ atomic_inc_unchecked(&pci_parity_count);
40120 }
40121 }
40122
40123@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40124 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40125 "Signaled System Error on %s\n",
40126 pci_name(dev));
40127- atomic_inc(&pci_nonparity_count);
40128+ atomic_inc_unchecked(&pci_nonparity_count);
40129 }
40130
40131 if (status & (PCI_STATUS_PARITY)) {
40132@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40133 "Master Data Parity Error on "
40134 "%s\n", pci_name(dev));
40135
40136- atomic_inc(&pci_parity_count);
40137+ atomic_inc_unchecked(&pci_parity_count);
40138 }
40139
40140 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40141@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40142 "Detected Parity Error on %s\n",
40143 pci_name(dev));
40144
40145- atomic_inc(&pci_parity_count);
40146+ atomic_inc_unchecked(&pci_parity_count);
40147 }
40148 }
40149 }
40150@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40151 if (!check_pci_errors)
40152 return;
40153
40154- before_count = atomic_read(&pci_parity_count);
40155+ before_count = atomic_read_unchecked(&pci_parity_count);
40156
40157 /* scan all PCI devices looking for a Parity Error on devices and
40158 * bridges.
40159@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40160 /* Only if operator has selected panic on PCI Error */
40161 if (edac_pci_get_panic_on_pe()) {
40162 /* If the count is different 'after' from 'before' */
40163- if (before_count != atomic_read(&pci_parity_count))
40164+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40165 panic("EDAC: PCI Parity Error");
40166 }
40167 }
40168diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40169index c2359a1..8bd119d 100644
40170--- a/drivers/edac/mce_amd.h
40171+++ b/drivers/edac/mce_amd.h
40172@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40173 bool (*mc0_mce)(u16, u8);
40174 bool (*mc1_mce)(u16, u8);
40175 bool (*mc2_mce)(u16, u8);
40176-};
40177+} __no_const;
40178
40179 void amd_report_gart_errors(bool);
40180 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
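The two annotations in these hunks steer the constify gcc plugin: __do_const forces a structure that embeds a device_attribute plus plain data into read-only memory even though it is not declared const, while __no_const exempts an ops table whose members genuinely must be written at runtime. A hedged sketch (both attributes come from the PaX plugins and expand to nothing on a vanilla compiler):

#include <linux/device.h>
#include <linux/types.h>

/* Constified: every instance lands in read-only data. */
struct demo_dev_attr {
	struct device_attribute attr;
	int channel;
} __do_const;

/* Exempted: these callbacks are selected per CPU family at boot,
 * so the plugin must leave the structure writable. */
struct demo_decoder_ops {
	bool (*mc0_mce)(u16, u8);
	bool (*mc1_mce)(u16, u8);
} __no_const;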
40181diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40182index 57ea7f4..af06b76 100644
40183--- a/drivers/firewire/core-card.c
40184+++ b/drivers/firewire/core-card.c
40185@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40186 const struct fw_card_driver *driver,
40187 struct device *device)
40188 {
40189- static atomic_t index = ATOMIC_INIT(-1);
40190+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40191
40192- card->index = atomic_inc_return(&index);
40193+ card->index = atomic_inc_return_unchecked(&index);
40194 card->driver = driver;
40195 card->device = device;
40196 card->current_tlabel = 0;
40197@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40198
40199 void fw_core_remove_card(struct fw_card *card)
40200 {
40201- struct fw_card_driver dummy_driver = dummy_driver_template;
40202+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40203
40204 card->driver->update_phy_reg(card, 4,
40205 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40206diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40207index f9e3aee..269dbdb 100644
40208--- a/drivers/firewire/core-device.c
40209+++ b/drivers/firewire/core-device.c
40210@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40211 struct config_rom_attribute {
40212 struct device_attribute attr;
40213 u32 key;
40214-};
40215+} __do_const;
40216
40217 static ssize_t show_immediate(struct device *dev,
40218 struct device_attribute *dattr, char *buf)
40219diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40220index d6a09b9..18e90dd 100644
40221--- a/drivers/firewire/core-transaction.c
40222+++ b/drivers/firewire/core-transaction.c
40223@@ -38,6 +38,7 @@
40224 #include <linux/timer.h>
40225 #include <linux/types.h>
40226 #include <linux/workqueue.h>
40227+#include <linux/sched.h>
40228
40229 #include <asm/byteorder.h>
40230
40231diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40232index e1480ff6..1a429bd 100644
40233--- a/drivers/firewire/core.h
40234+++ b/drivers/firewire/core.h
40235@@ -111,6 +111,7 @@ struct fw_card_driver {
40236
40237 int (*stop_iso)(struct fw_iso_context *ctx);
40238 };
40239+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40240
40241 void fw_card_initialize(struct fw_card *card,
40242 const struct fw_card_driver *driver, struct device *device);
40243diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40244index f51d376..b118e40 100644
40245--- a/drivers/firewire/ohci.c
40246+++ b/drivers/firewire/ohci.c
40247@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40248 be32_to_cpu(ohci->next_header));
40249 }
40250
40251+#ifndef CONFIG_GRKERNSEC
40252 if (param_remote_dma) {
40253 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40254 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40255 }
40256+#endif
40257
40258 spin_unlock_irq(&ohci->lock);
40259
40260@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40261 unsigned long flags;
40262 int n, ret = 0;
40263
40264+#ifndef CONFIG_GRKERNSEC
40265 if (param_remote_dma)
40266 return 0;
40267+#endif
40268
40269 /*
40270 * FIXME: Make sure this bitmask is cleared when we clear the busReset
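Rather than trusting a module parameter, grsecurity compiles the physical-request-filter setup out entirely: with those filters opened, any FireWire peer can DMA into arbitrary host memory. The guard pattern in sketch form; open_phys_filters() is a hypothetical stand-in for the OHCI1394_PhyReqFilter{Hi,Lo}Set register writes:

#include <linux/moduleparam.h>
#include <linux/types.h>

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);

static void open_phys_filters(void)
{
	/* hypothetical: stands in for the real filter register writes */
}

static void demo_bus_reset(void)
{
#ifndef CONFIG_GRKERNSEC
	/* Dead code under grsecurity: even root cannot re-enable
	 * remote DMA by flipping the parameter. */
	if (param_remote_dma)
		open_phys_filters();
#endif
}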
40271diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40272index 94a58a0..f5eba42 100644
40273--- a/drivers/firmware/dmi-id.c
40274+++ b/drivers/firmware/dmi-id.c
40275@@ -16,7 +16,7 @@
40276 struct dmi_device_attribute{
40277 struct device_attribute dev_attr;
40278 int field;
40279-};
40280+} __do_const;
40281 #define to_dmi_dev_attr(_dev_attr) \
40282 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40283
40284diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40285index 2eebd28b..4261350 100644
40286--- a/drivers/firmware/dmi_scan.c
40287+++ b/drivers/firmware/dmi_scan.c
40288@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40289 if (buf == NULL)
40290 return -1;
40291
40292- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40293+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40294
40295 dmi_unmap(buf);
40296 return 0;
40297diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40298index 4fd9961..52d60ce 100644
40299--- a/drivers/firmware/efi/cper.c
40300+++ b/drivers/firmware/efi/cper.c
40301@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40302 */
40303 u64 cper_next_record_id(void)
40304 {
40305- static atomic64_t seq;
40306+ static atomic64_unchecked_t seq;
40307
40308- if (!atomic64_read(&seq))
40309- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40310+ if (!atomic64_read_unchecked(&seq))
40311+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40312
40313- return atomic64_inc_return(&seq);
40314+ return atomic64_inc_return_unchecked(&seq);
40315 }
40316 EXPORT_SYMBOL_GPL(cper_next_record_id);
40317
40318diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40319index 3061bb8..92b5fcc 100644
40320--- a/drivers/firmware/efi/efi.c
40321+++ b/drivers/firmware/efi/efi.c
40322@@ -160,14 +160,16 @@ static struct attribute_group efi_subsys_attr_group = {
40323 };
40324
40325 static struct efivars generic_efivars;
40326-static struct efivar_operations generic_ops;
40327+static efivar_operations_no_const generic_ops __read_only;
40328
40329 static int generic_ops_register(void)
40330 {
40331- generic_ops.get_variable = efi.get_variable;
40332- generic_ops.set_variable = efi.set_variable;
40333- generic_ops.get_next_variable = efi.get_next_variable;
40334- generic_ops.query_variable_store = efi_query_variable_store;
40335+ pax_open_kernel();
40336+ *(void **)&generic_ops.get_variable = efi.get_variable;
40337+ *(void **)&generic_ops.set_variable = efi.set_variable;
40338+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40339+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40340+ pax_close_kernel();
40341
40342 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40343 }
40344diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40345index 7b2e049..a253334 100644
40346--- a/drivers/firmware/efi/efivars.c
40347+++ b/drivers/firmware/efi/efivars.c
40348@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40349 static int
40350 create_efivars_bin_attributes(void)
40351 {
40352- struct bin_attribute *attr;
40353+ bin_attribute_no_const *attr;
40354 int error;
40355
40356 /* new_var */
40357diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40358index 87b8e3b..c4afb35 100644
40359--- a/drivers/firmware/efi/runtime-map.c
40360+++ b/drivers/firmware/efi/runtime-map.c
40361@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40362 kfree(entry);
40363 }
40364
40365-static struct kobj_type __refdata map_ktype = {
40366+static const struct kobj_type __refconst map_ktype = {
40367 .sysfs_ops = &map_attr_ops,
40368 .default_attrs = def_attrs,
40369 .release = map_release,
40370diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40371index f1ab05e..ab51228 100644
40372--- a/drivers/firmware/google/gsmi.c
40373+++ b/drivers/firmware/google/gsmi.c
40374@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40375 return local_hash_64(input, 32);
40376 }
40377
40378-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40379+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40380 {
40381 .ident = "Google Board",
40382 .matches = {
40383diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40384index 2f569aa..26e4f39 100644
40385--- a/drivers/firmware/google/memconsole.c
40386+++ b/drivers/firmware/google/memconsole.c
40387@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40388 return false;
40389 }
40390
40391-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40392+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40393 {
40394 .ident = "Google Board",
40395 .matches = {
40396@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40397 if (!found_memconsole())
40398 return -ENODEV;
40399
40400- memconsole_bin_attr.size = memconsole_length;
40401+ pax_open_kernel();
40402+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40403+ pax_close_kernel();
40404+
40405 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40406 }
40407
40408diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40409index cc016c61..d35279e 100644
40410--- a/drivers/firmware/memmap.c
40411+++ b/drivers/firmware/memmap.c
40412@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40413 kfree(entry);
40414 }
40415
40416-static struct kobj_type __refdata memmap_ktype = {
40417+static const struct kobj_type __refconst memmap_ktype = {
40418 .release = release_firmware_map_entry,
40419 .sysfs_ops = &memmap_attr_ops,
40420 .default_attrs = def_attrs,
40421diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40422index 3cfcfc6..09d6f117 100644
40423--- a/drivers/gpio/gpio-em.c
40424+++ b/drivers/gpio/gpio-em.c
40425@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40426 struct em_gio_priv *p;
40427 struct resource *io[2], *irq[2];
40428 struct gpio_chip *gpio_chip;
40429- struct irq_chip *irq_chip;
40430+ irq_chip_no_const *irq_chip;
40431 const char *name = dev_name(&pdev->dev);
40432 int ret;
40433
40434diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40435index 7818cd1..1be40e5 100644
40436--- a/drivers/gpio/gpio-ich.c
40437+++ b/drivers/gpio/gpio-ich.c
40438@@ -94,7 +94,7 @@ struct ichx_desc {
40439 * this option allows driver caching written output values
40440 */
40441 bool use_outlvl_cache;
40442-};
40443+} __do_const;
40444
40445 static struct {
40446 spinlock_t lock;
40447diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40448index f476ae2..05e1bdd 100644
40449--- a/drivers/gpio/gpio-omap.c
40450+++ b/drivers/gpio/gpio-omap.c
40451@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40452 const struct omap_gpio_platform_data *pdata;
40453 struct resource *res;
40454 struct gpio_bank *bank;
40455- struct irq_chip *irqc;
40456+ irq_chip_no_const *irqc;
40457 int ret;
40458
40459 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40460diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40461index c49522e..9a7ee54 100644
40462--- a/drivers/gpio/gpio-rcar.c
40463+++ b/drivers/gpio/gpio-rcar.c
40464@@ -348,7 +348,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40465 struct gpio_rcar_priv *p;
40466 struct resource *io, *irq;
40467 struct gpio_chip *gpio_chip;
40468- struct irq_chip *irq_chip;
40469+ irq_chip_no_const *irq_chip;
40470 struct device *dev = &pdev->dev;
40471 const char *name = dev_name(dev);
40472 int ret;
40473diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40474index c1caa45..f0f97d2 100644
40475--- a/drivers/gpio/gpio-vr41xx.c
40476+++ b/drivers/gpio/gpio-vr41xx.c
40477@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40478 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40479 maskl, pendl, maskh, pendh);
40480
40481- atomic_inc(&irq_err_count);
40482+ atomic_inc_unchecked(&irq_err_count);
40483
40484 return -EINVAL;
40485 }
40486diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40487index 1ca9295..9f3d481 100644
40488--- a/drivers/gpio/gpiolib.c
40489+++ b/drivers/gpio/gpiolib.c
40490@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40491 }
40492
40493 if (gpiochip->irqchip) {
40494- gpiochip->irqchip->irq_request_resources = NULL;
40495- gpiochip->irqchip->irq_release_resources = NULL;
40496+ pax_open_kernel();
40497+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40498+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40499+ pax_close_kernel();
40500 gpiochip->irqchip = NULL;
40501 }
40502 }
40503@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40504 gpiochip->irqchip = NULL;
40505 return -EINVAL;
40506 }
40507- irqchip->irq_request_resources = gpiochip_irq_reqres;
40508- irqchip->irq_release_resources = gpiochip_irq_relres;
40509+
40510+ pax_open_kernel();
40511+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40512+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40513+ pax_close_kernel();
40514
40515 /*
40516 * Prepare the mapping since the irqchip shall be orthogonal to
40517diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40518index 488f51d..301d462 100644
40519--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40520+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40521@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40522 enum cache_policy alternate_policy,
40523 void __user *alternate_aperture_base,
40524 uint64_t alternate_aperture_size);
40525-};
40526+} __no_const;
40527
40528 /**
40529 * struct device_queue_manager
40530diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40531index 5940531..a75b0e5 100644
40532--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40533+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40534@@ -62,7 +62,7 @@ struct kernel_queue_ops {
40535
40536 void (*submit_packet)(struct kernel_queue *kq);
40537 void (*rollback_packet)(struct kernel_queue *kq);
40538-};
40539+} __no_const;
40540
40541 struct kernel_queue {
40542 struct kernel_queue_ops ops;
40543diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
40544index 9b23525..09af26c 100644
40545--- a/drivers/gpu/drm/drm_context.c
40546+++ b/drivers/gpu/drm/drm_context.c
40547@@ -53,6 +53,9 @@ struct drm_ctx_list {
40548 */
40549 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
40550 {
40551+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40552+ return;
40553+
40554 mutex_lock(&dev->struct_mutex);
40555 idr_remove(&dev->ctx_idr, ctx_handle);
40556 mutex_unlock(&dev->struct_mutex);
40557@@ -87,6 +90,9 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
40558 */
40559 int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40560 {
40561+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40562+ return -EINVAL;
40563+
40564 idr_init(&dev->ctx_idr);
40565 return 0;
40566 }
40567@@ -101,6 +107,9 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40568 */
40569 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
40570 {
40571+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40572+ return;
40573+
40574 mutex_lock(&dev->struct_mutex);
40575 idr_destroy(&dev->ctx_idr);
40576 mutex_unlock(&dev->struct_mutex);
40577@@ -119,11 +128,14 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
40578 {
40579 struct drm_ctx_list *pos, *tmp;
40580
40581+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40582+ return;
40583+
40584 mutex_lock(&dev->ctxlist_mutex);
40585
40586 list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
40587 if (pos->tag == file &&
40588- pos->handle != DRM_KERNEL_CONTEXT) {
40589+ _DRM_LOCKING_CONTEXT(pos->handle) != DRM_KERNEL_CONTEXT) {
40590 if (dev->driver->context_dtor)
40591 dev->driver->context_dtor(dev, pos->handle);
40592
40593@@ -161,6 +173,9 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
40594 struct drm_local_map *map;
40595 struct drm_map_list *_entry;
40596
40597+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40598+ return -EINVAL;
40599+
40600 mutex_lock(&dev->struct_mutex);
40601
40602 map = idr_find(&dev->ctx_idr, request->ctx_id);
40603@@ -205,6 +220,9 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
40604 struct drm_local_map *map = NULL;
40605 struct drm_map_list *r_list = NULL;
40606
40607+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40608+ return -EINVAL;
40609+
40610 mutex_lock(&dev->struct_mutex);
40611 list_for_each_entry(r_list, &dev->maplist, head) {
40612 if (r_list->map
40613@@ -277,7 +295,13 @@ static int drm_context_switch_complete(struct drm_device *dev,
40614 {
40615 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
40616
40617- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40618+ if (file_priv->master->lock.hw_lock == NULL) {
40619+ DRM_ERROR(
40620+ "Device has been unregistered. Hard exit. Process %d\n",
40621+ task_pid_nr(current));
40622+ send_sig(SIGTERM, current, 0);
40623+ return -EPERM;
40624+ } else if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40625 DRM_ERROR("Lock isn't held after context switch\n");
40626 }
40627
40628@@ -305,6 +329,9 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
40629 struct drm_ctx ctx;
40630 int i;
40631
40632+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40633+ return -EINVAL;
40634+
40635 if (res->count >= DRM_RESERVED_CONTEXTS) {
40636 memset(&ctx, 0, sizeof(ctx));
40637 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
40638@@ -335,8 +362,11 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
40639 struct drm_ctx_list *ctx_entry;
40640 struct drm_ctx *ctx = data;
40641
40642+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40643+ return -EINVAL;
40644+
40645 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40646- if (ctx->handle == DRM_KERNEL_CONTEXT) {
40647+ if (_DRM_LOCKING_CONTEXT(ctx->handle) == DRM_KERNEL_CONTEXT) {
40648 /* Skip kernel's context and get a new one. */
40649 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40650 }
40651@@ -378,6 +408,9 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
40652 {
40653 struct drm_ctx *ctx = data;
40654
40655+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40656+ return -EINVAL;
40657+
40658 /* This is 0, because we don't handle any context flags */
40659 ctx->flags = 0;
40660
40661@@ -400,6 +433,9 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
40662 {
40663 struct drm_ctx *ctx = data;
40664
40665+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40666+ return -EINVAL;
40667+
40668 DRM_DEBUG("%d\n", ctx->handle);
40669 return drm_context_switch(dev, dev->last_context, ctx->handle);
40670 }
40671@@ -420,6 +456,9 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
40672 {
40673 struct drm_ctx *ctx = data;
40674
40675+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40676+ return -EINVAL;
40677+
40678 DRM_DEBUG("%d\n", ctx->handle);
40679 drm_context_switch_complete(dev, file_priv, ctx->handle);
40680
40681@@ -442,8 +481,11 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
40682 {
40683 struct drm_ctx *ctx = data;
40684
40685+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40686+ return -EINVAL;
40687+
40688 DRM_DEBUG("%d\n", ctx->handle);
40689- if (ctx->handle != DRM_KERNEL_CONTEXT) {
40690+ if (_DRM_LOCKING_CONTEXT(ctx->handle) != DRM_KERNEL_CONTEXT) {
40691 if (dev->driver->context_dtor)
40692 dev->driver->context_dtor(dev, ctx->handle);
40693 drm_legacy_ctxbitmap_free(dev, ctx->handle);
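Every legacy-context entry point above now opens with the same feature test, so KMS drivers that never set DRIVER_KMS_LEGACY_CONTEXT get -EINVAL back before any legacy state is touched. The shape of the guard, as a sketch (drmP.h supplies the types; DRIVER_KMS_LEGACY_CONTEXT exists only with this patch applied):

#include <drm/drmP.h>

static int demo_legacy_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	/* Legacy UMS context handling is dead code on pure KMS drivers;
	 * refuse early unless the driver explicitly opts in. */
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
		return -EINVAL;

	/* ... legacy context work ... */
	return 0;
}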
40694diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40695index b6f076b..2918de2 100644
40696--- a/drivers/gpu/drm/drm_crtc.c
40697+++ b/drivers/gpu/drm/drm_crtc.c
40698@@ -4118,7 +4118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40699 goto done;
40700 }
40701
40702- if (copy_to_user(&enum_ptr[copied].name,
40703+ if (copy_to_user(enum_ptr[copied].name,
40704 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40705 ret = -EFAULT;
40706 goto done;
40707diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40708index d512134..a80a8e4 100644
40709--- a/drivers/gpu/drm/drm_drv.c
40710+++ b/drivers/gpu/drm/drm_drv.c
40711@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40712
40713 drm_device_set_unplugged(dev);
40714
40715- if (dev->open_count == 0) {
40716+ if (local_read(&dev->open_count) == 0) {
40717 drm_put_dev(dev);
40718 }
40719 mutex_unlock(&drm_global_mutex);
40720@@ -596,10 +596,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
40721 if (drm_ht_create(&dev->map_hash, 12))
40722 goto err_minors;
40723
40724- ret = drm_legacy_ctxbitmap_init(dev);
40725- if (ret) {
40726- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
40727- goto err_ht;
40728+ if (drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT)) {
40729+ ret = drm_legacy_ctxbitmap_init(dev);
40730+ if (ret) {
40731+ DRM_ERROR(
40732+ "Cannot allocate memory for context bitmap.\n");
40733+ goto err_ht;
40734+ }
40735 }
40736
40737 if (drm_core_check_feature(dev, DRIVER_GEM)) {
40738diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40739index 076dd60..e4a4ba7 100644
40740--- a/drivers/gpu/drm/drm_fops.c
40741+++ b/drivers/gpu/drm/drm_fops.c
40742@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40743 return PTR_ERR(minor);
40744
40745 dev = minor->dev;
40746- if (!dev->open_count++)
40747+ if (local_inc_return(&dev->open_count) == 1)
40748 need_setup = 1;
40749
40750 /* share address_space across all char-devs of a single device */
40751@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40752 return 0;
40753
40754 err_undo:
40755- dev->open_count--;
40756+ local_dec(&dev->open_count);
40757 drm_minor_release(minor);
40758 return retcode;
40759 }
40760@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40761
40762 mutex_lock(&drm_global_mutex);
40763
40764- DRM_DEBUG("open_count = %d\n", dev->open_count);
40765+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40766
40767 mutex_lock(&dev->struct_mutex);
40768 list_del(&file_priv->lhead);
40769@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40770 * Begin inline drm_release
40771 */
40772
40773- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40774+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40775 task_pid_nr(current),
40776 (long)old_encode_dev(file_priv->minor->kdev->devt),
40777- dev->open_count);
40778+ local_read(&dev->open_count));
40779
40780 /* Release any auth tokens that might point to this file_priv,
40781 (do that under the drm_global_mutex) */
40782@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40783 * End inline drm_release
40784 */
40785
40786- if (!--dev->open_count) {
40787+ if (local_dec_and_test(&dev->open_count)) {
40788 retcode = drm_lastclose(dev);
40789 if (drm_device_is_unplugged(dev))
40790 drm_put_dev(dev);
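dev->open_count becomes a local_t in this patch, so the open/close bookkeeping moves to the local_* accessors: local_inc_return() spots the first opener and local_dec_and_test() the last closer. local_* ops are cheaper than full atomics (no bus lock on x86) and suffice here because opens are already serialized by drm_global_mutex. A sketch with an illustrative device type:

#include <asm/local.h>

struct demo_dev {
	local_t open_count;
};

static bool demo_open(struct demo_dev *dev)
{
	/* true for the first opener, which performs one-time setup */
	return local_inc_return(&dev->open_count) == 1;
}

static bool demo_release(struct demo_dev *dev)
{
	/* true for the final close, whose caller tears the device down */
	return local_dec_and_test(&dev->open_count);
}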
40791diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40792index 3d2e91c..d31c4c9 100644
40793--- a/drivers/gpu/drm/drm_global.c
40794+++ b/drivers/gpu/drm/drm_global.c
40795@@ -36,7 +36,7 @@
40796 struct drm_global_item {
40797 struct mutex mutex;
40798 void *object;
40799- int refcount;
40800+ atomic_t refcount;
40801 };
40802
40803 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40804@@ -49,7 +49,7 @@ void drm_global_init(void)
40805 struct drm_global_item *item = &glob[i];
40806 mutex_init(&item->mutex);
40807 item->object = NULL;
40808- item->refcount = 0;
40809+ atomic_set(&item->refcount, 0);
40810 }
40811 }
40812
40813@@ -59,7 +59,7 @@ void drm_global_release(void)
40814 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40815 struct drm_global_item *item = &glob[i];
40816 BUG_ON(item->object != NULL);
40817- BUG_ON(item->refcount != 0);
40818+ BUG_ON(atomic_read(&item->refcount) != 0);
40819 }
40820 }
40821
40822@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40823 struct drm_global_item *item = &glob[ref->global_type];
40824
40825 mutex_lock(&item->mutex);
40826- if (item->refcount == 0) {
40827+ if (atomic_read(&item->refcount) == 0) {
40828 item->object = kzalloc(ref->size, GFP_KERNEL);
40829 if (unlikely(item->object == NULL)) {
40830 ret = -ENOMEM;
40831@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40832 goto out_err;
40833
40834 }
40835- ++item->refcount;
40836+ atomic_inc(&item->refcount);
40837 ref->object = item->object;
40838 mutex_unlock(&item->mutex);
40839 return 0;
40840@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40841 struct drm_global_item *item = &glob[ref->global_type];
40842
40843 mutex_lock(&item->mutex);
40844- BUG_ON(item->refcount == 0);
40845+ BUG_ON(atomic_read(&item->refcount) == 0);
40846 BUG_ON(ref->object != item->object);
40847- if (--item->refcount == 0) {
40848+ if (atomic_dec_and_test(&item->refcount)) {
40849 ref->release(ref);
40850 item->object = NULL;
40851 }
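The drm_global hunks turn a mutex-guarded int into an atomic_t; the interesting transform is the release path, where "--refcount == 0" becomes atomic_dec_and_test(), which returns true exactly once, for the thread dropping the final reference. In miniature:

#include <linux/atomic.h>

struct demo_item {
	atomic_t refcount;
	void (*release)(struct demo_item *);
};

static void demo_get(struct demo_item *item)
{
	atomic_inc(&item->refcount);
}

static void demo_put(struct demo_item *item)
{
	/* Only the final put sees true, so release runs exactly once. */
	if (atomic_dec_and_test(&item->refcount))
		item->release(item);
}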
40852diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40853index f1b32f9..394f791 100644
40854--- a/drivers/gpu/drm/drm_info.c
40855+++ b/drivers/gpu/drm/drm_info.c
40856@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40857 struct drm_local_map *map;
40858 struct drm_map_list *r_list;
40859
40860- /* Hardcoded from _DRM_FRAME_BUFFER,
40861- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40862- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40863- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40864+ static const char * const types[] = {
40865+ [_DRM_FRAME_BUFFER] = "FB",
40866+ [_DRM_REGISTERS] = "REG",
40867+ [_DRM_SHM] = "SHM",
40868+ [_DRM_AGP] = "AGP",
40869+ [_DRM_SCATTER_GATHER] = "SG",
40870+ [_DRM_CONSISTENT] = "PCI"};
40871 const char *type;
40872 int i;
40873
40874@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40875 map = r_list->map;
40876 if (!map)
40877 continue;
40878- if (map->type < 0 || map->type > 5)
40879+ if (map->type >= ARRAY_SIZE(types))
40880 type = "??";
40881 else
40882 type = types[map->type];
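Two small hardening moves in the hunk above: the name table is keyed by the enum with designated initializers, so reordering the _DRM_* constants can no longer misalign the strings, and the range check becomes ARRAY_SIZE() with an unsigned comparison instead of a hard-coded "> 5". The same shape in miniature:

#include <linux/kernel.h>

enum demo_kind { DEMO_FB, DEMO_REG, DEMO_SHM };

static const char * const demo_kind_names[] = {
	[DEMO_FB]  = "FB",
	[DEMO_REG] = "REG",
	[DEMO_SHM] = "SHM",
};

static const char *demo_kind_name(int kind)
{
	/* The unsigned compare folds the negative case into "??" as well. */
	if ((unsigned int)kind >= ARRAY_SIZE(demo_kind_names))
		return "??";
	return demo_kind_names[kind];
}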
40883diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40884index 2f4c4343..dd12cd2 100644
40885--- a/drivers/gpu/drm/drm_ioc32.c
40886+++ b/drivers/gpu/drm/drm_ioc32.c
40887@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40888 request = compat_alloc_user_space(nbytes);
40889 if (!access_ok(VERIFY_WRITE, request, nbytes))
40890 return -EFAULT;
40891- list = (struct drm_buf_desc *) (request + 1);
40892+ list = (struct drm_buf_desc __user *) (request + 1);
40893
40894 if (__put_user(count, &request->count)
40895 || __put_user(list, &request->list))
40896@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40897 request = compat_alloc_user_space(nbytes);
40898 if (!access_ok(VERIFY_WRITE, request, nbytes))
40899 return -EFAULT;
40900- list = (struct drm_buf_pub *) (request + 1);
40901+ list = (struct drm_buf_pub __user *) (request + 1);
40902
40903 if (__put_user(count, &request->count)
40904 || __put_user(list, &request->list))
40905@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40906 return 0;
40907 }
40908
40909-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40910+drm_ioctl_compat_t drm_compat_ioctls[] = {
40911 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40912 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40913 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40914@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40915 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40916 {
40917 unsigned int nr = DRM_IOCTL_NR(cmd);
40918- drm_ioctl_compat_t *fn;
40919 int ret;
40920
40921 /* Assume that ioctls without an explicit compat routine will just
40922@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40923 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40924 return drm_ioctl(filp, cmd, arg);
40925
40926- fn = drm_compat_ioctls[nr];
40927-
40928- if (fn != NULL)
40929- ret = (*fn) (filp, cmd, arg);
40930+ if (drm_compat_ioctls[nr] != NULL)
40931+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40932 else
40933 ret = drm_ioctl(filp, cmd, arg);
40934
40935diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40936index 3785d66..1c489ef 100644
40937--- a/drivers/gpu/drm/drm_ioctl.c
40938+++ b/drivers/gpu/drm/drm_ioctl.c
40939@@ -655,7 +655,7 @@ long drm_ioctl(struct file *filp,
40940 struct drm_file *file_priv = filp->private_data;
40941 struct drm_device *dev;
40942 const struct drm_ioctl_desc *ioctl = NULL;
40943- drm_ioctl_t *func;
40944+ drm_ioctl_no_const_t func;
40945 unsigned int nr = DRM_IOCTL_NR(cmd);
40946 int retcode = -EINVAL;
40947 char stack_kdata[128];
40948diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
40949index f861361..b61d4c7 100644
40950--- a/drivers/gpu/drm/drm_lock.c
40951+++ b/drivers/gpu/drm/drm_lock.c
40952@@ -61,9 +61,12 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
40953 struct drm_master *master = file_priv->master;
40954 int ret = 0;
40955
40956+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40957+ return -EINVAL;
40958+
40959 ++file_priv->lock_count;
40960
40961- if (lock->context == DRM_KERNEL_CONTEXT) {
40962+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
40963 DRM_ERROR("Process %d using kernel context %d\n",
40964 task_pid_nr(current), lock->context);
40965 return -EINVAL;
40966@@ -153,12 +156,23 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
40967 struct drm_lock *lock = data;
40968 struct drm_master *master = file_priv->master;
40969
40970- if (lock->context == DRM_KERNEL_CONTEXT) {
40971+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40972+ return -EINVAL;
40973+
40974+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
40975 DRM_ERROR("Process %d using kernel context %d\n",
40976 task_pid_nr(current), lock->context);
40977 return -EINVAL;
40978 }
40979
40980+ if (!master->lock.hw_lock) {
40981+ DRM_ERROR(
40982+ "Device has been unregistered. Hard exit. Process %d\n",
40983+ task_pid_nr(current));
40984+ send_sig(SIGTERM, current, 0);
40985+ return -EPERM;
40986+ }
40987+
40988 if (drm_legacy_lock_free(&master->lock, lock->context)) {
40989 /* FIXME: Should really bail out here. */
40990 }
40991diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40992index d4813e0..6c1ab4d 100644
40993--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40994+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40995@@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
40996 u32 pipeconf_reg = PIPEACONF;
40997 u32 dspcntr_reg = DSPACNTR;
40998
40999- u32 pipeconf = dev_priv->pipeconf[pipe];
41000- u32 dspcntr = dev_priv->dspcntr[pipe];
41001+ u32 pipeconf;
41002+ u32 dspcntr;
41003 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
41004
41005+ if (pipe == -1)
41006+ return;
41007+
41008+ pipeconf = dev_priv->pipeconf[pipe];
41009+ dspcntr = dev_priv->dspcntr[pipe];
41010+
41011 if (pipe) {
41012 pipeconf_reg = PIPECCONF;
41013 dspcntr_reg = DSPCCNTR;
41014diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41015index 93ec5dc..82acbaf 100644
41016--- a/drivers/gpu/drm/i810/i810_drv.h
41017+++ b/drivers/gpu/drm/i810/i810_drv.h
41018@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
41019 int page_flipping;
41020
41021 wait_queue_head_t irq_queue;
41022- atomic_t irq_received;
41023- atomic_t irq_emitted;
41024+ atomic_unchecked_t irq_received;
41025+ atomic_unchecked_t irq_emitted;
41026
41027 int front_offset;
41028 } drm_i810_private_t;
41029diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41030index 1a46787..7fb387c 100644
41031--- a/drivers/gpu/drm/i915/i915_dma.c
41032+++ b/drivers/gpu/drm/i915/i915_dma.c
41033@@ -149,6 +149,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
41034 case I915_PARAM_MMAP_VERSION:
41035 value = 1;
41036 break;
41037+ case I915_PARAM_HAS_LEGACY_CONTEXT:
41038+ value = drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT);
41039+ break;
41040 default:
41041 DRM_DEBUG("Unknown parameter %d\n", param->param);
41042 return -EINVAL;
41043@@ -362,7 +365,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41044 * locking inversion with the driver load path. And the access here is
41045 * completely racy anyway. So don't bother with locking for now.
41046 */
41047- return dev->open_count == 0;
41048+ return local_read(&dev->open_count) == 0;
41049 }
41050
41051 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41052diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41053index 38a7425..5322b16 100644
41054--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41055+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41056@@ -872,12 +872,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41057 static int
41058 validate_exec_list(struct drm_device *dev,
41059 struct drm_i915_gem_exec_object2 *exec,
41060- int count)
41061+ unsigned int count)
41062 {
41063 unsigned relocs_total = 0;
41064 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41065 unsigned invalid_flags;
41066- int i;
41067+ unsigned int i;
41068
41069 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
41070 if (USES_FULL_PPGTT(dev))
41071diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41072index 176de63..b50b66a 100644
41073--- a/drivers/gpu/drm/i915/i915_ioc32.c
41074+++ b/drivers/gpu/drm/i915/i915_ioc32.c
41075@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
41076 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
41077 || __put_user(batchbuffer32.num_cliprects,
41078 &batchbuffer->num_cliprects)
41079- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
41080+ || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
41081 &batchbuffer->cliprects))
41082 return -EFAULT;
41083
41084@@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
41085
41086 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
41087 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
41088- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
41089+ || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
41090 &cmdbuffer->buf)
41091 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
41092 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
41093 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
41094 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
41095- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
41096+ || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
41097 &cmdbuffer->cliprects))
41098 return -EFAULT;
41099
41100@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41101 (unsigned long)request);
41102 }
41103
41104-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41105+static drm_ioctl_compat_t i915_compat_ioctls[] = {
41106 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41107 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41108 [DRM_I915_GETPARAM] = compat_i915_getparam,
41109@@ -201,17 +201,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41110 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41111 {
41112 unsigned int nr = DRM_IOCTL_NR(cmd);
41113- drm_ioctl_compat_t *fn = NULL;
41114 int ret;
41115
41116 if (nr < DRM_COMMAND_BASE)
41117 return drm_compat_ioctl(filp, cmd, arg);
41118
41119- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41120- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41121-
41122- if (fn != NULL)
41123- ret = (*fn) (filp, cmd, arg);
41124+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
41125+ ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
41126 else
41127 ret = drm_ioctl(filp, cmd, arg);
41128
41129diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41130index f75173c..f283e45 100644
41131--- a/drivers/gpu/drm/i915/intel_display.c
41132+++ b/drivers/gpu/drm/i915/intel_display.c
41133@@ -13056,13 +13056,13 @@ struct intel_quirk {
41134 int subsystem_vendor;
41135 int subsystem_device;
41136 void (*hook)(struct drm_device *dev);
41137-};
41138+} __do_const;
41139
41140 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41141 struct intel_dmi_quirk {
41142 void (*hook)(struct drm_device *dev);
41143 const struct dmi_system_id (*dmi_id_list)[];
41144-};
41145+} __do_const;
41146
41147 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41148 {
41149@@ -13070,18 +13070,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41150 return 1;
41151 }
41152
41153-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41154+static const struct dmi_system_id intel_dmi_quirks_table[] = {
41155 {
41156- .dmi_id_list = &(const struct dmi_system_id[]) {
41157- {
41158- .callback = intel_dmi_reverse_brightness,
41159- .ident = "NCR Corporation",
41160- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41161- DMI_MATCH(DMI_PRODUCT_NAME, ""),
41162- },
41163- },
41164- { } /* terminating entry */
41165+ .callback = intel_dmi_reverse_brightness,
41166+ .ident = "NCR Corporation",
41167+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41168+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
41169 },
41170+ },
41171+ { } /* terminating entry */
41172+};
41173+
41174+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41175+ {
41176+ .dmi_id_list = &intel_dmi_quirks_table,
41177 .hook = quirk_invert_brightness,
41178 },
41179 };
41180diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
41181index a002f53..0d60514 100644
41182--- a/drivers/gpu/drm/imx/imx-drm-core.c
41183+++ b/drivers/gpu/drm/imx/imx-drm-core.c
41184@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
41185 if (imxdrm->pipes >= MAX_CRTC)
41186 return -EINVAL;
41187
41188- if (imxdrm->drm->open_count)
41189+ if (local_read(&imxdrm->drm->open_count))
41190 return -EBUSY;
41191
41192 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
41193diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41194index b4a20149..219ab78 100644
41195--- a/drivers/gpu/drm/mga/mga_drv.h
41196+++ b/drivers/gpu/drm/mga/mga_drv.h
41197@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
41198 u32 clear_cmd;
41199 u32 maccess;
41200
41201- atomic_t vbl_received; /**< Number of vblanks received. */
41202+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41203 wait_queue_head_t fence_queue;
41204- atomic_t last_fence_retired;
41205+ atomic_unchecked_t last_fence_retired;
41206 u32 next_fence_to_post;
41207
41208 unsigned int fb_cpp;
41209diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41210index 729bfd5..14bae78 100644
41211--- a/drivers/gpu/drm/mga/mga_ioc32.c
41212+++ b/drivers/gpu/drm/mga/mga_ioc32.c
41213@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41214 return 0;
41215 }
41216
41217-drm_ioctl_compat_t *mga_compat_ioctls[] = {
41218+drm_ioctl_compat_t mga_compat_ioctls[] = {
41219 [DRM_MGA_INIT] = compat_mga_init,
41220 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41221 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41222@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41223 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41224 {
41225 unsigned int nr = DRM_IOCTL_NR(cmd);
41226- drm_ioctl_compat_t *fn = NULL;
41227 int ret;
41228
41229 if (nr < DRM_COMMAND_BASE)
41230 return drm_compat_ioctl(filp, cmd, arg);
41231
41232- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41233- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41234-
41235- if (fn != NULL)
41236- ret = (*fn) (filp, cmd, arg);
41237+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
41238+ ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41239 else
41240 ret = drm_ioctl(filp, cmd, arg);
41241
41242diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41243index 1b071b8..de8601a 100644
41244--- a/drivers/gpu/drm/mga/mga_irq.c
41245+++ b/drivers/gpu/drm/mga/mga_irq.c
41246@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41247 if (crtc != 0)
41248 return 0;
41249
41250- return atomic_read(&dev_priv->vbl_received);
41251+ return atomic_read_unchecked(&dev_priv->vbl_received);
41252 }
41253
41254
41255@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41256 /* VBLANK interrupt */
41257 if (status & MGA_VLINEPEN) {
41258 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41259- atomic_inc(&dev_priv->vbl_received);
41260+ atomic_inc_unchecked(&dev_priv->vbl_received);
41261 drm_handle_vblank(dev, 0);
41262 handled = 1;
41263 }
41264@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41265 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41266 MGA_WRITE(MGA_PRIMEND, prim_end);
41267
41268- atomic_inc(&dev_priv->last_fence_retired);
41269+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
41270 wake_up(&dev_priv->fence_queue);
41271 handled = 1;
41272 }
41273@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41274 * using fences.
41275 */
41276 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41277- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41278+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41279 - *sequence) <= (1 << 23)));
41280
41281 *sequence = cur_fence;
41282diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41283index 0190b69..60c3eaf 100644
41284--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41285+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41286@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41287 struct bit_table {
41288 const char id;
41289 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41290-};
41291+} __no_const;
41292
41293 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41294
41295diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
41296index 8763deb..936b423 100644
41297--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
41298+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
41299@@ -940,7 +940,8 @@ static struct drm_driver
41300 driver_stub = {
41301 .driver_features =
41302 DRIVER_USE_AGP |
41303- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
41304+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
41305+ DRIVER_KMS_LEGACY_CONTEXT,
41306
41307 .load = nouveau_drm_load,
41308 .unload = nouveau_drm_unload,
41309diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41310index fc68f09..0511d71 100644
41311--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41312+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41313@@ -121,7 +121,6 @@ struct nouveau_drm {
41314 struct drm_global_reference mem_global_ref;
41315 struct ttm_bo_global_ref bo_global_ref;
41316 struct ttm_bo_device bdev;
41317- atomic_t validate_sequence;
41318 int (*move)(struct nouveau_channel *,
41319 struct ttm_buffer_object *,
41320 struct ttm_mem_reg *, struct ttm_mem_reg *);
41321diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41322index 462679a..88e32a7 100644
41323--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41324+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41325@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41326 unsigned long arg)
41327 {
41328 unsigned int nr = DRM_IOCTL_NR(cmd);
41329- drm_ioctl_compat_t *fn = NULL;
41330+ drm_ioctl_compat_t fn = NULL;
41331 int ret;
41332
41333 if (nr < DRM_COMMAND_BASE)
41334diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41335index 273e501..3b6c0a2 100644
41336--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41337+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41338@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41339 }
41340
41341 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41342- nouveau_vram_manager_init,
41343- nouveau_vram_manager_fini,
41344- nouveau_vram_manager_new,
41345- nouveau_vram_manager_del,
41346- nouveau_vram_manager_debug
41347+ .init = nouveau_vram_manager_init,
41348+ .takedown = nouveau_vram_manager_fini,
41349+ .get_node = nouveau_vram_manager_new,
41350+ .put_node = nouveau_vram_manager_del,
41351+ .debug = nouveau_vram_manager_debug
41352 };
41353
41354 static int
41355@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41356 }
41357
41358 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41359- nouveau_gart_manager_init,
41360- nouveau_gart_manager_fini,
41361- nouveau_gart_manager_new,
41362- nouveau_gart_manager_del,
41363- nouveau_gart_manager_debug
41364+ .init = nouveau_gart_manager_init,
41365+ .takedown = nouveau_gart_manager_fini,
41366+ .get_node = nouveau_gart_manager_new,
41367+ .put_node = nouveau_gart_manager_del,
41368+ .debug = nouveau_gart_manager_debug
41369 };
41370
41371 /*XXX*/
41372@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41373 }
41374
41375 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41376- nv04_gart_manager_init,
41377- nv04_gart_manager_fini,
41378- nv04_gart_manager_new,
41379- nv04_gart_manager_del,
41380- nv04_gart_manager_debug
41381+ .init = nv04_gart_manager_init,
41382+ .takedown = nv04_gart_manager_fini,
41383+ .get_node = nv04_gart_manager_new,
41384+ .put_node = nv04_gart_manager_del,
41385+ .debug = nv04_gart_manager_debug
41386 };
41387
41388 int
41389diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41390index c7592ec..dd45ebc 100644
41391--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41392+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41393@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41394 * locking inversion with the driver load path. And the access here is
41395 * completely racy anyway. So don't bother with locking for now.
41396 */
41397- return dev->open_count == 0;
41398+ return local_read(&dev->open_count) == 0;
41399 }
41400
41401 static const struct vga_switcheroo_client_ops
41402diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41403index 9782364..89bd954 100644
41404--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41405+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41406@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41407 int ret;
41408
41409 mutex_lock(&qdev->async_io_mutex);
41410- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41411+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41412 if (qdev->last_sent_io_cmd > irq_num) {
41413 if (intr)
41414 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41415- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41416+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41417 else
41418 ret = wait_event_timeout(qdev->io_cmd_event,
41419- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41420+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41421 /* 0 is timeout, just bail the "hw" has gone away */
41422 if (ret <= 0)
41423 goto out;
41424- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41425+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41426 }
41427 outb(val, addr);
41428 qdev->last_sent_io_cmd = irq_num + 1;
41429 if (intr)
41430 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41431- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41432+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41433 else
41434 ret = wait_event_timeout(qdev->io_cmd_event,
41435- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41436+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41437 out:
41438 if (ret > 0)
41439 ret = 0;
41440diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41441index 6911b8c..89d6867 100644
41442--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41443+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41444@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41445 struct drm_info_node *node = (struct drm_info_node *) m->private;
41446 struct qxl_device *qdev = node->minor->dev->dev_private;
41447
41448- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41449- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41450- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41451- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41452+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41453+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41454+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41455+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41456 seq_printf(m, "%d\n", qdev->irq_received_error);
41457 return 0;
41458 }
41459diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41460index 7c6cafe..460f542 100644
41461--- a/drivers/gpu/drm/qxl/qxl_drv.h
41462+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41463@@ -290,10 +290,10 @@ struct qxl_device {
41464 unsigned int last_sent_io_cmd;
41465
41466 /* interrupt handling */
41467- atomic_t irq_received;
41468- atomic_t irq_received_display;
41469- atomic_t irq_received_cursor;
41470- atomic_t irq_received_io_cmd;
41471+ atomic_unchecked_t irq_received;
41472+ atomic_unchecked_t irq_received_display;
41473+ atomic_unchecked_t irq_received_cursor;
41474+ atomic_unchecked_t irq_received_io_cmd;
41475 unsigned irq_received_error;
41476 wait_queue_head_t display_event;
41477 wait_queue_head_t cursor_event;
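
The atomic_unchecked_t conversions in this and the surrounding qxl hunks are the patch's PaX REFCOUNT pattern: atomic_t gains overflow detection, where an overflowing reference count triggers an oops instead of a later use-after-free, so statistics counters that may legitimately wrap, such as these IRQ tallies, are switched to an unchecked variant with the same layout and a mirrored API. A hypothetical userspace sketch of the split, using gcc's __atomic builtins:

typedef struct { int counter; } atomic_sketch_t;           /* overflow would trap under REFCOUNT */
typedef struct { int counter; } atomic_unchecked_sketch_t; /* plain wrapping counter */

static inline void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_sketch(const atomic_unchecked_sketch_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
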
41478diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41479index b110883..dd06418 100644
41480--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41481+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41482@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41483
41484 /* TODO copy slow path code from i915 */
41485 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41486- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41487+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41488
41489 {
41490 struct qxl_drawable *draw = fb_cmd;
41491@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41492 struct drm_qxl_reloc reloc;
41493
41494 if (copy_from_user(&reloc,
41495- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41496+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41497 sizeof(reloc))) {
41498 ret = -EFAULT;
41499 goto out_free_bos;
41500@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41501
41502 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41503
41504- struct drm_qxl_command *commands =
41505- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41506+ struct drm_qxl_command __user *commands =
41507+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41508
41509- if (copy_from_user(&user_cmd, &commands[cmd_num],
41510+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41511 sizeof(user_cmd)))
41512 return -EFAULT;
41513
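
The __user and __force_user casts above feed sparse-style address-space checking: cmd->command and cmd->relocs arrive as plain 64-bit integers from the ioctl argument, and the patch makes the conversion back to a userspace pointer explicit before copy_from_user(), so accidental mixing of kernel and user pointers becomes a static error. A sketch of the annotation mechanics, with the macro mirroring the kernel's and collapsing to nothing when sparse's __CHECKER__ is absent:

#ifdef __CHECKER__
# define __user_sketch __attribute__((noderef, address_space(1)))
#else
# define __user_sketch
#endif

struct cmd_sketch { unsigned long long relocs; };

static void get_reloc_ptr(const struct cmd_sketch *c, const void __user_sketch **out)
{
	/* the cast is the single audited point where a raw integer
	 * becomes a user-space pointer */
	*out = (const void __user_sketch *)(unsigned long)c->relocs;
}
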
41514diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41515index 0bf1e20..42a7310 100644
41516--- a/drivers/gpu/drm/qxl/qxl_irq.c
41517+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41518@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41519 if (!pending)
41520 return IRQ_NONE;
41521
41522- atomic_inc(&qdev->irq_received);
41523+ atomic_inc_unchecked(&qdev->irq_received);
41524
41525 if (pending & QXL_INTERRUPT_DISPLAY) {
41526- atomic_inc(&qdev->irq_received_display);
41527+ atomic_inc_unchecked(&qdev->irq_received_display);
41528 wake_up_all(&qdev->display_event);
41529 qxl_queue_garbage_collect(qdev, false);
41530 }
41531 if (pending & QXL_INTERRUPT_CURSOR) {
41532- atomic_inc(&qdev->irq_received_cursor);
41533+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41534 wake_up_all(&qdev->cursor_event);
41535 }
41536 if (pending & QXL_INTERRUPT_IO_CMD) {
41537- atomic_inc(&qdev->irq_received_io_cmd);
41538+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41539 wake_up_all(&qdev->io_cmd_event);
41540 }
41541 if (pending & QXL_INTERRUPT_ERROR) {
41542@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41543 init_waitqueue_head(&qdev->io_cmd_event);
41544 INIT_WORK(&qdev->client_monitors_config_work,
41545 qxl_client_monitors_config_work_func);
41546- atomic_set(&qdev->irq_received, 0);
41547- atomic_set(&qdev->irq_received_display, 0);
41548- atomic_set(&qdev->irq_received_cursor, 0);
41549- atomic_set(&qdev->irq_received_io_cmd, 0);
41550+ atomic_set_unchecked(&qdev->irq_received, 0);
41551+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41552+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41553+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41554 qdev->irq_received_error = 0;
41555 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41556 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41557diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41558index 0cbc4c9..0e46686 100644
41559--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41560+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41561@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41562 }
41563 }
41564
41565-static struct vm_operations_struct qxl_ttm_vm_ops;
41566+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41567 static const struct vm_operations_struct *ttm_vm_ops;
41568
41569 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41570@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41571 return r;
41572 if (unlikely(ttm_vm_ops == NULL)) {
41573 ttm_vm_ops = vma->vm_ops;
41574+ pax_open_kernel();
41575 qxl_ttm_vm_ops = *ttm_vm_ops;
41576 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41577+ pax_close_kernel();
41578 }
41579 vma->vm_ops = &qxl_ttm_vm_ops;
41580 return 0;
41581@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41582 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41583 {
41584 #if defined(CONFIG_DEBUG_FS)
41585- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41586- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41587- unsigned i;
41588+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41589+ {
41590+ .name = "qxl_mem_mm",
41591+ .show = &qxl_mm_dump_table,
41592+ },
41593+ {
41594+ .name = "qxl_surf_mm",
41595+ .show = &qxl_mm_dump_table,
41596+ }
41597+ };
41598
41599- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41600- if (i == 0)
41601- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41602- else
41603- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41604- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41605- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41606- qxl_mem_types_list[i].driver_features = 0;
41607- if (i == 0)
41608- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41609- else
41610- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41611+ pax_open_kernel();
41612+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41613+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41614+ pax_close_kernel();
41615
41616- }
41617- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41618+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41619 #else
41620 return 0;
41621 #endif
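
The qxl_ttm hunks show the patch's read-only data pattern: structures written once at setup (the cloned vm_operations_struct, the debugfs table) are moved into __read_only memory, and the few legitimate writes are bracketed with pax_open_kernel()/pax_close_kernel(), which on x86 temporarily clears the CR0.WP bit so the kernel may write through read-only mappings. A hypothetical userspace analogy using mprotect():

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* one page of "read-only after init" state */
static void set_field(void *page, size_t pagesz, uint64_t *field, uint64_t val)
{
	mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
	*field = val;
	mprotect(page, pagesz, PROT_READ);              /* pax_close_kernel() */
}
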
41622diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41623index 2c45ac9..5d740f8 100644
41624--- a/drivers/gpu/drm/r128/r128_cce.c
41625+++ b/drivers/gpu/drm/r128/r128_cce.c
41626@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41627
41628 /* GH: Simple idle check.
41629 */
41630- atomic_set(&dev_priv->idle_count, 0);
41631+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41632
41633 /* We don't support anything other than bus-mastering ring mode,
41634 * but the ring can be in either AGP or PCI space for the ring
41635diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41636index 723e5d6..102dbaf 100644
41637--- a/drivers/gpu/drm/r128/r128_drv.h
41638+++ b/drivers/gpu/drm/r128/r128_drv.h
41639@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41640 int is_pci;
41641 unsigned long cce_buffers_offset;
41642
41643- atomic_t idle_count;
41644+ atomic_unchecked_t idle_count;
41645
41646 int page_flipping;
41647 int current_page;
41648 u32 crtc_offset;
41649 u32 crtc_offset_cntl;
41650
41651- atomic_t vbl_received;
41652+ atomic_unchecked_t vbl_received;
41653
41654 u32 color_fmt;
41655 unsigned int front_offset;
41656diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41657index 663f38c..ec159a1 100644
41658--- a/drivers/gpu/drm/r128/r128_ioc32.c
41659+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41660@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41661 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41662 }
41663
41664-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41665+drm_ioctl_compat_t r128_compat_ioctls[] = {
41666 [DRM_R128_INIT] = compat_r128_init,
41667 [DRM_R128_DEPTH] = compat_r128_depth,
41668 [DRM_R128_STIPPLE] = compat_r128_stipple,
41669@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41670 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41671 {
41672 unsigned int nr = DRM_IOCTL_NR(cmd);
41673- drm_ioctl_compat_t *fn = NULL;
41674 int ret;
41675
41676 if (nr < DRM_COMMAND_BASE)
41677 return drm_compat_ioctl(filp, cmd, arg);
41678
41679- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41680- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41681-
41682- if (fn != NULL)
41683- ret = (*fn) (filp, cmd, arg);
41684+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
41685+ ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41686 else
41687 ret = drm_ioctl(filp, cmd, arg);
41688
41689diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41690index c2ae496..30b5993 100644
41691--- a/drivers/gpu/drm/r128/r128_irq.c
41692+++ b/drivers/gpu/drm/r128/r128_irq.c
41693@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41694 if (crtc != 0)
41695 return 0;
41696
41697- return atomic_read(&dev_priv->vbl_received);
41698+ return atomic_read_unchecked(&dev_priv->vbl_received);
41699 }
41700
41701 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41702@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41703 /* VBLANK interrupt */
41704 if (status & R128_CRTC_VBLANK_INT) {
41705 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41706- atomic_inc(&dev_priv->vbl_received);
41707+ atomic_inc_unchecked(&dev_priv->vbl_received);
41708 drm_handle_vblank(dev, 0);
41709 return IRQ_HANDLED;
41710 }
41711diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41712index 8fd2d9f..18c9660 100644
41713--- a/drivers/gpu/drm/r128/r128_state.c
41714+++ b/drivers/gpu/drm/r128/r128_state.c
41715@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41716
41717 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41718 {
41719- if (atomic_read(&dev_priv->idle_count) == 0)
41720+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41721 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41722 else
41723- atomic_set(&dev_priv->idle_count, 0);
41724+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41725 }
41726
41727 #endif
41728diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41729index b928c17..e5d9400 100644
41730--- a/drivers/gpu/drm/radeon/mkregtable.c
41731+++ b/drivers/gpu/drm/radeon/mkregtable.c
41732@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41733 regex_t mask_rex;
41734 regmatch_t match[4];
41735 char buf[1024];
41736- size_t end;
41737+ long end;
41738 int len;
41739 int done = 0;
41740 int r;
41741 unsigned o;
41742 struct offset *offset;
41743 char last_reg_s[10];
41744- int last_reg;
41745+ unsigned long last_reg;
41746
41747 if (regcomp
41748 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41749diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41750index bd7519f..e1c2cd95 100644
41751--- a/drivers/gpu/drm/radeon/radeon_device.c
41752+++ b/drivers/gpu/drm/radeon/radeon_device.c
41753@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41754 * locking inversion with the driver load path. And the access here is
41755 * completely racy anyway. So don't bother with locking for now.
41756 */
41757- return dev->open_count == 0;
41758+ return local_read(&dev->open_count) == 0;
41759 }
41760
41761 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41762diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41763index 46bd393..6ae4719 100644
41764--- a/drivers/gpu/drm/radeon/radeon_drv.h
41765+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41766@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41767
41768 /* SW interrupt */
41769 wait_queue_head_t swi_queue;
41770- atomic_t swi_emitted;
41771+ atomic_unchecked_t swi_emitted;
41772 int vblank_crtc;
41773 uint32_t irq_enable_reg;
41774 uint32_t r500_disp_irq_reg;
41775diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41776index 0b98ea1..a3c770f 100644
41777--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41778+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41779@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41780 request = compat_alloc_user_space(sizeof(*request));
41781 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41782 || __put_user(req32.param, &request->param)
41783- || __put_user((void __user *)(unsigned long)req32.value,
41784+ || __put_user((unsigned long)req32.value,
41785 &request->value))
41786 return -EFAULT;
41787
41788@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41789 #define compat_radeon_cp_setparam NULL
41790 #endif /* X86_64 || IA64 */
41791
41792-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41793+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41794 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41795 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41796 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41797@@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41798 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41799 {
41800 unsigned int nr = DRM_IOCTL_NR(cmd);
41801- drm_ioctl_compat_t *fn = NULL;
41802 int ret;
41803
41804 if (nr < DRM_COMMAND_BASE)
41805 return drm_compat_ioctl(filp, cmd, arg);
41806
41807- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41808- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41809-
41810- if (fn != NULL)
41811- ret = (*fn) (filp, cmd, arg);
41812+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
41813+ ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41814 else
41815 ret = drm_ioctl(filp, cmd, arg);
41816
41817diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41818index 244b19b..c19226d 100644
41819--- a/drivers/gpu/drm/radeon/radeon_irq.c
41820+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41821@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41822 unsigned int ret;
41823 RING_LOCALS;
41824
41825- atomic_inc(&dev_priv->swi_emitted);
41826- ret = atomic_read(&dev_priv->swi_emitted);
41827+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41828+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41829
41830 BEGIN_RING(4);
41831 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41832@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41833 drm_radeon_private_t *dev_priv =
41834 (drm_radeon_private_t *) dev->dev_private;
41835
41836- atomic_set(&dev_priv->swi_emitted, 0);
41837+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41838 init_waitqueue_head(&dev_priv->swi_queue);
41839
41840 dev->max_vblank_count = 0x001fffff;
41841diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41842index 15aee72..cda326e 100644
41843--- a/drivers/gpu/drm/radeon/radeon_state.c
41844+++ b/drivers/gpu/drm/radeon/radeon_state.c
41845@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41846 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41847 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41848
41849- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41850+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41851 sarea_priv->nbox * sizeof(depth_boxes[0])))
41852 return -EFAULT;
41853
41854@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41855 {
41856 drm_radeon_private_t *dev_priv = dev->dev_private;
41857 drm_radeon_getparam_t *param = data;
41858- int value;
41859+ int value = 0;
41860
41861 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41862
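
Two distinct fixes appear in the radeon_state.c hunks above: the clear ioctl re-checks sarea_priv->nbox at the point of use, so a concurrent writer to the shared SAREA cannot grow the copy_from_user() length between the earlier clamp and the copy, and getparam's value is zero-initialized so an unhandled parameter cannot leak stale stack contents to userspace. The re-check-at-use idea in a compilable miniature, names hypothetical:

#include <string.h>

#define NR_CLIPRECTS 12

struct box_sketch { int x1, y1, x2, y2; };

/* nbox lives in memory shared with userspace and may change at any time */
static int fetch_boxes(struct box_sketch *dst, const struct box_sketch *src,
		       volatile unsigned *nbox)
{
	unsigned n = *nbox;            /* single snapshot... */
	if (n > NR_CLIPRECTS)
		return -1;             /* ...validated before it is used */
	memcpy(dst, src, n * sizeof(*dst));
	return 0;
}
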
41863diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41864index edafd3c..3af7c9c 100644
41865--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41866+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41867@@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41868 man->size = size >> PAGE_SHIFT;
41869 }
41870
41871-static struct vm_operations_struct radeon_ttm_vm_ops;
41872+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41873 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41874
41875 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41876@@ -1002,8 +1002,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41877 }
41878 if (unlikely(ttm_vm_ops == NULL)) {
41879 ttm_vm_ops = vma->vm_ops;
41880+ pax_open_kernel();
41881 radeon_ttm_vm_ops = *ttm_vm_ops;
41882 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41883+ pax_close_kernel();
41884 }
41885 vma->vm_ops = &radeon_ttm_vm_ops;
41886 return 0;
41887diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41888index 1a52522..8e78043 100644
41889--- a/drivers/gpu/drm/tegra/dc.c
41890+++ b/drivers/gpu/drm/tegra/dc.c
41891@@ -1585,7 +1585,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41892 }
41893
41894 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41895- dc->debugfs_files[i].data = dc;
41896+ *(void **)&dc->debugfs_files[i].data = dc;
41897
41898 err = drm_debugfs_create_files(dc->debugfs_files,
41899 ARRAY_SIZE(debugfs_files),
41900diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41901index ed970f6..4eeea42 100644
41902--- a/drivers/gpu/drm/tegra/dsi.c
41903+++ b/drivers/gpu/drm/tegra/dsi.c
41904@@ -62,7 +62,7 @@ struct tegra_dsi {
41905 struct clk *clk_lp;
41906 struct clk *clk;
41907
41908- struct drm_info_list *debugfs_files;
41909+ drm_info_list_no_const *debugfs_files;
41910 struct drm_minor *minor;
41911 struct dentry *debugfs;
41912
41913diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41914index 7eaaee74..cc2bc04 100644
41915--- a/drivers/gpu/drm/tegra/hdmi.c
41916+++ b/drivers/gpu/drm/tegra/hdmi.c
41917@@ -64,7 +64,7 @@ struct tegra_hdmi {
41918 bool stereo;
41919 bool dvi;
41920
41921- struct drm_info_list *debugfs_files;
41922+ drm_info_list_no_const *debugfs_files;
41923 struct drm_minor *minor;
41924 struct dentry *debugfs;
41925 };
41926diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41927index aa0bd054..aea6a01 100644
41928--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41929+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41930@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41931 }
41932
41933 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41934- ttm_bo_man_init,
41935- ttm_bo_man_takedown,
41936- ttm_bo_man_get_node,
41937- ttm_bo_man_put_node,
41938- ttm_bo_man_debug
41939+ .init = ttm_bo_man_init,
41940+ .takedown = ttm_bo_man_takedown,
41941+ .get_node = ttm_bo_man_get_node,
41942+ .put_node = ttm_bo_man_put_node,
41943+ .debug = ttm_bo_man_debug
41944 };
41945 EXPORT_SYMBOL(ttm_bo_manager_func);
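
The ttm_mem_type_manager_func rewrites here and in the earlier nouveau hunks (and the vmwgfx ones below) swap positional initializers for designated ones. Beyond style, this is presumably what lets the constify plugin treat such ops structures uniformly: fields bind by name rather than position, and anything unnamed is zeroed. A compilable sketch:

struct mem_ops_sketch {
	int  (*init)(void);
	void (*takedown)(void);
	void (*debug)(const char *prefix);
};

static int  man_init(void)                { return 0; }
static void man_takedown(void)            { }
static void man_debug(const char *prefix) { (void)prefix; }

static const struct mem_ops_sketch manager_func = {
	.init     = man_init,
	.takedown = man_takedown,
	.debug    = man_debug,   /* order no longer has to match the struct */
};
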
41946diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41947index a1803fb..c53f6b0 100644
41948--- a/drivers/gpu/drm/ttm/ttm_memory.c
41949+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41950@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41951 zone->glob = glob;
41952 glob->zone_kernel = zone;
41953 ret = kobject_init_and_add(
41954- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41955+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41956 if (unlikely(ret != 0)) {
41957 kobject_put(&zone->kobj);
41958 return ret;
41959@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41960 zone->glob = glob;
41961 glob->zone_dma32 = zone;
41962 ret = kobject_init_and_add(
41963- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41964+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41965 if (unlikely(ret != 0)) {
41966 kobject_put(&zone->kobj);
41967 return ret;
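
The ttm_memory.c change is classic format-string hardening: kobject_init_and_add() takes a printf-style format, and zone->name ("kernel", "dma32") was being passed as the format itself. Supplying a constant "%s" makes the name inert data even if it ever contained a '%'. The same bug shape in miniature:

#include <stdio.h>

static void announce(const char *name)
{
	/* printf(name);           BAD: '%' sequences in name reach the formatter */
	printf("%s\n", name);   /* GOOD: name is an argument, never a format */
}
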
41968diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41969index 025c429..314062f 100644
41970--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41971+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41972@@ -54,7 +54,7 @@
41973
41974 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41975 #define SMALL_ALLOCATION 16
41976-#define FREE_ALL_PAGES (~0U)
41977+#define FREE_ALL_PAGES (~0UL)
41978 /* times are in msecs */
41979 #define PAGE_FREE_INTERVAL 1000
41980
41981@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41982 * @free_all: If set to true will free all pages in pool
41983 * @use_static: Safe to use static buffer
41984 **/
41985-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41986+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41987 bool use_static)
41988 {
41989 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41990 unsigned long irq_flags;
41991 struct page *p;
41992 struct page **pages_to_free;
41993- unsigned freed_pages = 0,
41994- npages_to_free = nr_free;
41995+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41996
41997 if (NUM_PAGES_TO_ALLOC < nr_free)
41998 npages_to_free = NUM_PAGES_TO_ALLOC;
41999@@ -371,7 +370,8 @@ restart:
42000 __list_del(&p->lru, &pool->list);
42001
42002 ttm_pool_update_free_locked(pool, freed_pages);
42003- nr_free -= freed_pages;
42004+ if (likely(nr_free != FREE_ALL_PAGES))
42005+ nr_free -= freed_pages;
42006 }
42007
42008 spin_unlock_irqrestore(&pool->lock, irq_flags);
42009@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42010 unsigned i;
42011 unsigned pool_offset;
42012 struct ttm_page_pool *pool;
42013- int shrink_pages = sc->nr_to_scan;
42014+ unsigned long shrink_pages = sc->nr_to_scan;
42015 unsigned long freed = 0;
42016
42017 if (!mutex_trylock(&lock))
42018@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42019 pool_offset = ++start_pool % NUM_POOLS;
42020 /* select start pool in round robin fashion */
42021 for (i = 0; i < NUM_POOLS; ++i) {
42022- unsigned nr_free = shrink_pages;
42023+ unsigned long nr_free = shrink_pages;
42024 if (shrink_pages == 0)
42025 break;
42026 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
42027@@ -673,7 +673,7 @@ out:
42028 }
42029
42030 /* Put all pages in pages list to correct pool to wait for reuse */
42031-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
42032+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
42033 enum ttm_caching_state cstate)
42034 {
42035 unsigned long irq_flags;
42036@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
42037 struct list_head plist;
42038 struct page *p = NULL;
42039 gfp_t gfp_flags = GFP_USER;
42040- unsigned count;
42041+ unsigned long count;
42042 int r;
42043
42044 /* set zero flag for page allocation if required */
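
The widening of FREE_ALL_PAGES to ~0UL, together with the unsigned to unsigned long conversions in this file and the DMA variant below, keeps the "free everything" sentinel intact on 64-bit: shrink_pages comes from sc->nr_to_scan, which is an unsigned long, a 32-bit ~0U would never compare equal once stored in a long-sized variable, and decrementing the sentinel by freed_pages would silently turn it into a finite count. The guarded decrement in isolation:

#define FREE_ALL_PAGES_SKETCH (~0UL)   /* must match the variable's width */

static unsigned long account_freed(unsigned long nr_free, unsigned long freed)
{
	if (nr_free != FREE_ALL_PAGES_SKETCH)  /* never decay the sentinel */
		nr_free -= freed;
	return nr_free;
}
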
42045diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42046index 01e1d27..aaa018a 100644
42047--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42048+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42049@@ -56,7 +56,7 @@
42050
42051 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42052 #define SMALL_ALLOCATION 4
42053-#define FREE_ALL_PAGES (~0U)
42054+#define FREE_ALL_PAGES (~0UL)
42055 /* times are in msecs */
42056 #define IS_UNDEFINED (0)
42057 #define IS_WC (1<<1)
42058@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
42059 * @nr_free: If set to true will free all pages in pool
42060 * @use_static: Safe to use static buffer
42061 **/
42062-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42063+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
42064 bool use_static)
42065 {
42066 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42067@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42068 struct dma_page *dma_p, *tmp;
42069 struct page **pages_to_free;
42070 struct list_head d_pages;
42071- unsigned freed_pages = 0,
42072- npages_to_free = nr_free;
42073+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42074
42075 if (NUM_PAGES_TO_ALLOC < nr_free)
42076 npages_to_free = NUM_PAGES_TO_ALLOC;
42077@@ -499,7 +498,8 @@ restart:
42078 /* remove range of pages from the pool */
42079 if (freed_pages) {
42080 ttm_pool_update_free_locked(pool, freed_pages);
42081- nr_free -= freed_pages;
42082+ if (likely(nr_free != FREE_ALL_PAGES))
42083+ nr_free -= freed_pages;
42084 }
42085
42086 spin_unlock_irqrestore(&pool->lock, irq_flags);
42087@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
42088 struct dma_page *d_page, *next;
42089 enum pool_type type;
42090 bool is_cached = false;
42091- unsigned count = 0, i, npages = 0;
42092+ unsigned long count = 0, i, npages = 0;
42093 unsigned long irq_flags;
42094
42095 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
42096@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42097 static unsigned start_pool;
42098 unsigned idx = 0;
42099 unsigned pool_offset;
42100- unsigned shrink_pages = sc->nr_to_scan;
42101+ unsigned long shrink_pages = sc->nr_to_scan;
42102 struct device_pools *p;
42103 unsigned long freed = 0;
42104
42105@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42106 goto out;
42107 pool_offset = ++start_pool % _manager->npools;
42108 list_for_each_entry(p, &_manager->pools, pools) {
42109- unsigned nr_free;
42110+ unsigned long nr_free;
42111
42112 if (!p->dev)
42113 continue;
42114@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42115 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
42116 freed += nr_free - shrink_pages;
42117
42118- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
42119+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
42120 p->pool->dev_name, p->pool->name, current->pid,
42121 nr_free, shrink_pages);
42122 }
42123diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42124index 5fc16ce..1bd84ec 100644
42125--- a/drivers/gpu/drm/udl/udl_fb.c
42126+++ b/drivers/gpu/drm/udl/udl_fb.c
42127@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42128 fb_deferred_io_cleanup(info);
42129 kfree(info->fbdefio);
42130 info->fbdefio = NULL;
42131- info->fbops->fb_mmap = udl_fb_mmap;
42132 }
42133
42134 pr_warn("released /dev/fb%d user=%d count=%d\n",
42135diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42136index ef8c500..01030c8 100644
42137--- a/drivers/gpu/drm/via/via_drv.h
42138+++ b/drivers/gpu/drm/via/via_drv.h
42139@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
42140 typedef uint32_t maskarray_t[5];
42141
42142 typedef struct drm_via_irq {
42143- atomic_t irq_received;
42144+ atomic_unchecked_t irq_received;
42145 uint32_t pending_mask;
42146 uint32_t enable_mask;
42147 wait_queue_head_t irq_queue;
42148@@ -77,7 +77,7 @@ typedef struct drm_via_private {
42149 struct timeval last_vblank;
42150 int last_vblank_valid;
42151 unsigned usec_per_vblank;
42152- atomic_t vbl_received;
42153+ atomic_unchecked_t vbl_received;
42154 drm_via_state_t hc_state;
42155 char pci_buf[VIA_PCI_BUF_SIZE];
42156 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42157diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42158index 1319433..a993b0c 100644
42159--- a/drivers/gpu/drm/via/via_irq.c
42160+++ b/drivers/gpu/drm/via/via_irq.c
42161@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42162 if (crtc != 0)
42163 return 0;
42164
42165- return atomic_read(&dev_priv->vbl_received);
42166+ return atomic_read_unchecked(&dev_priv->vbl_received);
42167 }
42168
42169 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42170@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42171
42172 status = VIA_READ(VIA_REG_INTERRUPT);
42173 if (status & VIA_IRQ_VBLANK_PENDING) {
42174- atomic_inc(&dev_priv->vbl_received);
42175- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42176+ atomic_inc_unchecked(&dev_priv->vbl_received);
42177+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42178 do_gettimeofday(&cur_vblank);
42179 if (dev_priv->last_vblank_valid) {
42180 dev_priv->usec_per_vblank =
42181@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42182 dev_priv->last_vblank = cur_vblank;
42183 dev_priv->last_vblank_valid = 1;
42184 }
42185- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42186+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42187 DRM_DEBUG("US per vblank is: %u\n",
42188 dev_priv->usec_per_vblank);
42189 }
42190@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42191
42192 for (i = 0; i < dev_priv->num_irqs; ++i) {
42193 if (status & cur_irq->pending_mask) {
42194- atomic_inc(&cur_irq->irq_received);
42195+ atomic_inc_unchecked(&cur_irq->irq_received);
42196 wake_up(&cur_irq->irq_queue);
42197 handled = 1;
42198 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42199@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42200 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42201 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42202 masks[irq][4]));
42203- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42204+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42205 } else {
42206 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42207 (((cur_irq_sequence =
42208- atomic_read(&cur_irq->irq_received)) -
42209+ atomic_read_unchecked(&cur_irq->irq_received)) -
42210 *sequence) <= (1 << 23)));
42211 }
42212 *sequence = cur_irq_sequence;
42213@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42214 }
42215
42216 for (i = 0; i < dev_priv->num_irqs; ++i) {
42217- atomic_set(&cur_irq->irq_received, 0);
42218+ atomic_set_unchecked(&cur_irq->irq_received, 0);
42219 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42220 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42221 init_waitqueue_head(&cur_irq->irq_queue);
42222@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42223 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42224 case VIA_IRQ_RELATIVE:
42225 irqwait->request.sequence +=
42226- atomic_read(&cur_irq->irq_received);
42227+ atomic_read_unchecked(&cur_irq->irq_received);
42228 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42229 case VIA_IRQ_ABSOLUTE:
42230 break;
42231diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42232index d26a6da..5fa41ed 100644
42233--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42234+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42235@@ -447,7 +447,7 @@ struct vmw_private {
42236 * Fencing and IRQs.
42237 */
42238
42239- atomic_t marker_seq;
42240+ atomic_unchecked_t marker_seq;
42241 wait_queue_head_t fence_queue;
42242 wait_queue_head_t fifo_queue;
42243 spinlock_t waiter_lock;
42244diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42245index 39f2b03..d1b0a64 100644
42246--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42247+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42248@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42249 (unsigned int) min,
42250 (unsigned int) fifo->capabilities);
42251
42252- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42253+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42254 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42255 vmw_marker_queue_init(&fifo->marker_queue);
42256 return vmw_fifo_send_fence(dev_priv, &dummy);
42257@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42258 if (reserveable)
42259 iowrite32(bytes, fifo_mem +
42260 SVGA_FIFO_RESERVED);
42261- return fifo_mem + (next_cmd >> 2);
42262+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42263 } else {
42264 need_bounce = true;
42265 }
42266@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42267
42268 fm = vmw_fifo_reserve(dev_priv, bytes);
42269 if (unlikely(fm == NULL)) {
42270- *seqno = atomic_read(&dev_priv->marker_seq);
42271+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42272 ret = -ENOMEM;
42273 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42274 false, 3*HZ);
42275@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42276 }
42277
42278 do {
42279- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42280+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42281 } while (*seqno == 0);
42282
42283 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42284diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42285index 170b61b..fec7348 100644
42286--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42287+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42288@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42289 }
42290
42291 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42292- vmw_gmrid_man_init,
42293- vmw_gmrid_man_takedown,
42294- vmw_gmrid_man_get_node,
42295- vmw_gmrid_man_put_node,
42296- vmw_gmrid_man_debug
42297+ .init = vmw_gmrid_man_init,
42298+ .takedown = vmw_gmrid_man_takedown,
42299+ .get_node = vmw_gmrid_man_get_node,
42300+ .put_node = vmw_gmrid_man_put_node,
42301+ .debug = vmw_gmrid_man_debug
42302 };
42303diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42304index 69c8ce2..cacb0ab 100644
42305--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42306+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42307@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42308 int ret;
42309
42310 num_clips = arg->num_clips;
42311- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42312+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42313
42314 if (unlikely(num_clips == 0))
42315 return 0;
42316@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42317 int ret;
42318
42319 num_clips = arg->num_clips;
42320- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42321+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42322
42323 if (unlikely(num_clips == 0))
42324 return 0;
42325diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42326index 9fe9827..0aa2fc0 100644
42327--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42328+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42329@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42330 * emitted. Then the fence is stale and signaled.
42331 */
42332
42333- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42334+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42335 > VMW_FENCE_WRAP);
42336
42337 return ret;
42338@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42339
42340 if (fifo_idle)
42341 down_read(&fifo_state->rwsem);
42342- signal_seq = atomic_read(&dev_priv->marker_seq);
42343+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42344 ret = 0;
42345
42346 for (;;) {
42347diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42348index efd1ffd..0ae13ca 100644
42349--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42350+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42351@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42352 while (!vmw_lag_lt(queue, us)) {
42353 spin_lock(&queue->lock);
42354 if (list_empty(&queue->head))
42355- seqno = atomic_read(&dev_priv->marker_seq);
42356+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42357 else {
42358 marker = list_first_entry(&queue->head,
42359 struct vmw_marker, head);
42360diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42361index 37ac7b5..d52a5c9 100644
42362--- a/drivers/gpu/vga/vga_switcheroo.c
42363+++ b/drivers/gpu/vga/vga_switcheroo.c
42364@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42365
42366 /* this version is for the case where the power switch is separate
42367 to the device being powered down. */
42368-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42369+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42370 {
42371 /* copy over all the bus versions */
42372 if (dev->bus && dev->bus->pm) {
42373@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42374 return ret;
42375 }
42376
42377-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42378+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42379 {
42380 /* copy over all the bus versions */
42381 if (dev->bus && dev->bus->pm) {
42382diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42383index 56ce8c2..32ce524 100644
42384--- a/drivers/hid/hid-core.c
42385+++ b/drivers/hid/hid-core.c
42386@@ -2531,7 +2531,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42387
42388 int hid_add_device(struct hid_device *hdev)
42389 {
42390- static atomic_t id = ATOMIC_INIT(0);
42391+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42392 int ret;
42393
42394 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42395@@ -2574,7 +2574,7 @@ int hid_add_device(struct hid_device *hdev)
42396 /* XXX hack, any other cleaner solution after the driver core
42397 * is converted to allow more than 20 bytes as the device name? */
42398 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42399- hdev->vendor, hdev->product, atomic_inc_return(&id));
42400+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42401
42402 hid_debug_register(hdev, dev_name(&hdev->dev));
42403 ret = device_add(&hdev->dev);
42404diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42405index c13fb5b..55a3802 100644
42406--- a/drivers/hid/hid-wiimote-debug.c
42407+++ b/drivers/hid/hid-wiimote-debug.c
42408@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42409 else if (size == 0)
42410 return -EIO;
42411
42412- if (copy_to_user(u, buf, size))
42413+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42414 return -EFAULT;
42415
42416 *off += size;
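
The wiimote debugfs fix bounds size by the stack buffer before copy_to_user(): size is derived from the user-controlled file position, and without the check a crafted offset could copy kernel stack memory beyond buf out to userspace. The added guard in isolation, as a compilable sketch:

#include <string.h>

static long bounded_read(char *dst, const char *buf, size_t bufsz, size_t size)
{
	if (size > bufsz)       /* sizeof(buf) in the kernel version */
		return -14;     /* -EFAULT */
	memcpy(dst, buf, size);
	return (long)size;
}
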
42417diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42418index 00bc30e..d8e5097 100644
42419--- a/drivers/hv/channel.c
42420+++ b/drivers/hv/channel.c
42421@@ -370,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42422 int ret = 0;
42423
42424 next_gpadl_handle =
42425- (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42426+ (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42427
42428 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42429 if (ret)
42430diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42431index 50e51a5..b0bfd78 100644
42432--- a/drivers/hv/hv.c
42433+++ b/drivers/hv/hv.c
42434@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42435 u64 output_address = (output) ? virt_to_phys(output) : 0;
42436 u32 output_address_hi = output_address >> 32;
42437 u32 output_address_lo = output_address & 0xFFFFFFFF;
42438- void *hypercall_page = hv_context.hypercall_page;
42439+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42440
42441 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42442 "=a"(hv_status_lo) : "d" (control_hi),
42443@@ -164,7 +164,7 @@ int hv_init(void)
42444 /* See if the hypercall page is already set */
42445 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42446
42447- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42448+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42449
42450 if (!virtaddr)
42451 goto cleanup;
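
Both hv.c hunks above appear to serve PaX KERNEXEC's W^X policy: the Hyper-V hypercall page is allocated PAGE_KERNEL_RX rather than PAGE_KERNEL_EXEC (which is also writable), and the indirect call goes through ktva_ktla(), assumed here to translate between the writable and executable aliases of kernel text. A hypothetical userspace analogy of filling a code page and then sealing it:

#include <string.h>
#include <sys/mman.h>

static void *install_stub(const void *code, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	memcpy(p, code, len);                    /* write while writable */
	mprotect(p, len, PROT_READ | PROT_EXEC); /* then RX, never RWX */
	return p;
}
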
42452diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42453index ff16938..e60879c 100644
42454--- a/drivers/hv/hv_balloon.c
42455+++ b/drivers/hv/hv_balloon.c
42456@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42457
42458 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42459 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42460-static atomic_t trans_id = ATOMIC_INIT(0);
42461+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42462
42463 static int dm_ring_size = (5 * PAGE_SIZE);
42464
42465@@ -947,7 +947,7 @@ static void hot_add_req(struct work_struct *dummy)
42466 pr_info("Memory hot add failed\n");
42467
42468 dm->state = DM_INITIALIZED;
42469- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42470+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42471 vmbus_sendpacket(dm->dev->channel, &resp,
42472 sizeof(struct dm_hot_add_response),
42473 (unsigned long)NULL,
42474@@ -1028,7 +1028,7 @@ static void post_status(struct hv_dynmem_device *dm)
42475 memset(&status, 0, sizeof(struct dm_status));
42476 status.hdr.type = DM_STATUS_REPORT;
42477 status.hdr.size = sizeof(struct dm_status);
42478- status.hdr.trans_id = atomic_inc_return(&trans_id);
42479+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42480
42481 /*
42482 * The host expects the guest to report free memory.
42483@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
42484 * send the status. This can happen if we were interrupted
42485 * after we picked our transaction ID.
42486 */
42487- if (status.hdr.trans_id != atomic_read(&trans_id))
42488+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42489 return;
42490
42491 /*
42492@@ -1188,7 +1188,7 @@ static void balloon_up(struct work_struct *dummy)
42493 */
42494
42495 do {
42496- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42497+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42498 ret = vmbus_sendpacket(dm_device.dev->channel,
42499 bl_resp,
42500 bl_resp->hdr.size,
42501@@ -1234,7 +1234,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42502
42503 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42504 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42505- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42506+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42507 resp.hdr.size = sizeof(struct dm_unballoon_response);
42508
42509 vmbus_sendpacket(dm_device.dev->channel, &resp,
42510@@ -1295,7 +1295,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42511 memset(&version_req, 0, sizeof(struct dm_version_request));
42512 version_req.hdr.type = DM_VERSION_REQUEST;
42513 version_req.hdr.size = sizeof(struct dm_version_request);
42514- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42515+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42516 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42517 version_req.is_last_attempt = 1;
42518
42519@@ -1468,7 +1468,7 @@ static int balloon_probe(struct hv_device *dev,
42520 memset(&version_req, 0, sizeof(struct dm_version_request));
42521 version_req.hdr.type = DM_VERSION_REQUEST;
42522 version_req.hdr.size = sizeof(struct dm_version_request);
42523- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42524+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42525 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42526 version_req.is_last_attempt = 0;
42527
42528@@ -1499,7 +1499,7 @@ static int balloon_probe(struct hv_device *dev,
42529 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42530 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42531 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42532- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42533+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42534
42535 cap_msg.caps.cap_bits.balloon = 1;
42536 cap_msg.caps.cap_bits.hot_add = 1;
42537diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42538index 44b1c94..6dccc2c 100644
42539--- a/drivers/hv/hyperv_vmbus.h
42540+++ b/drivers/hv/hyperv_vmbus.h
42541@@ -632,7 +632,7 @@ enum vmbus_connect_state {
42542 struct vmbus_connection {
42543 enum vmbus_connect_state conn_state;
42544
42545- atomic_t next_gpadl_handle;
42546+ atomic_unchecked_t next_gpadl_handle;
42547
42548 /*
42549 * Represents channel interrupts. Each bit position represents a
42550diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42551index f518b8d7..4bc0b64 100644
42552--- a/drivers/hv/vmbus_drv.c
42553+++ b/drivers/hv/vmbus_drv.c
42554@@ -840,10 +840,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42555 {
42556 int ret = 0;
42557
42558- static atomic_t device_num = ATOMIC_INIT(0);
42559+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42560
42561 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42562- atomic_inc_return(&device_num));
42563+ atomic_inc_return_unchecked(&device_num));
42564
42565 child_device_obj->device.bus = &hv_bus;
42566 child_device_obj->device.parent = &hv_acpi_dev->dev;
42567diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42568index 579bdf9..0dac21d5 100644
42569--- a/drivers/hwmon/acpi_power_meter.c
42570+++ b/drivers/hwmon/acpi_power_meter.c
42571@@ -116,7 +116,7 @@ struct sensor_template {
42572 struct device_attribute *devattr,
42573 const char *buf, size_t count);
42574 int index;
42575-};
42576+} __do_const;
42577
42578 /* Averaging interval */
42579 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42580@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42581 struct sensor_template *attrs)
42582 {
42583 struct device *dev = &resource->acpi_dev->dev;
42584- struct sensor_device_attribute *sensors =
42585+ sensor_device_attribute_no_const *sensors =
42586 &resource->sensors[resource->num_sensors];
42587 int res = 0;
42588
42589@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42590 return 0;
42591 }
42592
42593-static struct dmi_system_id __initdata pm_dmi_table[] = {
42594+static const struct dmi_system_id __initconst pm_dmi_table[] = {
42595 {
42596 enable_cap_knobs, "IBM Active Energy Manager",
42597 {
42598diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42599index 0af63da..05a183a 100644
42600--- a/drivers/hwmon/applesmc.c
42601+++ b/drivers/hwmon/applesmc.c
42602@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42603 {
42604 struct applesmc_node_group *grp;
42605 struct applesmc_dev_attr *node;
42606- struct attribute *attr;
42607+ attribute_no_const *attr;
42608 int ret, i;
42609
42610 for (grp = groups; grp->format; grp++) {
42611diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42612index cccef87..06ce8ec 100644
42613--- a/drivers/hwmon/asus_atk0110.c
42614+++ b/drivers/hwmon/asus_atk0110.c
42615@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42616 struct atk_sensor_data {
42617 struct list_head list;
42618 struct atk_data *data;
42619- struct device_attribute label_attr;
42620- struct device_attribute input_attr;
42621- struct device_attribute limit1_attr;
42622- struct device_attribute limit2_attr;
42623+ device_attribute_no_const label_attr;
42624+ device_attribute_no_const input_attr;
42625+ device_attribute_no_const limit1_attr;
42626+ device_attribute_no_const limit2_attr;
42627 char label_attr_name[ATTR_NAME_SIZE];
42628 char input_attr_name[ATTR_NAME_SIZE];
42629 char limit1_attr_name[ATTR_NAME_SIZE];
42630@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42631 static struct device_attribute atk_name_attr =
42632 __ATTR(name, 0444, atk_name_show, NULL);
42633
42634-static void atk_init_attribute(struct device_attribute *attr, char *name,
42635+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42636 sysfs_show_func show)
42637 {
42638 sysfs_attr_init(&attr->attr);
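
The long run of hwmon hunks around here is one pattern repeated: the constify plugin turns struct device_attribute, sensor_device_attribute and attribute_group into const types, so drivers that populate attributes at runtime (applesmc, atk0110, ibmaem, iio_hwmon, nct6683/nct6775, pmbus) are switched to layout-identical *_no_const typedefs. A sketch of the convention, every name hypothetical:

struct device_attribute_sketch {
	const char *name;
	unsigned short mode;
};

/* same layout, hypothetically exempt from plugin constification */
typedef struct device_attribute_sketch device_attribute_no_const_sketch;

static void init_attr(device_attribute_no_const_sketch *attr,
		      const char *name, unsigned short mode)
{
	attr->name = name;      /* runtime writes stay legal on this typedef */
	attr->mode = mode;
}
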
42639diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42640index 5b7fec8..05c957a 100644
42641--- a/drivers/hwmon/coretemp.c
42642+++ b/drivers/hwmon/coretemp.c
42643@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42644 return NOTIFY_OK;
42645 }
42646
42647-static struct notifier_block coretemp_cpu_notifier __refdata = {
42648+static struct notifier_block coretemp_cpu_notifier = {
42649 .notifier_call = coretemp_cpu_callback,
42650 };
42651
42652diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42653index 7a8a6fb..015c1fd 100644
42654--- a/drivers/hwmon/ibmaem.c
42655+++ b/drivers/hwmon/ibmaem.c
42656@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42657 struct aem_rw_sensor_template *rw)
42658 {
42659 struct device *dev = &data->pdev->dev;
42660- struct sensor_device_attribute *sensors = data->sensors;
42661+ sensor_device_attribute_no_const *sensors = data->sensors;
42662 int err;
42663
42664 /* Set up read-only sensors */
42665diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42666index 17ae2eb..21b71dd 100644
42667--- a/drivers/hwmon/iio_hwmon.c
42668+++ b/drivers/hwmon/iio_hwmon.c
42669@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42670 {
42671 struct device *dev = &pdev->dev;
42672 struct iio_hwmon_state *st;
42673- struct sensor_device_attribute *a;
42674+ sensor_device_attribute_no_const *a;
42675 int ret, i;
42676 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42677 enum iio_chan_type type;
42678diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42679index f3830db..9f4d6d5 100644
42680--- a/drivers/hwmon/nct6683.c
42681+++ b/drivers/hwmon/nct6683.c
42682@@ -397,11 +397,11 @@ static struct attribute_group *
42683 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42684 int repeat)
42685 {
42686- struct sensor_device_attribute_2 *a2;
42687- struct sensor_device_attribute *a;
42688+ sensor_device_attribute_2_no_const *a2;
42689+ sensor_device_attribute_no_const *a;
42690 struct sensor_device_template **t;
42691 struct sensor_device_attr_u *su;
42692- struct attribute_group *group;
42693+ attribute_group_no_const *group;
42694 struct attribute **attrs;
42695 int i, j, count;
42696
42697diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42698index 1be4117..88ae1e1 100644
42699--- a/drivers/hwmon/nct6775.c
42700+++ b/drivers/hwmon/nct6775.c
42701@@ -952,10 +952,10 @@ static struct attribute_group *
42702 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42703 int repeat)
42704 {
42705- struct attribute_group *group;
42706+ attribute_group_no_const *group;
42707 struct sensor_device_attr_u *su;
42708- struct sensor_device_attribute *a;
42709- struct sensor_device_attribute_2 *a2;
42710+ sensor_device_attribute_no_const *a;
42711+ sensor_device_attribute_2_no_const *a2;
42712 struct attribute **attrs;
42713 struct sensor_device_template **t;
42714 int i, count;
42715diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42716index f2e47c7..45d7941 100644
42717--- a/drivers/hwmon/pmbus/pmbus_core.c
42718+++ b/drivers/hwmon/pmbus/pmbus_core.c
42719@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42720 return 0;
42721 }
42722
42723-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42724+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42725 const char *name,
42726 umode_t mode,
42727 ssize_t (*show)(struct device *dev,
42728@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42729 dev_attr->store = store;
42730 }
42731
42732-static void pmbus_attr_init(struct sensor_device_attribute *a,
42733+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42734 const char *name,
42735 umode_t mode,
42736 ssize_t (*show)(struct device *dev,
42737@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42738 u16 reg, u8 mask)
42739 {
42740 struct pmbus_boolean *boolean;
42741- struct sensor_device_attribute *a;
42742+ sensor_device_attribute_no_const *a;
42743
42744 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42745 if (!boolean)
42746@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42747 bool update, bool readonly)
42748 {
42749 struct pmbus_sensor *sensor;
42750- struct device_attribute *a;
42751+ device_attribute_no_const *a;
42752
42753 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42754 if (!sensor)
42755@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42756 const char *lstring, int index)
42757 {
42758 struct pmbus_label *label;
42759- struct device_attribute *a;
42760+ device_attribute_no_const *a;
42761
42762 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42763 if (!label)
42764diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42765index d4f0935..7420593 100644
42766--- a/drivers/hwmon/sht15.c
42767+++ b/drivers/hwmon/sht15.c
42768@@ -169,7 +169,7 @@ struct sht15_data {
42769 int supply_uv;
42770 bool supply_uv_valid;
42771 struct work_struct update_supply_work;
42772- atomic_t interrupt_handled;
42773+ atomic_unchecked_t interrupt_handled;
42774 };
42775
42776 /**
42777@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42778 ret = gpio_direction_input(data->pdata->gpio_data);
42779 if (ret)
42780 return ret;
42781- atomic_set(&data->interrupt_handled, 0);
42782+ atomic_set_unchecked(&data->interrupt_handled, 0);
42783
42784 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42785 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42786 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42787 /* Only relevant if the interrupt hasn't occurred. */
42788- if (!atomic_read(&data->interrupt_handled))
42789+ if (!atomic_read_unchecked(&data->interrupt_handled))
42790 schedule_work(&data->read_work);
42791 }
42792 ret = wait_event_timeout(data->wait_queue,
42793@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42794
42795 /* First disable the interrupt */
42796 disable_irq_nosync(irq);
42797- atomic_inc(&data->interrupt_handled);
42798+ atomic_inc_unchecked(&data->interrupt_handled);
42799 /* Then schedule a reading work struct */
42800 if (data->state != SHT15_READING_NOTHING)
42801 schedule_work(&data->read_work);
42802@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42803 * If not, then start the interrupt again - care here as could
42804 * have gone low in meantime so verify it hasn't!
42805 */
42806- atomic_set(&data->interrupt_handled, 0);
42807+ atomic_set_unchecked(&data->interrupt_handled, 0);
42808 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42809 /* If still not occurred or another handler was scheduled */
42810 if (gpio_get_value(data->pdata->gpio_data)
42811- || atomic_read(&data->interrupt_handled))
42812+ || atomic_read_unchecked(&data->interrupt_handled))
42813 return;
42814 }
42815
42816diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42817index ac91c07..8e69663 100644
42818--- a/drivers/hwmon/via-cputemp.c
42819+++ b/drivers/hwmon/via-cputemp.c
42820@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42821 return NOTIFY_OK;
42822 }
42823
42824-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42825+static struct notifier_block via_cputemp_cpu_notifier = {
42826 .notifier_call = via_cputemp_cpu_callback,
42827 };
42828
42829diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42830index 65e3240..e6c511d 100644
42831--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42832+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42833@@ -39,7 +39,7 @@
42834 extern struct i2c_adapter amd756_smbus;
42835
42836 static struct i2c_adapter *s4882_adapter;
42837-static struct i2c_algorithm *s4882_algo;
42838+static i2c_algorithm_no_const *s4882_algo;
42839
42840 /* Wrapper access functions for multiplexed SMBus */
42841 static DEFINE_MUTEX(amd756_lock);
42842diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42843index b19a310..d6eece0 100644
42844--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42845+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42846@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42847 /* usb layer */
42848
42849 /* Send command to device, and get response. */
42850-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42851+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42852 {
42853 int ret = 0;
42854 int actual;
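
__intentional_overflow(-1) feeds grsecurity's size_overflow gcc plugin: it exempts functions whose arithmetic may legitimately wrap, or whose results are range-checked by callers, from the plugin's overflow instrumentation; -1 covers the return value and every argument. The annotation is presumably defined along these lines in the patched include/linux/compiler.h (treat the spelling as an assumption):

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)
	#endif

The same marker recurs on the mthca command helpers further down.
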
42855diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42856index 88eda09..cf40434 100644
42857--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42858+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42859@@ -37,7 +37,7 @@
42860 extern struct i2c_adapter *nforce2_smbus;
42861
42862 static struct i2c_adapter *s4985_adapter;
42863-static struct i2c_algorithm *s4985_algo;
42864+static i2c_algorithm_no_const *s4985_algo;
42865
42866 /* Wrapper access functions for multiplexed SMBus */
42867 static DEFINE_MUTEX(nforce2_lock);
42868diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42869index 71c7a39..71dd3e0 100644
42870--- a/drivers/i2c/i2c-dev.c
42871+++ b/drivers/i2c/i2c-dev.c
42872@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42873 break;
42874 }
42875
42876- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42877+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42878 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42879 if (IS_ERR(rdwr_pa[i].buf)) {
42880 res = PTR_ERR(rdwr_pa[i].buf);
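
The __force_user cast in the i2c-dev hunk is the address-space-aware counterpart of sparse's bare __force: with grsecurity's checker plugin enabled, the cast moves the pointer into the user address space explicitly instead of discarding the annotation. Assumed definitions:

	/* assumed shape; falls back to plain __force casts without the plugin */
	#ifdef CHECKER_PLUGIN
	#define __force_user   __force __user
	#define __force_kernel __force __kernel
	#else
	#define __force_user
	#define __force_kernel
	#endif
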
42881diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42882index 0b510ba..4fbb5085 100644
42883--- a/drivers/ide/ide-cd.c
42884+++ b/drivers/ide/ide-cd.c
42885@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42886 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42887 if ((unsigned long)buf & alignment
42888 || blk_rq_bytes(rq) & q->dma_pad_mask
42889- || object_is_on_stack(buf))
42890+ || object_starts_on_stack(buf))
42891 drive->dma = 0;
42892 }
42893 }
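
object_starts_on_stack() is grsecurity's rename of the stock object_is_on_stack(), making explicit that only the object's first byte is tested against the current task's stack; ide-cd uses it here to refuse DMA on stack-resident buffers. A sketch of the helper, mirroring the stock implementation:

	static inline int object_starts_on_stack(const void *obj)
	{
		const void *stack = task_stack_page(current);

		return (obj >= stack) && (obj < (stack + THREAD_SIZE));
	}
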
42894diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42895index 4df97f6..c751151 100644
42896--- a/drivers/iio/industrialio-core.c
42897+++ b/drivers/iio/industrialio-core.c
42898@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42899 }
42900
42901 static
42902-int __iio_device_attr_init(struct device_attribute *dev_attr,
42903+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42904 const char *postfix,
42905 struct iio_chan_spec const *chan,
42906 ssize_t (*readfunc)(struct device *dev,
42907diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42908index e28a494..f7c2671 100644
42909--- a/drivers/infiniband/core/cm.c
42910+++ b/drivers/infiniband/core/cm.c
42911@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42912
42913 struct cm_counter_group {
42914 struct kobject obj;
42915- atomic_long_t counter[CM_ATTR_COUNT];
42916+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42917 };
42918
42919 struct cm_counter_attribute {
42920@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42921 struct ib_mad_send_buf *msg = NULL;
42922 int ret;
42923
42924- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42925+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42926 counter[CM_REQ_COUNTER]);
42927
42928 /* Quick state check to discard duplicate REQs. */
42929@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42930 if (!cm_id_priv)
42931 return;
42932
42933- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42934+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42935 counter[CM_REP_COUNTER]);
42936 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42937 if (ret)
42938@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42939 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42940 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42941 spin_unlock_irq(&cm_id_priv->lock);
42942- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42943+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42944 counter[CM_RTU_COUNTER]);
42945 goto out;
42946 }
42947@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42948 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42949 dreq_msg->local_comm_id);
42950 if (!cm_id_priv) {
42951- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42952+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42953 counter[CM_DREQ_COUNTER]);
42954 cm_issue_drep(work->port, work->mad_recv_wc);
42955 return -EINVAL;
42956@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42957 case IB_CM_MRA_REP_RCVD:
42958 break;
42959 case IB_CM_TIMEWAIT:
42960- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42961+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42962 counter[CM_DREQ_COUNTER]);
42963 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42964 goto unlock;
42965@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42966 cm_free_msg(msg);
42967 goto deref;
42968 case IB_CM_DREQ_RCVD:
42969- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42970+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42971 counter[CM_DREQ_COUNTER]);
42972 goto unlock;
42973 default:
42974@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42975 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42976 cm_id_priv->msg, timeout)) {
42977 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42978- atomic_long_inc(&work->port->
42979+ atomic_long_inc_unchecked(&work->port->
42980 counter_group[CM_RECV_DUPLICATES].
42981 counter[CM_MRA_COUNTER]);
42982 goto out;
42983@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42984 break;
42985 case IB_CM_MRA_REQ_RCVD:
42986 case IB_CM_MRA_REP_RCVD:
42987- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42988+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42989 counter[CM_MRA_COUNTER]);
42990 /* fall through */
42991 default:
42992@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42993 case IB_CM_LAP_IDLE:
42994 break;
42995 case IB_CM_MRA_LAP_SENT:
42996- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42997+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42998 counter[CM_LAP_COUNTER]);
42999 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43000 goto unlock;
43001@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43002 cm_free_msg(msg);
43003 goto deref;
43004 case IB_CM_LAP_RCVD:
43005- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43006+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43007 counter[CM_LAP_COUNTER]);
43008 goto unlock;
43009 default:
43010@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43011 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43012 if (cur_cm_id_priv) {
43013 spin_unlock_irq(&cm.lock);
43014- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43015+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43016 counter[CM_SIDR_REQ_COUNTER]);
43017 goto out; /* Duplicate message. */
43018 }
43019@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43020 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43021 msg->retries = 1;
43022
43023- atomic_long_add(1 + msg->retries,
43024+ atomic_long_add_unchecked(1 + msg->retries,
43025 &port->counter_group[CM_XMIT].counter[attr_index]);
43026 if (msg->retries)
43027- atomic_long_add(msg->retries,
43028+ atomic_long_add_unchecked(msg->retries,
43029 &port->counter_group[CM_XMIT_RETRIES].
43030 counter[attr_index]);
43031
43032@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43033 }
43034
43035 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43036- atomic_long_inc(&port->counter_group[CM_RECV].
43037+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43038 counter[attr_id - CM_ATTR_ID_OFFSET]);
43039
43040 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43041@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43042 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43043
43044 return sprintf(buf, "%ld\n",
43045- atomic_long_read(&group->counter[cm_attr->index]));
43046+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43047 }
43048
43049 static const struct sysfs_ops cm_counter_ops = {
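
Under PaX REFCOUNT, plain atomic_t operations trap on overflow to defeat reference-count exploits. The CM duplicate/transmit statistics above are pure counters that may wrap harmlessly, so they move to the *_unchecked types, which keep the historical non-trapping semantics. Schematically, for x86-64 (a sketch; the real definitions are per-architecture):

	typedef struct {
		long counter;
	} atomic_long_unchecked_t;

	static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
	{
		/* plain locked increment, without the REFCOUNT overflow trap */
		asm volatile("lock; incq %0" : "+m" (v->counter));
	}

The same reasoning covers the fmr_pool serial numbers, the nes counters, and the input-layer device numbering later in this patch.
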
43050diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43051index 9f5ad7c..588cd84 100644
43052--- a/drivers/infiniband/core/fmr_pool.c
43053+++ b/drivers/infiniband/core/fmr_pool.c
43054@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43055
43056 struct task_struct *thread;
43057
43058- atomic_t req_ser;
43059- atomic_t flush_ser;
43060+ atomic_unchecked_t req_ser;
43061+ atomic_unchecked_t flush_ser;
43062
43063 wait_queue_head_t force_wait;
43064 };
43065@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43066 struct ib_fmr_pool *pool = pool_ptr;
43067
43068 do {
43069- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43070+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43071 ib_fmr_batch_release(pool);
43072
43073- atomic_inc(&pool->flush_ser);
43074+ atomic_inc_unchecked(&pool->flush_ser);
43075 wake_up_interruptible(&pool->force_wait);
43076
43077 if (pool->flush_function)
43078@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43079 }
43080
43081 set_current_state(TASK_INTERRUPTIBLE);
43082- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43083+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43084 !kthread_should_stop())
43085 schedule();
43086 __set_current_state(TASK_RUNNING);
43087@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43088 pool->dirty_watermark = params->dirty_watermark;
43089 pool->dirty_len = 0;
43090 spin_lock_init(&pool->pool_lock);
43091- atomic_set(&pool->req_ser, 0);
43092- atomic_set(&pool->flush_ser, 0);
43093+ atomic_set_unchecked(&pool->req_ser, 0);
43094+ atomic_set_unchecked(&pool->flush_ser, 0);
43095 init_waitqueue_head(&pool->force_wait);
43096
43097 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43098@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43099 }
43100 spin_unlock_irq(&pool->pool_lock);
43101
43102- serial = atomic_inc_return(&pool->req_ser);
43103+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43104 wake_up_process(pool->thread);
43105
43106 if (wait_event_interruptible(pool->force_wait,
43107- atomic_read(&pool->flush_ser) - serial >= 0))
43108+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43109 return -EINTR;
43110
43111 return 0;
43112@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43113 } else {
43114 list_add_tail(&fmr->list, &pool->dirty_list);
43115 if (++pool->dirty_len >= pool->dirty_watermark) {
43116- atomic_inc(&pool->req_ser);
43117+ atomic_inc_unchecked(&pool->req_ser);
43118 wake_up_process(pool->thread);
43119 }
43120 }
43121diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
43122index a9f0489..27a161b 100644
43123--- a/drivers/infiniband/core/uverbs_cmd.c
43124+++ b/drivers/infiniband/core/uverbs_cmd.c
43125@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
43126 if (copy_from_user(&cmd, buf, sizeof cmd))
43127 return -EFAULT;
43128
43129+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
43130+ return -EFAULT;
43131+
43132 INIT_UDATA(&udata, buf + sizeof cmd,
43133 (unsigned long) cmd.response + sizeof resp,
43134 in_len - sizeof cmd, out_len - sizeof resp);
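
The added check in ib_uverbs_reg_mr() rejects user-supplied (start, length) windows that fall outside the user address space before any pinning work begins. access_ok_noprefault() appears to be grsecurity's variant of access_ok() that performs only the arithmetic range check, without faulting pages in — treat that reading as an assumption:

	/* fail fast on bogus ranges instead of attempting to pin kernel memory;
	 * the _noprefault semantics described above are assumed, not verified */
	if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
		return -EFAULT;
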
43135diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43136index 6791fd1..78bdcdf 100644
43137--- a/drivers/infiniband/hw/cxgb4/mem.c
43138+++ b/drivers/infiniband/hw/cxgb4/mem.c
43139@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43140 int err;
43141 struct fw_ri_tpte tpt;
43142 u32 stag_idx;
43143- static atomic_t key;
43144+ static atomic_unchecked_t key;
43145
43146 if (c4iw_fatal_error(rdev))
43147 return -EIO;
43148@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43149 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43150 rdev->stats.stag.max = rdev->stats.stag.cur;
43151 mutex_unlock(&rdev->stats.lock);
43152- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43153+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43154 }
43155 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43156 __func__, stag_state, type, pdid, stag_idx);
43157diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43158index 79b3dbc..96e5fcc 100644
43159--- a/drivers/infiniband/hw/ipath/ipath_rc.c
43160+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43161@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43162 struct ib_atomic_eth *ateth;
43163 struct ipath_ack_entry *e;
43164 u64 vaddr;
43165- atomic64_t *maddr;
43166+ atomic64_unchecked_t *maddr;
43167 u64 sdata;
43168 u32 rkey;
43169 u8 next;
43170@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43171 IB_ACCESS_REMOTE_ATOMIC)))
43172 goto nack_acc_unlck;
43173 /* Perform atomic OP and save result. */
43174- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43175+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43176 sdata = be64_to_cpu(ateth->swap_data);
43177 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43178 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43179- (u64) atomic64_add_return(sdata, maddr) - sdata :
43180+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43181 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43182 be64_to_cpu(ateth->compare_data),
43183 sdata);
43184diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43185index 1f95bba..9530f87 100644
43186--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43187+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43188@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43189 unsigned long flags;
43190 struct ib_wc wc;
43191 u64 sdata;
43192- atomic64_t *maddr;
43193+ atomic64_unchecked_t *maddr;
43194 enum ib_wc_status send_status;
43195
43196 /*
43197@@ -382,11 +382,11 @@ again:
43198 IB_ACCESS_REMOTE_ATOMIC)))
43199 goto acc_err;
43200 /* Perform atomic OP and save result. */
43201- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43202+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43203 sdata = wqe->wr.wr.atomic.compare_add;
43204 *(u64 *) sqp->s_sge.sge.vaddr =
43205 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43206- (u64) atomic64_add_return(sdata, maddr) - sdata :
43207+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43208 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43209 sdata, wqe->wr.wr.atomic.swap);
43210 goto send_comp;
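
In the ipath hunks the atomic fetch-add target is peer-visible QP memory, not a kernel reference count, so the cast and the add go through the atomic64_*_unchecked API. The value returned to the requester is unchanged:

	/* before and after both hand the pre-add contents back to the peer */
	old = (u64) atomic64_add_return(sdata, maddr) - sdata;
	old = (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata;
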
43211diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43212index 5904026..f1c30e5 100644
43213--- a/drivers/infiniband/hw/mlx4/mad.c
43214+++ b/drivers/infiniband/hw/mlx4/mad.c
43215@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43216
43217 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43218 {
43219- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43220+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43221 cpu_to_be64(0xff00000000000000LL);
43222 }
43223
43224diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43225index ed327e6..ca1739e0 100644
43226--- a/drivers/infiniband/hw/mlx4/mcg.c
43227+++ b/drivers/infiniband/hw/mlx4/mcg.c
43228@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43229 {
43230 char name[20];
43231
43232- atomic_set(&ctx->tid, 0);
43233+ atomic_set_unchecked(&ctx->tid, 0);
43234 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43235 ctx->mcg_wq = create_singlethread_workqueue(name);
43236 if (!ctx->mcg_wq)
43237diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43238index f829fd9..1a8d436 100644
43239--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43240+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43241@@ -439,7 +439,7 @@ struct mlx4_ib_demux_ctx {
43242 struct list_head mcg_mgid0_list;
43243 struct workqueue_struct *mcg_wq;
43244 struct mlx4_ib_demux_pv_ctx **tun;
43245- atomic_t tid;
43246+ atomic_unchecked_t tid;
43247 int flushing; /* flushing the work queue */
43248 };
43249
43250diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43251index 9d3e5c1..6f166df 100644
43252--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43253+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43254@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43255 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43256 }
43257
43258-int mthca_QUERY_FW(struct mthca_dev *dev)
43259+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43260 {
43261 struct mthca_mailbox *mailbox;
43262 u32 *outbox;
43263@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43264 CMD_TIME_CLASS_B);
43265 }
43266
43267-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43268+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43269 int num_mtt)
43270 {
43271 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43272@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43273 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43274 }
43275
43276-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43277+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43278 int eq_num)
43279 {
43280 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43281@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43282 CMD_TIME_CLASS_B);
43283 }
43284
43285-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43286+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43287 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43288 void *in_mad, void *response_mad)
43289 {
43290diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43291index ded76c1..0cf0a08 100644
43292--- a/drivers/infiniband/hw/mthca/mthca_main.c
43293+++ b/drivers/infiniband/hw/mthca/mthca_main.c
43294@@ -692,7 +692,7 @@ err_close:
43295 return err;
43296 }
43297
43298-static int mthca_setup_hca(struct mthca_dev *dev)
43299+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43300 {
43301 int err;
43302
43303diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43304index ed9a989..6aa5dc2 100644
43305--- a/drivers/infiniband/hw/mthca/mthca_mr.c
43306+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43307@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43308 * through the bitmaps)
43309 */
43310
43311-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43312+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43313 {
43314 int o;
43315 int m;
43316@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43317 return key;
43318 }
43319
43320-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43321+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43322 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43323 {
43324 struct mthca_mailbox *mailbox;
43325@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43326 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43327 }
43328
43329-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43330+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43331 u64 *buffer_list, int buffer_size_shift,
43332 int list_len, u64 iova, u64 total_size,
43333 u32 access, struct mthca_mr *mr)
43334diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43335index 415f8e1..e34214e 100644
43336--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43337+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43338@@ -764,7 +764,7 @@ unlock:
43339 return 0;
43340 }
43341
43342-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43343+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43344 {
43345 struct mthca_dev *dev = to_mdev(ibcq->device);
43346 struct mthca_cq *cq = to_mcq(ibcq);
43347diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43348index 3b2a6dc..bce26ff 100644
43349--- a/drivers/infiniband/hw/nes/nes.c
43350+++ b/drivers/infiniband/hw/nes/nes.c
43351@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43352 LIST_HEAD(nes_adapter_list);
43353 static LIST_HEAD(nes_dev_list);
43354
43355-atomic_t qps_destroyed;
43356+atomic_unchecked_t qps_destroyed;
43357
43358 static unsigned int ee_flsh_adapter;
43359 static unsigned int sysfs_nonidx_addr;
43360@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43361 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43362 struct nes_adapter *nesadapter = nesdev->nesadapter;
43363
43364- atomic_inc(&qps_destroyed);
43365+ atomic_inc_unchecked(&qps_destroyed);
43366
43367 /* Free the control structures */
43368
43369diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43370index bd9d132..70d84f4 100644
43371--- a/drivers/infiniband/hw/nes/nes.h
43372+++ b/drivers/infiniband/hw/nes/nes.h
43373@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43374 extern unsigned int wqm_quanta;
43375 extern struct list_head nes_adapter_list;
43376
43377-extern atomic_t cm_connects;
43378-extern atomic_t cm_accepts;
43379-extern atomic_t cm_disconnects;
43380-extern atomic_t cm_closes;
43381-extern atomic_t cm_connecteds;
43382-extern atomic_t cm_connect_reqs;
43383-extern atomic_t cm_rejects;
43384-extern atomic_t mod_qp_timouts;
43385-extern atomic_t qps_created;
43386-extern atomic_t qps_destroyed;
43387-extern atomic_t sw_qps_destroyed;
43388+extern atomic_unchecked_t cm_connects;
43389+extern atomic_unchecked_t cm_accepts;
43390+extern atomic_unchecked_t cm_disconnects;
43391+extern atomic_unchecked_t cm_closes;
43392+extern atomic_unchecked_t cm_connecteds;
43393+extern atomic_unchecked_t cm_connect_reqs;
43394+extern atomic_unchecked_t cm_rejects;
43395+extern atomic_unchecked_t mod_qp_timouts;
43396+extern atomic_unchecked_t qps_created;
43397+extern atomic_unchecked_t qps_destroyed;
43398+extern atomic_unchecked_t sw_qps_destroyed;
43399 extern u32 mh_detected;
43400 extern u32 mh_pauses_sent;
43401 extern u32 cm_packets_sent;
43402@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43403 extern u32 cm_packets_received;
43404 extern u32 cm_packets_dropped;
43405 extern u32 cm_packets_retrans;
43406-extern atomic_t cm_listens_created;
43407-extern atomic_t cm_listens_destroyed;
43408+extern atomic_unchecked_t cm_listens_created;
43409+extern atomic_unchecked_t cm_listens_destroyed;
43410 extern u32 cm_backlog_drops;
43411-extern atomic_t cm_loopbacks;
43412-extern atomic_t cm_nodes_created;
43413-extern atomic_t cm_nodes_destroyed;
43414-extern atomic_t cm_accel_dropped_pkts;
43415-extern atomic_t cm_resets_recvd;
43416-extern atomic_t pau_qps_created;
43417-extern atomic_t pau_qps_destroyed;
43418+extern atomic_unchecked_t cm_loopbacks;
43419+extern atomic_unchecked_t cm_nodes_created;
43420+extern atomic_unchecked_t cm_nodes_destroyed;
43421+extern atomic_unchecked_t cm_accel_dropped_pkts;
43422+extern atomic_unchecked_t cm_resets_recvd;
43423+extern atomic_unchecked_t pau_qps_created;
43424+extern atomic_unchecked_t pau_qps_destroyed;
43425
43426 extern u32 int_mod_timer_init;
43427 extern u32 int_mod_cq_depth_256;
43428diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43429index 6f09a72..cf4399d 100644
43430--- a/drivers/infiniband/hw/nes/nes_cm.c
43431+++ b/drivers/infiniband/hw/nes/nes_cm.c
43432@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43433 u32 cm_packets_retrans;
43434 u32 cm_packets_created;
43435 u32 cm_packets_received;
43436-atomic_t cm_listens_created;
43437-atomic_t cm_listens_destroyed;
43438+atomic_unchecked_t cm_listens_created;
43439+atomic_unchecked_t cm_listens_destroyed;
43440 u32 cm_backlog_drops;
43441-atomic_t cm_loopbacks;
43442-atomic_t cm_nodes_created;
43443-atomic_t cm_nodes_destroyed;
43444-atomic_t cm_accel_dropped_pkts;
43445-atomic_t cm_resets_recvd;
43446+atomic_unchecked_t cm_loopbacks;
43447+atomic_unchecked_t cm_nodes_created;
43448+atomic_unchecked_t cm_nodes_destroyed;
43449+atomic_unchecked_t cm_accel_dropped_pkts;
43450+atomic_unchecked_t cm_resets_recvd;
43451
43452 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43453 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43454@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43455 /* instance of function pointers for client API */
43456 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43457 static struct nes_cm_ops nes_cm_api = {
43458- mini_cm_accelerated,
43459- mini_cm_listen,
43460- mini_cm_del_listen,
43461- mini_cm_connect,
43462- mini_cm_close,
43463- mini_cm_accept,
43464- mini_cm_reject,
43465- mini_cm_recv_pkt,
43466- mini_cm_dealloc_core,
43467- mini_cm_get,
43468- mini_cm_set
43469+ .accelerated = mini_cm_accelerated,
43470+ .listen = mini_cm_listen,
43471+ .stop_listener = mini_cm_del_listen,
43472+ .connect = mini_cm_connect,
43473+ .close = mini_cm_close,
43474+ .accept = mini_cm_accept,
43475+ .reject = mini_cm_reject,
43476+ .recv_pkt = mini_cm_recv_pkt,
43477+ .destroy_cm_core = mini_cm_dealloc_core,
43478+ .get = mini_cm_get,
43479+ .set = mini_cm_set
43480 };
43481
43482 static struct nes_cm_core *g_cm_core;
43483
43484-atomic_t cm_connects;
43485-atomic_t cm_accepts;
43486-atomic_t cm_disconnects;
43487-atomic_t cm_closes;
43488-atomic_t cm_connecteds;
43489-atomic_t cm_connect_reqs;
43490-atomic_t cm_rejects;
43491+atomic_unchecked_t cm_connects;
43492+atomic_unchecked_t cm_accepts;
43493+atomic_unchecked_t cm_disconnects;
43494+atomic_unchecked_t cm_closes;
43495+atomic_unchecked_t cm_connecteds;
43496+atomic_unchecked_t cm_connect_reqs;
43497+atomic_unchecked_t cm_rejects;
43498
43499 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43500 {
43501@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43502 kfree(listener);
43503 listener = NULL;
43504 ret = 0;
43505- atomic_inc(&cm_listens_destroyed);
43506+ atomic_inc_unchecked(&cm_listens_destroyed);
43507 } else {
43508 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43509 }
43510@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43511 cm_node->rem_mac);
43512
43513 add_hte_node(cm_core, cm_node);
43514- atomic_inc(&cm_nodes_created);
43515+ atomic_inc_unchecked(&cm_nodes_created);
43516
43517 return cm_node;
43518 }
43519@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43520 }
43521
43522 atomic_dec(&cm_core->node_cnt);
43523- atomic_inc(&cm_nodes_destroyed);
43524+ atomic_inc_unchecked(&cm_nodes_destroyed);
43525 nesqp = cm_node->nesqp;
43526 if (nesqp) {
43527 nesqp->cm_node = NULL;
43528@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43529
43530 static void drop_packet(struct sk_buff *skb)
43531 {
43532- atomic_inc(&cm_accel_dropped_pkts);
43533+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43534 dev_kfree_skb_any(skb);
43535 }
43536
43537@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43538 {
43539
43540 int reset = 0; /* whether to send reset in case of err.. */
43541- atomic_inc(&cm_resets_recvd);
43542+ atomic_inc_unchecked(&cm_resets_recvd);
43543 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43544 " refcnt=%d\n", cm_node, cm_node->state,
43545 atomic_read(&cm_node->ref_count));
43546@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43547 rem_ref_cm_node(cm_node->cm_core, cm_node);
43548 return NULL;
43549 }
43550- atomic_inc(&cm_loopbacks);
43551+ atomic_inc_unchecked(&cm_loopbacks);
43552 loopbackremotenode->loopbackpartner = cm_node;
43553 loopbackremotenode->tcp_cntxt.rcv_wscale =
43554 NES_CM_DEFAULT_RCV_WND_SCALE;
43555@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43556 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43557 else {
43558 rem_ref_cm_node(cm_core, cm_node);
43559- atomic_inc(&cm_accel_dropped_pkts);
43560+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43561 dev_kfree_skb_any(skb);
43562 }
43563 break;
43564@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43565
43566 if ((cm_id) && (cm_id->event_handler)) {
43567 if (issue_disconn) {
43568- atomic_inc(&cm_disconnects);
43569+ atomic_inc_unchecked(&cm_disconnects);
43570 cm_event.event = IW_CM_EVENT_DISCONNECT;
43571 cm_event.status = disconn_status;
43572 cm_event.local_addr = cm_id->local_addr;
43573@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43574 }
43575
43576 if (issue_close) {
43577- atomic_inc(&cm_closes);
43578+ atomic_inc_unchecked(&cm_closes);
43579 nes_disconnect(nesqp, 1);
43580
43581 cm_id->provider_data = nesqp;
43582@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43583
43584 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43585 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43586- atomic_inc(&cm_accepts);
43587+ atomic_inc_unchecked(&cm_accepts);
43588
43589 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43590 netdev_refcnt_read(nesvnic->netdev));
43591@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43592 struct nes_cm_core *cm_core;
43593 u8 *start_buff;
43594
43595- atomic_inc(&cm_rejects);
43596+ atomic_inc_unchecked(&cm_rejects);
43597 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43598 loopback = cm_node->loopbackpartner;
43599 cm_core = cm_node->cm_core;
43600@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43601 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43602 ntohs(laddr->sin_port));
43603
43604- atomic_inc(&cm_connects);
43605+ atomic_inc_unchecked(&cm_connects);
43606 nesqp->active_conn = 1;
43607
43608 /* cache the cm_id in the qp */
43609@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43610 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43611 return err;
43612 }
43613- atomic_inc(&cm_listens_created);
43614+ atomic_inc_unchecked(&cm_listens_created);
43615 }
43616
43617 cm_id->add_ref(cm_id);
43618@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43619
43620 if (nesqp->destroyed)
43621 return;
43622- atomic_inc(&cm_connecteds);
43623+ atomic_inc_unchecked(&cm_connecteds);
43624 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43625 " local port 0x%04X. jiffies = %lu.\n",
43626 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43627@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43628
43629 cm_id->add_ref(cm_id);
43630 ret = cm_id->event_handler(cm_id, &cm_event);
43631- atomic_inc(&cm_closes);
43632+ atomic_inc_unchecked(&cm_closes);
43633 cm_event.event = IW_CM_EVENT_CLOSE;
43634 cm_event.status = 0;
43635 cm_event.provider_data = cm_id->provider_data;
43636@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43637 return;
43638 cm_id = cm_node->cm_id;
43639
43640- atomic_inc(&cm_connect_reqs);
43641+ atomic_inc_unchecked(&cm_connect_reqs);
43642 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43643 cm_node, cm_id, jiffies);
43644
43645@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43646 return;
43647 cm_id = cm_node->cm_id;
43648
43649- atomic_inc(&cm_connect_reqs);
43650+ atomic_inc_unchecked(&cm_connect_reqs);
43651 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43652 cm_node, cm_id, jiffies);
43653
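
The nes_cm_api rewrite from positional to designated (C99) initializers is a pattern this patch applies to most ops tables: named fields keep the table correct regardless of member order, and unset members are zero-filled. Compare (illustrative names):

	/* positional: silently wrong if struct members are ever reordered */
	static struct nes_cm_ops api_positional = { mini_cm_accelerated, mini_cm_listen };

	/* designated: order-independent and self-documenting */
	static struct nes_cm_ops api_designated = {
		.accelerated = mini_cm_accelerated,
		.listen      = mini_cm_listen,
	};
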
43654diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43655index 4166452..fc952c3 100644
43656--- a/drivers/infiniband/hw/nes/nes_mgt.c
43657+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43658@@ -40,8 +40,8 @@
43659 #include "nes.h"
43660 #include "nes_mgt.h"
43661
43662-atomic_t pau_qps_created;
43663-atomic_t pau_qps_destroyed;
43664+atomic_unchecked_t pau_qps_created;
43665+atomic_unchecked_t pau_qps_destroyed;
43666
43667 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43668 {
43669@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43670 {
43671 struct sk_buff *skb;
43672 unsigned long flags;
43673- atomic_inc(&pau_qps_destroyed);
43674+ atomic_inc_unchecked(&pau_qps_destroyed);
43675
43676 /* Free packets that have not yet been forwarded */
43677 /* Lock is acquired by skb_dequeue when removing the skb */
43678@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43679 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43680 skb_queue_head_init(&nesqp->pau_list);
43681 spin_lock_init(&nesqp->pau_lock);
43682- atomic_inc(&pau_qps_created);
43683+ atomic_inc_unchecked(&pau_qps_created);
43684 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43685 }
43686
43687diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43688index 70acda9..a96de9d 100644
43689--- a/drivers/infiniband/hw/nes/nes_nic.c
43690+++ b/drivers/infiniband/hw/nes/nes_nic.c
43691@@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43692 target_stat_values[++index] = mh_detected;
43693 target_stat_values[++index] = mh_pauses_sent;
43694 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43695- target_stat_values[++index] = atomic_read(&cm_connects);
43696- target_stat_values[++index] = atomic_read(&cm_accepts);
43697- target_stat_values[++index] = atomic_read(&cm_disconnects);
43698- target_stat_values[++index] = atomic_read(&cm_connecteds);
43699- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43700- target_stat_values[++index] = atomic_read(&cm_rejects);
43701- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43702- target_stat_values[++index] = atomic_read(&qps_created);
43703- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43704- target_stat_values[++index] = atomic_read(&qps_destroyed);
43705- target_stat_values[++index] = atomic_read(&cm_closes);
43706+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43707+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43708+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43709+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43710+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43711+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43712+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43713+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43714+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43715+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43716+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43717 target_stat_values[++index] = cm_packets_sent;
43718 target_stat_values[++index] = cm_packets_bounced;
43719 target_stat_values[++index] = cm_packets_created;
43720 target_stat_values[++index] = cm_packets_received;
43721 target_stat_values[++index] = cm_packets_dropped;
43722 target_stat_values[++index] = cm_packets_retrans;
43723- target_stat_values[++index] = atomic_read(&cm_listens_created);
43724- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43725+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43726+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43727 target_stat_values[++index] = cm_backlog_drops;
43728- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43729- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43730- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43731- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43732- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43733+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43734+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43735+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43736+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43737+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43738 target_stat_values[++index] = nesadapter->free_4kpbl;
43739 target_stat_values[++index] = nesadapter->free_256pbl;
43740 target_stat_values[++index] = int_mod_timer_init;
43741 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43742 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43743 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43744- target_stat_values[++index] = atomic_read(&pau_qps_created);
43745- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43746+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43747+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43748 }
43749
43750 /**
43751diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43752index c0d0296..3185f57 100644
43753--- a/drivers/infiniband/hw/nes/nes_verbs.c
43754+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43755@@ -46,9 +46,9 @@
43756
43757 #include <rdma/ib_umem.h>
43758
43759-atomic_t mod_qp_timouts;
43760-atomic_t qps_created;
43761-atomic_t sw_qps_destroyed;
43762+atomic_unchecked_t mod_qp_timouts;
43763+atomic_unchecked_t qps_created;
43764+atomic_unchecked_t sw_qps_destroyed;
43765
43766 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43767
43768@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43769 if (init_attr->create_flags)
43770 return ERR_PTR(-EINVAL);
43771
43772- atomic_inc(&qps_created);
43773+ atomic_inc_unchecked(&qps_created);
43774 switch (init_attr->qp_type) {
43775 case IB_QPT_RC:
43776 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43777@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43778 struct iw_cm_event cm_event;
43779 int ret = 0;
43780
43781- atomic_inc(&sw_qps_destroyed);
43782+ atomic_inc_unchecked(&sw_qps_destroyed);
43783 nesqp->destroyed = 1;
43784
43785 /* Blow away the connection if it exists. */
43786diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43787index ffd48bf..83cdb56 100644
43788--- a/drivers/infiniband/hw/qib/qib.h
43789+++ b/drivers/infiniband/hw/qib/qib.h
43790@@ -52,6 +52,7 @@
43791 #include <linux/kref.h>
43792 #include <linux/sched.h>
43793 #include <linux/kthread.h>
43794+#include <linux/slab.h>
43795
43796 #include "qib_common.h"
43797 #include "qib_verbs.h"
43798diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43799index cdc7df4..a2fdfdb 100644
43800--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43801+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43802@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43803 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43804 }
43805
43806-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43807+static struct rtnl_link_ops ipoib_link_ops = {
43808 .kind = "ipoib",
43809 .maxtype = IFLA_IPOIB_MAX,
43810 .policy = ipoib_policy,
43811diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43812index e853a21..56fc5a8 100644
43813--- a/drivers/input/gameport/gameport.c
43814+++ b/drivers/input/gameport/gameport.c
43815@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43816 */
43817 static void gameport_init_port(struct gameport *gameport)
43818 {
43819- static atomic_t gameport_no = ATOMIC_INIT(-1);
43820+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43821
43822 __module_get(THIS_MODULE);
43823
43824 mutex_init(&gameport->drv_mutex);
43825 device_initialize(&gameport->dev);
43826 dev_set_name(&gameport->dev, "gameport%lu",
43827- (unsigned long)atomic_inc_return(&gameport_no));
43828+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43829 gameport->dev.bus = &gameport_bus;
43830 gameport->dev.release = gameport_release_port;
43831 if (gameport->parent)
43832diff --git a/drivers/input/input.c b/drivers/input/input.c
43833index cc357f1..ee42fbc 100644
43834--- a/drivers/input/input.c
43835+++ b/drivers/input/input.c
43836@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
43837 */
43838 struct input_dev *input_allocate_device(void)
43839 {
43840- static atomic_t input_no = ATOMIC_INIT(-1);
43841+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43842 struct input_dev *dev;
43843
43844 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43845@@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
43846 INIT_LIST_HEAD(&dev->node);
43847
43848 dev_set_name(&dev->dev, "input%lu",
43849- (unsigned long)atomic_inc_return(&input_no));
43850+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43851
43852 __module_get(THIS_MODULE);
43853 }
43854diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43855index 4a95b22..874c182 100644
43856--- a/drivers/input/joystick/sidewinder.c
43857+++ b/drivers/input/joystick/sidewinder.c
43858@@ -30,6 +30,7 @@
43859 #include <linux/kernel.h>
43860 #include <linux/module.h>
43861 #include <linux/slab.h>
43862+#include <linux/sched.h>
43863 #include <linux/input.h>
43864 #include <linux/gameport.h>
43865 #include <linux/jiffies.h>
43866diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43867index 3aa2f3f..53c00ea 100644
43868--- a/drivers/input/joystick/xpad.c
43869+++ b/drivers/input/joystick/xpad.c
43870@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43871
43872 static int xpad_led_probe(struct usb_xpad *xpad)
43873 {
43874- static atomic_t led_seq = ATOMIC_INIT(-1);
43875+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43876 unsigned long led_no;
43877 struct xpad_led *led;
43878 struct led_classdev *led_cdev;
43879@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43880 if (!led)
43881 return -ENOMEM;
43882
43883- led_no = atomic_inc_return(&led_seq);
43884+ led_no = atomic_inc_return_unchecked(&led_seq);
43885
43886 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43887 led->xpad = xpad;
43888diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43889index ac1fa5f..5f7502c 100644
43890--- a/drivers/input/misc/ims-pcu.c
43891+++ b/drivers/input/misc/ims-pcu.c
43892@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43893
43894 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43895 {
43896- static atomic_t device_no = ATOMIC_INIT(-1);
43897+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43898
43899 const struct ims_pcu_device_info *info;
43900 int error;
43901@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43902 }
43903
43904 /* Device appears to be operable, complete initialization */
43905- pcu->device_no = atomic_inc_return(&device_no);
43906+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43907
43908 /*
43909 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43910diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43911index d02e1bd..d719719 100644
43912--- a/drivers/input/mouse/psmouse.h
43913+++ b/drivers/input/mouse/psmouse.h
43914@@ -124,7 +124,7 @@ struct psmouse_attribute {
43915 ssize_t (*set)(struct psmouse *psmouse, void *data,
43916 const char *buf, size_t count);
43917 bool protect;
43918-};
43919+} __do_const;
43920 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43921
43922 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
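
__do_const is the converse of the __no_const opt-out sketched earlier: psmouse_attribute carries a data member (protect) beside its function pointers, so the constify plugin would normally leave it mutable, and the annotation forces constification anyway. Assumed definition:

	/* assumed spelling, mirroring __no_const */
	#ifdef CONSTIFY_PLUGIN
	#define __do_const __attribute__((do_const))
	#else
	#define __do_const
	#endif
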
43923diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43924index b604564..3f14ae4 100644
43925--- a/drivers/input/mousedev.c
43926+++ b/drivers/input/mousedev.c
43927@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43928
43929 spin_unlock_irq(&client->packet_lock);
43930
43931- if (copy_to_user(buffer, data, count))
43932+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43933 return -EFAULT;
43934
43935 return count;
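
The mousedev_read() change is the standard infoleak/overflow guard: validate the user-controlled count against the fixed kernel buffer before copy_to_user(). A generic sketch of the pattern (not the driver's exact code):

	static ssize_t read_fixed(char __user *buffer, size_t count)
	{
		u8 data[8] = {};		/* fixed-size, zeroed kernel buffer */

		if (count > sizeof(data))	/* reject oversized requests */
			return -EFAULT;
		if (copy_to_user(buffer, data, count))
			return -EFAULT;
		return count;
	}
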
43936diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43937index a05a517..323a2fd 100644
43938--- a/drivers/input/serio/serio.c
43939+++ b/drivers/input/serio/serio.c
43940@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43941 */
43942 static void serio_init_port(struct serio *serio)
43943 {
43944- static atomic_t serio_no = ATOMIC_INIT(-1);
43945+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43946
43947 __module_get(THIS_MODULE);
43948
43949@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43950 mutex_init(&serio->drv_mutex);
43951 device_initialize(&serio->dev);
43952 dev_set_name(&serio->dev, "serio%lu",
43953- (unsigned long)atomic_inc_return(&serio_no));
43954+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43955 serio->dev.bus = &serio_bus;
43956 serio->dev.release = serio_release_port;
43957 serio->dev.groups = serio_device_attr_groups;
43958diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43959index 71ef5d6..93380a9 100644
43960--- a/drivers/input/serio/serio_raw.c
43961+++ b/drivers/input/serio/serio_raw.c
43962@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43963
43964 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43965 {
43966- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43967+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43968 struct serio_raw *serio_raw;
43969 int err;
43970
43971@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43972 }
43973
43974 snprintf(serio_raw->name, sizeof(serio_raw->name),
43975- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43976+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43977 kref_init(&serio_raw->kref);
43978 INIT_LIST_HEAD(&serio_raw->client_list);
43979 init_waitqueue_head(&serio_raw->wait);
43980diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
43981index 92e2243..8fd9092 100644
43982--- a/drivers/input/touchscreen/htcpen.c
43983+++ b/drivers/input/touchscreen/htcpen.c
43984@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
43985 }
43986 };
43987
43988-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
43989+static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
43990 {
43991 .ident = "Shift",
43992 .matches = {
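
Making the DMI table const requires the matching section change: a const object annotated __initdata lands in a writable init section and can trigger a section type conflict, so const init-only data must use __initconst. The shape of the fix:

	/* illustrative table; note __initconst, not __initdata, on const data */
	static const struct dmi_system_id table[] __initconst = {
		{ .ident = "Shift" },
		{ }	/* terminator */
	};
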
43993diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43994index 48882c1..93e0987 100644
43995--- a/drivers/iommu/amd_iommu.c
43996+++ b/drivers/iommu/amd_iommu.c
43997@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43998
43999 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
44000 {
44001+ phys_addr_t physaddr;
44002 WARN_ON(address & 0x7ULL);
44003
44004 memset(cmd, 0, sizeof(*cmd));
44005- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
44006- cmd->data[1] = upper_32_bits(__pa(address));
44007+
44008+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
44009+ if (object_starts_on_stack((void *)address)) {
44010+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
44011+ physaddr = __pa((u64)adjbuf);
44012+ } else
44013+#endif
44014+ physaddr = __pa(address);
44015+
44016+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
44017+ cmd->data[1] = upper_32_bits(physaddr);
44018 cmd->data[2] = 1;
44019 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
44020 }
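
build_completion_wait() passes the physical address of an on-stack wait semaphore to the IOMMU. With GRKERNSEC_KSTACKOVERFLOW the task stack is virtually mapped, so __pa() on a stack address would be bogus; the hunk rebases the pointer onto the task's lowmem alias before translating. The arithmetic, spelled out:

	/* offset inside the vmapped stack, re-applied to the lowmem alias */
	ptrdiff_t   off = (void *)address - current->stack;
	phys_addr_t pa  = __pa(current->lowmem_stack + off);
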
44021diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44022index a3adde6..988ee96 100644
44023--- a/drivers/iommu/arm-smmu.c
44024+++ b/drivers/iommu/arm-smmu.c
44025@@ -338,7 +338,7 @@ enum arm_smmu_domain_stage {
44026
44027 struct arm_smmu_domain {
44028 struct arm_smmu_device *smmu;
44029- struct io_pgtable_ops *pgtbl_ops;
44030+ struct io_pgtable *pgtbl;
44031 spinlock_t pgtbl_lock;
44032 struct arm_smmu_cfg cfg;
44033 enum arm_smmu_domain_stage stage;
44034@@ -833,7 +833,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44035 {
44036 int irq, start, ret = 0;
44037 unsigned long ias, oas;
44038- struct io_pgtable_ops *pgtbl_ops;
44039+ struct io_pgtable *pgtbl;
44040 struct io_pgtable_cfg pgtbl_cfg;
44041 enum io_pgtable_fmt fmt;
44042 struct arm_smmu_domain *smmu_domain = domain->priv;
44043@@ -918,14 +918,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44044 };
44045
44046 smmu_domain->smmu = smmu;
44047- pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
44048- if (!pgtbl_ops) {
44049+ pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
44050+ if (!pgtbl) {
44051 ret = -ENOMEM;
44052 goto out_clear_smmu;
44053 }
44054
44055 /* Update our support page sizes to reflect the page table format */
44056- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44057+ pax_open_kernel();
44058+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44059+ pax_close_kernel();
44060
44061 /* Initialise the context bank with our page table cfg */
44062 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
44063@@ -946,7 +948,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44064 mutex_unlock(&smmu_domain->init_mutex);
44065
44066 /* Publish page table ops for map/unmap */
44067- smmu_domain->pgtbl_ops = pgtbl_ops;
44068+ smmu_domain->pgtbl = pgtbl;
44069 return 0;
44070
44071 out_clear_smmu:
44072@@ -979,8 +981,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
44073 free_irq(irq, domain);
44074 }
44075
44076- if (smmu_domain->pgtbl_ops)
44077- free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44078+ free_io_pgtable(smmu_domain->pgtbl);
44079
44080 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
44081 }
44082@@ -1204,13 +1205,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
44083 int ret;
44084 unsigned long flags;
44085 struct arm_smmu_domain *smmu_domain = domain->priv;
44086- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44087+ struct io_pgtable *iop = smmu_domain->pgtbl;
44088
44089- if (!ops)
44090+ if (!iop)
44091 return -ENODEV;
44092
44093 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44094- ret = ops->map(ops, iova, paddr, size, prot);
44095+ ret = iop->ops->map(iop, iova, paddr, size, prot);
44096 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44097 return ret;
44098 }
44099@@ -1221,13 +1222,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
44100 size_t ret;
44101 unsigned long flags;
44102 struct arm_smmu_domain *smmu_domain = domain->priv;
44103- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44104+ struct io_pgtable *iop = smmu_domain->pgtbl;
44105
44106- if (!ops)
44107+ if (!iop)
44108 return 0;
44109
44110 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44111- ret = ops->unmap(ops, iova, size);
44112+ ret = iop->ops->unmap(iop, iova, size);
44113 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44114 return ret;
44115 }
44116@@ -1238,7 +1239,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44117 struct arm_smmu_domain *smmu_domain = domain->priv;
44118 struct arm_smmu_device *smmu = smmu_domain->smmu;
44119 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
44120- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44121+ struct io_pgtable *iop = smmu_domain->pgtbl;
44122 struct device *dev = smmu->dev;
44123 void __iomem *cb_base;
44124 u32 tmp;
44125@@ -1261,7 +1262,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44126 dev_err(dev,
44127 "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
44128 &iova);
44129- return ops->iova_to_phys(ops, iova);
44130+ return iop->ops->iova_to_phys(iop, iova);
44131 }
44132
44133 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
44134@@ -1282,9 +1283,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44135 phys_addr_t ret;
44136 unsigned long flags;
44137 struct arm_smmu_domain *smmu_domain = domain->priv;
44138- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44139+ struct io_pgtable *iop = smmu_domain->pgtbl;
44140
44141- if (!ops)
44142+ if (!iop)
44143 return 0;
44144
44145 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44146@@ -1292,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44147 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
44148 ret = arm_smmu_iova_to_phys_hard(domain, iova);
44149 } else {
44150- ret = ops->iova_to_phys(ops, iova);
44151+ ret = iop->ops->iova_to_phys(iop, iova);
44152 }
44153
44154 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44155@@ -1651,7 +1652,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
44156 size |= SZ_64K | SZ_512M;
44157 }
44158
44159- arm_smmu_ops.pgsize_bitmap &= size;
44160+ pax_open_kernel();
44161+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap &= size;
44162+ pax_close_kernel();
44163 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
44164
44165 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
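arm_smmu_ops is constified, so the two probe-time updates of pgsize_bitmap are bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection; the cast strips const only for that single store. A userspace analogue of the open/patch/close pattern, illustrative only, since the kernel toggles CR0.WP or page-table bits rather than calling mprotect():

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { unsigned long pgsize_bitmap; };

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        static struct ops ro_ops __attribute__((aligned(4096))) = {
                .pgsize_bitmap = ~0UL,
        };

        mprotect(&ro_ops, pagesz, PROT_READ);               /* normally read-only */

        mprotect(&ro_ops, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        ro_ops.pgsize_bitmap &= 0xfffff000UL;               /* the one store      */
        mprotect(&ro_ops, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("bitmap = %#lx\n", ro_ops.pgsize_bitmap);
        return 0;
}

The irq_remapping and lguest hunks below rely on the same write-window primitive.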
44166diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
44167index b610a8d..08eb879 100644
44168--- a/drivers/iommu/io-pgtable-arm.c
44169+++ b/drivers/iommu/io-pgtable-arm.c
44170@@ -36,12 +36,6 @@
44171 #define io_pgtable_to_data(x) \
44172 container_of((x), struct arm_lpae_io_pgtable, iop)
44173
44174-#define io_pgtable_ops_to_pgtable(x) \
44175- container_of((x), struct io_pgtable, ops)
44176-
44177-#define io_pgtable_ops_to_data(x) \
44178- io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44179-
44180 /*
44181 * For consistency with the architecture, we always consider
44182 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
44183@@ -302,10 +296,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
44184 return pte;
44185 }
44186
44187-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
44188+static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
44189 phys_addr_t paddr, size_t size, int iommu_prot)
44190 {
44191- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44192+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44193 arm_lpae_iopte *ptep = data->pgd;
44194 int lvl = ARM_LPAE_START_LVL(data);
44195 arm_lpae_iopte prot;
44196@@ -445,12 +439,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
44197 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
44198 }
44199
44200-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44201+static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
44202 size_t size)
44203 {
44204 size_t unmapped;
44205- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44206- struct io_pgtable *iop = &data->iop;
44207+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44208 arm_lpae_iopte *ptep = data->pgd;
44209 int lvl = ARM_LPAE_START_LVL(data);
44210
44211@@ -461,10 +454,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44212 return unmapped;
44213 }
44214
44215-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
44216+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
44217 unsigned long iova)
44218 {
44219- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44220+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44221 arm_lpae_iopte pte, *ptep = data->pgd;
44222 int lvl = ARM_LPAE_START_LVL(data);
44223
44224@@ -531,6 +524,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
44225 }
44226 }
44227
44228+static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
44229+ .map = arm_lpae_map,
44230+ .unmap = arm_lpae_unmap,
44231+ .iova_to_phys = arm_lpae_iova_to_phys,
44232+};
44233+
44234 static struct arm_lpae_io_pgtable *
44235 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44236 {
44237@@ -562,11 +561,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44238 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
44239 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
44240
44241- data->iop.ops = (struct io_pgtable_ops) {
44242- .map = arm_lpae_map,
44243- .unmap = arm_lpae_unmap,
44244- .iova_to_phys = arm_lpae_iova_to_phys,
44245- };
44246+ data->iop.ops = &arm_lpae_io_pgtable_ops;
44247
44248 return data;
44249 }
44250@@ -825,9 +820,9 @@ static struct iommu_gather_ops dummy_tlb_ops __initdata = {
44251 .flush_pgtable = dummy_flush_pgtable,
44252 };
44253
44254-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44255+static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
44256 {
44257- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44258+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44259 struct io_pgtable_cfg *cfg = &data->iop.cfg;
44260
44261 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
44262@@ -837,9 +832,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44263 data->bits_per_level, data->pgd);
44264 }
44265
44266-#define __FAIL(ops, i) ({ \
44267+#define __FAIL(iop, i) ({ \
44268 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
44269- arm_lpae_dump_ops(ops); \
44270+ arm_lpae_dump_ops(iop); \
44271 selftest_running = false; \
44272 -EFAULT; \
44273 })
44274@@ -854,30 +849,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44275 int i, j;
44276 unsigned long iova;
44277 size_t size;
44278- struct io_pgtable_ops *ops;
44279+ struct io_pgtable *iop;
44280+ const struct io_pgtable_ops *ops;
44281
44282 selftest_running = true;
44283
44284 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
44285 cfg_cookie = cfg;
44286- ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
44287- if (!ops) {
44288+ iop = alloc_io_pgtable(fmts[i], cfg, cfg);
44289+ if (!iop) {
44290 pr_err("selftest: failed to allocate io pgtable ops\n");
44291 return -ENOMEM;
44292 }
44293+ ops = iop->ops;
44294
44295 /*
44296 * Initial sanity checks.
44297 * Empty page tables shouldn't provide any translations.
44298 */
44299- if (ops->iova_to_phys(ops, 42))
44300- return __FAIL(ops, i);
44301+ if (ops->iova_to_phys(iop, 42))
44302+ return __FAIL(iop, i);
44303
44304- if (ops->iova_to_phys(ops, SZ_1G + 42))
44305- return __FAIL(ops, i);
44306+ if (ops->iova_to_phys(iop, SZ_1G + 42))
44307+ return __FAIL(iop, i);
44308
44309- if (ops->iova_to_phys(ops, SZ_2G + 42))
44310- return __FAIL(ops, i);
44311+ if (ops->iova_to_phys(iop, SZ_2G + 42))
44312+ return __FAIL(iop, i);
44313
44314 /*
44315 * Distinct mappings of different granule sizes.
44316@@ -887,19 +884,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44317 while (j != BITS_PER_LONG) {
44318 size = 1UL << j;
44319
44320- if (ops->map(ops, iova, iova, size, IOMMU_READ |
44321+ if (ops->map(iop, iova, iova, size, IOMMU_READ |
44322 IOMMU_WRITE |
44323 IOMMU_NOEXEC |
44324 IOMMU_CACHE))
44325- return __FAIL(ops, i);
44326+ return __FAIL(iop, i);
44327
44328 /* Overlapping mappings */
44329- if (!ops->map(ops, iova, iova + size, size,
44330+ if (!ops->map(iop, iova, iova + size, size,
44331 IOMMU_READ | IOMMU_NOEXEC))
44332- return __FAIL(ops, i);
44333+ return __FAIL(iop, i);
44334
44335- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44336- return __FAIL(ops, i);
44337+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44338+ return __FAIL(iop, i);
44339
44340 iova += SZ_1G;
44341 j++;
44342@@ -908,15 +905,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44343
44344 /* Partial unmap */
44345 size = 1UL << __ffs(cfg->pgsize_bitmap);
44346- if (ops->unmap(ops, SZ_1G + size, size) != size)
44347- return __FAIL(ops, i);
44348+ if (ops->unmap(iop, SZ_1G + size, size) != size)
44349+ return __FAIL(iop, i);
44350
44351 /* Remap of partial unmap */
44352- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
44353- return __FAIL(ops, i);
44354+ if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
44355+ return __FAIL(iop, i);
44356
44357- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
44358- return __FAIL(ops, i);
44359+ if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
44360+ return __FAIL(iop, i);
44361
44362 /* Full unmap */
44363 iova = 0;
44364@@ -924,25 +921,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44365 while (j != BITS_PER_LONG) {
44366 size = 1UL << j;
44367
44368- if (ops->unmap(ops, iova, size) != size)
44369- return __FAIL(ops, i);
44370+ if (ops->unmap(iop, iova, size) != size)
44371+ return __FAIL(iop, i);
44372
44373- if (ops->iova_to_phys(ops, iova + 42))
44374- return __FAIL(ops, i);
44375+ if (ops->iova_to_phys(iop, iova + 42))
44376+ return __FAIL(iop, i);
44377
44378 /* Remap full block */
44379- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
44380- return __FAIL(ops, i);
44381+ if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
44382+ return __FAIL(iop, i);
44383
44384- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44385- return __FAIL(ops, i);
44386+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44387+ return __FAIL(iop, i);
44388
44389 iova += SZ_1G;
44390 j++;
44391 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
44392 }
44393
44394- free_io_pgtable_ops(ops);
44395+ free_io_pgtable(iop);
44396 }
44397
44398 selftest_running = false;
44399diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
44400index 6436fe2..088c965 100644
44401--- a/drivers/iommu/io-pgtable.c
44402+++ b/drivers/iommu/io-pgtable.c
44403@@ -40,7 +40,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
44404 #endif
44405 };
44406
44407-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44408+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44409 struct io_pgtable_cfg *cfg,
44410 void *cookie)
44411 {
44412@@ -62,21 +62,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44413 iop->cookie = cookie;
44414 iop->cfg = *cfg;
44415
44416- return &iop->ops;
44417+ return iop;
44418 }
44419
44420 /*
44421 * It is the IOMMU driver's responsibility to ensure that the page table
44422 * is no longer accessible to the walker by this point.
44423 */
44424-void free_io_pgtable_ops(struct io_pgtable_ops *ops)
44425+void free_io_pgtable(struct io_pgtable *iop)
44426 {
44427- struct io_pgtable *iop;
44428-
44429- if (!ops)
44430+ if (!iop)
44431 return;
44432
44433- iop = container_of(ops, struct io_pgtable, ops);
44434 iop->cfg.tlb->tlb_flush_all(iop->cookie);
44435 io_pgtable_init_table[iop->fmt]->free(iop);
44436 }
44437diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
44438index 10e32f6..0b276c8 100644
44439--- a/drivers/iommu/io-pgtable.h
44440+++ b/drivers/iommu/io-pgtable.h
44441@@ -75,17 +75,18 @@ struct io_pgtable_cfg {
44442 * These functions map directly onto the iommu_ops member functions with
44443 * the same names.
44444 */
44445+struct io_pgtable;
44446 struct io_pgtable_ops {
44447- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
44448+ int (*map)(struct io_pgtable *iop, unsigned long iova,
44449 phys_addr_t paddr, size_t size, int prot);
44450- int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
44451+ int (*unmap)(struct io_pgtable *iop, unsigned long iova,
44452 size_t size);
44453- phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
44454+ phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
44455 unsigned long iova);
44456 };
44457
44458 /**
44459- * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
44460+ * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
44461 *
44462 * @fmt: The page table format.
44463 * @cfg: The page table configuration. This will be modified to represent
44464@@ -94,9 +95,9 @@ struct io_pgtable_ops {
44465 * @cookie: An opaque token provided by the IOMMU driver and passed back to
44466 * the callback routines in cfg->tlb.
44467 */
44468-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44469- struct io_pgtable_cfg *cfg,
44470- void *cookie);
44471+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44472+ struct io_pgtable_cfg *cfg,
44473+ void *cookie);
44474
44475 /**
44476 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
44477@@ -105,7 +106,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44478 *
44479 * @ops: The ops returned from alloc_io_pgtable_ops.
44480 */
44481-void free_io_pgtable_ops(struct io_pgtable_ops *ops);
44482+void free_io_pgtable(struct io_pgtable *iop);
44483
44484
44485 /*
44486@@ -125,7 +126,7 @@ struct io_pgtable {
44487 enum io_pgtable_fmt fmt;
44488 void *cookie;
44489 struct io_pgtable_cfg cfg;
44490- struct io_pgtable_ops ops;
44491+ const struct io_pgtable_ops *ops;
44492 };
44493
44494 /**
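Taken together, the arm-smmu, io-pgtable-arm, io-pgtable, and header hunks perform one refactor: the page-table callbacks now receive the struct io_pgtable itself rather than an ops structure embedded inside it, so each format's ops table becomes a single shared const object instead of a writable per-instance copy, and the container_of() recovery macros go away. A condensed, compilable sketch of the post-patch shape (all names illustrative):

#include <stdio.h>

struct pgtable;

struct pgtable_ops {
        int (*map)(struct pgtable *pt, unsigned long iova);
};

struct pgtable {
        const struct pgtable_ops *ops;  /* shared, read-only vtable */
        unsigned long base;
};

static int lpae_map(struct pgtable *pt, unsigned long iova)
{
        printf("map %#lx via table at %#lx\n", iova, pt->base);
        return 0;
}

static const struct pgtable_ops lpae_ops = { .map = lpae_map };

int main(void)
{
        struct pgtable pt = { .ops = &lpae_ops, .base = 0x1000 };

        return pt.ops->map(&pt, 0x42000);   /* caller style after the patch */
}

The payoff is a smaller writable function-pointer surface: an attacker who can corrupt a domain structure no longer finds a private ops table there to overwrite.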
44495diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44496index 72e683d..c9db262 100644
44497--- a/drivers/iommu/iommu.c
44498+++ b/drivers/iommu/iommu.c
44499@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
44500 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
44501 {
44502 int err;
44503- struct notifier_block *nb;
44504+ notifier_block_no_const *nb;
44505 struct iommu_callback_data cb = {
44506 .ops = ops,
44507 };
44508diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
44509index bc39bdf..e2de272 100644
44510--- a/drivers/iommu/ipmmu-vmsa.c
44511+++ b/drivers/iommu/ipmmu-vmsa.c
44512@@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
44513 struct iommu_domain *io_domain;
44514
44515 struct io_pgtable_cfg cfg;
44516- struct io_pgtable_ops *iop;
44517+ struct io_pgtable *iop;
44518
44519 unsigned int context_id;
44520 spinlock_t lock; /* Protects mappings */
44521@@ -323,8 +323,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
44522 domain->cfg.oas = 40;
44523 domain->cfg.tlb = &ipmmu_gather_ops;
44524
44525- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
44526- domain);
44527+ domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
44528 if (!domain->iop)
44529 return -EINVAL;
44530
44531@@ -482,7 +481,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
44532 * been detached.
44533 */
44534 ipmmu_domain_destroy_context(domain);
44535- free_io_pgtable_ops(domain->iop);
44536+ free_io_pgtable(domain->iop);
44537 kfree(domain);
44538 }
44539
44540@@ -551,7 +550,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
44541 if (!domain)
44542 return -ENODEV;
44543
44544- return domain->iop->map(domain->iop, iova, paddr, size, prot);
44545+ return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
44546 }
44547
44548 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44549@@ -559,7 +558,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44550 {
44551 struct ipmmu_vmsa_domain *domain = io_domain->priv;
44552
44553- return domain->iop->unmap(domain->iop, iova, size);
44554+ return domain->iop->ops->unmap(domain->iop, iova, size);
44555 }
44556
44557 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44558@@ -569,7 +568,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44559
44560 /* TODO: Is locking needed ? */
44561
44562- return domain->iop->iova_to_phys(domain->iop, iova);
44563+ return domain->iop->ops->iova_to_phys(domain->iop, iova);
44564 }
44565
44566 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
44567diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44568index 390079e..1da9d6c 100644
44569--- a/drivers/iommu/irq_remapping.c
44570+++ b/drivers/iommu/irq_remapping.c
44571@@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44572 void panic_if_irq_remap(const char *msg)
44573 {
44574 if (irq_remapping_enabled)
44575- panic(msg);
44576+ panic("%s", msg);
44577 }
44578
44579 static void ir_ack_apic_edge(struct irq_data *data)
44580@@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44581
44582 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44583 {
44584- chip->irq_print_chip = ir_print_prefix;
44585- chip->irq_ack = ir_ack_apic_edge;
44586- chip->irq_eoi = ir_ack_apic_level;
44587- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44588+ pax_open_kernel();
44589+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44590+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44591+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44592+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44593+ pax_close_kernel();
44594 }
44595
44596 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
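The panic() change is a classic format-string fix: msg comes from callers and, if it ever contained a '%', would be parsed for conversion specifiers; routing it through "%s" treats it strictly as data. The irq_remap_modify_chip_defaults() hunk in the same file reuses the pax_open_kernel() write window shown earlier to fill in a constified irq_chip. The format-string hazard in miniature:

#include <stdio.h>

static void report(const char *msg)
{
        /* Wrong: printf(msg); a '%' inside msg would be interpreted. */
        printf("%s\n", msg);    /* Right: msg is data, not a format.  */
}

int main(void)
{
        report("remapping failed: 100% sure");  /* prints literally */
        return 0;
}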
44597diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44598index 471e1cd..b53b870 100644
44599--- a/drivers/irqchip/irq-gic.c
44600+++ b/drivers/irqchip/irq-gic.c
44601@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44602 * Supported arch specific GIC irq extension.
44603 * Default make them NULL.
44604 */
44605-struct irq_chip gic_arch_extn = {
44606+irq_chip_no_const gic_arch_extn = {
44607 .irq_eoi = NULL,
44608 .irq_mask = NULL,
44609 .irq_unmask = NULL,
44610@@ -318,7 +318,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44611 chained_irq_exit(chip, desc);
44612 }
44613
44614-static struct irq_chip gic_chip = {
44615+static irq_chip_no_const gic_chip __read_only = {
44616 .name = "GIC",
44617 .irq_mask = gic_mask_irq,
44618 .irq_unmask = gic_unmask_irq,
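gic_arch_extn and gic_chip are declared with irq_chip_no_const because the constification pass otherwise treats every struct irq_chip as read-only; the typedef opts out exactly the instances that are legitimately assembled at runtime, and the Renesas probe functions below use it the same way. A plain-C sketch of the idea; the alias carries no semantics here, because the real enforcement comes from a GCC plugin:

#include <stdio.h>

struct chip {
        void (*mask)(int irq);
};

/* With the constify plugin, plain "struct chip" instances behave as if
 * const; a no_const alias (sketched as an ordinary typedef) re-opens the
 * few that must be built at probe time. */
typedef struct chip chip_no_const;

static void mask_line(int irq) { printf("mask %d\n", irq); }

static const struct chip fixed_chip = { .mask = mask_line }; /* common case   */
static chip_no_const probe_chip;                             /* runtime-built */

int main(void)
{
        probe_chip.mask = mask_line;    /* only legal on the writable alias */
        fixed_chip.mask(1);
        probe_chip.mask(2);
        return 0;
}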
44619diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
44620index 9a0767b..5e5f86f 100644
44621--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
44622+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
44623@@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
44624 struct intc_irqpin_iomem *i;
44625 struct resource *io[INTC_IRQPIN_REG_NR];
44626 struct resource *irq;
44627- struct irq_chip *irq_chip;
44628+ irq_chip_no_const *irq_chip;
44629 void (*enable_fn)(struct irq_data *d);
44630 void (*disable_fn)(struct irq_data *d);
44631 const char *name = dev_name(dev);
44632diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44633index 384e6ed..7a771b2 100644
44634--- a/drivers/irqchip/irq-renesas-irqc.c
44635+++ b/drivers/irqchip/irq-renesas-irqc.c
44636@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44637 struct irqc_priv *p;
44638 struct resource *io;
44639 struct resource *irq;
44640- struct irq_chip *irq_chip;
44641+ irq_chip_no_const *irq_chip;
44642 const char *name = dev_name(&pdev->dev);
44643 int ret;
44644 int k;
44645diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44646index 6a2df32..dc962f1 100644
44647--- a/drivers/isdn/capi/capi.c
44648+++ b/drivers/isdn/capi/capi.c
44649@@ -81,8 +81,8 @@ struct capiminor {
44650
44651 struct capi20_appl *ap;
44652 u32 ncci;
44653- atomic_t datahandle;
44654- atomic_t msgid;
44655+ atomic_unchecked_t datahandle;
44656+ atomic_unchecked_t msgid;
44657
44658 struct tty_port port;
44659 int ttyinstop;
44660@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44661 capimsg_setu16(s, 2, mp->ap->applid);
44662 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44663 capimsg_setu8 (s, 5, CAPI_RESP);
44664- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44665+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44666 capimsg_setu32(s, 8, mp->ncci);
44667 capimsg_setu16(s, 12, datahandle);
44668 }
44669@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44670 mp->outbytes -= len;
44671 spin_unlock_bh(&mp->outlock);
44672
44673- datahandle = atomic_inc_return(&mp->datahandle);
44674+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44675 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44676 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44677 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44678 capimsg_setu16(skb->data, 2, mp->ap->applid);
44679 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44680 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44681- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44682+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44683 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44684 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44685 capimsg_setu16(skb->data, 16, len); /* Data length */
44686diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44687index aecec6d..11e13c5 100644
44688--- a/drivers/isdn/gigaset/bas-gigaset.c
44689+++ b/drivers/isdn/gigaset/bas-gigaset.c
44690@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44691
44692
44693 static const struct gigaset_ops gigops = {
44694- gigaset_write_cmd,
44695- gigaset_write_room,
44696- gigaset_chars_in_buffer,
44697- gigaset_brkchars,
44698- gigaset_init_bchannel,
44699- gigaset_close_bchannel,
44700- gigaset_initbcshw,
44701- gigaset_freebcshw,
44702- gigaset_reinitbcshw,
44703- gigaset_initcshw,
44704- gigaset_freecshw,
44705- gigaset_set_modem_ctrl,
44706- gigaset_baud_rate,
44707- gigaset_set_line_ctrl,
44708- gigaset_isoc_send_skb,
44709- gigaset_isoc_input,
44710+ .write_cmd = gigaset_write_cmd,
44711+ .write_room = gigaset_write_room,
44712+ .chars_in_buffer = gigaset_chars_in_buffer,
44713+ .brkchars = gigaset_brkchars,
44714+ .init_bchannel = gigaset_init_bchannel,
44715+ .close_bchannel = gigaset_close_bchannel,
44716+ .initbcshw = gigaset_initbcshw,
44717+ .freebcshw = gigaset_freebcshw,
44718+ .reinitbcshw = gigaset_reinitbcshw,
44719+ .initcshw = gigaset_initcshw,
44720+ .freecshw = gigaset_freecshw,
44721+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44722+ .baud_rate = gigaset_baud_rate,
44723+ .set_line_ctrl = gigaset_set_line_ctrl,
44724+ .send_skb = gigaset_isoc_send_skb,
44725+ .handle_input = gigaset_isoc_input,
44726 };
44727
44728 /* bas_gigaset_init
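The gigaset ops table is rewritten from positional to designated initializers. Positional initialization silently rebinds every callback if a member of struct gigaset_ops is added or reordered; naming each member keeps the binding explicit and lets omitted members default to NULL. The ser-gigaset and usb-gigaset tables below get the same rewrite. A compilable miniature of the difference:

struct ops {
        int  (*open)(void);
        void (*close)(void);
        int  (*reset)(void);    /* imagine this inserted mid-struct later */
};

static int  my_open(void)  { return 0; }
static void my_close(void) { }

/* A positional form like { my_open, my_close } binds by slot order and
 * drifts when the struct changes; the designated form cannot drift. */
static const struct ops example_ops = {
        .open  = my_open,
        .close = my_close,
        /* .reset intentionally left NULL */
};

int main(void)
{
        return example_ops.open();
}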
44729diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44730index 600c79b..3752bab 100644
44731--- a/drivers/isdn/gigaset/interface.c
44732+++ b/drivers/isdn/gigaset/interface.c
44733@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44734 }
44735 tty->driver_data = cs;
44736
44737- ++cs->port.count;
44738+ atomic_inc(&cs->port.count);
44739
44740- if (cs->port.count == 1) {
44741+ if (atomic_read(&cs->port.count) == 1) {
44742 tty_port_tty_set(&cs->port, tty);
44743 cs->port.low_latency = 1;
44744 }
44745@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
44746
44747 if (!cs->connected)
44748 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
44749- else if (!cs->port.count)
44750+ else if (!atomic_read(&cs->port.count))
44751 dev_warn(cs->dev, "%s: device not opened\n", __func__);
44752- else if (!--cs->port.count)
44753+ else if (!atomic_dec_return(&cs->port.count))
44754 tty_port_tty_set(&cs->port, NULL);
44755
44756 mutex_unlock(&cs->mutex);
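cs->port.count is incremented and tested from open/close paths that can race, so the patch converts it to atomic_t and uses atomic_inc, atomic_dec_return, and atomic_read; the isdn_tty.c hunks below apply the identical conversion to their tty_port counts. A userspace sketch of the first-opener/last-closer pattern, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static int port_open(void)
{
        /* atomic_inc(&port->count); the first opener does one-time setup */
        return atomic_fetch_add(&port_count, 1) + 1 == 1;
}

static int port_close(void)
{
        /* atomic_dec_return(&port->count) == 0: the last closer tears down */
        return atomic_fetch_sub(&port_count, 1) - 1 == 0;
}

int main(void)
{
        printf("first open: %d\n", port_open());    /* 1 */
        printf("last close: %d\n", port_close());   /* 1 */
        return 0;
}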
44757diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
44758index 8c91fd5..14f13ce 100644
44759--- a/drivers/isdn/gigaset/ser-gigaset.c
44760+++ b/drivers/isdn/gigaset/ser-gigaset.c
44761@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
44762 }
44763
44764 static const struct gigaset_ops ops = {
44765- gigaset_write_cmd,
44766- gigaset_write_room,
44767- gigaset_chars_in_buffer,
44768- gigaset_brkchars,
44769- gigaset_init_bchannel,
44770- gigaset_close_bchannel,
44771- gigaset_initbcshw,
44772- gigaset_freebcshw,
44773- gigaset_reinitbcshw,
44774- gigaset_initcshw,
44775- gigaset_freecshw,
44776- gigaset_set_modem_ctrl,
44777- gigaset_baud_rate,
44778- gigaset_set_line_ctrl,
44779- gigaset_m10x_send_skb, /* asyncdata.c */
44780- gigaset_m10x_input, /* asyncdata.c */
44781+ .write_cmd = gigaset_write_cmd,
44782+ .write_room = gigaset_write_room,
44783+ .chars_in_buffer = gigaset_chars_in_buffer,
44784+ .brkchars = gigaset_brkchars,
44785+ .init_bchannel = gigaset_init_bchannel,
44786+ .close_bchannel = gigaset_close_bchannel,
44787+ .initbcshw = gigaset_initbcshw,
44788+ .freebcshw = gigaset_freebcshw,
44789+ .reinitbcshw = gigaset_reinitbcshw,
44790+ .initcshw = gigaset_initcshw,
44791+ .freecshw = gigaset_freecshw,
44792+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44793+ .baud_rate = gigaset_baud_rate,
44794+ .set_line_ctrl = gigaset_set_line_ctrl,
44795+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
44796+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
44797 };
44798
44799
44800diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
44801index 5f306e2..5342f88 100644
44802--- a/drivers/isdn/gigaset/usb-gigaset.c
44803+++ b/drivers/isdn/gigaset/usb-gigaset.c
44804@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
44805 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
44806 memcpy(cs->hw.usb->bchars, buf, 6);
44807 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
44808- 0, 0, &buf, 6, 2000);
44809+ 0, 0, buf, 6, 2000);
44810 }
44811
44812 static void gigaset_freebcshw(struct bc_state *bcs)
44813@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
44814 }
44815
44816 static const struct gigaset_ops ops = {
44817- gigaset_write_cmd,
44818- gigaset_write_room,
44819- gigaset_chars_in_buffer,
44820- gigaset_brkchars,
44821- gigaset_init_bchannel,
44822- gigaset_close_bchannel,
44823- gigaset_initbcshw,
44824- gigaset_freebcshw,
44825- gigaset_reinitbcshw,
44826- gigaset_initcshw,
44827- gigaset_freecshw,
44828- gigaset_set_modem_ctrl,
44829- gigaset_baud_rate,
44830- gigaset_set_line_ctrl,
44831- gigaset_m10x_send_skb,
44832- gigaset_m10x_input,
44833+ .write_cmd = gigaset_write_cmd,
44834+ .write_room = gigaset_write_room,
44835+ .chars_in_buffer = gigaset_chars_in_buffer,
44836+ .brkchars = gigaset_brkchars,
44837+ .init_bchannel = gigaset_init_bchannel,
44838+ .close_bchannel = gigaset_close_bchannel,
44839+ .initbcshw = gigaset_initbcshw,
44840+ .freebcshw = gigaset_freebcshw,
44841+ .reinitbcshw = gigaset_reinitbcshw,
44842+ .initcshw = gigaset_initcshw,
44843+ .freecshw = gigaset_freecshw,
44844+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44845+ .baud_rate = gigaset_baud_rate,
44846+ .set_line_ctrl = gigaset_set_line_ctrl,
44847+ .send_skb = gigaset_m10x_send_skb,
44848+ .handle_input = gigaset_m10x_input,
44849 };
44850
44851 /*
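Besides the ops rewrite, the usb-gigaset hunk fixes a genuine bug in gigaset_brkchars(): buf is an array parameter, which decays to a pointer, so &buf is the address of the pointer variable itself, and usb_control_msg() would have transmitted the pointer's own bytes rather than the six break characters. Reduced to a runnable demonstration:

#include <stdio.h>

static void send_bytes(const void *data, unsigned len)
{
        const unsigned char *p = data;

        while (len--)
                printf("%02x ", *p++);
        printf("\n");
}

static void brkchars(const unsigned char buf[6])
{
        send_bytes(&buf, 6);    /* WRONG: sends the pointer's own bytes */
        send_bytes(buf, 6);     /* RIGHT: sends the payload             */
}

int main(void)
{
        unsigned char chars[6] = { 1, 2, 3, 4, 5, 6 };

        brkchars(chars);
        return 0;
}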
44852diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
44853index 4d9b195..455075c 100644
44854--- a/drivers/isdn/hardware/avm/b1.c
44855+++ b/drivers/isdn/hardware/avm/b1.c
44856@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
44857 }
44858 if (left) {
44859 if (t4file->user) {
44860- if (copy_from_user(buf, dp, left))
44861+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44862 return -EFAULT;
44863 } else {
44864 memcpy(buf, dp, left);
44865@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
44866 }
44867 if (left) {
44868 if (config->user) {
44869- if (copy_from_user(buf, dp, left))
44870+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44871 return -EFAULT;
44872 } else {
44873 memcpy(buf, dp, left);
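b1_load_t4file() and b1_load_config() copy a caller-supplied length into a fixed on-stack buffer; the added left > sizeof buf guard rejects oversized chunks instead of letting copy_from_user() overflow the stack, and icn_writecmd() further down gains the same check. The shape of the guard, compilable:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define CHUNK 256       /* stand-in for the driver's buffer size */

static int load_chunk(const char *src, size_t left)
{
        char buf[CHUNK];

        if (left > sizeof buf)  /* the added guard: reject, do not overflow */
                return -EFAULT;
        memcpy(buf, src, left); /* copy_from_user() in the driver */
        return 0;
}

int main(void)
{
        char payload[512] = { 0 };

        printf("%d\n", load_chunk(payload, 100));   /* 0, accepted       */
        printf("%d\n", load_chunk(payload, 400));   /* -EFAULT, rejected */
        return 0;
}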
44874diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
44875index 9b856e1..fa03c92 100644
44876--- a/drivers/isdn/i4l/isdn_common.c
44877+++ b/drivers/isdn/i4l/isdn_common.c
44878@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
44879 } else
44880 return -EINVAL;
44881 case IIOCDBGVAR:
44882+ if (!capable(CAP_SYS_RAWIO))
44883+ return -EPERM;
44884 if (arg) {
44885 if (copy_to_user(argp, &dev, sizeof(ulong)))
44886 return -EFAULT;
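IIOCDBGVAR copies the kernel address of the global dev structure out to userspace, a kernel pointer leak, so the patch gates the ioctl behind CAP_SYS_RAWIO. A userspace analogue of the gate, with geteuid() standing in for the kernel's capable() check:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int debug_query(unsigned long *out)
{
        static int secret;

        if (geteuid() != 0)     /* stands in for capable(CAP_SYS_RAWIO) */
                return -EPERM;
        *out = (unsigned long)&secret;  /* the address the ioctl exposes */
        return 0;
}

int main(void)
{
        unsigned long addr;

        if (debug_query(&addr) == 0)
                printf("dev at %#lx\n", addr);
        else
                puts("permission denied");
        return 0;
}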
44887diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
44888index 91d5730..336523e 100644
44889--- a/drivers/isdn/i4l/isdn_concap.c
44890+++ b/drivers/isdn/i4l/isdn_concap.c
44891@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
44892 }
44893
44894 struct concap_device_ops isdn_concap_reliable_dl_dops = {
44895- &isdn_concap_dl_data_req,
44896- &isdn_concap_dl_connect_req,
44897- &isdn_concap_dl_disconn_req
44898+ .data_req = &isdn_concap_dl_data_req,
44899+ .connect_req = &isdn_concap_dl_connect_req,
44900+ .disconn_req = &isdn_concap_dl_disconn_req
44901 };
44902
44903 /* The following should better go into a dedicated source file such that
44904diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
44905index bc91261..2ef7e36 100644
44906--- a/drivers/isdn/i4l/isdn_tty.c
44907+++ b/drivers/isdn/i4l/isdn_tty.c
44908@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
44909
44910 #ifdef ISDN_DEBUG_MODEM_OPEN
44911 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
44912- port->count);
44913+ atomic_read(&port->count));
44914 #endif
44915- port->count++;
44916+ atomic_inc(&port->count);
44917 port->tty = tty;
44918 /*
44919 * Start up serial port
44920@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44921 #endif
44922 return;
44923 }
44924- if ((tty->count == 1) && (port->count != 1)) {
44925+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
44926 /*
44927 * Uh, oh. tty->count is 1, which means that the tty
44928 * structure will be freed. Info->count should always
44929@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44930 * serial port won't be shutdown.
44931 */
44932 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
44933- "info->count is %d\n", port->count);
44934- port->count = 1;
44935+ "info->count is %d\n", atomic_read(&port->count));
44936+ atomic_set(&port->count, 1);
44937 }
44938- if (--port->count < 0) {
44939+ if (atomic_dec_return(&port->count) < 0) {
44940 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
44941- info->line, port->count);
44942- port->count = 0;
44943+ info->line, atomic_read(&port->count));
44944+ atomic_set(&port->count, 0);
44945 }
44946- if (port->count) {
44947+ if (atomic_read(&port->count)) {
44948 #ifdef ISDN_DEBUG_MODEM_OPEN
44949 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
44950 #endif
44951@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
44952 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
44953 return;
44954 isdn_tty_shutdown(info);
44955- port->count = 0;
44956+ atomic_set(&port->count, 0);
44957 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44958 port->tty = NULL;
44959 wake_up_interruptible(&port->open_wait);
44960@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
44961 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
44962 modem_info *info = &dev->mdm.info[i];
44963
44964- if (info->port.count == 0)
44965+ if (atomic_read(&info->port.count) == 0)
44966 continue;
44967 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
44968 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
44969diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
44970index e2d4e58..40cd045 100644
44971--- a/drivers/isdn/i4l/isdn_x25iface.c
44972+++ b/drivers/isdn/i4l/isdn_x25iface.c
44973@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
44974
44975
44976 static struct concap_proto_ops ix25_pops = {
44977- &isdn_x25iface_proto_new,
44978- &isdn_x25iface_proto_del,
44979- &isdn_x25iface_proto_restart,
44980- &isdn_x25iface_proto_close,
44981- &isdn_x25iface_xmit,
44982- &isdn_x25iface_receive,
44983- &isdn_x25iface_connect_ind,
44984- &isdn_x25iface_disconn_ind
44985+ .proto_new = &isdn_x25iface_proto_new,
44986+ .proto_del = &isdn_x25iface_proto_del,
44987+ .restart = &isdn_x25iface_proto_restart,
44988+ .close = &isdn_x25iface_proto_close,
44989+ .encap_and_xmit = &isdn_x25iface_xmit,
44990+ .data_ind = &isdn_x25iface_receive,
44991+ .connect_ind = &isdn_x25iface_connect_ind,
44992+ .disconn_ind = &isdn_x25iface_disconn_ind
44993 };
44994
44995 /* error message helper function */
44996diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44997index 358a574..b4987ea 100644
44998--- a/drivers/isdn/icn/icn.c
44999+++ b/drivers/isdn/icn/icn.c
45000@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45001 if (count > len)
45002 count = len;
45003 if (user) {
45004- if (copy_from_user(msg, buf, count))
45005+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45006 return -EFAULT;
45007 } else
45008 memcpy(msg, buf, count);
45009diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45010index 87f7dff..7300125 100644
45011--- a/drivers/isdn/mISDN/dsp_cmx.c
45012+++ b/drivers/isdn/mISDN/dsp_cmx.c
45013@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45014 static u16 dsp_count; /* last sample count */
45015 static int dsp_count_valid; /* if we have last sample count */
45016
45017-void
45018+void __intentional_overflow(-1)
45019 dsp_cmx_send(void *arg)
45020 {
45021 struct dsp_conf *conf;
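__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin: it marks functions whose arithmetic is expected to wrap so the plugin skips instrumenting them; release_pmd() in the lguest hunks further down carries the same marking. Outside a plugin-enabled build the macro expands to nothing, which is what this sketch assumes:

#define __intentional_overflow(...)     /* plugin hint; no effect here */

static unsigned short sample_count;

static void __intentional_overflow(-1) advance(unsigned short delta)
{
        sample_count += delta;  /* wraps by design: sample counters cycle */
}

int main(void)
{
        advance(1);
        return (int)sample_count - 1;   /* exits 0 */
}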
45022diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45023index 0f9ed1e..2715d6f 100644
45024--- a/drivers/leds/leds-clevo-mail.c
45025+++ b/drivers/leds/leds-clevo-mail.c
45026@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45027 * detected as working, but in reality it is not) as low as
45028 * possible.
45029 */
45030-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45031+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45032 {
45033 .callback = clevo_mail_led_dmi_callback,
45034 .ident = "Clevo D410J",
45035diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45036index 046cb70..6b20d39 100644
45037--- a/drivers/leds/leds-ss4200.c
45038+++ b/drivers/leds/leds-ss4200.c
45039@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45040 * detected as working, but in reality it is not) as low as
45041 * possible.
45042 */
45043-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45044+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45045 {
45046 .callback = ss4200_led_dmi_callback,
45047 .ident = "Intel SS4200-E",
45048diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45049index 7dc93aa..8272379 100644
45050--- a/drivers/lguest/core.c
45051+++ b/drivers/lguest/core.c
45052@@ -96,9 +96,17 @@ static __init int map_switcher(void)
45053 * The end address needs +1 because __get_vm_area allocates an
45054 * extra guard page, so we need space for that.
45055 */
45056+
45057+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45058+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45059+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45060+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45061+#else
45062 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45063 VM_ALLOC, switcher_addr, switcher_addr
45064 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45065+#endif
45066+
45067 if (!switcher_vma) {
45068 err = -ENOMEM;
45069 printk("lguest: could not map switcher pages high\n");
45070@@ -121,7 +129,7 @@ static __init int map_switcher(void)
45071 * Now the Switcher is mapped at the right address, we can't fail!
45072 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45073 */
45074- memcpy(switcher_vma->addr, start_switcher_text,
45075+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45076 end_switcher_text - start_switcher_text);
45077
45078 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45079diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45080index e3abebc9..6a35328 100644
45081--- a/drivers/lguest/page_tables.c
45082+++ b/drivers/lguest/page_tables.c
45083@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45084 /*:*/
45085
45086 #ifdef CONFIG_X86_PAE
45087-static void release_pmd(pmd_t *spmd)
45088+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45089 {
45090 /* If the entry's not present, there's nothing to release. */
45091 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45092diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45093index 30f2aef..391c748 100644
45094--- a/drivers/lguest/x86/core.c
45095+++ b/drivers/lguest/x86/core.c
45096@@ -60,7 +60,7 @@ static struct {
45097 /* Offset from where switcher.S was compiled to where we've copied it */
45098 static unsigned long switcher_offset(void)
45099 {
45100- return switcher_addr - (unsigned long)start_switcher_text;
45101+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45102 }
45103
45104 /* This cpu's struct lguest_pages (after the Switcher text page) */
45105@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45106 * These copies are pretty cheap, so we do them unconditionally: */
45107 /* Save the current Host top-level page directory.
45108 */
45109+
45110+#ifdef CONFIG_PAX_PER_CPU_PGD
45111+ pages->state.host_cr3 = read_cr3();
45112+#else
45113 pages->state.host_cr3 = __pa(current->mm->pgd);
45114+#endif
45115+
45116 /*
45117 * Set up the Guest's page tables to see this CPU's pages (and no
45118 * other CPU's pages).
45119@@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
45120 * compiled-in switcher code and the high-mapped copy we just made.
45121 */
45122 for (i = 0; i < IDT_ENTRIES; i++)
45123- default_idt_entries[i] += switcher_offset();
45124+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45125
45126 /*
45127 * Set up the Switcher's per-cpu areas.
45128@@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
45129 * it will be undisturbed when we switch. To change %cs and jump we
45130 * need this structure to feed to Intel's "lcall" instruction.
45131 */
45132- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45133+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45134 lguest_entry.segment = LGUEST_CS;
45135
45136 /*
45137diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45138index 40634b0..4f5855e 100644
45139--- a/drivers/lguest/x86/switcher_32.S
45140+++ b/drivers/lguest/x86/switcher_32.S
45141@@ -87,6 +87,7 @@
45142 #include <asm/page.h>
45143 #include <asm/segment.h>
45144 #include <asm/lguest.h>
45145+#include <asm/processor-flags.h>
45146
45147 // We mark the start of the code to copy
45148 // It's placed in .text tho it's never run here
45149@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45150 // Changes type when we load it: damn Intel!
45151 // For after we switch over our page tables
45152 // That entry will be read-only: we'd crash.
45153+
45154+#ifdef CONFIG_PAX_KERNEXEC
45155+ mov %cr0, %edx
45156+ xor $X86_CR0_WP, %edx
45157+ mov %edx, %cr0
45158+#endif
45159+
45160 movl $(GDT_ENTRY_TSS*8), %edx
45161 ltr %dx
45162
45163@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45164 // Let's clear it again for our return.
45165 // The GDT descriptor of the Host
45166 // Points to the table after two "size" bytes
45167- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45168+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45169 // Clear "used" from type field (byte 5, bit 2)
45170- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45171+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45172+
45173+#ifdef CONFIG_PAX_KERNEXEC
45174+ mov %cr0, %eax
45175+ xor $X86_CR0_WP, %eax
45176+ mov %eax, %cr0
45177+#endif
45178
45179 // Once our page table's switched, the Guest is live!
45180 // The Host fades as we run this final step.
45181@@ -295,13 +309,12 @@ deliver_to_host:
45182 // I consulted gcc, and it gave
45183 // These instructions, which I gladly credit:
45184 leal (%edx,%ebx,8), %eax
45185- movzwl (%eax),%edx
45186- movl 4(%eax), %eax
45187- xorw %ax, %ax
45188- orl %eax, %edx
45189+ movl 4(%eax), %edx
45190+ movw (%eax), %dx
45191 // Now the address of the handler's in %edx
45192 // We call it now: its "iret" drops us home.
45193- jmp *%edx
45194+ ljmp $__KERNEL_CS, $1f
45195+1: jmp *%edx
45196
45197 // Every interrupt can come to us here
45198 // But we must truly tell each apart.
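The lguest changes all follow from PaX KERNEXEC on 32-bit x86: kernel text executes from a shifted alias, so every copy of or pointer into the switcher text goes through ktla_ktva() to recover the linear address; the switcher assembly clears and restores CR0.WP around its update of the read-only GDT TSS descriptor (the assembly-level twin of pax_open_kernel()); and the deliver_to_host hunk assembles the handler address with an equivalent movl/movw pair, then reloads %cs via a far jump before dispatching. A sketch of the ktla_ktva() arithmetic; the delta and addresses are hypothetical:

#include <stdio.h>

#define KERNEXEC_DELTA 0x10000000ULL    /* hypothetical text-alias shift */

static unsigned long long ktla_ktva_sketch(unsigned long long text_addr)
{
        return text_addr + KERNEXEC_DELTA;  /* text alias -> linear alias */
}

int main(void)
{
        unsigned long long start_switcher_text = 0xc1000000ULL; /* made up */

        printf("copy source: %#llx\n", ktla_ktva_sketch(start_switcher_text));
        return 0;
}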
45199diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45200index a08e3ee..df8ade2 100644
45201--- a/drivers/md/bcache/closure.h
45202+++ b/drivers/md/bcache/closure.h
45203@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45204 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45205 struct workqueue_struct *wq)
45206 {
45207- BUG_ON(object_is_on_stack(cl));
45208+ BUG_ON(object_starts_on_stack(cl));
45209 closure_set_ip(cl);
45210 cl->fn = fn;
45211 cl->wq = wq;
45212diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45213index 3a57679..c58cdaf 100644
45214--- a/drivers/md/bitmap.c
45215+++ b/drivers/md/bitmap.c
45216@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45217 chunk_kb ? "KB" : "B");
45218 if (bitmap->storage.file) {
45219 seq_printf(seq, ", file: ");
45220- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45221+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45222 }
45223
45224 seq_printf(seq, "\n");
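seq_path()'s second argument is the set of characters to escape; adding '\\' makes a literal backslash in the bitmap file's path get escaped like the whitespace characters, so a parser of the single-line status output cannot mistake a raw backslash in a filename for an escape sequence. In miniature, assuming mangle_path()-style octal escaping:

#include <stdio.h>
#include <string.h>

static void print_escaped(const char *s, const char *esc)
{
        for (; *s; s++) {
                if (strchr(esc, *s))
                        printf("\\%03o", (unsigned char)*s);
                else
                        putchar(*s);
        }
        putchar('\n');
}

int main(void)
{
        print_escaped("/mnt/back\\slash dir/bitmap", " \t\n\\");
        return 0;
}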
45225diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45226index c8a18e4..0ab43e5 100644
45227--- a/drivers/md/dm-ioctl.c
45228+++ b/drivers/md/dm-ioctl.c
45229@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45230 cmd == DM_LIST_VERSIONS_CMD)
45231 return 0;
45232
45233- if ((cmd == DM_DEV_CREATE_CMD)) {
45234+ if (cmd == DM_DEV_CREATE_CMD) {
45235 if (!*param->name) {
45236 DMWARN("name not supplied when creating device");
45237 return -EINVAL;
45238diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45239index 089d627..ef7352e 100644
45240--- a/drivers/md/dm-raid1.c
45241+++ b/drivers/md/dm-raid1.c
45242@@ -40,7 +40,7 @@ enum dm_raid1_error {
45243
45244 struct mirror {
45245 struct mirror_set *ms;
45246- atomic_t error_count;
45247+ atomic_unchecked_t error_count;
45248 unsigned long error_type;
45249 struct dm_dev *dev;
45250 sector_t offset;
45251@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45252 struct mirror *m;
45253
45254 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45255- if (!atomic_read(&m->error_count))
45256+ if (!atomic_read_unchecked(&m->error_count))
45257 return m;
45258
45259 return NULL;
45260@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45261 * simple way to tell if a device has encountered
45262 * errors.
45263 */
45264- atomic_inc(&m->error_count);
45265+ atomic_inc_unchecked(&m->error_count);
45266
45267 if (test_and_set_bit(error_type, &m->error_type))
45268 return;
45269@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45270 struct mirror *m = get_default_mirror(ms);
45271
45272 do {
45273- if (likely(!atomic_read(&m->error_count)))
45274+ if (likely(!atomic_read_unchecked(&m->error_count)))
45275 return m;
45276
45277 if (m-- == ms->mirror)
45278@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45279 {
45280 struct mirror *default_mirror = get_default_mirror(m->ms);
45281
45282- return !atomic_read(&default_mirror->error_count);
45283+ return !atomic_read_unchecked(&default_mirror->error_count);
45284 }
45285
45286 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45287@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45288 */
45289 if (likely(region_in_sync(ms, region, 1)))
45290 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45291- else if (m && atomic_read(&m->error_count))
45292+ else if (m && atomic_read_unchecked(&m->error_count))
45293 m = NULL;
45294
45295 if (likely(m))
45296@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45297 }
45298
45299 ms->mirror[mirror].ms = ms;
45300- atomic_set(&(ms->mirror[mirror].error_count), 0);
45301+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45302 ms->mirror[mirror].error_type = 0;
45303 ms->mirror[mirror].offset = offset;
45304
45305@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
45306 */
45307 static char device_status_char(struct mirror *m)
45308 {
45309- if (!atomic_read(&(m->error_count)))
45310+ if (!atomic_read_unchecked(&(m->error_count)))
45311 return 'A';
45312
45313 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45314diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45315index f478a4c..4b8e5ef 100644
45316--- a/drivers/md/dm-stats.c
45317+++ b/drivers/md/dm-stats.c
45318@@ -382,7 +382,7 @@ do_sync_free:
45319 synchronize_rcu_expedited();
45320 dm_stat_free(&s->rcu_head);
45321 } else {
45322- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45323+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45324 call_rcu(&s->rcu_head, dm_stat_free);
45325 }
45326 return 0;
45327@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45328 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45329 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45330 ));
45331- ACCESS_ONCE(last->last_sector) = end_sector;
45332- ACCESS_ONCE(last->last_rw) = bi_rw;
45333+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45334+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45335 }
45336
45337 rcu_read_lock();
45338diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45339index f8b37d4..5c5cafd 100644
45340--- a/drivers/md/dm-stripe.c
45341+++ b/drivers/md/dm-stripe.c
45342@@ -21,7 +21,7 @@ struct stripe {
45343 struct dm_dev *dev;
45344 sector_t physical_start;
45345
45346- atomic_t error_count;
45347+ atomic_unchecked_t error_count;
45348 };
45349
45350 struct stripe_c {
45351@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45352 kfree(sc);
45353 return r;
45354 }
45355- atomic_set(&(sc->stripe[i].error_count), 0);
45356+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45357 }
45358
45359 ti->private = sc;
45360@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45361 DMEMIT("%d ", sc->stripes);
45362 for (i = 0; i < sc->stripes; i++) {
45363 DMEMIT("%s ", sc->stripe[i].dev->name);
45364- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45365+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45366 'D' : 'A';
45367 }
45368 buffer[i] = '\0';
45369@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45370 */
45371 for (i = 0; i < sc->stripes; i++)
45372 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45373- atomic_inc(&(sc->stripe[i].error_count));
45374- if (atomic_read(&(sc->stripe[i].error_count)) <
45375+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45376+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45377 DM_IO_ERROR_THRESHOLD)
45378 schedule_work(&sc->trigger_event);
45379 }
45380diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45381index 6554d91..b0221c2 100644
45382--- a/drivers/md/dm-table.c
45383+++ b/drivers/md/dm-table.c
45384@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45385 if (!dev_size)
45386 return 0;
45387
45388- if ((start >= dev_size) || (start + len > dev_size)) {
45389+ if ((start >= dev_size) || (len > dev_size - start)) {
45390 DMWARN("%s: %s too small for target: "
45391 "start=%llu, len=%llu, dev_size=%llu",
45392 dm_device_name(ti->table->md), bdevname(bdev, b),
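The dm-table check is rewritten because start + len can wrap around for a huge len, letting a bogus range pass validation; once start >= dev_size has been excluded, len > dev_size - start tests the same condition with no possibility of overflow. Concretely:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool invalid_old(uint64_t start, uint64_t len, uint64_t size)
{
        return start >= size || start + len > size;   /* start + len can wrap */
}

static bool invalid_new(uint64_t start, uint64_t len, uint64_t size)
{
        return start >= size || len > size - start;   /* no wraparound */
}

int main(void)
{
        uint64_t start = 1, len = UINT64_MAX, size = 100;

        /* old: 0 (range wrongly accepted)  new: 1 (rejected) */
        printf("old: %d  new: %d\n",
               invalid_old(start, len, size), invalid_new(start, len, size));
        return 0;
}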
45393diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45394index 79f6941..b33b4e0 100644
45395--- a/drivers/md/dm-thin-metadata.c
45396+++ b/drivers/md/dm-thin-metadata.c
45397@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45398 {
45399 pmd->info.tm = pmd->tm;
45400 pmd->info.levels = 2;
45401- pmd->info.value_type.context = pmd->data_sm;
45402+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45403 pmd->info.value_type.size = sizeof(__le64);
45404 pmd->info.value_type.inc = data_block_inc;
45405 pmd->info.value_type.dec = data_block_dec;
45406@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45407
45408 pmd->bl_info.tm = pmd->tm;
45409 pmd->bl_info.levels = 1;
45410- pmd->bl_info.value_type.context = pmd->data_sm;
45411+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45412 pmd->bl_info.value_type.size = sizeof(__le64);
45413 pmd->bl_info.value_type.inc = data_block_inc;
45414 pmd->bl_info.value_type.dec = data_block_dec;
45415diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45416index 8001fe9..abdd0d0 100644
45417--- a/drivers/md/dm.c
45418+++ b/drivers/md/dm.c
45419@@ -188,9 +188,9 @@ struct mapped_device {
45420 /*
45421 * Event handling.
45422 */
45423- atomic_t event_nr;
45424+ atomic_unchecked_t event_nr;
45425 wait_queue_head_t eventq;
45426- atomic_t uevent_seq;
45427+ atomic_unchecked_t uevent_seq;
45428 struct list_head uevent_list;
45429 spinlock_t uevent_lock; /* Protect access to uevent_list */
45430
45431@@ -2163,8 +2163,8 @@ static struct mapped_device *alloc_dev(int minor)
45432 spin_lock_init(&md->deferred_lock);
45433 atomic_set(&md->holders, 1);
45434 atomic_set(&md->open_count, 0);
45435- atomic_set(&md->event_nr, 0);
45436- atomic_set(&md->uevent_seq, 0);
45437+ atomic_set_unchecked(&md->event_nr, 0);
45438+ atomic_set_unchecked(&md->uevent_seq, 0);
45439 INIT_LIST_HEAD(&md->uevent_list);
45440 INIT_LIST_HEAD(&md->table_devices);
45441 spin_lock_init(&md->uevent_lock);
45442@@ -2329,7 +2329,7 @@ static void event_callback(void *context)
45443
45444 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45445
45446- atomic_inc(&md->event_nr);
45447+ atomic_inc_unchecked(&md->event_nr);
45448 wake_up(&md->eventq);
45449 }
45450
45451@@ -3175,18 +3175,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45452
45453 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45454 {
45455- return atomic_add_return(1, &md->uevent_seq);
45456+ return atomic_add_return_unchecked(1, &md->uevent_seq);
45457 }
45458
45459 uint32_t dm_get_event_nr(struct mapped_device *md)
45460 {
45461- return atomic_read(&md->event_nr);
45462+ return atomic_read_unchecked(&md->event_nr);
45463 }
45464
45465 int dm_wait_event(struct mapped_device *md, int event_nr)
45466 {
45467 return wait_event_interruptible(md->eventq,
45468- (event_nr != atomic_read(&md->event_nr)));
45469+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45470 }
45471
45472 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45473diff --git a/drivers/md/md.c b/drivers/md/md.c
45474index e47d1dd..ebc3480 100644
45475--- a/drivers/md/md.c
45476+++ b/drivers/md/md.c
45477@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45478 * start build, activate spare
45479 */
45480 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45481-static atomic_t md_event_count;
45482+static atomic_unchecked_t md_event_count;
45483 void md_new_event(struct mddev *mddev)
45484 {
45485- atomic_inc(&md_event_count);
45486+ atomic_inc_unchecked(&md_event_count);
45487 wake_up(&md_event_waiters);
45488 }
45489 EXPORT_SYMBOL_GPL(md_new_event);
45490@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45491 */
45492 static void md_new_event_inintr(struct mddev *mddev)
45493 {
45494- atomic_inc(&md_event_count);
45495+ atomic_inc_unchecked(&md_event_count);
45496 wake_up(&md_event_waiters);
45497 }
45498
45499@@ -1442,7 +1442,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45500 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45501 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45502 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45503- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45504+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45505
45506 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45507 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45508@@ -1693,7 +1693,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45509 else
45510 sb->resync_offset = cpu_to_le64(0);
45511
45512- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45513+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45514
45515 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45516 sb->size = cpu_to_le64(mddev->dev_sectors);
45517@@ -2564,7 +2564,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
45518 static ssize_t
45519 errors_show(struct md_rdev *rdev, char *page)
45520 {
45521- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45522+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45523 }
45524
45525 static ssize_t
45526@@ -2573,7 +2573,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45527 char *e;
45528 unsigned long n = simple_strtoul(buf, &e, 10);
45529 if (*buf && (*e == 0 || *e == '\n')) {
45530- atomic_set(&rdev->corrected_errors, n);
45531+ atomic_set_unchecked(&rdev->corrected_errors, n);
45532 return len;
45533 }
45534 return -EINVAL;
45535@@ -3009,8 +3009,8 @@ int md_rdev_init(struct md_rdev *rdev)
45536 rdev->sb_loaded = 0;
45537 rdev->bb_page = NULL;
45538 atomic_set(&rdev->nr_pending, 0);
45539- atomic_set(&rdev->read_errors, 0);
45540- atomic_set(&rdev->corrected_errors, 0);
45541+ atomic_set_unchecked(&rdev->read_errors, 0);
45542+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45543
45544 INIT_LIST_HEAD(&rdev->same_set);
45545 init_waitqueue_head(&rdev->blocked_wait);
45546@@ -7083,7 +7083,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45547
45548 spin_unlock(&pers_lock);
45549 seq_printf(seq, "\n");
45550- seq->poll_event = atomic_read(&md_event_count);
45551+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45552 return 0;
45553 }
45554 if (v == (void*)2) {
45555@@ -7186,7 +7186,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45556 return error;
45557
45558 seq = file->private_data;
45559- seq->poll_event = atomic_read(&md_event_count);
45560+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45561 return error;
45562 }
45563
45564@@ -7203,7 +7203,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45565 /* always allow read */
45566 mask = POLLIN | POLLRDNORM;
45567
45568- if (seq->poll_event != atomic_read(&md_event_count))
45569+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45570 mask |= POLLERR | POLLPRI;
45571 return mask;
45572 }
45573@@ -7250,7 +7250,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45574 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45575 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45576 (int)part_stat_read(&disk->part0, sectors[1]) -
45577- atomic_read(&disk->sync_io);
45578+ atomic_read_unchecked(&disk->sync_io);
45579 /* sync IO will cause sync_io to increase before the disk_stats
45580 * as sync_io is counted when a request starts, and
45581 * disk_stats is counted when it completes.
45582diff --git a/drivers/md/md.h b/drivers/md/md.h
45583index 318ca8f..31e4478 100644
45584--- a/drivers/md/md.h
45585+++ b/drivers/md/md.h
45586@@ -94,13 +94,13 @@ struct md_rdev {
45587 * only maintained for arrays that
45588 * support hot removal
45589 */
45590- atomic_t read_errors; /* number of consecutive read errors that
45591+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45592 * we have tried to ignore.
45593 */
45594 struct timespec last_read_error; /* monotonic time since our
45595 * last read error
45596 */
45597- atomic_t corrected_errors; /* number of corrected read errors,
45598+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45599 * for reporting to userspace and storing
45600 * in superblock.
45601 */
45602@@ -476,7 +476,7 @@ extern void mddev_unlock(struct mddev *mddev);
45603
45604 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45605 {
45606- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45607+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45608 }
45609
45610 struct md_personality
45611diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45612index e8a9042..35bd145 100644
45613--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45614+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45615@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45616 * Flick into a mode where all blocks get allocated in the new area.
45617 */
45618 smm->begin = old_len;
45619- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45620+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45621
45622 /*
45623 * Extend.
45624@@ -714,7 +714,7 @@ out:
45625 /*
45626 * Switch back to normal behaviour.
45627 */
45628- memcpy(sm, &ops, sizeof(*sm));
45629+ memcpy((void *)sm, &ops, sizeof(*sm));
45630 return r;
45631 }
45632
45633diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45634index 3e6d115..ffecdeb 100644
45635--- a/drivers/md/persistent-data/dm-space-map.h
45636+++ b/drivers/md/persistent-data/dm-space-map.h
45637@@ -71,6 +71,7 @@ struct dm_space_map {
45638 dm_sm_threshold_fn fn,
45639 void *context);
45640 };
45641+typedef struct dm_space_map __no_const dm_space_map_no_const;
45642
45643 /*----------------------------------------------------------------*/
45644
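
The two persistent-data hunks above exist because the patch's constify plugin turns structures made up purely of function pointers into read-only objects. sm_metadata_extend() swaps the ops of a live dm_space_map wholesale and back again, so the memcpy() destinations are cast through (void *) and a __no_const typedef is added for code that needs a writable instance. A compilable sketch of the swap that must stay legal (layout simplified, most ops omitted):

	#include <string.h>

	struct sm_ops {
		int (*new_block)(void *sm);
		int (*extend)(void *sm, unsigned extra_blocks);
	};

	static int bootstrap_new_block(void *sm) { (void)sm; return 0; }
	static int bootstrap_extend(void *sm, unsigned n) { (void)sm; (void)n; return 0; }

	static const struct sm_ops bootstrap_ops = {
		.new_block = bootstrap_new_block,
		.extend    = bootstrap_extend,
	};

	/* The live object is flipped into bootstrap mode and back; with
	 * constified ops it must be writable, hence the (void *) cast in
	 * the hunk (or a __no_const-typed instance). */
	static void enter_bootstrap(struct sm_ops *sm)
	{
		memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
	}
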
45645diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45646index d34e238..34f8d98 100644
45647--- a/drivers/md/raid1.c
45648+++ b/drivers/md/raid1.c
45649@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45650 if (r1_sync_page_io(rdev, sect, s,
45651 bio->bi_io_vec[idx].bv_page,
45652 READ) != 0)
45653- atomic_add(s, &rdev->corrected_errors);
45654+ atomic_add_unchecked(s, &rdev->corrected_errors);
45655 }
45656 sectors -= s;
45657 sect += s;
45658@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45659 !test_bit(Faulty, &rdev->flags)) {
45660 if (r1_sync_page_io(rdev, sect, s,
45661 conf->tmppage, READ)) {
45662- atomic_add(s, &rdev->corrected_errors);
45663+ atomic_add_unchecked(s, &rdev->corrected_errors);
45664 printk(KERN_INFO
45665 "md/raid1:%s: read error corrected "
45666 "(%d sectors at %llu on %s)\n",
45667diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45668index a7196c4..439f012 100644
45669--- a/drivers/md/raid10.c
45670+++ b/drivers/md/raid10.c
45671@@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
45672 /* The write handler will notice the lack of
45673 * R10BIO_Uptodate and record any errors etc
45674 */
45675- atomic_add(r10_bio->sectors,
45676+ atomic_add_unchecked(r10_bio->sectors,
45677 &conf->mirrors[d].rdev->corrected_errors);
45678
45679 /* for reconstruct, we always reschedule after a read.
45680@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45681 {
45682 struct timespec cur_time_mon;
45683 unsigned long hours_since_last;
45684- unsigned int read_errors = atomic_read(&rdev->read_errors);
45685+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45686
45687 ktime_get_ts(&cur_time_mon);
45688
45689@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45690 * overflowing the shift of read_errors by hours_since_last.
45691 */
45692 if (hours_since_last >= 8 * sizeof(read_errors))
45693- atomic_set(&rdev->read_errors, 0);
45694+ atomic_set_unchecked(&rdev->read_errors, 0);
45695 else
45696- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45697+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45698 }
45699
45700 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45701@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45702 return;
45703
45704 check_decay_read_errors(mddev, rdev);
45705- atomic_inc(&rdev->read_errors);
45706- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45707+ atomic_inc_unchecked(&rdev->read_errors);
45708+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45709 char b[BDEVNAME_SIZE];
45710 bdevname(rdev->bdev, b);
45711
45712@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45713 "md/raid10:%s: %s: Raid device exceeded "
45714 "read_error threshold [cur %d:max %d]\n",
45715 mdname(mddev), b,
45716- atomic_read(&rdev->read_errors), max_read_errors);
45717+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45718 printk(KERN_NOTICE
45719 "md/raid10:%s: %s: Failing raid device\n",
45720 mdname(mddev), b);
45721@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45722 sect +
45723 choose_data_offset(r10_bio, rdev)),
45724 bdevname(rdev->bdev, b));
45725- atomic_add(s, &rdev->corrected_errors);
45726+ atomic_add_unchecked(s, &rdev->corrected_errors);
45727 }
45728
45729 rdev_dec_pending(rdev, mddev);
45730diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
45731index cd2f96b..3876e63 100644
45732--- a/drivers/md/raid5.c
45733+++ b/drivers/md/raid5.c
45734@@ -947,23 +947,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
45735 struct bio_vec bvl;
45736 struct bvec_iter iter;
45737 struct page *bio_page;
45738- int page_offset;
45739+ s64 page_offset;
45740 struct async_submit_ctl submit;
45741 enum async_tx_flags flags = 0;
45742
45743 if (bio->bi_iter.bi_sector >= sector)
45744- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
45745+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
45746 else
45747- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
45748+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
45749
45750 if (frombio)
45751 flags |= ASYNC_TX_FENCE;
45752 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
45753
45754 bio_for_each_segment(bvl, bio, iter) {
45755- int len = bvl.bv_len;
45756- int clen;
45757- int b_offset = 0;
45758+ s64 len = bvl.bv_len;
45759+ s64 clen;
45760+ s64 b_offset = 0;
45761
45762 if (page_offset < 0) {
45763 b_offset = -page_offset;
45764@@ -1727,6 +1727,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
45765 return 1;
45766 }
45767
45768+#ifdef CONFIG_GRKERNSEC_HIDESYM
45769+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
45770+#endif
45771+
45772 static int grow_stripes(struct r5conf *conf, int num)
45773 {
45774 struct kmem_cache *sc;
45775@@ -1738,7 +1742,11 @@ static int grow_stripes(struct r5conf *conf, int num)
45776 "raid%d-%s", conf->level, mdname(conf->mddev));
45777 else
45778 sprintf(conf->cache_name[0],
45779+#ifdef CONFIG_GRKERNSEC_HIDESYM
45780+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
45781+#else
45782 "raid%d-%p", conf->level, conf->mddev);
45783+#endif
45784 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
45785
45786 conf->active_name = 0;
45787@@ -2014,21 +2022,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
45788 mdname(conf->mddev), STRIPE_SECTORS,
45789 (unsigned long long)s,
45790 bdevname(rdev->bdev, b));
45791- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
45792+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
45793 clear_bit(R5_ReadError, &sh->dev[i].flags);
45794 clear_bit(R5_ReWrite, &sh->dev[i].flags);
45795 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
45796 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
45797
45798- if (atomic_read(&rdev->read_errors))
45799- atomic_set(&rdev->read_errors, 0);
45800+ if (atomic_read_unchecked(&rdev->read_errors))
45801+ atomic_set_unchecked(&rdev->read_errors, 0);
45802 } else {
45803 const char *bdn = bdevname(rdev->bdev, b);
45804 int retry = 0;
45805 int set_bad = 0;
45806
45807 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
45808- atomic_inc(&rdev->read_errors);
45809+ atomic_inc_unchecked(&rdev->read_errors);
45810 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
45811 printk_ratelimited(
45812 KERN_WARNING
45813@@ -2056,7 +2064,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
45814 mdname(conf->mddev),
45815 (unsigned long long)s,
45816 bdn);
45817- } else if (atomic_read(&rdev->read_errors)
45818+ } else if (atomic_read_unchecked(&rdev->read_errors)
45819 > conf->max_nr_stripes)
45820 printk(KERN_WARNING
45821 "md/raid:%s: Too many read errors, failing device %s.\n",
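
Two separate concerns meet in the raid5.c hunks: async_copy_data() widens its offset arithmetic from int to s64 so that a sector delta multiplied by 512 cannot overflow, and grow_stripes() stops formatting the mddev pointer into the slab cache name under GRKERNSEC_HIDESYM, since "raid%d-%p" would leak a kernel address through slabinfo. A sketch of the HIDESYM-safe naming, using a plain counter where the patch uses an atomic_unchecked_t:

	#include <stdio.h>

	static unsigned long raid5_cache_id; /* atomic_inc_return_unchecked() in the patch */

	static void make_cache_name(char *buf, size_t len, int level)
	{
		/* unique and stable, but reveals no kernel pointer */
		snprintf(buf, len, "raid%d-%08lx", level, ++raid5_cache_id);
	}
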
45822diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
45823index 983db75..ef9248c 100644
45824--- a/drivers/media/dvb-core/dvbdev.c
45825+++ b/drivers/media/dvb-core/dvbdev.c
45826@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
45827 const struct dvb_device *template, void *priv, int type)
45828 {
45829 struct dvb_device *dvbdev;
45830- struct file_operations *dvbdevfops;
45831+ file_operations_no_const *dvbdevfops;
45832 struct device *clsdev;
45833 int minor;
45834 int id;
45835diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
45836index 6ad22b6..6e90e2a 100644
45837--- a/drivers/media/dvb-frontends/af9033.h
45838+++ b/drivers/media/dvb-frontends/af9033.h
45839@@ -96,6 +96,6 @@ struct af9033_ops {
45840 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
45841 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
45842 int onoff);
45843-};
45844+} __no_const;
45845
45846 #endif /* AF9033_H */
45847diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
45848index 9b6c3bb..baeb5c7 100644
45849--- a/drivers/media/dvb-frontends/dib3000.h
45850+++ b/drivers/media/dvb-frontends/dib3000.h
45851@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
45852 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
45853 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
45854 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
45855-};
45856+} __no_const;
45857
45858 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
45859 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
45860diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
45861index 1fea0e9..321ce8f 100644
45862--- a/drivers/media/dvb-frontends/dib7000p.h
45863+++ b/drivers/media/dvb-frontends/dib7000p.h
45864@@ -64,7 +64,7 @@ struct dib7000p_ops {
45865 int (*get_adc_power)(struct dvb_frontend *fe);
45866 int (*slave_reset)(struct dvb_frontend *fe);
45867 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
45868-};
45869+} __no_const;
45870
45871 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
45872 void *dib7000p_attach(struct dib7000p_ops *ops);
45873diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
45874index 84cc103..5780c54 100644
45875--- a/drivers/media/dvb-frontends/dib8000.h
45876+++ b/drivers/media/dvb-frontends/dib8000.h
45877@@ -61,7 +61,7 @@ struct dib8000_ops {
45878 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
45879 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
45880 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
45881-};
45882+} __no_const;
45883
45884 #if IS_ENABLED(CONFIG_DVB_DIB8000)
45885 void *dib8000_attach(struct dib8000_ops *ops);
45886diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
45887index 860c98fc..497fa25 100644
45888--- a/drivers/media/pci/cx88/cx88-video.c
45889+++ b/drivers/media/pci/cx88/cx88-video.c
45890@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
45891
45892 /* ------------------------------------------------------------------ */
45893
45894-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45895-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45896-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45897+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45898+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45899+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45900
45901 module_param_array(video_nr, int, NULL, 0444);
45902 module_param_array(vbi_nr, int, NULL, 0444);
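
The cx88 change is a plain type fix rather than hardening: the arrays are registered with module_param_array(..., int, ...) and filled with the driver's -1-style UNSET sentinel, so declaring them unsigned int mismatched both the parameter type and the sentinel. The same u32-to-int correction appears below for fmdrv_common.c's radio_nr. The corrected shape (UNSET and the board count here are stand-in definitions):

	#define UNSET (-1)
	#define CX88_MAXBOARDS 8

	/* int, not unsigned: must hold the -1 sentinel and match the
	 * module_param_array() type (the range initializer is a GCC
	 * extension, as used by the driver itself). */
	static int video_nr[CX88_MAXBOARDS] = { [0 ... (CX88_MAXBOARDS - 1)] = UNSET };
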
45903diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
45904index 802642d..5534900 100644
45905--- a/drivers/media/pci/ivtv/ivtv-driver.c
45906+++ b/drivers/media/pci/ivtv/ivtv-driver.c
45907@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
45908 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
45909
45910 /* ivtv instance counter */
45911-static atomic_t ivtv_instance = ATOMIC_INIT(0);
45912+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
45913
45914 /* Parameter declarations */
45915 static int cardtype[IVTV_MAX_CARDS];
45916diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
45917index 570d119..ed25830 100644
45918--- a/drivers/media/pci/solo6x10/solo6x10-core.c
45919+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
45920@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
45921
45922 static int solo_sysfs_init(struct solo_dev *solo_dev)
45923 {
45924- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45925+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45926 struct device *dev = &solo_dev->dev;
45927 const char *driver;
45928 int i;
45929diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
45930index 7ddc767..1c24361 100644
45931--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
45932+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
45933@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
45934
45935 int solo_g723_init(struct solo_dev *solo_dev)
45936 {
45937- static struct snd_device_ops ops = { NULL };
45938+ static struct snd_device_ops ops = { };
45939 struct snd_card *card;
45940 struct snd_kcontrol_new kctl;
45941 char name[32];
45942diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45943index 8c84846..27b4f83 100644
45944--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
45945+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45946@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
45947
45948 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
45949 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
45950- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
45951+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
45952 if (p2m_id < 0)
45953 p2m_id = -p2m_id;
45954 }
45955diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
45956index 1ca54b0..7d7cb9a 100644
45957--- a/drivers/media/pci/solo6x10/solo6x10.h
45958+++ b/drivers/media/pci/solo6x10/solo6x10.h
45959@@ -218,7 +218,7 @@ struct solo_dev {
45960
45961 /* P2M DMA Engine */
45962 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
45963- atomic_t p2m_count;
45964+ atomic_unchecked_t p2m_count;
45965 int p2m_jiffies;
45966 unsigned int p2m_timeouts;
45967
45968diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
45969index c135165..dc69499 100644
45970--- a/drivers/media/pci/tw68/tw68-core.c
45971+++ b/drivers/media/pci/tw68/tw68-core.c
45972@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
45973 module_param_array(card, int, NULL, 0444);
45974 MODULE_PARM_DESC(card, "card type");
45975
45976-static atomic_t tw68_instance = ATOMIC_INIT(0);
45977+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
45978
45979 /* ------------------------------------------------------------------ */
45980
45981diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45982index ba2d8f9..1566684 100644
45983--- a/drivers/media/platform/omap/omap_vout.c
45984+++ b/drivers/media/platform/omap/omap_vout.c
45985@@ -63,7 +63,6 @@ enum omap_vout_channels {
45986 OMAP_VIDEO2,
45987 };
45988
45989-static struct videobuf_queue_ops video_vbq_ops;
45990 /* Variables configurable through module params*/
45991 static u32 video1_numbuffers = 3;
45992 static u32 video2_numbuffers = 3;
45993@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
45994 {
45995 struct videobuf_queue *q;
45996 struct omap_vout_device *vout = NULL;
45997+ static struct videobuf_queue_ops video_vbq_ops = {
45998+ .buf_setup = omap_vout_buffer_setup,
45999+ .buf_prepare = omap_vout_buffer_prepare,
46000+ .buf_release = omap_vout_buffer_release,
46001+ .buf_queue = omap_vout_buffer_queue,
46002+ };
46003
46004 vout = video_drvdata(file);
46005 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46006@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
46007 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46008
46009 q = &vout->vbq;
46010- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46011- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46012- video_vbq_ops.buf_release = omap_vout_buffer_release;
46013- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46014 spin_lock_init(&vout->vbq_lock);
46015
46016 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
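
The omap_vout hunks replace a file-scope videobuf_queue_ops that was repopulated field-by-field on every open() with a function-local static initialized once by a designated initializer. Behaviour is identical, but the object is never written at run time, which is what lets the patch's constification treat such ops tables as read-only data. The general shape (names are illustrative, not the v4l2 API):

	struct queue_ops {
		int (*buf_setup)(void *q);
		int (*buf_prepare)(void *q);
	};

	static int my_setup(void *q)   { (void)q; return 0; }
	static int my_prepare(void *q) { (void)q; return 0; }

	static const struct queue_ops *vout_ops(void)
	{
		/* initialized at build time, never assigned afterwards */
		static const struct queue_ops ops = {
			.buf_setup   = my_setup,
			.buf_prepare = my_prepare,
		};
		return &ops;
	}
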
46017diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46018index fb2acc5..a2fcbdc4 100644
46019--- a/drivers/media/platform/s5p-tv/mixer.h
46020+++ b/drivers/media/platform/s5p-tv/mixer.h
46021@@ -156,7 +156,7 @@ struct mxr_layer {
46022 /** layer index (unique identifier) */
46023 int idx;
46024 /** callbacks for layer methods */
46025- struct mxr_layer_ops ops;
46026+ struct mxr_layer_ops *ops;
46027 /** format array */
46028 const struct mxr_format **fmt_array;
46029 /** size of format array */
46030diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46031index 74344c7..a39e70e 100644
46032--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46033+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46034@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46035 {
46036 struct mxr_layer *layer;
46037 int ret;
46038- struct mxr_layer_ops ops = {
46039+ static struct mxr_layer_ops ops = {
46040 .release = mxr_graph_layer_release,
46041 .buffer_set = mxr_graph_buffer_set,
46042 .stream_set = mxr_graph_stream_set,
46043diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46044index b713403..53cb5ad 100644
46045--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46046+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46047@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46048 layer->update_buf = next;
46049 }
46050
46051- layer->ops.buffer_set(layer, layer->update_buf);
46052+ layer->ops->buffer_set(layer, layer->update_buf);
46053
46054 if (done && done != layer->shadow_buf)
46055 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46056diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46057index 72d4f2e..4b2ea0d 100644
46058--- a/drivers/media/platform/s5p-tv/mixer_video.c
46059+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46060@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46061 layer->geo.src.height = layer->geo.src.full_height;
46062
46063 mxr_geometry_dump(mdev, &layer->geo);
46064- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46065+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46066 mxr_geometry_dump(mdev, &layer->geo);
46067 }
46068
46069@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46070 layer->geo.dst.full_width = mbus_fmt.width;
46071 layer->geo.dst.full_height = mbus_fmt.height;
46072 layer->geo.dst.field = mbus_fmt.field;
46073- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46074+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46075
46076 mxr_geometry_dump(mdev, &layer->geo);
46077 }
46078@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46079 /* set source size to highest accepted value */
46080 geo->src.full_width = max(geo->dst.full_width, pix->width);
46081 geo->src.full_height = max(geo->dst.full_height, pix->height);
46082- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46083+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46084 mxr_geometry_dump(mdev, &layer->geo);
46085 /* set cropping to total visible screen */
46086 geo->src.width = pix->width;
46087@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46088 geo->src.x_offset = 0;
46089 geo->src.y_offset = 0;
46090 /* assure consistency of geometry */
46091- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46092+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46093 mxr_geometry_dump(mdev, &layer->geo);
46094 /* set full size to lowest possible value */
46095 geo->src.full_width = 0;
46096 geo->src.full_height = 0;
46097- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46098+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46099 mxr_geometry_dump(mdev, &layer->geo);
46100
46101 /* returning results */
46102@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46103 target->width = s->r.width;
46104 target->height = s->r.height;
46105
46106- layer->ops.fix_geometry(layer, stage, s->flags);
46107+ layer->ops->fix_geometry(layer, stage, s->flags);
46108
46109 /* retrieve update selection rectangle */
46110 res.left = target->x_offset;
46111@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46112 mxr_output_get(mdev);
46113
46114 mxr_layer_update_output(layer);
46115- layer->ops.format_set(layer);
46116+ layer->ops->format_set(layer);
46117 /* enabling layer in hardware */
46118 spin_lock_irqsave(&layer->enq_slock, flags);
46119 layer->state = MXR_LAYER_STREAMING;
46120 spin_unlock_irqrestore(&layer->enq_slock, flags);
46121
46122- layer->ops.stream_set(layer, MXR_ENABLE);
46123+ layer->ops->stream_set(layer, MXR_ENABLE);
46124 mxr_streamer_get(mdev);
46125
46126 return 0;
46127@@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
46128 spin_unlock_irqrestore(&layer->enq_slock, flags);
46129
46130 /* disabling layer in hardware */
46131- layer->ops.stream_set(layer, MXR_DISABLE);
46132+ layer->ops->stream_set(layer, MXR_DISABLE);
46133 /* remove one streamer */
46134 mxr_streamer_put(mdev);
46135 /* allow changes in output configuration */
46136@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46137
46138 void mxr_layer_release(struct mxr_layer *layer)
46139 {
46140- if (layer->ops.release)
46141- layer->ops.release(layer);
46142+ if (layer->ops->release)
46143+ layer->ops->release(layer);
46144 }
46145
46146 void mxr_base_layer_release(struct mxr_layer *layer)
46147@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46148
46149 layer->mdev = mdev;
46150 layer->idx = idx;
46151- layer->ops = *ops;
46152+ layer->ops = ops;
46153
46154 spin_lock_init(&layer->enq_slock);
46155 INIT_LIST_HEAD(&layer->enq_list);
46156diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46157index c9388c4..ce71ece 100644
46158--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46159+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46160@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46161 {
46162 struct mxr_layer *layer;
46163 int ret;
46164- struct mxr_layer_ops ops = {
46165+ static struct mxr_layer_ops ops = {
46166 .release = mxr_vp_layer_release,
46167 .buffer_set = mxr_vp_buffer_set,
46168 .stream_set = mxr_vp_stream_set,
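
The s5p-tv mixer hunks convert mxr_layer from embedding a copy of its ops (layer->ops = *ops) to holding a pointer to one static table, with every call site rewritten from layer->ops.fn() to layer->ops->fn(). Through a pointer, the table can be a single shared (and constifiable) object instead of a writable per-layer copy. Minimal before/after:

	struct layer_ops { void (*stream_set)(void *layer, int enable); };

	static void vp_stream_set(void *layer, int enable) { (void)layer; (void)enable; }

	static const struct layer_ops vp_ops = { .stream_set = vp_stream_set };

	struct layer {
		const struct layer_ops *ops; /* was: struct layer_ops ops; */
	};

	static struct layer example = { .ops = &vp_ops };

	static void stream_on(struct layer *l)
	{
		l->ops->stream_set(l, 1); /* was: l->ops.stream_set(l, 1) */
	}
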
46169diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46170index 82affae..42833ec 100644
46171--- a/drivers/media/radio/radio-cadet.c
46172+++ b/drivers/media/radio/radio-cadet.c
46173@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46174 unsigned char readbuf[RDS_BUFFER];
46175 int i = 0;
46176
46177+ if (count > RDS_BUFFER)
46178+		return -EINVAL;
46179 mutex_lock(&dev->lock);
46180 if (dev->rdsstat == 0)
46181 cadet_start_rds(dev);
46182@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46183 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46184 mutex_unlock(&dev->lock);
46185
46186- if (i && copy_to_user(data, readbuf, i))
46187- return -EFAULT;
46188+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46189+ i = -EFAULT;
46190+
46191 return i;
46192 }
46193
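
cadet_read() stages RDS data through a fixed 256-byte stack buffer before copy_to_user(), so the added checks reject reads larger than RDS_BUFFER up front and re-check i against the staging buffer before copying out. A compilable model of a bounded staging copy; it clamps the caller's count, which is the more conventional read() behaviour, where the patch chooses to error instead:

	#include <stddef.h>
	#include <string.h>

	#define RDS_BUFFER 256

	static size_t staged_read(char *dst, size_t count,
				  const char *ring, size_t avail)
	{
		char stage[RDS_BUFFER];
		size_t n = count;

		if (n > sizeof(stage)) /* never overrun the staging buffer */
			n = sizeof(stage);
		if (n > avail)
			n = avail;
		memcpy(stage, ring, n);
		memcpy(dst, stage, n); /* copy_to_user() in the driver */
		return n;
	}
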
46194diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46195index 5236035..c622c74 100644
46196--- a/drivers/media/radio/radio-maxiradio.c
46197+++ b/drivers/media/radio/radio-maxiradio.c
46198@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46199 /* TEA5757 pin mappings */
46200 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46201
46202-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46203+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46204
46205 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46206 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46207diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46208index 050b3bb..79f62b9 100644
46209--- a/drivers/media/radio/radio-shark.c
46210+++ b/drivers/media/radio/radio-shark.c
46211@@ -79,7 +79,7 @@ struct shark_device {
46212 u32 last_val;
46213 };
46214
46215-static atomic_t shark_instance = ATOMIC_INIT(0);
46216+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46217
46218 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46219 {
46220diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46221index 8654e0d..0608a64 100644
46222--- a/drivers/media/radio/radio-shark2.c
46223+++ b/drivers/media/radio/radio-shark2.c
46224@@ -74,7 +74,7 @@ struct shark_device {
46225 u8 *transfer_buffer;
46226 };
46227
46228-static atomic_t shark_instance = ATOMIC_INIT(0);
46229+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46230
46231 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46232 {
46233diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46234index dccf586..d5db411 100644
46235--- a/drivers/media/radio/radio-si476x.c
46236+++ b/drivers/media/radio/radio-si476x.c
46237@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46238 struct si476x_radio *radio;
46239 struct v4l2_ctrl *ctrl;
46240
46241- static atomic_t instance = ATOMIC_INIT(0);
46242+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46243
46244 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46245 if (!radio)
46246diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
46247index 704397f..4d05977 100644
46248--- a/drivers/media/radio/wl128x/fmdrv_common.c
46249+++ b/drivers/media/radio/wl128x/fmdrv_common.c
46250@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
46251 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
46252
46253 /* Radio Nr */
46254-static u32 radio_nr = -1;
46255+static int radio_nr = -1;
46256 module_param(radio_nr, int, 0444);
46257 MODULE_PARM_DESC(radio_nr, "Radio Nr");
46258
46259diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46260index 9fd1527..8927230 100644
46261--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46262+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46263@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46264
46265 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46266 {
46267- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46268- char result[64];
46269- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46270- sizeof(result), 0);
46271+ char *buf;
46272+ char *result;
46273+ int retval;
46274+
46275+ buf = kmalloc(2, GFP_KERNEL);
46276+ if (buf == NULL)
46277+ return -ENOMEM;
46278+ result = kmalloc(64, GFP_KERNEL);
46279+ if (result == NULL) {
46280+ kfree(buf);
46281+ return -ENOMEM;
46282+ }
46283+
46284+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46285+ buf[1] = enable ? 1 : 0;
46286+
46287+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46288+
46289+ kfree(buf);
46290+ kfree(result);
46291+ return retval;
46292 }
46293
46294 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46295 {
46296- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46297- char state[3];
46298- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46299+ char *buf;
46300+ char *state;
46301+ int retval;
46302+
46303+ buf = kmalloc(2, GFP_KERNEL);
46304+ if (buf == NULL)
46305+ return -ENOMEM;
46306+ state = kmalloc(3, GFP_KERNEL);
46307+ if (state == NULL) {
46308+ kfree(buf);
46309+ return -ENOMEM;
46310+ }
46311+
46312+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46313+	buf[1] = enable ? 0 : 1;
46314+
46315+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46316+
46317+ kfree(buf);
46318+ kfree(state);
46319+ return retval;
46320 }
46321
46322 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46323 {
46324- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46325- char state[3];
46326+ char *query;
46327+ char *state;
46328 int ret;
46329+ query = kmalloc(1, GFP_KERNEL);
46330+ if (query == NULL)
46331+ return -ENOMEM;
46332+ state = kmalloc(3, GFP_KERNEL);
46333+ if (state == NULL) {
46334+ kfree(query);
46335+ return -ENOMEM;
46336+ }
46337+
46338+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46339
46340 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46341
46342- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46343- sizeof(state), 0);
46344+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46345 if (ret < 0) {
46346 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46347 "state info\n");
46348@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46349
46350 /* Copy this pointer as we are gonna need it in the release phase */
46351 cinergyt2_usb_device = adap->dev;
46352-
46353+ kfree(query);
46354+ kfree(state);
46355 return 0;
46356 }
46357
46358@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46359 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46360 {
46361 struct cinergyt2_state *st = d->priv;
46362- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46363+ u8 *key, *cmd;
46364 int i;
46365
46366+ cmd = kmalloc(1, GFP_KERNEL);
46367+ if (cmd == NULL)
46368+ return -EINVAL;
46369+		return -ENOMEM;
46370+ if (key == NULL) {
46371+ kfree(cmd);
46372+ return -EINVAL;
46373+		return -ENOMEM;
46374+
46375+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46376+
46377 *state = REMOTE_NO_KEY_PRESSED;
46378
46379- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46380+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46381 if (key[4] == 0xff) {
46382 /* key repeat */
46383 st->rc_counter++;
46384@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46385 *event = d->last_event;
46386 deb_rc("repeat key, event %x\n",
46387 *event);
46388- return 0;
46389+ goto out;
46390 }
46391 }
46392 deb_rc("repeated key (non repeatable)\n");
46393 }
46394- return 0;
46395+ goto out;
46396 }
46397
46398 /* hack to pass checksum on the custom field */
46399@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46400
46401 deb_rc("key: %*ph\n", 5, key);
46402 }
46403+out:
46404+ kfree(cmd);
46405+ kfree(key);
46406 return 0;
46407 }
46408
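
This cinergyT2-core.c rewrite, and the cinergyT2-fe.c, dvb-usb-firmware.c and technisat-usb2.c hunks after it, all apply one rule: buffers handed to dvb_usb_generic_rw(), usb_bulk_msg() or usb_control_msg() must not live on the stack, because USB transfers may DMA into them and the patch's KSTACKOVERFLOW option backs kernel stacks with vmalloc memory that cannot be used for DMA. Each call site therefore gains a kmalloc(), an -ENOMEM path that releases whatever was already allocated, and kfree()s on every exit. The shape of the conversion as a compilable sketch, with usb_rw() standing in for dvb_usb_generic_rw():

	#include <errno.h>
	#include <stdlib.h>

	extern int usb_rw(void *dev, unsigned char *wbuf, int wlen,
			  unsigned char *rbuf, int rlen);

	static int do_cmd(void *dev, unsigned char op, int enable)
	{
		unsigned char *buf, *reply;
		int ret;

		buf = malloc(2); /* kmalloc(2, GFP_KERNEL) in the driver */
		if (buf == NULL)
			return -ENOMEM;
		reply = malloc(64);
		if (reply == NULL) {
			free(buf);
			return -ENOMEM;
		}
		buf[0] = op;
		buf[1] = enable ? 1 : 0;
		ret = usb_rw(dev, buf, 2, reply, 64);
		free(reply);
		free(buf);
		return ret;
	}
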
46409diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46410index c890fe4..f9b2ae6 100644
46411--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46412+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46413@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46414 fe_status_t *status)
46415 {
46416 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46417- struct dvbt_get_status_msg result;
46418- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46419+ struct dvbt_get_status_msg *result;
46420+ u8 *cmd;
46421 int ret;
46422
46423- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46424- sizeof(result), 0);
46425+ cmd = kmalloc(1, GFP_KERNEL);
46426+ if (cmd == NULL)
46427+ return -ENOMEM;
46428+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46429+ if (result == NULL) {
46430+ kfree(cmd);
46431+ return -ENOMEM;
46432+ }
46433+
46434+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46435+
46436+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46437+ sizeof(*result), 0);
46438 if (ret < 0)
46439- return ret;
46440+ goto out;
46441
46442 *status = 0;
46443
46444- if (0xffff - le16_to_cpu(result.gain) > 30)
46445+ if (0xffff - le16_to_cpu(result->gain) > 30)
46446 *status |= FE_HAS_SIGNAL;
46447- if (result.lock_bits & (1 << 6))
46448+ if (result->lock_bits & (1 << 6))
46449 *status |= FE_HAS_LOCK;
46450- if (result.lock_bits & (1 << 5))
46451+ if (result->lock_bits & (1 << 5))
46452 *status |= FE_HAS_SYNC;
46453- if (result.lock_bits & (1 << 4))
46454+ if (result->lock_bits & (1 << 4))
46455 *status |= FE_HAS_CARRIER;
46456- if (result.lock_bits & (1 << 1))
46457+ if (result->lock_bits & (1 << 1))
46458 *status |= FE_HAS_VITERBI;
46459
46460 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46461 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46462 *status &= ~FE_HAS_LOCK;
46463
46464- return 0;
46465+out:
46466+ kfree(cmd);
46467+ kfree(result);
46468+ return ret;
46469 }
46470
46471 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46472 {
46473 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46474- struct dvbt_get_status_msg status;
46475- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46476+ struct dvbt_get_status_msg *status;
46477+ char *cmd;
46478 int ret;
46479
46480- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46481- sizeof(status), 0);
46482+ cmd = kmalloc(1, GFP_KERNEL);
46483+ if (cmd == NULL)
46484+ return -ENOMEM;
46485+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46486+ if (status == NULL) {
46487+ kfree(cmd);
46488+ return -ENOMEM;
46489+ }
46490+
46491+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46492+
46493+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46494+ sizeof(*status), 0);
46495 if (ret < 0)
46496- return ret;
46497+ goto out;
46498
46499- *ber = le32_to_cpu(status.viterbi_error_rate);
46500+ *ber = le32_to_cpu(status->viterbi_error_rate);
46501+out:
46502+ kfree(cmd);
46503+ kfree(status);
46504 return 0;
46505 }
46506
46507 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46508 {
46509 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46510- struct dvbt_get_status_msg status;
46511- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46512+ struct dvbt_get_status_msg *status;
46513+ u8 *cmd;
46514 int ret;
46515
46516- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46517- sizeof(status), 0);
46518+ cmd = kmalloc(1, GFP_KERNEL);
46519+ if (cmd == NULL)
46520+ return -ENOMEM;
46521+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46522+ if (status == NULL) {
46523+ kfree(cmd);
46524+ return -ENOMEM;
46525+ }
46526+
46527+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46528+
46529+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46530+ sizeof(*status), 0);
46531 if (ret < 0) {
46532 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46533 ret);
46534- return ret;
46535+ goto out;
46536 }
46537- *unc = le32_to_cpu(status.uncorrected_block_count);
46538- return 0;
46539+ *unc = le32_to_cpu(status->uncorrected_block_count);
46540+
46541+out:
46542+ kfree(cmd);
46543+ kfree(status);
46544+ return ret;
46545 }
46546
46547 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46548 u16 *strength)
46549 {
46550 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46551- struct dvbt_get_status_msg status;
46552- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46553+ struct dvbt_get_status_msg *status;
46554+ char *cmd;
46555 int ret;
46556
46557- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46558- sizeof(status), 0);
46559+ cmd = kmalloc(1, GFP_KERNEL);
46560+ if (cmd == NULL)
46561+ return -ENOMEM;
46562+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46563+ if (status == NULL) {
46564+ kfree(cmd);
46565+ return -ENOMEM;
46566+ }
46567+
46568+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46569+
46570+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46571+ sizeof(*status), 0);
46572 if (ret < 0) {
46573 err("cinergyt2_fe_read_signal_strength() Failed!"
46574 " (Error=%d)\n", ret);
46575- return ret;
46576+ goto out;
46577 }
46578- *strength = (0xffff - le16_to_cpu(status.gain));
46579+ *strength = (0xffff - le16_to_cpu(status->gain));
46580+
46581+out:
46582+ kfree(cmd);
46583+ kfree(status);
46584 return 0;
46585 }
46586
46587 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46588 {
46589 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46590- struct dvbt_get_status_msg status;
46591- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46592+ struct dvbt_get_status_msg *status;
46593+ char *cmd;
46594 int ret;
46595
46596- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46597- sizeof(status), 0);
46598+ cmd = kmalloc(1, GFP_KERNEL);
46599+ if (cmd == NULL)
46600+ return -ENOMEM;
46601+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46602+ if (status == NULL) {
46603+ kfree(cmd);
46604+ return -ENOMEM;
46605+ }
46606+
46607+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46608+
46609+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46610+ sizeof(*status), 0);
46611 if (ret < 0) {
46612 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46613- return ret;
46614+ goto out;
46615 }
46616- *snr = (status.snr << 8) | status.snr;
46617- return 0;
46618+ *snr = (status->snr << 8) | status->snr;
46619+
46620+out:
46621+ kfree(cmd);
46622+ kfree(status);
46623+ return ret;
46624 }
46625
46626 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46627@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46628 {
46629 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46630 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46631- struct dvbt_set_parameters_msg param;
46632- char result[2];
46633+ struct dvbt_set_parameters_msg *param;
46634+ char *result;
46635 int err;
46636
46637- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46638- param.tps = cpu_to_le16(compute_tps(fep));
46639- param.freq = cpu_to_le32(fep->frequency / 1000);
46640- param.flags = 0;
46641+ result = kmalloc(2, GFP_KERNEL);
46642+ if (result == NULL)
46643+ return -ENOMEM;
46644+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46645+ if (param == NULL) {
46646+ kfree(result);
46647+ return -ENOMEM;
46648+ }
46649+
46650+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46651+ param->tps = cpu_to_le16(compute_tps(fep));
46652+ param->freq = cpu_to_le32(fep->frequency / 1000);
46653+ param->flags = 0;
46654
46655 switch (fep->bandwidth_hz) {
46656 default:
46657 case 8000000:
46658- param.bandwidth = 8;
46659+ param->bandwidth = 8;
46660 break;
46661 case 7000000:
46662- param.bandwidth = 7;
46663+ param->bandwidth = 7;
46664 break;
46665 case 6000000:
46666- param.bandwidth = 6;
46667+ param->bandwidth = 6;
46668 break;
46669 }
46670
46671 err = dvb_usb_generic_rw(state->d,
46672- (char *)&param, sizeof(param),
46673- result, sizeof(result), 0);
46674+ (char *)param, sizeof(*param),
46675+ result, 2, 0);
46676 if (err < 0)
46677 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46678
46679- return (err < 0) ? err : 0;
46680+ kfree(result);
46681+ kfree(param);
46682+ return err;
46683 }
46684
46685 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46686diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46687index 733a7ff..f8b52e3 100644
46688--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46689+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46690@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46691
46692 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46693 {
46694- struct hexline hx;
46695- u8 reset;
46696+ struct hexline *hx;
46697+ u8 *reset;
46698 int ret,pos=0;
46699
46700+ reset = kmalloc(1, GFP_KERNEL);
46701+ if (reset == NULL)
46702+ return -ENOMEM;
46703+
46704+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46705+ if (hx == NULL) {
46706+ kfree(reset);
46707+ return -ENOMEM;
46708+ }
46709+
46710 /* stop the CPU */
46711- reset = 1;
46712- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46713+ reset[0] = 1;
46714+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46715 err("could not stop the USB controller CPU.");
46716
46717- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46718- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46719- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46720+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46721+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46722+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46723
46724- if (ret != hx.len) {
46725+ if (ret != hx->len) {
46726 err("error while transferring firmware "
46727 "(transferred size: %d, block size: %d)",
46728- ret,hx.len);
46729+ ret,hx->len);
46730 ret = -EINVAL;
46731 break;
46732 }
46733 }
46734 if (ret < 0) {
46735 err("firmware download failed at %d with %d",pos,ret);
46736+ kfree(reset);
46737+ kfree(hx);
46738 return ret;
46739 }
46740
46741 if (ret == 0) {
46742 /* restart the CPU */
46743- reset = 0;
46744- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46745+ reset[0] = 0;
46746+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46747 err("could not restart the USB controller CPU.");
46748 ret = -EINVAL;
46749 }
46750 } else
46751 ret = -EIO;
46752
46753+ kfree(reset);
46754+ kfree(hx);
46755+
46756 return ret;
46757 }
46758 EXPORT_SYMBOL(usb_cypress_load_firmware);
46759diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
46760index 1a3df10..57997a5 100644
46761--- a/drivers/media/usb/dvb-usb/dw2102.c
46762+++ b/drivers/media/usb/dvb-usb/dw2102.c
46763@@ -118,7 +118,7 @@ struct su3000_state {
46764
46765 struct s6x0_state {
46766 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
46767-};
46768+} __no_const;
46769
46770 /* debug */
46771 static int dvb_usb_dw2102_debug;
46772diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
46773index 5801ae7..83f71fa 100644
46774--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
46775+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
46776@@ -87,8 +87,11 @@ struct technisat_usb2_state {
46777 static int technisat_usb2_i2c_access(struct usb_device *udev,
46778 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
46779 {
46780- u8 b[64];
46781- int ret, actual_length;
46782+ u8 *b = kmalloc(64, GFP_KERNEL);
46783+ int ret, actual_length, error = 0;
46784+
46785+ if (b == NULL)
46786+ return -ENOMEM;
46787
46788 deb_i2c("i2c-access: %02x, tx: ", device_addr);
46789 debug_dump(tx, txlen, deb_i2c);
46790@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46791
46792 if (ret < 0) {
46793 err("i2c-error: out failed %02x = %d", device_addr, ret);
46794- return -ENODEV;
46795+ error = -ENODEV;
46796+ goto out;
46797 }
46798
46799 ret = usb_bulk_msg(udev,
46800@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46801 b, 64, &actual_length, 1000);
46802 if (ret < 0) {
46803 err("i2c-error: in failed %02x = %d", device_addr, ret);
46804- return -ENODEV;
46805+ error = -ENODEV;
46806+ goto out;
46807 }
46808
46809 if (b[0] != I2C_STATUS_OK) {
46810@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46811 /* handle tuner-i2c-nak */
46812 if (!(b[0] == I2C_STATUS_NAK &&
46813 device_addr == 0x60
46814- /* && device_is_technisat_usb2 */))
46815- return -ENODEV;
46816+ /* && device_is_technisat_usb2 */)) {
46817+ error = -ENODEV;
46818+ goto out;
46819+ }
46820 }
46821
46822 deb_i2c("status: %d, ", b[0]);
46823@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46824
46825 deb_i2c("\n");
46826
46827- return 0;
46828+out:
46829+ kfree(b);
46830+ return error;
46831 }
46832
46833 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
46834@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46835 {
46836 int ret;
46837
46838- u8 led[8] = {
46839- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46840- 0
46841- };
46842+ u8 *led = kzalloc(8, GFP_KERNEL);
46843+
46844+ if (led == NULL)
46845+ return -ENOMEM;
46846
46847 if (disable_led_control && state != TECH_LED_OFF)
46848 return 0;
46849
46850+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
46851+
46852 switch (state) {
46853 case TECH_LED_ON:
46854 led[1] = 0x82;
46855@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46856 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46857 USB_TYPE_VENDOR | USB_DIR_OUT,
46858 0, 0,
46859- led, sizeof(led), 500);
46860+ led, 8, 500);
46861
46862 mutex_unlock(&d->i2c_mutex);
46863+
46864+ kfree(led);
46865+
46866 return ret;
46867 }
46868
46869 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46870 {
46871 int ret;
46872- u8 b = 0;
46873+ u8 *b = kzalloc(1, GFP_KERNEL);
46874+
46875+ if (b == NULL)
46876+ return -ENOMEM;
46877
46878 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
46879 return -EAGAIN;
46880@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
46881 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
46882 USB_TYPE_VENDOR | USB_DIR_OUT,
46883 (red << 8) | green, 0,
46884- &b, 1, 500);
46885+ b, 1, 500);
46886
46887 mutex_unlock(&d->i2c_mutex);
46888
46889+ kfree(b);
46890+
46891 return ret;
46892 }
46893
46894@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46895 struct dvb_usb_device_description **desc, int *cold)
46896 {
46897 int ret;
46898- u8 version[3];
46899+ u8 *version = kmalloc(3, GFP_KERNEL);
46900
46901 /* first select the interface */
46902 if (usb_set_interface(udev, 0, 1) != 0)
46903@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46904
46905 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
46906
46907+ if (version == NULL)
46908+ return 0;
46909+
46910 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
46911 GET_VERSION_INFO_VENDOR_REQUEST,
46912 USB_TYPE_VENDOR | USB_DIR_IN,
46913 0, 0,
46914- version, sizeof(version), 500);
46915+ version, 3, 500);
46916
46917 if (ret < 0)
46918 *cold = 1;
46919@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46920 *cold = 0;
46921 }
46922
46923+ kfree(version);
46924+
46925 return 0;
46926 }
46927
46928@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
46929
46930 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46931 {
46932- u8 buf[62], *b;
46933+ u8 *buf, *b;
46934 int ret;
46935 struct ir_raw_event ev;
46936
46937+ buf = kmalloc(62, GFP_KERNEL);
46938+
46939+ if (buf == NULL)
46940+ return -ENOMEM;
46941+
46942 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
46943 buf[1] = 0x08;
46944 buf[2] = 0x8f;
46945@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46946 GET_IR_DATA_VENDOR_REQUEST,
46947 USB_TYPE_VENDOR | USB_DIR_IN,
46948 0x8080, 0,
46949- buf, sizeof(buf), 500);
46950+ buf, 62, 500);
46951
46952 unlock:
46953 mutex_unlock(&d->i2c_mutex);
46954
46955- if (ret < 0)
46956+ if (ret < 0) {
46957+ kfree(buf);
46958 return ret;
46959+ }
46960
46961- if (ret == 1)
46962+ if (ret == 1) {
46963+ kfree(buf);
46964 return 0; /* no key pressed */
46965+ }
46966
46967 /* decoding */
46968 b = buf+1;
46969@@ -656,6 +689,8 @@ unlock:
46970
46971 ir_raw_event_handle(d->rc_dev);
46972
46973+ kfree(buf);
46974+
46975 return 1;
46976 }
46977
46978diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46979index af63543..0436f20 100644
46980--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46981+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46982@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46983 * by passing a very big num_planes value */
46984 uplane = compat_alloc_user_space(num_planes *
46985 sizeof(struct v4l2_plane));
46986- kp->m.planes = (__force struct v4l2_plane *)uplane;
46987+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
46988
46989 while (--num_planes >= 0) {
46990 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46991@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46992 if (num_planes == 0)
46993 return 0;
46994
46995- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
46996+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
46997 if (get_user(p, &up->m.planes))
46998 return -EFAULT;
46999 uplane32 = compat_ptr(p);
47000@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47001 get_user(kp->flags, &up->flags) ||
47002 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47003 return -EFAULT;
47004- kp->base = (__force void *)compat_ptr(tmp);
47005+ kp->base = (__force_kernel void *)compat_ptr(tmp);
47006 return 0;
47007 }
47008
47009@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47010 n * sizeof(struct v4l2_ext_control32)))
47011 return -EFAULT;
47012 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47013- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
47014+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
47015 while (--n >= 0) {
47016 u32 id;
47017
47018@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47019 {
47020 struct v4l2_ext_control32 __user *ucontrols;
47021 struct v4l2_ext_control __user *kcontrols =
47022- (__force struct v4l2_ext_control __user *)kp->controls;
47023+ (struct v4l2_ext_control __force_user *)kp->controls;
47024 int n = kp->count;
47025 compat_caddr_t p;
47026
47027@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47028 get_user(tmp, &up->edid) ||
47029 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47030 return -EFAULT;
47031- kp->edid = (__force u8 *)compat_ptr(tmp);
47032+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
47033 return 0;
47034 }
47035
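
The compat-ioctl hunks are annotation-only: plain __force casts become the patch's direction-specific __force_user and __force_kernel, telling sparse (and the patch's user-copy instrumentation) which address space a pointer is crossing into, so that a kernel pointer stored in a user-pointer slot still gets flagged. Roughly how such annotations are declared in the usual sparse idiom (an illustration; the patch defines its own variants in its compiler headers):

	#ifdef __CHECKER__
	# define __user         __attribute__((noderef, address_space(1)))
	# define __force_user   __attribute__((force, address_space(1)))
	# define __force_kernel __attribute__((force, address_space(0)))
	#else
	# define __user
	# define __force_user
	# define __force_kernel
	#endif
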
47036diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47037index 015f92a..59e311e 100644
47038--- a/drivers/media/v4l2-core/v4l2-device.c
47039+++ b/drivers/media/v4l2-core/v4l2-device.c
47040@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47041 EXPORT_SYMBOL_GPL(v4l2_device_put);
47042
47043 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47044- atomic_t *instance)
47045+ atomic_unchecked_t *instance)
47046 {
47047- int num = atomic_inc_return(instance) - 1;
47048+ int num = atomic_inc_return_unchecked(instance) - 1;
47049 int len = strlen(basename);
47050
47051 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47052diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47053index b084072..36706d7 100644
47054--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47055+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47056@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
47057 struct file *file, void *fh, void *p);
47058 } u;
47059 void (*debug)(const void *arg, bool write_only);
47060-};
47061+} __do_const;
47062+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47063
47064 /* This control needs a priority check */
47065 #define INFO_FL_PRIO (1 << 0)
47066@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
47067 struct video_device *vfd = video_devdata(file);
47068 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47069 bool write_only = false;
47070- struct v4l2_ioctl_info default_info;
47071+ v4l2_ioctl_info_no_const default_info;
47072 const struct v4l2_ioctl_info *info;
47073 void *fh = file->private_data;
47074 struct v4l2_fh *vfh = NULL;
47075@@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47076 ret = -EINVAL;
47077 break;
47078 }
47079- *user_ptr = (void __user *)buf->m.planes;
47080+ *user_ptr = (void __force_user *)buf->m.planes;
47081 *kernel_ptr = (void **)&buf->m.planes;
47082 *array_size = sizeof(struct v4l2_plane) * buf->length;
47083 ret = 1;
47084@@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47085 ret = -EINVAL;
47086 break;
47087 }
47088- *user_ptr = (void __user *)edid->edid;
47089+ *user_ptr = (void __force_user *)edid->edid;
47090 *kernel_ptr = (void **)&edid->edid;
47091 *array_size = edid->blocks * 128;
47092 ret = 1;
47093@@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47094 ret = -EINVAL;
47095 break;
47096 }
47097- *user_ptr = (void __user *)ctrls->controls;
47098+ *user_ptr = (void __force_user *)ctrls->controls;
47099 *kernel_ptr = (void **)&ctrls->controls;
47100 *array_size = sizeof(struct v4l2_ext_control)
47101 * ctrls->count;
47102@@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47103 }
47104
47105 if (has_array_args) {
47106- *kernel_ptr = (void __force *)user_ptr;
47107+ *kernel_ptr = (void __force_kernel *)user_ptr;
47108 if (copy_to_user(user_ptr, mbuf, array_size))
47109 err = -EFAULT;
47110 goto out_array_args;
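
[__do_const and __no_const drive the grsecurity constify GCC plugin: structures made up of function pointers are forced read-only so an attacker cannot redirect their members, and the __no_const typedef opts one alias back out for the single local (default_info above) that must still be written at runtime. A sketch under the assumption that the plugin exposes these as attributes; without the plugin they expand to nothing:

#ifdef CONSTIFY_PLUGIN
# define __do_const __attribute__((do_const))  /* force instances read-only */
# define __no_const __attribute__((no_const))  /* keep this alias writable  */
#else
# define __do_const
# define __no_const
#endif

struct ioctl_info {
        long (*func)(void *arg);
        unsigned int flags;
} __do_const;

typedef struct ioctl_info __no_const ioctl_info_no_const;
]
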
47111diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
47112index 24696f5..3637780 100644
47113--- a/drivers/memory/omap-gpmc.c
47114+++ b/drivers/memory/omap-gpmc.c
47115@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
47116 };
47117
47118 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
47119-static struct irq_chip gpmc_irq_chip;
47120 static int gpmc_irq_start;
47121
47122 static struct resource gpmc_mem_root;
47123@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
47124
47125 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
47126
47127+static struct irq_chip gpmc_irq_chip = {
47128+ .name = "gpmc",
47129+ .irq_startup = gpmc_irq_noop_ret,
47130+ .irq_enable = gpmc_irq_enable,
47131+ .irq_disable = gpmc_irq_disable,
47132+ .irq_shutdown = gpmc_irq_noop,
47133+ .irq_ack = gpmc_irq_noop,
47134+ .irq_mask = gpmc_irq_noop,
47135+ .irq_unmask = gpmc_irq_noop,
47136+};
47137+
47138 static int gpmc_setup_irq(void)
47139 {
47140 int i;
47141@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
47142 return gpmc_irq_start;
47143 }
47144
47145- gpmc_irq_chip.name = "gpmc";
47146- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
47147- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
47148- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
47149- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
47150- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
47151- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
47152- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
47153-
47154 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
47155 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
47156
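
[Moving the field-by-field setup of gpmc_irq_chip into a static initializer removes every runtime store to the structure, which is what allows the constify plugin to place it in a read-only section; the same transformation recurs throughout the patch. A self-contained before/after sketch:

struct chip_sketch {
        const char *name;
        void (*mask)(void);
};

static void noop_mask(void) { }

/* Before: a writable object whose fields were patched during setup().
 * After: one build-time initializer; no runtime write remains, so the
 * object can be qualified const and mapped read-only.
 */
static struct chip_sketch gpmc_chip_sketch = {
        .name = "gpmc",
        .mask = noop_mask,
};
]
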
47157diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47158index 187f836..679544b 100644
47159--- a/drivers/message/fusion/mptbase.c
47160+++ b/drivers/message/fusion/mptbase.c
47161@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47162 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47163 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47164
47165+#ifdef CONFIG_GRKERNSEC_HIDESYM
47166+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47167+#else
47168 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47169 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47170+#endif
47171+
47172 /*
47173 * Rounding UP to nearest 4-kB boundary here...
47174 */
47175@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47176 ioc->facts.GlobalCredits);
47177
47178 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47179+#ifdef CONFIG_GRKERNSEC_HIDESYM
47180+ NULL, NULL);
47181+#else
47182 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47183+#endif
47184 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47185 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47186 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
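
[With GRKERNSEC_HIDESYM the /proc output prints NULL in place of the request-frame and DMA addresses, denying unprivileged readers a window into kernel heap layout. Mainline's %pK format specifier plugs the same leak without an #ifdef at each call site, printing zeros unless the kptr_restrict sysctl trusts the reader; a sketch of that alternative, assuming <linux/seq_file.h>:

static void show_frames(struct seq_file *m, const void *req_frames)
{
        /* %pK is censored according to kptr_restrict */
        seq_printf(m, " RequestFrames @ %pK\n", req_frames);
}
]
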
47187diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47188index 5bdaae1..eced16f 100644
47189--- a/drivers/message/fusion/mptsas.c
47190+++ b/drivers/message/fusion/mptsas.c
47191@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47192 return 0;
47193 }
47194
47195+static inline void
47196+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47197+{
47198+ if (phy_info->port_details) {
47199+ phy_info->port_details->rphy = rphy;
47200+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47201+ ioc->name, rphy));
47202+ }
47203+
47204+ if (rphy) {
47205+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47206+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47207+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47208+ ioc->name, rphy, rphy->dev.release));
47209+ }
47210+}
47211+
47212 /* no mutex */
47213 static void
47214 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47215@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47216 return NULL;
47217 }
47218
47219-static inline void
47220-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47221-{
47222- if (phy_info->port_details) {
47223- phy_info->port_details->rphy = rphy;
47224- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47225- ioc->name, rphy));
47226- }
47227-
47228- if (rphy) {
47229- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47230- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47231- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47232- ioc->name, rphy, rphy->dev.release));
47233- }
47234-}
47235-
47236 static inline struct sas_port *
47237 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47238 {
47239diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47240index 9a8e185..27ff17d 100644
47241--- a/drivers/mfd/ab8500-debugfs.c
47242+++ b/drivers/mfd/ab8500-debugfs.c
47243@@ -100,7 +100,7 @@ static int irq_last;
47244 static u32 *irq_count;
47245 static int num_irqs;
47246
47247-static struct device_attribute **dev_attr;
47248+static device_attribute_no_const **dev_attr;
47249 static char **event_name;
47250
47251 static u8 avg_sample = SAMPLE_16;
47252diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
47253index 5615522..1eb6f3dc 100644
47254--- a/drivers/mfd/kempld-core.c
47255+++ b/drivers/mfd/kempld-core.c
47256@@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
47257 .remove = kempld_remove,
47258 };
47259
47260-static struct dmi_system_id kempld_dmi_table[] __initdata = {
47261+static const struct dmi_system_id kempld_dmi_table[] __initconst = {
47262 {
47263 .ident = "BHL6",
47264 .matches = {
47265diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47266index c880c89..45a7c68 100644
47267--- a/drivers/mfd/max8925-i2c.c
47268+++ b/drivers/mfd/max8925-i2c.c
47269@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47270 const struct i2c_device_id *id)
47271 {
47272 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47273- static struct max8925_chip *chip;
47274+ struct max8925_chip *chip;
47275 struct device_node *node = client->dev.of_node;
47276
47277 if (node && !pdata) {
47278diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47279index 7612d89..70549c2 100644
47280--- a/drivers/mfd/tps65910.c
47281+++ b/drivers/mfd/tps65910.c
47282@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47283 struct tps65910_platform_data *pdata)
47284 {
47285 int ret = 0;
47286- static struct regmap_irq_chip *tps6591x_irqs_chip;
47287+ struct regmap_irq_chip *tps6591x_irqs_chip;
47288
47289 if (!irq) {
47290 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
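
[This hunk and the max8925 one above fix the same latent bug: a static local pointer is a single slot shared by every invocation, so probing a second chip would silently reuse or clobber state belonging to the first; dropping static gives each probe its own automatic variable. A minimal illustration:

struct chip_state { int id; };

int probe_buggy(struct chip_state *fresh)
{
        static struct chip_state *chip; /* one slot shared by all probes     */
        chip = fresh;                   /* a second device clobbers the first */
        return chip->id;
}

int probe_fixed(struct chip_state *fresh)
{
        struct chip_state *chip = fresh; /* automatic: per-call lifetime */
        return chip->id;
}
]
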
47291diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47292index 1b772ef..01e77d33 100644
47293--- a/drivers/mfd/twl4030-irq.c
47294+++ b/drivers/mfd/twl4030-irq.c
47295@@ -34,6 +34,7 @@
47296 #include <linux/of.h>
47297 #include <linux/irqdomain.h>
47298 #include <linux/i2c/twl.h>
47299+#include <asm/pgtable.h>
47300
47301 #include "twl-core.h"
47302
47303@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47304 * Install an irq handler for each of the SIH modules;
47305 * clone dummy irq_chip since PIH can't *do* anything
47306 */
47307- twl4030_irq_chip = dummy_irq_chip;
47308- twl4030_irq_chip.name = "twl4030";
47309+ pax_open_kernel();
47310+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47311+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47312
47313- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47314+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47315+ pax_close_kernel();
47316
47317 for (i = irq_base; i < irq_end; i++) {
47318 irq_set_chip_and_handler(i, &twl4030_irq_chip,
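
[Because constify/KERNEXEC leaves twl4030_irq_chip in read-only memory, the unavoidable runtime writes are bracketed by pax_open_kernel()/pax_close_kernel(), and the *(void **)& casts strip the const qualifier the plugin applied. On x86 the open/close pair essentially toggles the CR0 write-protect bit; a simplified sketch, assuming <asm/special_insns.h> and <asm/processor-flags.h> (the real implementation adds per-architecture barriers):

static inline void pax_open_kernel_sketch(void)
{
        preempt_disable();
        write_cr0(read_cr0() & ~X86_CR0_WP); /* let ring 0 write RO pages */
}

static inline void pax_close_kernel_sketch(void)
{
        write_cr0(read_cr0() | X86_CR0_WP);  /* restore write protection */
        preempt_enable();
}
]
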
47319diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47320index 464419b..64bae8d 100644
47321--- a/drivers/misc/c2port/core.c
47322+++ b/drivers/misc/c2port/core.c
47323@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47324 goto error_idr_alloc;
47325 c2dev->id = ret;
47326
47327- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47328+ pax_open_kernel();
47329+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47330+ pax_close_kernel();
47331
47332 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47333 "c2port%d", c2dev->id);
47334diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47335index 8385177..2f54635 100644
47336--- a/drivers/misc/eeprom/sunxi_sid.c
47337+++ b/drivers/misc/eeprom/sunxi_sid.c
47338@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47339
47340 platform_set_drvdata(pdev, sid_data);
47341
47342- sid_bin_attr.size = sid_data->keysize;
47343+ pax_open_kernel();
47344+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47345+ pax_close_kernel();
47346 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47347 return -ENODEV;
47348
47349diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47350index 36f5d52..32311c3 100644
47351--- a/drivers/misc/kgdbts.c
47352+++ b/drivers/misc/kgdbts.c
47353@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47354 char before[BREAK_INSTR_SIZE];
47355 char after[BREAK_INSTR_SIZE];
47356
47357- probe_kernel_read(before, (char *)kgdbts_break_test,
47358+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47359 BREAK_INSTR_SIZE);
47360 init_simple_test();
47361 ts.tst = plant_and_detach_test;
47362@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47363 /* Activate test with initial breakpoint */
47364 if (!is_early)
47365 kgdb_breakpoint();
47366- probe_kernel_read(after, (char *)kgdbts_break_test,
47367+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47368 BREAK_INSTR_SIZE);
47369 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47370 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
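
[Under PaX KERNEXEC on i386 the kernel image is visible at two virtual addresses, the executable text mapping and the ordinary linear mapping, separated by a constant offset; ktla_ktva() converts a text symbol's address to the alias that probe_kernel_read() can actually access. A sketch of the i386 flavour only, with the offset name assumed; on architectures without the split the macro is an identity:

#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET) /* text -> linear */
#define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET) /* linear -> text */
]
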
47371diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47372index 3ef4627..8d00486 100644
47373--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47374+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47375@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47376 * the lid is closed. This leads to interrupts as soon as a little move
47377 * is done.
47378 */
47379- atomic_inc(&lis3->count);
47380+ atomic_inc_unchecked(&lis3->count);
47381
47382 wake_up_interruptible(&lis3->misc_wait);
47383 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47384@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47385 if (lis3->pm_dev)
47386 pm_runtime_get_sync(lis3->pm_dev);
47387
47388- atomic_set(&lis3->count, 0);
47389+ atomic_set_unchecked(&lis3->count, 0);
47390 return 0;
47391 }
47392
47393@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47394 add_wait_queue(&lis3->misc_wait, &wait);
47395 while (true) {
47396 set_current_state(TASK_INTERRUPTIBLE);
47397- data = atomic_xchg(&lis3->count, 0);
47398+ data = atomic_xchg_unchecked(&lis3->count, 0);
47399 if (data)
47400 break;
47401
47402@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47403 struct lis3lv02d, miscdev);
47404
47405 poll_wait(file, &lis3->misc_wait, wait);
47406- if (atomic_read(&lis3->count))
47407+ if (atomic_read_unchecked(&lis3->count))
47408 return POLLIN | POLLRDNORM;
47409 return 0;
47410 }
47411diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47412index c439c82..1f20f57 100644
47413--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47414+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47415@@ -297,7 +297,7 @@ struct lis3lv02d {
47416 struct input_polled_dev *idev; /* input device */
47417 struct platform_device *pdev; /* platform device */
47418 struct regulator_bulk_data regulators[2];
47419- atomic_t count; /* interrupt count after last read */
47420+ atomic_unchecked_t count; /* interrupt count after last read */
47421 union axis_conversion ac; /* hw -> logical axis */
47422 int mapped_btns[3];
47423
47424diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47425index 2f30bad..c4c13d0 100644
47426--- a/drivers/misc/sgi-gru/gruhandles.c
47427+++ b/drivers/misc/sgi-gru/gruhandles.c
47428@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47429 unsigned long nsec;
47430
47431 nsec = CLKS2NSEC(clks);
47432- atomic_long_inc(&mcs_op_statistics[op].count);
47433- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47434+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47435+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47436 if (mcs_op_statistics[op].max < nsec)
47437 mcs_op_statistics[op].max = nsec;
47438 }
47439diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47440index 4f76359..cdfcb2e 100644
47441--- a/drivers/misc/sgi-gru/gruprocfs.c
47442+++ b/drivers/misc/sgi-gru/gruprocfs.c
47443@@ -32,9 +32,9 @@
47444
47445 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47446
47447-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47448+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47449 {
47450- unsigned long val = atomic_long_read(v);
47451+ unsigned long val = atomic_long_read_unchecked(v);
47452
47453 seq_printf(s, "%16lu %s\n", val, id);
47454 }
47455@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47456
47457 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47458 for (op = 0; op < mcsop_last; op++) {
47459- count = atomic_long_read(&mcs_op_statistics[op].count);
47460- total = atomic_long_read(&mcs_op_statistics[op].total);
47461+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47462+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47463 max = mcs_op_statistics[op].max;
47464 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47465 count ? total / count : 0, max);
47466diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47467index 5c3ce24..4915ccb 100644
47468--- a/drivers/misc/sgi-gru/grutables.h
47469+++ b/drivers/misc/sgi-gru/grutables.h
47470@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47471 * GRU statistics.
47472 */
47473 struct gru_stats_s {
47474- atomic_long_t vdata_alloc;
47475- atomic_long_t vdata_free;
47476- atomic_long_t gts_alloc;
47477- atomic_long_t gts_free;
47478- atomic_long_t gms_alloc;
47479- atomic_long_t gms_free;
47480- atomic_long_t gts_double_allocate;
47481- atomic_long_t assign_context;
47482- atomic_long_t assign_context_failed;
47483- atomic_long_t free_context;
47484- atomic_long_t load_user_context;
47485- atomic_long_t load_kernel_context;
47486- atomic_long_t lock_kernel_context;
47487- atomic_long_t unlock_kernel_context;
47488- atomic_long_t steal_user_context;
47489- atomic_long_t steal_kernel_context;
47490- atomic_long_t steal_context_failed;
47491- atomic_long_t nopfn;
47492- atomic_long_t asid_new;
47493- atomic_long_t asid_next;
47494- atomic_long_t asid_wrap;
47495- atomic_long_t asid_reuse;
47496- atomic_long_t intr;
47497- atomic_long_t intr_cbr;
47498- atomic_long_t intr_tfh;
47499- atomic_long_t intr_spurious;
47500- atomic_long_t intr_mm_lock_failed;
47501- atomic_long_t call_os;
47502- atomic_long_t call_os_wait_queue;
47503- atomic_long_t user_flush_tlb;
47504- atomic_long_t user_unload_context;
47505- atomic_long_t user_exception;
47506- atomic_long_t set_context_option;
47507- atomic_long_t check_context_retarget_intr;
47508- atomic_long_t check_context_unload;
47509- atomic_long_t tlb_dropin;
47510- atomic_long_t tlb_preload_page;
47511- atomic_long_t tlb_dropin_fail_no_asid;
47512- atomic_long_t tlb_dropin_fail_upm;
47513- atomic_long_t tlb_dropin_fail_invalid;
47514- atomic_long_t tlb_dropin_fail_range_active;
47515- atomic_long_t tlb_dropin_fail_idle;
47516- atomic_long_t tlb_dropin_fail_fmm;
47517- atomic_long_t tlb_dropin_fail_no_exception;
47518- atomic_long_t tfh_stale_on_fault;
47519- atomic_long_t mmu_invalidate_range;
47520- atomic_long_t mmu_invalidate_page;
47521- atomic_long_t flush_tlb;
47522- atomic_long_t flush_tlb_gru;
47523- atomic_long_t flush_tlb_gru_tgh;
47524- atomic_long_t flush_tlb_gru_zero_asid;
47525+ atomic_long_unchecked_t vdata_alloc;
47526+ atomic_long_unchecked_t vdata_free;
47527+ atomic_long_unchecked_t gts_alloc;
47528+ atomic_long_unchecked_t gts_free;
47529+ atomic_long_unchecked_t gms_alloc;
47530+ atomic_long_unchecked_t gms_free;
47531+ atomic_long_unchecked_t gts_double_allocate;
47532+ atomic_long_unchecked_t assign_context;
47533+ atomic_long_unchecked_t assign_context_failed;
47534+ atomic_long_unchecked_t free_context;
47535+ atomic_long_unchecked_t load_user_context;
47536+ atomic_long_unchecked_t load_kernel_context;
47537+ atomic_long_unchecked_t lock_kernel_context;
47538+ atomic_long_unchecked_t unlock_kernel_context;
47539+ atomic_long_unchecked_t steal_user_context;
47540+ atomic_long_unchecked_t steal_kernel_context;
47541+ atomic_long_unchecked_t steal_context_failed;
47542+ atomic_long_unchecked_t nopfn;
47543+ atomic_long_unchecked_t asid_new;
47544+ atomic_long_unchecked_t asid_next;
47545+ atomic_long_unchecked_t asid_wrap;
47546+ atomic_long_unchecked_t asid_reuse;
47547+ atomic_long_unchecked_t intr;
47548+ atomic_long_unchecked_t intr_cbr;
47549+ atomic_long_unchecked_t intr_tfh;
47550+ atomic_long_unchecked_t intr_spurious;
47551+ atomic_long_unchecked_t intr_mm_lock_failed;
47552+ atomic_long_unchecked_t call_os;
47553+ atomic_long_unchecked_t call_os_wait_queue;
47554+ atomic_long_unchecked_t user_flush_tlb;
47555+ atomic_long_unchecked_t user_unload_context;
47556+ atomic_long_unchecked_t user_exception;
47557+ atomic_long_unchecked_t set_context_option;
47558+ atomic_long_unchecked_t check_context_retarget_intr;
47559+ atomic_long_unchecked_t check_context_unload;
47560+ atomic_long_unchecked_t tlb_dropin;
47561+ atomic_long_unchecked_t tlb_preload_page;
47562+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47563+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47564+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47565+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47566+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47567+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47568+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47569+ atomic_long_unchecked_t tfh_stale_on_fault;
47570+ atomic_long_unchecked_t mmu_invalidate_range;
47571+ atomic_long_unchecked_t mmu_invalidate_page;
47572+ atomic_long_unchecked_t flush_tlb;
47573+ atomic_long_unchecked_t flush_tlb_gru;
47574+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47575+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47576
47577- atomic_long_t copy_gpa;
47578- atomic_long_t read_gpa;
47579+ atomic_long_unchecked_t copy_gpa;
47580+ atomic_long_unchecked_t read_gpa;
47581
47582- atomic_long_t mesq_receive;
47583- atomic_long_t mesq_receive_none;
47584- atomic_long_t mesq_send;
47585- atomic_long_t mesq_send_failed;
47586- atomic_long_t mesq_noop;
47587- atomic_long_t mesq_send_unexpected_error;
47588- atomic_long_t mesq_send_lb_overflow;
47589- atomic_long_t mesq_send_qlimit_reached;
47590- atomic_long_t mesq_send_amo_nacked;
47591- atomic_long_t mesq_send_put_nacked;
47592- atomic_long_t mesq_page_overflow;
47593- atomic_long_t mesq_qf_locked;
47594- atomic_long_t mesq_qf_noop_not_full;
47595- atomic_long_t mesq_qf_switch_head_failed;
47596- atomic_long_t mesq_qf_unexpected_error;
47597- atomic_long_t mesq_noop_unexpected_error;
47598- atomic_long_t mesq_noop_lb_overflow;
47599- atomic_long_t mesq_noop_qlimit_reached;
47600- atomic_long_t mesq_noop_amo_nacked;
47601- atomic_long_t mesq_noop_put_nacked;
47602- atomic_long_t mesq_noop_page_overflow;
47603+ atomic_long_unchecked_t mesq_receive;
47604+ atomic_long_unchecked_t mesq_receive_none;
47605+ atomic_long_unchecked_t mesq_send;
47606+ atomic_long_unchecked_t mesq_send_failed;
47607+ atomic_long_unchecked_t mesq_noop;
47608+ atomic_long_unchecked_t mesq_send_unexpected_error;
47609+ atomic_long_unchecked_t mesq_send_lb_overflow;
47610+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47611+ atomic_long_unchecked_t mesq_send_amo_nacked;
47612+ atomic_long_unchecked_t mesq_send_put_nacked;
47613+ atomic_long_unchecked_t mesq_page_overflow;
47614+ atomic_long_unchecked_t mesq_qf_locked;
47615+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47616+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47617+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47618+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47619+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47620+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47621+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47622+ atomic_long_unchecked_t mesq_noop_put_nacked;
47623+ atomic_long_unchecked_t mesq_noop_page_overflow;
47624
47625 };
47626
47627@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47628 tghop_invalidate, mcsop_last};
47629
47630 struct mcs_op_statistic {
47631- atomic_long_t count;
47632- atomic_long_t total;
47633+ atomic_long_unchecked_t count;
47634+ atomic_long_unchecked_t total;
47635 unsigned long max;
47636 };
47637
47638@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47639
47640 #define STAT(id) do { \
47641 if (gru_options & OPT_STATS) \
47642- atomic_long_inc(&gru_stats.id); \
47643+ atomic_long_inc_unchecked(&gru_stats.id); \
47644 } while (0)
47645
47646 #ifdef CONFIG_SGI_GRU_DEBUG
47647diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47648index c862cd4..0d176fe 100644
47649--- a/drivers/misc/sgi-xp/xp.h
47650+++ b/drivers/misc/sgi-xp/xp.h
47651@@ -288,7 +288,7 @@ struct xpc_interface {
47652 xpc_notify_func, void *);
47653 void (*received) (short, int, void *);
47654 enum xp_retval (*partid_to_nasids) (short, void *);
47655-};
47656+} __no_const;
47657
47658 extern struct xpc_interface xpc_interface;
47659
47660diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47661index 01be66d..e3a0c7e 100644
47662--- a/drivers/misc/sgi-xp/xp_main.c
47663+++ b/drivers/misc/sgi-xp/xp_main.c
47664@@ -78,13 +78,13 @@ xpc_notloaded(void)
47665 }
47666
47667 struct xpc_interface xpc_interface = {
47668- (void (*)(int))xpc_notloaded,
47669- (void (*)(int))xpc_notloaded,
47670- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47671- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47672+ .connect = (void (*)(int))xpc_notloaded,
47673+ .disconnect = (void (*)(int))xpc_notloaded,
47674+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47675+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47676 void *))xpc_notloaded,
47677- (void (*)(short, int, void *))xpc_notloaded,
47678- (enum xp_retval(*)(short, void *))xpc_notloaded
47679+ .received = (void (*)(short, int, void *))xpc_notloaded,
47680+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47681 };
47682 EXPORT_SYMBOL_GPL(xpc_interface);
47683
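
[Rewriting xpc_interface with designated initializers binds each stub to its member by name: positional initialization silently shifts meaning if a member is ever added or reordered, and the explicit form is also what lets structure layouts be randomized or constified without breaking the table. A short illustration:

struct iface {
        void (*connect)(int);
        void (*disconnect)(int);
};

static void notloaded(int ch) { (void)ch; }

static struct iface positional = { notloaded, notloaded }; /* order-dependent */

static struct iface designated = {  /* immune to member reordering */
        .connect    = notloaded,
        .disconnect = notloaded,
};
]
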
47684diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47685index b94d5f7..7f494c5 100644
47686--- a/drivers/misc/sgi-xp/xpc.h
47687+++ b/drivers/misc/sgi-xp/xpc.h
47688@@ -835,6 +835,7 @@ struct xpc_arch_operations {
47689 void (*received_payload) (struct xpc_channel *, void *);
47690 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47691 };
47692+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47693
47694 /* struct xpc_partition act_state values (for XPC HB) */
47695
47696@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47697 /* found in xpc_main.c */
47698 extern struct device *xpc_part;
47699 extern struct device *xpc_chan;
47700-extern struct xpc_arch_operations xpc_arch_ops;
47701+extern xpc_arch_operations_no_const xpc_arch_ops;
47702 extern int xpc_disengage_timelimit;
47703 extern int xpc_disengage_timedout;
47704 extern int xpc_activate_IRQ_rcvd;
47705diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47706index 82dc574..8539ab2 100644
47707--- a/drivers/misc/sgi-xp/xpc_main.c
47708+++ b/drivers/misc/sgi-xp/xpc_main.c
47709@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47710 .notifier_call = xpc_system_die,
47711 };
47712
47713-struct xpc_arch_operations xpc_arch_ops;
47714+xpc_arch_operations_no_const xpc_arch_ops;
47715
47716 /*
47717 * Timer function to enforce the timelimit on the partition disengage.
47718@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
47719
47720 if (((die_args->trapnr == X86_TRAP_MF) ||
47721 (die_args->trapnr == X86_TRAP_XF)) &&
47722- !user_mode_vm(die_args->regs))
47723+ !user_mode(die_args->regs))
47724 xpc_die_deactivate();
47725
47726 break;
47727diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47728index ed2e71a..54c498e 100644
47729--- a/drivers/mmc/card/block.c
47730+++ b/drivers/mmc/card/block.c
47731@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
47732 if (idata->ic.postsleep_min_us)
47733 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
47734
47735- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
47736+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
47737 err = -EFAULT;
47738 goto cmd_rel_host;
47739 }
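
[response is an array member, so &(ic_ptr->response) and ic_ptr->response denote the same address; only the pointer type differs (pointer-to-array versus pointer-to-first-element). copy_to_user() behaves identically at runtime, but the decayed form carries the element-pointer type that the patch's usercopy checking reasons about. A compilable demonstration:

struct ic { unsigned int response[4]; };

void addr_demo(struct ic *p)
{
        unsigned int (*whole)[4] = &p->response; /* pointer to the array   */
        unsigned int *first = p->response;       /* decays to &response[0] */

        /* (void *)whole == (void *)first for every p */
        (void)whole;
        (void)first;
}
]
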
47740diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
47741index 18c4afe..43be71e 100644
47742--- a/drivers/mmc/host/dw_mmc.h
47743+++ b/drivers/mmc/host/dw_mmc.h
47744@@ -271,5 +271,5 @@ struct dw_mci_drv_data {
47745 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
47746 int (*parse_dt)(struct dw_mci *host);
47747 int (*execute_tuning)(struct dw_mci_slot *slot);
47748-};
47749+} __do_const;
47750 #endif /* _DW_MMC_H_ */
47751diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
47752index 7fe1619..ae0781b 100644
47753--- a/drivers/mmc/host/mmci.c
47754+++ b/drivers/mmc/host/mmci.c
47755@@ -1630,7 +1630,9 @@ static int mmci_probe(struct amba_device *dev,
47756 mmc->caps |= MMC_CAP_CMD23;
47757
47758 if (variant->busy_detect) {
47759- mmci_ops.card_busy = mmci_card_busy;
47760+ pax_open_kernel();
47761+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
47762+ pax_close_kernel();
47763 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
47764 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
47765 mmc->max_busy_timeout = 0;
47766diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
47767index f84cfb0..aebe5d6 100644
47768--- a/drivers/mmc/host/omap_hsmmc.c
47769+++ b/drivers/mmc/host/omap_hsmmc.c
47770@@ -2054,7 +2054,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
47771
47772 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
47773 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
47774- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47775+ pax_open_kernel();
47776+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47777+ pax_close_kernel();
47778 }
47779
47780 pm_runtime_enable(host->dev);
47781diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
47782index 10ef824..88461a2 100644
47783--- a/drivers/mmc/host/sdhci-esdhc-imx.c
47784+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
47785@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
47786 host->mmc->caps |= MMC_CAP_1_8V_DDR;
47787 }
47788
47789- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
47790- sdhci_esdhc_ops.platform_execute_tuning =
47791+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
47792+ pax_open_kernel();
47793+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47794 esdhc_executing_tuning;
47795+ pax_close_kernel();
47796+ }
47797
47798 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47799 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47800diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47801index c6d2dd7..81b1ca3 100644
47802--- a/drivers/mmc/host/sdhci-s3c.c
47803+++ b/drivers/mmc/host/sdhci-s3c.c
47804@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47805 * we can use overriding functions instead of default.
47806 */
47807 if (sc->no_divider) {
47808- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47809- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47810- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47811+ pax_open_kernel();
47812+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47813+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47814+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47815+ pax_close_kernel();
47816 }
47817
47818 /* It supports additional host capabilities if needed */
47819diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47820index 423666b..81ff5eb 100644
47821--- a/drivers/mtd/chips/cfi_cmdset_0020.c
47822+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47823@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47824 size_t totlen = 0, thislen;
47825 int ret = 0;
47826 size_t buflen = 0;
47827- static char *buffer;
47828+ char *buffer;
47829
47830 if (!ECCBUF_SIZE) {
47831 /* We should fall back to a general writev implementation.
47832diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47833index f44c606..aa4e804 100644
47834--- a/drivers/mtd/nand/denali.c
47835+++ b/drivers/mtd/nand/denali.c
47836@@ -24,6 +24,7 @@
47837 #include <linux/slab.h>
47838 #include <linux/mtd/mtd.h>
47839 #include <linux/module.h>
47840+#include <linux/slab.h>
47841
47842 #include "denali.h"
47843
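
[Note that this hunk adds a second #include <linux/slab.h> although the file already pulls it in three lines earlier. The duplication is harmless because the header is wrapped in an include guard, which makes the second inclusion a no-op:

/* Shape of the guard inside <linux/slab.h>: */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
/* ... declarations ... */
#endif
]
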
47844diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47845index 33f3c3c..d6bbe6a 100644
47846--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47847+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47848@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
47849
47850 /* first try to map the upper buffer directly */
47851 if (virt_addr_valid(this->upper_buf) &&
47852- !object_is_on_stack(this->upper_buf)) {
47853+ !object_starts_on_stack(this->upper_buf)) {
47854 sg_init_one(sgl, this->upper_buf, this->upper_len);
47855 ret = dma_map_sg(this->dev, sgl, 1, dr);
47856 if (ret == 0)
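
[The rename from object_is_on_stack() to object_starts_on_stack() reflects what the helper actually tests: only the object's first byte is checked against the stack bounds, not its whole extent. A sketch matching the mainline helper of this era, assuming <linux/sched.h> and <linux/thread_info.h>:

static inline int object_starts_on_stack_sketch(const void *obj)
{
        const void *stack = task_stack_page(current);

        return obj >= stack && obj < stack + THREAD_SIZE;
}
]
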
47857diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47858index a5dfbfb..8042ab4 100644
47859--- a/drivers/mtd/nftlmount.c
47860+++ b/drivers/mtd/nftlmount.c
47861@@ -24,6 +24,7 @@
47862 #include <asm/errno.h>
47863 #include <linux/delay.h>
47864 #include <linux/slab.h>
47865+#include <linux/sched.h>
47866 #include <linux/mtd/mtd.h>
47867 #include <linux/mtd/nand.h>
47868 #include <linux/mtd/nftl.h>
47869diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47870index c23184a..4115c41 100644
47871--- a/drivers/mtd/sm_ftl.c
47872+++ b/drivers/mtd/sm_ftl.c
47873@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47874 #define SM_CIS_VENDOR_OFFSET 0x59
47875 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47876 {
47877- struct attribute_group *attr_group;
47878+ attribute_group_no_const *attr_group;
47879 struct attribute **attributes;
47880 struct sm_sysfs_attribute *vendor_attribute;
47881 char *vendor;
47882diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47883index 7b11243..b3278a3 100644
47884--- a/drivers/net/bonding/bond_netlink.c
47885+++ b/drivers/net/bonding/bond_netlink.c
47886@@ -585,7 +585,7 @@ nla_put_failure:
47887 return -EMSGSIZE;
47888 }
47889
47890-struct rtnl_link_ops bond_link_ops __read_mostly = {
47891+struct rtnl_link_ops bond_link_ops = {
47892 .kind = "bond",
47893 .priv_size = sizeof(struct bonding),
47894 .setup = bond_setup,
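
[This hunk, repeated below for caif_hsi, can, vcan and dummy, drops __read_mostly from rtnl_link_ops objects: the constify plugin turns these ops structures const, and a const object cannot share the writable .data..read_mostly section without provoking a section type conflict at build time. A sketch of the conflict:

#define __read_mostly __attribute__((section(".data..read_mostly")))

static const int fine = 1;  /* placed in .rodata */
/*
 * static const int broken __read_mostly = 1;
 *   -> "section type conflict" once read-only and writable objects
 *      are forced into the same section
 */
]
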
47895diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
47896index b3b922a..80bba38 100644
47897--- a/drivers/net/caif/caif_hsi.c
47898+++ b/drivers/net/caif/caif_hsi.c
47899@@ -1444,7 +1444,7 @@ err:
47900 return -ENODEV;
47901 }
47902
47903-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
47904+static struct rtnl_link_ops caif_hsi_link_ops = {
47905 .kind = "cfhsi",
47906 .priv_size = sizeof(struct cfhsi),
47907 .setup = cfhsi_setup,
47908diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47909index 58808f65..0bdc7b3 100644
47910--- a/drivers/net/can/Kconfig
47911+++ b/drivers/net/can/Kconfig
47912@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47913
47914 config CAN_FLEXCAN
47915 tristate "Support for Freescale FLEXCAN based chips"
47916- depends on ARM || PPC
47917+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47918 ---help---
47919 Say Y here if you want to support for Freescale FlexCAN.
47920
47921diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
47922index b0f6924..59e9640 100644
47923--- a/drivers/net/can/dev.c
47924+++ b/drivers/net/can/dev.c
47925@@ -959,7 +959,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
47926 return -EOPNOTSUPP;
47927 }
47928
47929-static struct rtnl_link_ops can_link_ops __read_mostly = {
47930+static struct rtnl_link_ops can_link_ops = {
47931 .kind = "can",
47932 .maxtype = IFLA_CAN_MAX,
47933 .policy = can_policy,
47934diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
47935index 674f367..ec3a31f 100644
47936--- a/drivers/net/can/vcan.c
47937+++ b/drivers/net/can/vcan.c
47938@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
47939 dev->destructor = free_netdev;
47940 }
47941
47942-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
47943+static struct rtnl_link_ops vcan_link_ops = {
47944 .kind = "vcan",
47945 .setup = vcan_setup,
47946 };
47947diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
47948index 49adbf1..fff7ff8 100644
47949--- a/drivers/net/dummy.c
47950+++ b/drivers/net/dummy.c
47951@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47952 return 0;
47953 }
47954
47955-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47956+static struct rtnl_link_ops dummy_link_ops = {
47957 .kind = DRV_NAME,
47958 .setup = dummy_setup,
47959 .validate = dummy_validate,
47960diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47961index 0443654..4f0aa18 100644
47962--- a/drivers/net/ethernet/8390/ax88796.c
47963+++ b/drivers/net/ethernet/8390/ax88796.c
47964@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47965 if (ax->plat->reg_offsets)
47966 ei_local->reg_offset = ax->plat->reg_offsets;
47967 else {
47968+ resource_size_t _mem_size = mem_size;
47969+ do_div(_mem_size, 0x18);
47970 ei_local->reg_offset = ax->reg_offsets;
47971 for (ret = 0; ret < 0x18; ret++)
47972- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47973+ ax->reg_offsets[ret] = _mem_size * ret;
47974 }
47975
47976 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
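
[mem_size has type resource_size_t, which is 64 bits wide when CONFIG_PHYS_ADDR_T_64BIT is set even on 32-bit machines, and a plain / on a 64-bit value would emit a call to libgcc's __udivdi3, which the kernel does not link. do_div() performs the division with the kernel's own helper, modifying its first argument in place and returning the remainder; computing it once before the loop also avoids repeating the division. A usage sketch, assuming <asm/div64.h>:

unsigned long long reg_step(unsigned long long mem_size)
{
        unsigned long long step = mem_size;
        unsigned int rem = do_div(step, 0x18); /* step = mem_size / 0x18 */

        (void)rem; /* remainder unused here */
        return step;
}
]
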
47977diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47978index 6725dc0..163549c 100644
47979--- a/drivers/net/ethernet/altera/altera_tse_main.c
47980+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47981@@ -1216,7 +1216,7 @@ static int tse_shutdown(struct net_device *dev)
47982 return 0;
47983 }
47984
47985-static struct net_device_ops altera_tse_netdev_ops = {
47986+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47987 .ndo_open = tse_open,
47988 .ndo_stop = tse_shutdown,
47989 .ndo_start_xmit = tse_start_xmit,
47990@@ -1453,11 +1453,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47991 ndev->netdev_ops = &altera_tse_netdev_ops;
47992 altera_tse_set_ethtool_ops(ndev);
47993
47994+ pax_open_kernel();
47995 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47996
47997 if (priv->hash_filter)
47998 altera_tse_netdev_ops.ndo_set_rx_mode =
47999 tse_set_rx_mode_hashfilter;
48000+ pax_close_kernel();
48001
48002 /* Scatter/gather IO is not supported,
48003 * so it is turned off
48004diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48005index 29a0927..5a348e24 100644
48006--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48007+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48008@@ -1122,14 +1122,14 @@ do { \
48009 * operations, everything works on mask values.
48010 */
48011 #define XMDIO_READ(_pdata, _mmd, _reg) \
48012- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48013+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48014 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48015
48016 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48017 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48018
48019 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48020- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48021+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48022 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48023
48024 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48025diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48026index 8a50b01..39c1ad0 100644
48027--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48028+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48029@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48030
48031 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48032
48033- pdata->hw_if.config_dcb_tc(pdata);
48034+ pdata->hw_if->config_dcb_tc(pdata);
48035
48036 return 0;
48037 }
48038@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48039
48040 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48041
48042- pdata->hw_if.config_dcb_pfc(pdata);
48043+ pdata->hw_if->config_dcb_pfc(pdata);
48044
48045 return 0;
48046 }
48047diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48048index d81fc6b..6f8ab25 100644
48049--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48050+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48051@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
48052
48053 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48054 {
48055- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48056+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48057 struct xgbe_channel *channel;
48058 struct xgbe_ring *ring;
48059 struct xgbe_ring_data *rdata;
48060@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48061
48062 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48063 {
48064- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48065+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48066 struct xgbe_channel *channel;
48067 struct xgbe_ring *ring;
48068 struct xgbe_ring_desc *rdesc;
48069@@ -620,17 +620,12 @@ err_out:
48070 return 0;
48071 }
48072
48073-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48074-{
48075- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48076-
48077- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48078- desc_if->free_ring_resources = xgbe_free_ring_resources;
48079- desc_if->map_tx_skb = xgbe_map_tx_skb;
48080- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
48081- desc_if->unmap_rdata = xgbe_unmap_rdata;
48082- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48083- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48084-
48085- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48086-}
48087+const struct xgbe_desc_if default_xgbe_desc_if = {
48088+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48089+ .free_ring_resources = xgbe_free_ring_resources,
48090+ .map_tx_skb = xgbe_map_tx_skb,
48091+ .map_rx_buffer = xgbe_map_rx_buffer,
48092+ .unmap_rdata = xgbe_unmap_rdata,
48093+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48094+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48095+};
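
[The runtime initializer xgbe_init_function_ptrs_desc() becomes an exported const table (and xgbe_init_function_ptrs_dev() gets the same treatment below), while the companion hunks switch pdata->desc_if and pdata->hw_if from embedded structures to pointers at those tables, so every xgbe function-pointer table lives in rodata and can no longer be overwritten. A reduced sketch of the conversion:

struct desc_if {
        int (*alloc)(void);
};

static int my_alloc(void) { return 0; }

static const struct desc_if default_desc_if = {
        .alloc = my_alloc,
};

struct priv {
        const struct desc_if *desc_if; /* was: struct desc_if desc_if */
};

static void probe_sketch(struct priv *p)
{
        p->desc_if = &default_desc_if; /* was: init_function_ptrs(&p->desc_if) */
}
]
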
48096diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48097index 400757b..d8c53f6 100644
48098--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48099+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48100@@ -2748,7 +2748,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48101
48102 static int xgbe_init(struct xgbe_prv_data *pdata)
48103 {
48104- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48105+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48106 int ret;
48107
48108 DBGPR("-->xgbe_init\n");
48109@@ -2813,108 +2813,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48110 return 0;
48111 }
48112
48113-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48114-{
48115- DBGPR("-->xgbe_init_function_ptrs\n");
48116-
48117- hw_if->tx_complete = xgbe_tx_complete;
48118-
48119- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48120- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48121- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
48122- hw_if->set_mac_address = xgbe_set_mac_address;
48123-
48124- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48125- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48126-
48127- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48128- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48129- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48130- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48131- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48132-
48133- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48134- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48135-
48136- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48137- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48138- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48139-
48140- hw_if->enable_tx = xgbe_enable_tx;
48141- hw_if->disable_tx = xgbe_disable_tx;
48142- hw_if->enable_rx = xgbe_enable_rx;
48143- hw_if->disable_rx = xgbe_disable_rx;
48144-
48145- hw_if->powerup_tx = xgbe_powerup_tx;
48146- hw_if->powerdown_tx = xgbe_powerdown_tx;
48147- hw_if->powerup_rx = xgbe_powerup_rx;
48148- hw_if->powerdown_rx = xgbe_powerdown_rx;
48149-
48150- hw_if->dev_xmit = xgbe_dev_xmit;
48151- hw_if->dev_read = xgbe_dev_read;
48152- hw_if->enable_int = xgbe_enable_int;
48153- hw_if->disable_int = xgbe_disable_int;
48154- hw_if->init = xgbe_init;
48155- hw_if->exit = xgbe_exit;
48156+const struct xgbe_hw_if default_xgbe_hw_if = {
48157+ .tx_complete = xgbe_tx_complete,
48158+
48159+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48160+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48161+ .add_mac_addresses = xgbe_add_mac_addresses,
48162+ .set_mac_address = xgbe_set_mac_address,
48163+
48164+ .enable_rx_csum = xgbe_enable_rx_csum,
48165+ .disable_rx_csum = xgbe_disable_rx_csum,
48166+
48167+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48168+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48169+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48170+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48171+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48172+
48173+ .read_mmd_regs = xgbe_read_mmd_regs,
48174+ .write_mmd_regs = xgbe_write_mmd_regs,
48175+
48176+ .set_gmii_speed = xgbe_set_gmii_speed,
48177+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48178+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48179+
48180+ .enable_tx = xgbe_enable_tx,
48181+ .disable_tx = xgbe_disable_tx,
48182+ .enable_rx = xgbe_enable_rx,
48183+ .disable_rx = xgbe_disable_rx,
48184+
48185+ .powerup_tx = xgbe_powerup_tx,
48186+ .powerdown_tx = xgbe_powerdown_tx,
48187+ .powerup_rx = xgbe_powerup_rx,
48188+ .powerdown_rx = xgbe_powerdown_rx,
48189+
48190+ .dev_xmit = xgbe_dev_xmit,
48191+ .dev_read = xgbe_dev_read,
48192+ .enable_int = xgbe_enable_int,
48193+ .disable_int = xgbe_disable_int,
48194+ .init = xgbe_init,
48195+ .exit = xgbe_exit,
48196
48197 /* Descriptor related Sequences have to be initialized here */
48198- hw_if->tx_desc_init = xgbe_tx_desc_init;
48199- hw_if->rx_desc_init = xgbe_rx_desc_init;
48200- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48201- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48202- hw_if->is_last_desc = xgbe_is_last_desc;
48203- hw_if->is_context_desc = xgbe_is_context_desc;
48204- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
48205+ .tx_desc_init = xgbe_tx_desc_init,
48206+ .rx_desc_init = xgbe_rx_desc_init,
48207+ .tx_desc_reset = xgbe_tx_desc_reset,
48208+ .rx_desc_reset = xgbe_rx_desc_reset,
48209+ .is_last_desc = xgbe_is_last_desc,
48210+ .is_context_desc = xgbe_is_context_desc,
48211+ .tx_start_xmit = xgbe_tx_start_xmit,
48212
48213 /* For FLOW ctrl */
48214- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48215- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48216+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48217+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48218
48219 /* For RX coalescing */
48220- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48221- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48222- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48223- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48224+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48225+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48226+ .usec_to_riwt = xgbe_usec_to_riwt,
48227+ .riwt_to_usec = xgbe_riwt_to_usec,
48228
48229 /* For RX and TX threshold config */
48230- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48231- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48232+ .config_rx_threshold = xgbe_config_rx_threshold,
48233+ .config_tx_threshold = xgbe_config_tx_threshold,
48234
48235 /* For RX and TX Store and Forward Mode config */
48236- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48237- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48238+ .config_rsf_mode = xgbe_config_rsf_mode,
48239+ .config_tsf_mode = xgbe_config_tsf_mode,
48240
48241 /* For TX DMA Operating on Second Frame config */
48242- hw_if->config_osp_mode = xgbe_config_osp_mode;
48243+ .config_osp_mode = xgbe_config_osp_mode,
48244
48245 /* For RX and TX PBL config */
48246- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48247- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48248- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48249- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48250- hw_if->config_pblx8 = xgbe_config_pblx8;
48251+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48252+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48253+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48254+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48255+ .config_pblx8 = xgbe_config_pblx8,
48256
48257 /* For MMC statistics support */
48258- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48259- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48260- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48261+ .tx_mmc_int = xgbe_tx_mmc_int,
48262+ .rx_mmc_int = xgbe_rx_mmc_int,
48263+ .read_mmc_stats = xgbe_read_mmc_stats,
48264
48265 /* For PTP config */
48266- hw_if->config_tstamp = xgbe_config_tstamp;
48267- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48268- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48269- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48270- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48271+ .config_tstamp = xgbe_config_tstamp,
48272+ .update_tstamp_addend = xgbe_update_tstamp_addend,
48273+ .set_tstamp_time = xgbe_set_tstamp_time,
48274+ .get_tstamp_time = xgbe_get_tstamp_time,
48275+ .get_tx_tstamp = xgbe_get_tx_tstamp,
48276
48277 /* For Data Center Bridging config */
48278- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48279- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48280+ .config_dcb_tc = xgbe_config_dcb_tc,
48281+ .config_dcb_pfc = xgbe_config_dcb_pfc,
48282
48283 /* For Receive Side Scaling */
48284- hw_if->enable_rss = xgbe_enable_rss;
48285- hw_if->disable_rss = xgbe_disable_rss;
48286- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
48287- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
48288-
48289- DBGPR("<--xgbe_init_function_ptrs\n");
48290-}
48291+ .enable_rss = xgbe_enable_rss,
48292+ .disable_rss = xgbe_disable_rss,
48293+ .set_rss_hash_key = xgbe_set_rss_hash_key,
48294+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
48295+};
48296diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48297index 885b02b..4b31a4c 100644
48298--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48299+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48300@@ -244,7 +244,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
48301 * support, tell it now
48302 */
48303 if (ring->tx.xmit_more)
48304- pdata->hw_if.tx_start_xmit(channel, ring);
48305+ pdata->hw_if->tx_start_xmit(channel, ring);
48306
48307 return NETDEV_TX_BUSY;
48308 }
48309@@ -272,7 +272,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48310
48311 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48312 {
48313- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48314+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48315 struct xgbe_channel *channel;
48316 enum xgbe_int int_id;
48317 unsigned int i;
48318@@ -294,7 +294,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48319
48320 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48321 {
48322- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48323+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48324 struct xgbe_channel *channel;
48325 enum xgbe_int int_id;
48326 unsigned int i;
48327@@ -317,7 +317,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48328 static irqreturn_t xgbe_isr(int irq, void *data)
48329 {
48330 struct xgbe_prv_data *pdata = data;
48331- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48332+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48333 struct xgbe_channel *channel;
48334 unsigned int dma_isr, dma_ch_isr;
48335 unsigned int mac_isr, mac_tssr;
48336@@ -673,7 +673,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
48337
48338 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48339 {
48340- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48341+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48342
48343 DBGPR("-->xgbe_init_tx_coalesce\n");
48344
48345@@ -687,7 +687,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48346
48347 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48348 {
48349- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48350+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48351
48352 DBGPR("-->xgbe_init_rx_coalesce\n");
48353
48354@@ -701,7 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48355
48356 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48357 {
48358- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48359+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48360 struct xgbe_channel *channel;
48361 struct xgbe_ring *ring;
48362 struct xgbe_ring_data *rdata;
48363@@ -726,7 +726,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48364
48365 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48366 {
48367- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48368+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48369 struct xgbe_channel *channel;
48370 struct xgbe_ring *ring;
48371 struct xgbe_ring_data *rdata;
48372@@ -752,7 +752,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48373 static void xgbe_adjust_link(struct net_device *netdev)
48374 {
48375 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48376- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48377+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48378 struct phy_device *phydev = pdata->phydev;
48379 int new_state = 0;
48380
48381@@ -860,7 +860,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48382 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48383 {
48384 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48385- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48386+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48387 unsigned long flags;
48388
48389 DBGPR("-->xgbe_powerdown\n");
48390@@ -898,7 +898,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48391 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48392 {
48393 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48394- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48395+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48396 unsigned long flags;
48397
48398 DBGPR("-->xgbe_powerup\n");
48399@@ -935,7 +935,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48400
48401 static int xgbe_start(struct xgbe_prv_data *pdata)
48402 {
48403- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48404+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48405 struct net_device *netdev = pdata->netdev;
48406 int ret;
48407
48408@@ -976,7 +976,7 @@ err_napi:
48409
48410 static void xgbe_stop(struct xgbe_prv_data *pdata)
48411 {
48412- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48413+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48414 struct xgbe_channel *channel;
48415 struct net_device *netdev = pdata->netdev;
48416 struct netdev_queue *txq;
48417@@ -1203,7 +1203,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48418 return -ERANGE;
48419 }
48420
48421- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48422+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48423
48424 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48425
48426@@ -1352,7 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48427 static int xgbe_open(struct net_device *netdev)
48428 {
48429 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48430- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48431+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48432 int ret;
48433
48434 DBGPR("-->xgbe_open\n");
48435@@ -1424,7 +1424,7 @@ err_phy_init:
48436 static int xgbe_close(struct net_device *netdev)
48437 {
48438 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48439- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48440+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48441
48442 DBGPR("-->xgbe_close\n");
48443
48444@@ -1452,8 +1452,8 @@ static int xgbe_close(struct net_device *netdev)
48445 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48446 {
48447 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48448- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48449- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48450+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48451+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48452 struct xgbe_channel *channel;
48453 struct xgbe_ring *ring;
48454 struct xgbe_packet_data *packet;
48455@@ -1521,7 +1521,7 @@ tx_netdev_return:
48456 static void xgbe_set_rx_mode(struct net_device *netdev)
48457 {
48458 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48459- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48460+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48461 unsigned int pr_mode, am_mode;
48462
48463 DBGPR("-->xgbe_set_rx_mode\n");
48464@@ -1540,7 +1540,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48465 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48466 {
48467 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48468- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48469+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48470 struct sockaddr *saddr = addr;
48471
48472 DBGPR("-->xgbe_set_mac_address\n");
48473@@ -1607,7 +1607,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48474
48475 DBGPR("-->%s\n", __func__);
48476
48477- pdata->hw_if.read_mmc_stats(pdata);
48478+ pdata->hw_if->read_mmc_stats(pdata);
48479
48480 s->rx_packets = pstats->rxframecount_gb;
48481 s->rx_bytes = pstats->rxoctetcount_gb;
48482@@ -1634,7 +1634,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48483 u16 vid)
48484 {
48485 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48486- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48487+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48488
48489 DBGPR("-->%s\n", __func__);
48490
48491@@ -1650,7 +1650,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48492 u16 vid)
48493 {
48494 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48495- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48496+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48497
48498 DBGPR("-->%s\n", __func__);
48499
48500@@ -1716,7 +1716,7 @@ static int xgbe_set_features(struct net_device *netdev,
48501 netdev_features_t features)
48502 {
48503 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48504- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48505+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48506 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
48507 int ret = 0;
48508
48509@@ -1781,8 +1781,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48510 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48511 {
48512 struct xgbe_prv_data *pdata = channel->pdata;
48513- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48514- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48515+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48516+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48517 struct xgbe_ring *ring = channel->rx_ring;
48518 struct xgbe_ring_data *rdata;
48519
48520@@ -1835,8 +1835,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
48521 static int xgbe_tx_poll(struct xgbe_channel *channel)
48522 {
48523 struct xgbe_prv_data *pdata = channel->pdata;
48524- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48525- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48526+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48527+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48528 struct xgbe_ring *ring = channel->tx_ring;
48529 struct xgbe_ring_data *rdata;
48530 struct xgbe_ring_desc *rdesc;
48531@@ -1901,7 +1901,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48532 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48533 {
48534 struct xgbe_prv_data *pdata = channel->pdata;
48535- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48536+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48537 struct xgbe_ring *ring = channel->rx_ring;
48538 struct xgbe_ring_data *rdata;
48539 struct xgbe_packet_data *packet;
48540diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48541index ebf4893..a8f51c6 100644
48542--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48543+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48544@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48545
48546 DBGPR("-->%s\n", __func__);
48547
48548- pdata->hw_if.read_mmc_stats(pdata);
48549+ pdata->hw_if->read_mmc_stats(pdata);
48550 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48551 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48552 *data++ = *(u64 *)stat;
48553@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48554 struct ethtool_coalesce *ec)
48555 {
48556 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48557- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48558+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48559 unsigned int riwt;
48560
48561 DBGPR("-->xgbe_get_coalesce\n");
48562@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48563 struct ethtool_coalesce *ec)
48564 {
48565 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48566- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48567+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48568 unsigned int rx_frames, rx_riwt, rx_usecs;
48569 unsigned int tx_frames, tx_usecs;
48570
48571@@ -536,7 +536,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
48572 const u8 *key, const u8 hfunc)
48573 {
48574 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48575- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48576+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48577 unsigned int ret;
48578
48579 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
48580diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48581index 32dd651..225cca3 100644
48582--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48583+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48584@@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48585 DBGPR("<--xgbe_default_config\n");
48586 }
48587
48588-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48589-{
48590- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48591- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48592-}
48593-
48594 #ifdef CONFIG_ACPI
48595 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
48596 {
48597@@ -396,9 +390,8 @@ static int xgbe_probe(struct platform_device *pdev)
48598 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
48599
48600 /* Set all the function pointers */
48601- xgbe_init_all_fptrs(pdata);
48602- hw_if = &pdata->hw_if;
48603- desc_if = &pdata->desc_if;
48604+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48605+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48606
48607 /* Issue software reset to device */
48608 hw_if->exit(pdata);
48609diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48610index 59e267f..0842a88 100644
48611--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48612+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48613@@ -126,7 +126,7 @@
48614 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48615 {
48616 struct xgbe_prv_data *pdata = mii->priv;
48617- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48618+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48619 int mmd_data;
48620
48621 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48622@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48623 u16 mmd_val)
48624 {
48625 struct xgbe_prv_data *pdata = mii->priv;
48626- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48627+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48628 int mmd_data = mmd_val;
48629
48630 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48631diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48632index f326178..8bd7daf 100644
48633--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48634+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48635@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48636 tstamp_cc);
48637 u64 nsec;
48638
48639- nsec = pdata->hw_if.get_tstamp_time(pdata);
48640+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48641
48642 return nsec;
48643 }
48644@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48645
48646 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48647
48648- pdata->hw_if.update_tstamp_addend(pdata, addend);
48649+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48650
48651 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48652
48653diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48654index 13e8f95..1d8beef 100644
48655--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48656+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48657@@ -675,8 +675,8 @@ struct xgbe_prv_data {
48658 int dev_irq;
48659 unsigned int per_channel_irq;
48660
48661- struct xgbe_hw_if hw_if;
48662- struct xgbe_desc_if desc_if;
48663+ struct xgbe_hw_if *hw_if;
48664+ struct xgbe_desc_if *desc_if;
48665
48666 /* AXI DMA settings */
48667 unsigned int coherent;
48668@@ -798,6 +798,9 @@ struct xgbe_prv_data {
48669 #endif
48670 };
48671
48672+extern const struct xgbe_hw_if default_xgbe_hw_if;
48673+extern const struct xgbe_desc_if default_xgbe_desc_if;
48674+
48675 /* Function prototypes */
48676
48677 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
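
The xgbe hunks above all make the same change: the per-device private struct stops embedding its ops tables and instead points at a single static const table (default_xgbe_hw_if / default_xgbe_desc_if, declared in the xgbe.h hunk), so the function-pointer tables can sit in read-only memory. A minimal sketch of the pattern, with hypothetical names:

struct hw_ops {
	int (*start)(void *priv);
	void (*stop)(void *priv);
};

static int example_start(void *priv) { return 0; }
static void example_stop(void *priv) { }

/* one const table shared by every instance, placed in .rodata */
static const struct hw_ops default_hw_ops = {
	.start = example_start,
	.stop  = example_stop,
};

struct priv_data {
	const struct hw_ops *ops;	/* was: struct hw_ops ops; */
};

static void priv_init(struct priv_data *pdata)
{
	pdata->ops = &default_hw_ops;	/* no per-instance fill-in */
}
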
48678diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48679index adcacda..fa6e0ae 100644
48680--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48681+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48682@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48683 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48684 {
48685 /* RX_MODE controlling object */
48686- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48687+ bnx2x_init_rx_mode_obj(bp);
48688
48689 /* multicast configuration controlling object */
48690 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48691diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48692index 07cdf9b..b08ecc7 100644
48693--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48694+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48695@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48696 return rc;
48697 }
48698
48699-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48700- struct bnx2x_rx_mode_obj *o)
48701+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48702 {
48703 if (CHIP_IS_E1x(bp)) {
48704- o->wait_comp = bnx2x_empty_rx_mode_wait;
48705- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48706+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48707+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48708 } else {
48709- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48710- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48711+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48712+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48713 }
48714 }
48715
48716diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48717index 86baecb..ff3bb46 100644
48718--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48719+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48720@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48721
48722 /********************* RX MODE ****************/
48723
48724-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48725- struct bnx2x_rx_mode_obj *o);
48726+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48727
48728 /**
48729 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
48730diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48731index 31c9f82..e65e986 100644
48732--- a/drivers/net/ethernet/broadcom/tg3.h
48733+++ b/drivers/net/ethernet/broadcom/tg3.h
48734@@ -150,6 +150,7 @@
48735 #define CHIPREV_ID_5750_A0 0x4000
48736 #define CHIPREV_ID_5750_A1 0x4001
48737 #define CHIPREV_ID_5750_A3 0x4003
48738+#define CHIPREV_ID_5750_C1 0x4201
48739 #define CHIPREV_ID_5750_C2 0x4202
48740 #define CHIPREV_ID_5752_A0_HW 0x5000
48741 #define CHIPREV_ID_5752_A0 0x6000
48742diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48743index 903466e..b285864 100644
48744--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48745+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48746@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
48747 }
48748
48749 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48750- bna_cb_ioceth_enable,
48751- bna_cb_ioceth_disable,
48752- bna_cb_ioceth_hbfail,
48753- bna_cb_ioceth_reset
48754+ .enable_cbfn = bna_cb_ioceth_enable,
48755+ .disable_cbfn = bna_cb_ioceth_disable,
48756+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
48757+ .reset_cbfn = bna_cb_ioceth_reset
48758 };
48759
48760 static void bna_attr_init(struct bna_ioceth *ioceth)
48761diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48762index 8cffcdf..aadf043 100644
48763--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48764+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48765@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48766 */
48767 struct l2t_skb_cb {
48768 arp_failure_handler_func arp_failure_handler;
48769-};
48770+} __no_const;
48771
48772 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
48773
48774diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48775index d929951..a2c23f5 100644
48776--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48777+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48778@@ -2215,7 +2215,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
48779
48780 int i;
48781 struct adapter *ap = netdev2adap(dev);
48782- static const unsigned int *reg_ranges;
48783+ const unsigned int *reg_ranges;
48784 int arr_size = 0, buf_size = 0;
48785
48786 if (is_t4(ap->params.chip)) {
48787diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48788index badff18..e15c4ec 100644
48789--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48790+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48791@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48792 for (i=0; i<ETH_ALEN; i++) {
48793 tmp.addr[i] = dev->dev_addr[i];
48794 }
48795- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48796+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48797 break;
48798
48799 case DE4X5_SET_HWADDR: /* Set the hardware address */
48800@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48801 spin_lock_irqsave(&lp->lock, flags);
48802 memcpy(&statbuf, &lp->pktStats, ioc->len);
48803 spin_unlock_irqrestore(&lp->lock, flags);
48804- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48805+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48806 return -EFAULT;
48807 break;
48808 }
48809diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48810index e6b790f..051ba2d 100644
48811--- a/drivers/net/ethernet/emulex/benet/be_main.c
48812+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48813@@ -536,7 +536,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48814
48815 if (wrapped)
48816 newacc += 65536;
48817- ACCESS_ONCE(*acc) = newacc;
48818+ ACCESS_ONCE_RW(*acc) = newacc;
48819 }
48820
48821 static void populate_erx_stats(struct be_adapter *adapter,
48822diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48823index 6d0c5d5..55be363 100644
48824--- a/drivers/net/ethernet/faraday/ftgmac100.c
48825+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48826@@ -30,6 +30,8 @@
48827 #include <linux/netdevice.h>
48828 #include <linux/phy.h>
48829 #include <linux/platform_device.h>
48830+#include <linux/interrupt.h>
48831+#include <linux/irqreturn.h>
48832 #include <net/ip.h>
48833
48834 #include "ftgmac100.h"
48835diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48836index dce5f7b..2433466 100644
48837--- a/drivers/net/ethernet/faraday/ftmac100.c
48838+++ b/drivers/net/ethernet/faraday/ftmac100.c
48839@@ -31,6 +31,8 @@
48840 #include <linux/module.h>
48841 #include <linux/netdevice.h>
48842 #include <linux/platform_device.h>
48843+#include <linux/interrupt.h>
48844+#include <linux/irqreturn.h>
48845
48846 #include "ftmac100.h"
48847
48848diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48849index fabcfa1..188fd22 100644
48850--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48851+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48852@@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48853 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48854
48855 /* Update the base adjustment value. */
48856- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48857+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48858 smp_mb(); /* Force the above update. */
48859 }
48860
48861diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48862index 79c00f5..8da39f6 100644
48863--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48864+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48865@@ -785,7 +785,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48866 }
48867
48868 /* update the base incval used to calculate frequency adjustment */
48869- ACCESS_ONCE(adapter->base_incval) = incval;
48870+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48871 smp_mb();
48872
48873 /* need lock to prevent incorrect read while modifying cyclecounter */
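
In this patch set, plain ACCESS_ONCE() appears to be reserved for reads, and deliberate lock-free writes are spelled ACCESS_ONCE_RW(), as in the be2net, i40e, ixgbe, mlx4, sfc and ath9k hunks. A hedged sketch of how such a read/write split can be expressed; the real macros live in the patched include/linux/compiler.h and may differ in detail:

/* illustrative definitions only */
#define ACCESS_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

/* reads stay as before; the const qualifier turns an accidental
 * write through ACCESS_ONCE() into a compile error, so writers
 * must opt in explicitly:
 *
 *	u64 v = ACCESS_ONCE(ring->cons);
 *	ACCESS_ONCE_RW(ring->cons) = v + 1;
 */
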
48874diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48875index 8c234ec..757331f 100644
48876--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48877+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48878@@ -468,8 +468,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
48879 wmb();
48880
48881 /* we want to dirty this cache line once */
48882- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
48883- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
48884+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
48885+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
48886
48887 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
48888
48889diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48890index 6223930..975033d 100644
48891--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48892+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48893@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48894 struct __vxge_hw_fifo *fifo;
48895 struct vxge_hw_fifo_config *config;
48896 u32 txdl_size, txdl_per_memblock;
48897- struct vxge_hw_mempool_cbs fifo_mp_callback;
48898+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48899+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48900+ };
48901+
48902 struct __vxge_hw_virtualpath *vpath;
48903
48904 if ((vp == NULL) || (attr == NULL)) {
48905@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48906 goto exit;
48907 }
48908
48909- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48910-
48911 fifo->mempool =
48912 __vxge_hw_mempool_create(vpath->hldev,
48913 fifo->config->memblock_size,
48914diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48915index 2bb48d5..d1a865d 100644
48916--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48917+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48918@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48919 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48920 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48921 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48922- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48923+ pax_open_kernel();
48924+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48925+ pax_close_kernel();
48926 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48927 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48928 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48929diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48930index be7d7a6..a8983f8 100644
48931--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48932+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48933@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48934 case QLCNIC_NON_PRIV_FUNC:
48935 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48936 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48937- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48938+ pax_open_kernel();
48939+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48940+ pax_close_kernel();
48941 break;
48942 case QLCNIC_PRIV_FUNC:
48943 ahw->op_mode = QLCNIC_PRIV_FUNC;
48944 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48945- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48946+ pax_open_kernel();
48947+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48948+ pax_close_kernel();
48949 break;
48950 case QLCNIC_MGMT_FUNC:
48951 ahw->op_mode = QLCNIC_MGMT_FUNC;
48952 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48953- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48954+ pax_open_kernel();
48955+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48956+ pax_close_kernel();
48957 break;
48958 default:
48959 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
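
Because the constify plugin leaves ops structures read-only at runtime, the few legitimate writes are bracketed with pax_open_kernel()/pax_close_kernel() and go through a const-stripping cast, exactly as in the qlcnic hunks above. A reduced sketch; the ops type here is hypothetical, while the pax_*_kernel() helpers are the ones this patch set provides:

struct nic_ops {
	int (*init_driver)(void *adapter);
};

static int example_init_driver(void *adapter) { return 0; }

static void set_init_driver(struct nic_ops *ops)
{
	pax_open_kernel();		/* temporarily lift write protection */
	*(void **)&ops->init_driver = example_init_driver;
	pax_close_kernel();		/* restore it */
}
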
48960diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48961index 332bb8a..e6adcd1 100644
48962--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48963+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48964@@ -1285,7 +1285,7 @@ flash_temp:
48965 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48966 {
48967 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48968- static const struct qlcnic_dump_operations *fw_dump_ops;
48969+ const struct qlcnic_dump_operations *fw_dump_ops;
48970 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48971 u32 entry_offset, dump, no_entries, buf_offset = 0;
48972 int i, k, ops_cnt, ops_index, dump_size = 0;
48973diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48974index c70ab40..00b28e0 100644
48975--- a/drivers/net/ethernet/realtek/r8169.c
48976+++ b/drivers/net/ethernet/realtek/r8169.c
48977@@ -788,22 +788,22 @@ struct rtl8169_private {
48978 struct mdio_ops {
48979 void (*write)(struct rtl8169_private *, int, int);
48980 int (*read)(struct rtl8169_private *, int);
48981- } mdio_ops;
48982+ } __no_const mdio_ops;
48983
48984 struct pll_power_ops {
48985 void (*down)(struct rtl8169_private *);
48986 void (*up)(struct rtl8169_private *);
48987- } pll_power_ops;
48988+ } __no_const pll_power_ops;
48989
48990 struct jumbo_ops {
48991 void (*enable)(struct rtl8169_private *);
48992 void (*disable)(struct rtl8169_private *);
48993- } jumbo_ops;
48994+ } __no_const jumbo_ops;
48995
48996 struct csi_ops {
48997 void (*write)(struct rtl8169_private *, int, int);
48998 u32 (*read)(struct rtl8169_private *, int);
48999- } csi_ops;
49000+ } __no_const csi_ops;
49001
49002 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49003 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49004diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49005index 6b861e3..204ac86 100644
49006--- a/drivers/net/ethernet/sfc/ptp.c
49007+++ b/drivers/net/ethernet/sfc/ptp.c
49008@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49009 ptp->start.dma_addr);
49010
49011 /* Clear flag that signals MC ready */
49012- ACCESS_ONCE(*start) = 0;
49013+ ACCESS_ONCE_RW(*start) = 0;
49014 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49015 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49016 EFX_BUG_ON_PARANOID(rc);
49017diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
49018index 10b6173..b605dfd5 100644
49019--- a/drivers/net/ethernet/sfc/selftest.c
49020+++ b/drivers/net/ethernet/sfc/selftest.c
49021@@ -46,7 +46,7 @@ struct efx_loopback_payload {
49022 struct iphdr ip;
49023 struct udphdr udp;
49024 __be16 iteration;
49025- const char msg[64];
49026+ char msg[64];
49027 } __packed;
49028
49029 /* Loopback test source MAC address */
49030diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49031index 08c483b..2c4a553 100644
49032--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49033+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49034@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49035
49036 writel(value, ioaddr + MMC_CNTRL);
49037
49038- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49039- MMC_CNTRL, value);
49040+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49041+// MMC_CNTRL, value);
49042 }
49043
49044 /* To mask all interrupts. */
49045diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
49046index 17e2766..c332f1e 100644
49047--- a/drivers/net/ethernet/via/via-rhine.c
49048+++ b/drivers/net/ethernet/via/via-rhine.c
49049@@ -2514,7 +2514,7 @@ static struct platform_driver rhine_driver_platform = {
49050 }
49051 };
49052
49053-static struct dmi_system_id rhine_dmi_table[] __initdata = {
49054+static const struct dmi_system_id rhine_dmi_table[] __initconst = {
49055 {
49056 .ident = "EPIA-M",
49057 .matches = {
49058diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49059index 384ca4f..dd7d4f9 100644
49060--- a/drivers/net/hyperv/hyperv_net.h
49061+++ b/drivers/net/hyperv/hyperv_net.h
49062@@ -171,7 +171,7 @@ struct rndis_device {
49063 enum rndis_device_state state;
49064 bool link_state;
49065 bool link_change;
49066- atomic_t new_req_id;
49067+ atomic_unchecked_t new_req_id;
49068
49069 spinlock_t request_lock;
49070 struct list_head req_list;
49071diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49072index 7816d98..7890614 100644
49073--- a/drivers/net/hyperv/rndis_filter.c
49074+++ b/drivers/net/hyperv/rndis_filter.c
49075@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49076 * template
49077 */
49078 set = &rndis_msg->msg.set_req;
49079- set->req_id = atomic_inc_return(&dev->new_req_id);
49080+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49081
49082 /* Add to the request list */
49083 spin_lock_irqsave(&dev->request_lock, flags);
49084@@ -918,7 +918,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49085
49086 /* Setup the rndis set */
49087 halt = &request->request_msg.msg.halt_req;
49088- halt->req_id = atomic_inc_return(&dev->new_req_id);
49089+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49090
49091 /* Ignore return since this msg is optional. */
49092 rndis_filter_send_request(dev, request);
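
The rndis new_req_id counter above only needs wrap-around uniqueness, so it becomes atomic_unchecked_t, which in this patch set exempts the variable from the overflow instrumentation applied to plain atomic_t. An illustrative counter of the same shape (next_req_id() is a hypothetical helper; the _unchecked type and accessor are the patch's own):

static atomic_unchecked_t req_id = ATOMIC_INIT(0);

static u32 next_req_id(void)
{
	/* wrap-around is harmless here: ids only need to differ */
	return (u32)atomic_inc_return_unchecked(&req_id);
}
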
49093diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
49094index 34f846b..4a0d5b1 100644
49095--- a/drivers/net/ifb.c
49096+++ b/drivers/net/ifb.c
49097@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
49098 return 0;
49099 }
49100
49101-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
49102+static struct rtnl_link_ops ifb_link_ops = {
49103 .kind = "ifb",
49104 .priv_size = sizeof(struct ifb_private),
49105 .setup = ifb_setup,
49106diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49107index 1df38bd..4bc20b0 100644
49108--- a/drivers/net/macvlan.c
49109+++ b/drivers/net/macvlan.c
49110@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49111 free_nskb:
49112 kfree_skb(nskb);
49113 err:
49114- atomic_long_inc(&skb->dev->rx_dropped);
49115+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49116 }
49117
49118 static void macvlan_flush_sources(struct macvlan_port *port,
49119@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49120 int macvlan_link_register(struct rtnl_link_ops *ops)
49121 {
49122 /* common fields */
49123- ops->priv_size = sizeof(struct macvlan_dev);
49124- ops->validate = macvlan_validate;
49125- ops->maxtype = IFLA_MACVLAN_MAX;
49126- ops->policy = macvlan_policy;
49127- ops->changelink = macvlan_changelink;
49128- ops->get_size = macvlan_get_size;
49129- ops->fill_info = macvlan_fill_info;
49130+ pax_open_kernel();
49131+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49132+ *(void **)&ops->validate = macvlan_validate;
49133+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49134+ *(const void **)&ops->policy = macvlan_policy;
49135+ *(void **)&ops->changelink = macvlan_changelink;
49136+ *(void **)&ops->get_size = macvlan_get_size;
49137+ *(void **)&ops->fill_info = macvlan_fill_info;
49138+ pax_close_kernel();
49139
49140 return rtnl_link_register(ops);
49141 };
49142@@ -1551,7 +1553,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49143 return NOTIFY_DONE;
49144 }
49145
49146-static struct notifier_block macvlan_notifier_block __read_mostly = {
49147+static struct notifier_block macvlan_notifier_block = {
49148 .notifier_call = macvlan_device_event,
49149 };
49150
49151diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49152index 27ecc5c..f636328 100644
49153--- a/drivers/net/macvtap.c
49154+++ b/drivers/net/macvtap.c
49155@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
49156 dev->tx_queue_len = TUN_READQ_SIZE;
49157 }
49158
49159-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
49160+static struct rtnl_link_ops macvtap_link_ops = {
49161 .kind = "macvtap",
49162 .setup = macvtap_setup,
49163 .newlink = macvtap_newlink,
49164@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49165
49166 ret = 0;
49167 u = q->flags;
49168- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49169+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49170 put_user(u, &ifr->ifr_flags))
49171 ret = -EFAULT;
49172 macvtap_put_vlan(vlan);
49173@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49174 return NOTIFY_DONE;
49175 }
49176
49177-static struct notifier_block macvtap_notifier_block __read_mostly = {
49178+static struct notifier_block macvtap_notifier_block = {
49179 .notifier_call = macvtap_device_event,
49180 };
49181
49182diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
49183index 34924df..a747360 100644
49184--- a/drivers/net/nlmon.c
49185+++ b/drivers/net/nlmon.c
49186@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
49187 return 0;
49188 }
49189
49190-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
49191+static struct rtnl_link_ops nlmon_link_ops = {
49192 .kind = "nlmon",
49193 .priv_size = sizeof(struct nlmon),
49194 .setup = nlmon_setup,
49195diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
49196index bdfe51f..e7845c7 100644
49197--- a/drivers/net/phy/phy_device.c
49198+++ b/drivers/net/phy/phy_device.c
49199@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
49200 * zero on success.
49201 *
49202 */
49203-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49204+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
49205 struct phy_c45_device_ids *c45_ids) {
49206 int phy_reg;
49207 int i, reg_addr;
49208@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49209 * its return value is in turn returned.
49210 *
49211 */
49212-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49213+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
49214 bool is_c45, struct phy_c45_device_ids *c45_ids)
49215 {
49216 int phy_reg;
49217@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49218 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
49219 {
49220 struct phy_c45_device_ids c45_ids = {0};
49221- u32 phy_id = 0;
49222+ int phy_id = 0;
49223 int r;
49224
49225 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
49226diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49227index 9d15566..5ad4ef6 100644
49228--- a/drivers/net/ppp/ppp_generic.c
49229+++ b/drivers/net/ppp/ppp_generic.c
49230@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49231 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49232 struct ppp_stats stats;
49233 struct ppp_comp_stats cstats;
49234- char *vers;
49235
49236 switch (cmd) {
49237 case SIOCGPPPSTATS:
49238@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49239 break;
49240
49241 case SIOCGPPPVER:
49242- vers = PPP_VERSION;
49243- if (copy_to_user(addr, vers, strlen(vers) + 1))
49244+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49245 break;
49246 err = 0;
49247 break;
49248diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49249index 079f7ad..b2a2bfa7 100644
49250--- a/drivers/net/slip/slhc.c
49251+++ b/drivers/net/slip/slhc.c
49252@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49253 register struct tcphdr *thp;
49254 register struct iphdr *ip;
49255 register struct cstate *cs;
49256- int len, hdrlen;
49257+ long len, hdrlen;
49258 unsigned char *cp = icp;
49259
49260 /* We've got a compressed packet; read the change byte */
49261diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49262index 7d39484..d58499d 100644
49263--- a/drivers/net/team/team.c
49264+++ b/drivers/net/team/team.c
49265@@ -2099,7 +2099,7 @@ static unsigned int team_get_num_rx_queues(void)
49266 return TEAM_DEFAULT_NUM_RX_QUEUES;
49267 }
49268
49269-static struct rtnl_link_ops team_link_ops __read_mostly = {
49270+static struct rtnl_link_ops team_link_ops = {
49271 .kind = DRV_NAME,
49272 .priv_size = sizeof(struct team),
49273 .setup = team_setup,
49274@@ -2889,7 +2889,7 @@ static int team_device_event(struct notifier_block *unused,
49275 return NOTIFY_DONE;
49276 }
49277
49278-static struct notifier_block team_notifier_block __read_mostly = {
49279+static struct notifier_block team_notifier_block = {
49280 .notifier_call = team_device_event,
49281 };
49282
49283diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49284index 857dca4..642f532 100644
49285--- a/drivers/net/tun.c
49286+++ b/drivers/net/tun.c
49287@@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
49288 return -EINVAL;
49289 }
49290
49291-static struct rtnl_link_ops tun_link_ops __read_mostly = {
49292+static struct rtnl_link_ops tun_link_ops = {
49293 .kind = DRV_NAME,
49294 .priv_size = sizeof(struct tun_struct),
49295 .setup = tun_setup,
49296@@ -1830,7 +1830,7 @@ unlock:
49297 }
49298
49299 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49300- unsigned long arg, int ifreq_len)
49301+ unsigned long arg, size_t ifreq_len)
49302 {
49303 struct tun_file *tfile = file->private_data;
49304 struct tun_struct *tun;
49305@@ -1844,6 +1844,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49306 int le;
49307 int ret;
49308
49309+ if (ifreq_len > sizeof ifr)
49310+ return -EFAULT;
49311+
49312 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49313 if (copy_from_user(&ifr, argp, ifreq_len))
49314 return -EFAULT;
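
The tun hunk above validates the caller-controlled ifreq_len against the on-stack buffer before any copy, the same check-then-copy shape added to the de4x5 ioctl earlier in this patch. A standalone sketch with illustrative names:

static long example_ioctl(void __user *argp, size_t user_len)
{
	struct ifreq ifr;

	if (user_len > sizeof(ifr))	/* reject oversized lengths */
		return -EFAULT;
	if (copy_from_user(&ifr, argp, user_len))
		return -EFAULT;
	/* ... act on ifr ... */
	return 0;
}
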
49315diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49316index 778e915..58c4d95 100644
49317--- a/drivers/net/usb/hso.c
49318+++ b/drivers/net/usb/hso.c
49319@@ -70,7 +70,7 @@
49320 #include <asm/byteorder.h>
49321 #include <linux/serial_core.h>
49322 #include <linux/serial.h>
49323-
49324+#include <asm/local.h>
49325
49326 #define MOD_AUTHOR "Option Wireless"
49327 #define MOD_DESCRIPTION "USB High Speed Option driver"
49328@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49329 struct urb *urb;
49330
49331 urb = serial->rx_urb[0];
49332- if (serial->port.count > 0) {
49333+ if (atomic_read(&serial->port.count) > 0) {
49334 count = put_rxbuf_data(urb, serial);
49335 if (count == -1)
49336 return;
49337@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49338 DUMP1(urb->transfer_buffer, urb->actual_length);
49339
49340 /* Anyone listening? */
49341- if (serial->port.count == 0)
49342+ if (atomic_read(&serial->port.count) == 0)
49343 return;
49344
49345 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49346@@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49347 tty_port_tty_set(&serial->port, tty);
49348
49349 /* check for port already opened, if not set the termios */
49350- serial->port.count++;
49351- if (serial->port.count == 1) {
49352+ if (atomic_inc_return(&serial->port.count) == 1) {
49353 serial->rx_state = RX_IDLE;
49354 /* Force default termio settings */
49355 _hso_serial_set_termios(tty, NULL);
49356@@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49357 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49358 if (result) {
49359 hso_stop_serial_device(serial->parent);
49360- serial->port.count--;
49361+ atomic_dec(&serial->port.count);
49362 } else {
49363 kref_get(&serial->parent->ref);
49364 }
49365@@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49366
49367 /* reset the rts and dtr */
49368 /* do the actual close */
49369- serial->port.count--;
49370+ atomic_dec(&serial->port.count);
49371
49372- if (serial->port.count <= 0) {
49373- serial->port.count = 0;
49374+ if (atomic_read(&serial->port.count) <= 0) {
49375+ atomic_set(&serial->port.count, 0);
49376 tty_port_tty_set(&serial->port, NULL);
49377 if (!usb_gone)
49378 hso_stop_serial_device(serial->parent);
49379@@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49380
49381 /* the actual setup */
49382 spin_lock_irqsave(&serial->serial_lock, flags);
49383- if (serial->port.count)
49384+ if (atomic_read(&serial->port.count))
49385 _hso_serial_set_termios(tty, old);
49386 else
49387 tty->termios = *old;
49388@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
49389 D1("Pending read interrupt on port %d\n", i);
49390 spin_lock(&serial->serial_lock);
49391 if (serial->rx_state == RX_IDLE &&
49392- serial->port.count > 0) {
49393+ atomic_read(&serial->port.count) > 0) {
49394 /* Setup and send a ctrl req read on
49395 * port i */
49396 if (!serial->rx_urb_filled[0]) {
49397@@ -3053,7 +3052,7 @@ static int hso_resume(struct usb_interface *iface)
49398 /* Start all serial ports */
49399 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49400 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49401- if (dev2ser(serial_table[i])->port.count) {
49402+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49403 result =
49404 hso_start_serial_device(serial_table[i], GFP_NOIO);
49405 hso_kick_transmit(dev2ser(serial_table[i]));
49406diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49407index 9f7c0ab..1577b4a 100644
49408--- a/drivers/net/usb/r8152.c
49409+++ b/drivers/net/usb/r8152.c
49410@@ -601,7 +601,7 @@ struct r8152 {
49411 void (*unload)(struct r8152 *);
49412 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
49413 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
49414- } rtl_ops;
49415+ } __no_const rtl_ops;
49416
49417 int intr_interval;
49418 u32 saved_wolopts;
49419diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49420index a2515887..6d13233 100644
49421--- a/drivers/net/usb/sierra_net.c
49422+++ b/drivers/net/usb/sierra_net.c
49423@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49424 /* atomic counter partially included in MAC address to make sure 2 devices
49425 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49426 */
49427-static atomic_t iface_counter = ATOMIC_INIT(0);
49428+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49429
49430 /*
49431 * SYNC Timer Delay definition used to set the expiry time
49432@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49433 dev->net->netdev_ops = &sierra_net_device_ops;
49434
49435 /* change MAC addr to include, ifacenum, and to be unique */
49436- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49437+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49438 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49439
49440 /* we will have to manufacture ethernet headers, prepare template */
49441diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
49442index 777757a..395a767 100644
49443--- a/drivers/net/usb/usbnet.c
49444+++ b/drivers/net/usb/usbnet.c
49445@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
49446 struct net_device *net)
49447 {
49448 struct usbnet *dev = netdev_priv(net);
49449- int length;
49450+ unsigned int length;
49451 struct urb *urb = NULL;
49452 struct skb_data *entry;
49453 struct driver_info *info = dev->driver_info;
49454@@ -1413,7 +1413,7 @@ not_drop:
49455 }
49456 } else
49457 netif_dbg(dev, tx_queued, dev->net,
49458- "> tx, len %d, type 0x%x\n", length, skb->protocol);
49459+ "> tx, len %u, type 0x%x\n", length, skb->protocol);
49460 #ifdef CONFIG_PM
49461 deferred:
49462 #endif
49463diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49464index 59b0e97..a6ed579 100644
49465--- a/drivers/net/virtio_net.c
49466+++ b/drivers/net/virtio_net.c
49467@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49468 #define RECEIVE_AVG_WEIGHT 64
49469
49470 /* Minimum alignment for mergeable packet buffers. */
49471-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49472+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49473
49474 #define VIRTNET_DRIVER_VERSION "1.0.0"
49475
49476diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49477index fceb637..37c70fd 100644
49478--- a/drivers/net/vxlan.c
49479+++ b/drivers/net/vxlan.c
49480@@ -2935,7 +2935,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
49481 return vxlan->net;
49482 }
49483
49484-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49485+static struct rtnl_link_ops vxlan_link_ops = {
49486 .kind = "vxlan",
49487 .maxtype = IFLA_VXLAN_MAX,
49488 .policy = vxlan_policy,
49489@@ -2983,7 +2983,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49490 return NOTIFY_DONE;
49491 }
49492
49493-static struct notifier_block vxlan_notifier_block __read_mostly = {
49494+static struct notifier_block vxlan_notifier_block = {
49495 .notifier_call = vxlan_lowerdev_event,
49496 };
49497
49498diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49499index 5920c99..ff2e4a5 100644
49500--- a/drivers/net/wan/lmc/lmc_media.c
49501+++ b/drivers/net/wan/lmc/lmc_media.c
49502@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49503 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49504
49505 lmc_media_t lmc_ds3_media = {
49506- lmc_ds3_init, /* special media init stuff */
49507- lmc_ds3_default, /* reset to default state */
49508- lmc_ds3_set_status, /* reset status to state provided */
49509- lmc_dummy_set_1, /* set clock source */
49510- lmc_dummy_set2_1, /* set line speed */
49511- lmc_ds3_set_100ft, /* set cable length */
49512- lmc_ds3_set_scram, /* set scrambler */
49513- lmc_ds3_get_link_status, /* get link status */
49514- lmc_dummy_set_1, /* set link status */
49515- lmc_ds3_set_crc_length, /* set CRC length */
49516- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49517- lmc_ds3_watchdog
49518+ .init = lmc_ds3_init, /* special media init stuff */
49519+ .defaults = lmc_ds3_default, /* reset to default state */
49520+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49521+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49522+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49523+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49524+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49525+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49526+ .set_link_status = lmc_dummy_set_1, /* set link status */
49527+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49528+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49529+ .watchdog = lmc_ds3_watchdog
49530 };
49531
49532 lmc_media_t lmc_hssi_media = {
49533- lmc_hssi_init, /* special media init stuff */
49534- lmc_hssi_default, /* reset to default state */
49535- lmc_hssi_set_status, /* reset status to state provided */
49536- lmc_hssi_set_clock, /* set clock source */
49537- lmc_dummy_set2_1, /* set line speed */
49538- lmc_dummy_set_1, /* set cable length */
49539- lmc_dummy_set_1, /* set scrambler */
49540- lmc_hssi_get_link_status, /* get link status */
49541- lmc_hssi_set_link_status, /* set link status */
49542- lmc_hssi_set_crc_length, /* set CRC length */
49543- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49544- lmc_hssi_watchdog
49545+ .init = lmc_hssi_init, /* special media init stuff */
49546+ .defaults = lmc_hssi_default, /* reset to default state */
49547+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49548+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49549+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49550+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49551+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49552+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49553+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49554+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49555+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49556+ .watchdog = lmc_hssi_watchdog
49557 };
49558
49559-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49560- lmc_ssi_default, /* reset to default state */
49561- lmc_ssi_set_status, /* reset status to state provided */
49562- lmc_ssi_set_clock, /* set clock source */
49563- lmc_ssi_set_speed, /* set line speed */
49564- lmc_dummy_set_1, /* set cable length */
49565- lmc_dummy_set_1, /* set scrambler */
49566- lmc_ssi_get_link_status, /* get link status */
49567- lmc_ssi_set_link_status, /* set link status */
49568- lmc_ssi_set_crc_length, /* set CRC length */
49569- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49570- lmc_ssi_watchdog
49571+lmc_media_t lmc_ssi_media = {
49572+ .init = lmc_ssi_init, /* special media init stuff */
49573+ .defaults = lmc_ssi_default, /* reset to default state */
49574+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49575+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49576+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49577+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49578+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49579+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49580+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49581+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49582+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49583+ .watchdog = lmc_ssi_watchdog
49584 };
49585
49586 lmc_media_t lmc_t1_media = {
49587- lmc_t1_init, /* special media init stuff */
49588- lmc_t1_default, /* reset to default state */
49589- lmc_t1_set_status, /* reset status to state provided */
49590- lmc_t1_set_clock, /* set clock source */
49591- lmc_dummy_set2_1, /* set line speed */
49592- lmc_dummy_set_1, /* set cable length */
49593- lmc_dummy_set_1, /* set scrambler */
49594- lmc_t1_get_link_status, /* get link status */
49595- lmc_dummy_set_1, /* set link status */
49596- lmc_t1_set_crc_length, /* set CRC length */
49597- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49598- lmc_t1_watchdog
49599+ .init = lmc_t1_init, /* special media init stuff */
49600+ .defaults = lmc_t1_default, /* reset to default state */
49601+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49602+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49603+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49604+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49605+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49606+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49607+ .set_link_status = lmc_dummy_set_1, /* set link status */
49608+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49609+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49610+ .watchdog = lmc_t1_watchdog
49611 };
49612
49613 static void
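
The lmc_media_t tables above (and the z8530_irqhandler tables in the next file) are converted from positional to designated initializers, which keeps each table correct if the struct fields are ever reordered and makes the handler-to-slot mapping explicit. A tiny before/after sketch with hypothetical names:

struct media_ops {
	void (*init)(void);
	void (*watchdog)(void);
};

static void m_init(void) { }
static void m_watchdog(void) { }

/* before: static struct media_ops m = { m_init, m_watchdog }; */
static const struct media_ops m = {
	.init     = m_init,		/* order-independent */
	.watchdog = m_watchdog,		/* survives field reordering */
};
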
49614diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49615index feacc3b..5bac0de 100644
49616--- a/drivers/net/wan/z85230.c
49617+++ b/drivers/net/wan/z85230.c
49618@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49619
49620 struct z8530_irqhandler z8530_sync =
49621 {
49622- z8530_rx,
49623- z8530_tx,
49624- z8530_status
49625+ .rx = z8530_rx,
49626+ .tx = z8530_tx,
49627+ .status = z8530_status
49628 };
49629
49630 EXPORT_SYMBOL(z8530_sync);
49631@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49632 }
49633
49634 static struct z8530_irqhandler z8530_dma_sync = {
49635- z8530_dma_rx,
49636- z8530_dma_tx,
49637- z8530_dma_status
49638+ .rx = z8530_dma_rx,
49639+ .tx = z8530_dma_tx,
49640+ .status = z8530_dma_status
49641 };
49642
49643 static struct z8530_irqhandler z8530_txdma_sync = {
49644- z8530_rx,
49645- z8530_dma_tx,
49646- z8530_dma_status
49647+ .rx = z8530_rx,
49648+ .tx = z8530_dma_tx,
49649+ .status = z8530_dma_status
49650 };
49651
49652 /**
49653@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49654
49655 struct z8530_irqhandler z8530_nop=
49656 {
49657- z8530_rx_clear,
49658- z8530_tx_clear,
49659- z8530_status_clear
49660+ .rx = z8530_rx_clear,
49661+ .tx = z8530_tx_clear,
49662+ .status = z8530_status_clear
49663 };
49664
49665
49666diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49667index 0b60295..b8bfa5b 100644
49668--- a/drivers/net/wimax/i2400m/rx.c
49669+++ b/drivers/net/wimax/i2400m/rx.c
49670@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49671 if (i2400m->rx_roq == NULL)
49672 goto error_roq_alloc;
49673
49674- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49675+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49676 GFP_KERNEL);
49677 if (rd == NULL) {
49678 result = -ENOMEM;
49679diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49680index e71a2ce..2268d61 100644
49681--- a/drivers/net/wireless/airo.c
49682+++ b/drivers/net/wireless/airo.c
49683@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49684 struct airo_info *ai = dev->ml_priv;
49685 int ridcode;
49686 int enabled;
49687- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49688+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49689 unsigned char *iobuf;
49690
49691 /* Only super-user can write RIDs */
49692diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49693index da92bfa..5a9001a 100644
49694--- a/drivers/net/wireless/at76c50x-usb.c
49695+++ b/drivers/net/wireless/at76c50x-usb.c
49696@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49697 }
49698
49699 /* Convert timeout from the DFU status to jiffies */
49700-static inline unsigned long at76_get_timeout(struct dfu_status *s)
49701+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49702 {
49703 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49704 | (s->poll_timeout[1] << 8)
49705diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49706index 2fd9e18..3f55bdd 100644
49707--- a/drivers/net/wireless/ath/ath10k/htc.c
49708+++ b/drivers/net/wireless/ath/ath10k/htc.c
49709@@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
49710 /* registered target arrival callback from the HIF layer */
49711 int ath10k_htc_init(struct ath10k *ar)
49712 {
49713- struct ath10k_hif_cb htc_callbacks;
49714+ static struct ath10k_hif_cb htc_callbacks = {
49715+ .rx_completion = ath10k_htc_rx_completion_handler,
49716+ .tx_completion = ath10k_htc_tx_completion_handler,
49717+ };
49718 struct ath10k_htc_ep *ep = NULL;
49719 struct ath10k_htc *htc = &ar->htc;
49720
49721@@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
49722 ath10k_htc_reset_endpoint_states(htc);
49723
49724 /* setup HIF layer callbacks */
49725- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49726- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49727 htc->ar = ar;
49728
49729 /* Get HIF default pipe for HTC message exchange */
49730diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
49731index 527179c..a890150 100644
49732--- a/drivers/net/wireless/ath/ath10k/htc.h
49733+++ b/drivers/net/wireless/ath/ath10k/htc.h
49734@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
49735
49736 struct ath10k_htc_ops {
49737 void (*target_send_suspend_complete)(struct ath10k *ar);
49738-};
49739+} __no_const;
49740
49741 struct ath10k_htc_ep_ops {
49742 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
49743 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
49744 void (*ep_tx_credits)(struct ath10k *);
49745-};
49746+} __no_const;
49747
49748 /* service connection information */
49749 struct ath10k_htc_svc_conn_req {
49750diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49751index f816909..e56cd8b 100644
49752--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49753+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49754@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49755 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
49756 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
49757
49758- ACCESS_ONCE(ads->ds_link) = i->link;
49759- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
49760+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
49761+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
49762
49763 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
49764 ctl6 = SM(i->keytype, AR_EncrType);
49765@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49766
49767 if ((i->is_first || i->is_last) &&
49768 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
49769- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
49770+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
49771 | set11nTries(i->rates, 1)
49772 | set11nTries(i->rates, 2)
49773 | set11nTries(i->rates, 3)
49774 | (i->dur_update ? AR_DurUpdateEna : 0)
49775 | SM(0, AR_BurstDur);
49776
49777- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
49778+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
49779 | set11nRate(i->rates, 1)
49780 | set11nRate(i->rates, 2)
49781 | set11nRate(i->rates, 3);
49782 } else {
49783- ACCESS_ONCE(ads->ds_ctl2) = 0;
49784- ACCESS_ONCE(ads->ds_ctl3) = 0;
49785+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
49786+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
49787 }
49788
49789 if (!i->is_first) {
49790- ACCESS_ONCE(ads->ds_ctl0) = 0;
49791- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49792- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49793+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
49794+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49795+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49796 return;
49797 }
49798
49799@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49800 break;
49801 }
49802
49803- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49804+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49805 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49806 | SM(i->txpower[0], AR_XmitPower0)
49807 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49808@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49809 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
49810 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
49811
49812- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49813- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49814+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49815+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49816
49817 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
49818 return;
49819
49820- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49821+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49822 | set11nPktDurRTSCTS(i->rates, 1);
49823
49824- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49825+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49826 | set11nPktDurRTSCTS(i->rates, 3);
49827
49828- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49829+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49830 | set11nRateFlags(i->rates, 1)
49831 | set11nRateFlags(i->rates, 2)
49832 | set11nRateFlags(i->rates, 3)
49833 | SM(i->rtscts_rate, AR_RTSCTSRate);
49834
49835- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49836- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49837- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49838+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49839+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49840+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49841 }
49842
49843 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
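Every descriptor store above moves from ACCESS_ONCE() to ACCESS_ONCE_RW(). Under this patch the plain macro is const-qualified, so stray writes through it fail to compile, and the _RW variant is the explicit write form. A compilable sketch paraphrasing those definitions (verify against the patched include/linux/compiler.h):

#include <stdio.h>

/* Paraphrased: the read form adds const, the write form does not,
 * so only ACCESS_ONCE_RW() yields a modifiable lvalue. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int desc_word = 0;
	ACCESS_ONCE_RW(desc_word) = 42;		/* compiles: non-const lvalue */
	/* ACCESS_ONCE(desc_word) = 42;		   would be a compile error  */
	printf("%d\n", ACCESS_ONCE(desc_word));	/* volatile read, prints 42  */
	return 0;
}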
49844diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49845index da84b70..83e4978 100644
49846--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49847+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49848@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49849 (i->qcu << AR_TxQcuNum_S) | desc_len;
49850
49851 checksum += val;
49852- ACCESS_ONCE(ads->info) = val;
49853+ ACCESS_ONCE_RW(ads->info) = val;
49854
49855 checksum += i->link;
49856- ACCESS_ONCE(ads->link) = i->link;
49857+ ACCESS_ONCE_RW(ads->link) = i->link;
49858
49859 checksum += i->buf_addr[0];
49860- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
49861+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
49862 checksum += i->buf_addr[1];
49863- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
49864+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
49865 checksum += i->buf_addr[2];
49866- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
49867+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
49868 checksum += i->buf_addr[3];
49869- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
49870+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
49871
49872 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
49873- ACCESS_ONCE(ads->ctl3) = val;
49874+ ACCESS_ONCE_RW(ads->ctl3) = val;
49875 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49876- ACCESS_ONCE(ads->ctl5) = val;
49877+ ACCESS_ONCE_RW(ads->ctl5) = val;
49878 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49879- ACCESS_ONCE(ads->ctl7) = val;
49880+ ACCESS_ONCE_RW(ads->ctl7) = val;
49881 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49882- ACCESS_ONCE(ads->ctl9) = val;
49883+ ACCESS_ONCE_RW(ads->ctl9) = val;
49884
49885 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49886- ACCESS_ONCE(ads->ctl10) = checksum;
49887+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49888
49889 if (i->is_first || i->is_last) {
49890- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49891+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49892 | set11nTries(i->rates, 1)
49893 | set11nTries(i->rates, 2)
49894 | set11nTries(i->rates, 3)
49895 | (i->dur_update ? AR_DurUpdateEna : 0)
49896 | SM(0, AR_BurstDur);
49897
49898- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49899+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49900 | set11nRate(i->rates, 1)
49901 | set11nRate(i->rates, 2)
49902 | set11nRate(i->rates, 3);
49903 } else {
49904- ACCESS_ONCE(ads->ctl13) = 0;
49905- ACCESS_ONCE(ads->ctl14) = 0;
49906+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49907+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49908 }
49909
49910 ads->ctl20 = 0;
49911@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49912
49913 ctl17 = SM(i->keytype, AR_EncrType);
49914 if (!i->is_first) {
49915- ACCESS_ONCE(ads->ctl11) = 0;
49916- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49917- ACCESS_ONCE(ads->ctl15) = 0;
49918- ACCESS_ONCE(ads->ctl16) = 0;
49919- ACCESS_ONCE(ads->ctl17) = ctl17;
49920- ACCESS_ONCE(ads->ctl18) = 0;
49921- ACCESS_ONCE(ads->ctl19) = 0;
49922+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49923+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49924+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49925+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49926+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49927+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49928+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49929 return;
49930 }
49931
49932- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49933+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49934 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49935 | SM(i->txpower[0], AR_XmitPower0)
49936 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49937@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49938 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49939 ctl12 |= SM(val, AR_PAPRDChainMask);
49940
49941- ACCESS_ONCE(ads->ctl12) = ctl12;
49942- ACCESS_ONCE(ads->ctl17) = ctl17;
49943+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49944+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49945
49946- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49947+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49948 | set11nPktDurRTSCTS(i->rates, 1);
49949
49950- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49951+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49952 | set11nPktDurRTSCTS(i->rates, 3);
49953
49954- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49955+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49956 | set11nRateFlags(i->rates, 1)
49957 | set11nRateFlags(i->rates, 2)
49958 | set11nRateFlags(i->rates, 3)
49959 | SM(i->rtscts_rate, AR_RTSCTSRate);
49960
49961- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49962+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49963
49964- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49965- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49966- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49967+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49968+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49969+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49970 }
49971
49972 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49973diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49974index e82e570..8c3cf90 100644
49975--- a/drivers/net/wireless/ath/ath9k/hw.h
49976+++ b/drivers/net/wireless/ath/ath9k/hw.h
49977@@ -646,7 +646,7 @@ struct ath_hw_private_ops {
49978
49979 /* ANI */
49980 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49981-};
49982+} __no_const;
49983
49984 /**
49985 * struct ath_spec_scan - parameters for Atheros spectral scan
49986@@ -722,7 +722,7 @@ struct ath_hw_ops {
49987 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49988 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49989 #endif
49990-};
49991+} __no_const;
49992
49993 struct ath_nf_limits {
49994 s16 max;
49995diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49996index 9ede991..a8f08fb 100644
49997--- a/drivers/net/wireless/ath/ath9k/main.c
49998+++ b/drivers/net/wireless/ath/ath9k/main.c
49999@@ -2537,16 +2537,18 @@ void ath9k_fill_chanctx_ops(void)
50000 if (!ath9k_is_chanctx_enabled())
50001 return;
50002
50003- ath9k_ops.hw_scan = ath9k_hw_scan;
50004- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50005- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50006- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50007- ath9k_ops.add_chanctx = ath9k_add_chanctx;
50008- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50009- ath9k_ops.change_chanctx = ath9k_change_chanctx;
50010- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50011- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50012- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50013+ pax_open_kernel();
50014+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50015+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50016+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50017+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50018+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50019+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50020+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50021+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50022+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50023+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50024+ pax_close_kernel();
50025 }
50026
50027 #endif
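ath9k_fill_chanctx_ops now patches the (constified) ath9k_ops table inside a pax_open_kernel()/pax_close_kernel() bracket, which briefly lifts the write protection KERNEXEC keeps on such data. A userspace analogue of the bracket, assuming POSIX mmap/mprotect stand in for the kernel mechanism (error checks omitted for brevity):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	void **slot = mmap(NULL, pg, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	mprotect(slot, pg, PROT_READ);			/* the "const" ops table */

	mprotect(slot, pg, PROT_READ | PROT_WRITE);	/* pax_open_kernel()  */
	*slot = (void *)slot;				/* patch one pointer  */
	mprotect(slot, pg, PROT_READ);			/* pax_close_kernel() */

	printf("slot = %p\n", *slot);
	return 0;
}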
50028diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50029index 058a9f2..d5cb1ba 100644
50030--- a/drivers/net/wireless/b43/phy_lp.c
50031+++ b/drivers/net/wireless/b43/phy_lp.c
50032@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50033 {
50034 struct ssb_bus *bus = dev->dev->sdev->bus;
50035
50036- static const struct b206x_channel *chandata = NULL;
50037+ const struct b206x_channel *chandata = NULL;
50038 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50039 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50040 u16 old_comm15, scale;
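The b43 hunk drops a stray `static` from a per-call scratch pointer. A function-local static is one slot shared by every invocation (and every CPU), which is wrong for per-call state. The difference in miniature:

#include <stdio.h>

static int next_broken(void) { static int id; return ++id; }	/* shared slot */
static int next_fixed(void)  { int id = 0;    return ++id; }	/* per call    */

int main(void)
{
	int a = next_broken();
	int b = next_broken();
	int c = next_fixed();
	int d = next_fixed();
	printf("broken: %d,%d   fixed: %d,%d\n", a, b, c, d);	/* 1,2 vs 1,1 */
	return 0;
}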
50041diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50042index e566580..2c218ca 100644
50043--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50044+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50045@@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50046 */
50047 if (il3945_mod_params.disable_hw_scan) {
50048 D_INFO("Disabling hw_scan\n");
50049- il3945_mac_ops.hw_scan = NULL;
50050+ pax_open_kernel();
50051+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50052+ pax_close_kernel();
50053 }
50054
50055 D_INFO("*** LOAD DRIVER ***\n");
50056diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50057index 0ffb6ff..c0b7f0e 100644
50058--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50059+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50060@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50061 {
50062 struct iwl_priv *priv = file->private_data;
50063 char buf[64];
50064- int buf_size;
50065+ size_t buf_size;
50066 u32 offset, len;
50067
50068 memset(buf, 0, sizeof(buf));
50069@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50070 struct iwl_priv *priv = file->private_data;
50071
50072 char buf[8];
50073- int buf_size;
50074+ size_t buf_size;
50075 u32 reset_flag;
50076
50077 memset(buf, 0, sizeof(buf));
50078@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50079 {
50080 struct iwl_priv *priv = file->private_data;
50081 char buf[8];
50082- int buf_size;
50083+ size_t buf_size;
50084 int ht40;
50085
50086 memset(buf, 0, sizeof(buf));
50087@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50088 {
50089 struct iwl_priv *priv = file->private_data;
50090 char buf[8];
50091- int buf_size;
50092+ size_t buf_size;
50093 int value;
50094
50095 memset(buf, 0, sizeof(buf));
50096@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50097 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50098 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50099
50100-static const char *fmt_value = " %-30s %10u\n";
50101-static const char *fmt_hex = " %-30s 0x%02X\n";
50102-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50103-static const char *fmt_header =
50104+static const char fmt_value[] = " %-30s %10u\n";
50105+static const char fmt_hex[] = " %-30s 0x%02X\n";
50106+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50107+static const char fmt_header[] =
50108 "%-32s current cumulative delta max\n";
50109
50110 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50111@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50112 {
50113 struct iwl_priv *priv = file->private_data;
50114 char buf[8];
50115- int buf_size;
50116+ size_t buf_size;
50117 int clear;
50118
50119 memset(buf, 0, sizeof(buf));
50120@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50121 {
50122 struct iwl_priv *priv = file->private_data;
50123 char buf[8];
50124- int buf_size;
50125+ size_t buf_size;
50126 int trace;
50127
50128 memset(buf, 0, sizeof(buf));
50129@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50130 {
50131 struct iwl_priv *priv = file->private_data;
50132 char buf[8];
50133- int buf_size;
50134+ size_t buf_size;
50135 int missed;
50136
50137 memset(buf, 0, sizeof(buf));
50138@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50139
50140 struct iwl_priv *priv = file->private_data;
50141 char buf[8];
50142- int buf_size;
50143+ size_t buf_size;
50144 int plcp;
50145
50146 memset(buf, 0, sizeof(buf));
50147@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50148
50149 struct iwl_priv *priv = file->private_data;
50150 char buf[8];
50151- int buf_size;
50152+ size_t buf_size;
50153 int flush;
50154
50155 memset(buf, 0, sizeof(buf));
50156@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50157
50158 struct iwl_priv *priv = file->private_data;
50159 char buf[8];
50160- int buf_size;
50161+ size_t buf_size;
50162 int rts;
50163
50164 if (!priv->cfg->ht_params)
50165@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50166 {
50167 struct iwl_priv *priv = file->private_data;
50168 char buf[8];
50169- int buf_size;
50170+ size_t buf_size;
50171
50172 memset(buf, 0, sizeof(buf));
50173 buf_size = min(count, sizeof(buf) - 1);
50174@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50175 struct iwl_priv *priv = file->private_data;
50176 u32 event_log_flag;
50177 char buf[8];
50178- int buf_size;
50179+ size_t buf_size;
50180
50181 /* check that the interface is up */
50182 if (!iwl_is_ready(priv))
50183@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50184 struct iwl_priv *priv = file->private_data;
50185 char buf[8];
50186 u32 calib_disabled;
50187- int buf_size;
50188+ size_t buf_size;
50189
50190 memset(buf, 0, sizeof(buf));
50191 buf_size = min(count, sizeof(buf) - 1);
50192diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50193index 69935aa..c1ca128 100644
50194--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50195+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50196@@ -1836,7 +1836,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50197 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50198
50199 char buf[8];
50200- int buf_size;
50201+ size_t buf_size;
50202 u32 reset_flag;
50203
50204 memset(buf, 0, sizeof(buf));
50205@@ -1857,7 +1857,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50206 {
50207 struct iwl_trans *trans = file->private_data;
50208 char buf[8];
50209- int buf_size;
50210+ size_t buf_size;
50211 int csr;
50212
50213 memset(buf, 0, sizeof(buf));
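All of the debugfs write handlers above widen buf_size from int to size_t, the type of the user-supplied count it is clamped against. Mixing int with size_t invites the usual-arithmetic-conversions trap, where a negative signed value silently becomes a huge unsigned one:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	char buf[8];
	int n = -1;				/* a signed length gone wrong */

	if (n < (long)(sizeof(buf) - 1))
		printf("compared as signed: %d is small\n", n);
	if (n < sizeof(buf) - 1)		/* n converts to size_t first */
		printf("unreachable\n");
	else
		printf("compared as size_t: %d became %zu\n", n, (size_t)n);
	return 0;
}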
50214diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50215index 8908be6..fe97ddd 100644
50216--- a/drivers/net/wireless/mac80211_hwsim.c
50217+++ b/drivers/net/wireless/mac80211_hwsim.c
50218@@ -3070,20 +3070,20 @@ static int __init init_mac80211_hwsim(void)
50219 if (channels < 1)
50220 return -EINVAL;
50221
50222- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50223- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50224- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50225- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50226- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50227- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50228- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50229- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50230- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50231- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50232- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50233- mac80211_hwsim_assign_vif_chanctx;
50234- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50235- mac80211_hwsim_unassign_vif_chanctx;
50236+ pax_open_kernel();
50237+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50238+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50239+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50240+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50241+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50242+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50243+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50244+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50245+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50246+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50247+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50248+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50249+ pax_close_kernel();
50250
50251 spin_lock_init(&hwsim_radio_lock);
50252 INIT_LIST_HEAD(&hwsim_radios);
50253diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50254index 60d44ce..884dd1c 100644
50255--- a/drivers/net/wireless/rndis_wlan.c
50256+++ b/drivers/net/wireless/rndis_wlan.c
50257@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50258
50259 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50260
50261- if (rts_threshold < 0 || rts_threshold > 2347)
50262+ if (rts_threshold > 2347)
50263 rts_threshold = 2347;
50264
50265 tmp = cpu_to_le32(rts_threshold);
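rts_threshold is a u32, so the dropped `rts_threshold < 0` arm was dead code: an unsigned value is never negative (gcc's -Wtype-limits flags exactly this). A tiny demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rts = (uint32_t)-5;		/* wraps to 4294967291  */
	printf("rts < 0 is %d\n", rts < 0);	/* prints 0, always     */
	return 0;
}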
50266diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50267index 9bb398b..b0cc047 100644
50268--- a/drivers/net/wireless/rt2x00/rt2x00.h
50269+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50270@@ -375,7 +375,7 @@ struct rt2x00_intf {
50271 * for hardware which doesn't support hardware
50272 * sequence counting.
50273 */
50274- atomic_t seqno;
50275+ atomic_unchecked_t seqno;
50276 };
50277
50278 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50279diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50280index 68b620b..92ecd9e 100644
50281--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50282+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50283@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50284 * sequence counter given by mac80211.
50285 */
50286 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50287- seqno = atomic_add_return(0x10, &intf->seqno);
50288+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50289 else
50290- seqno = atomic_read(&intf->seqno);
50291+ seqno = atomic_read_unchecked(&intf->seqno);
50292
50293 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50294 hdr->seq_ctrl |= cpu_to_le16(seqno);
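The seqno counter moves to atomic_unchecked_t, the opt-out from PaX REFCOUNT's overflow trapping on atomic_t. That is deliberate here: an 802.11 sequence counter is supposed to wrap modulo 2^12, so saturating it on overflow would corrupt the sequence space. A userspace sketch (C11 atomics standing in for the kernel API) of the wrap the driver relies on:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_uint seqno = 0xFFF0u;		/* stored in units of 0x10  */
	unsigned v = atomic_fetch_add(&seqno, 0x10) + 0x10;
	printf("next seq: 0x%03x\n", (v >> 4) & 0xFFF);	/* wraps to 0x000 */
	return 0;
}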
50295diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50296index b661f896..ddf7d2b 100644
50297--- a/drivers/net/wireless/ti/wl1251/sdio.c
50298+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50299@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50300
50301 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50302
50303- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50304- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50305+ pax_open_kernel();
50306+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50307+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50308+ pax_close_kernel();
50309
50310 wl1251_info("using dedicated interrupt line");
50311 } else {
50312- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50313- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50314+ pax_open_kernel();
50315+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50316+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50317+ pax_close_kernel();
50318
50319 wl1251_info("using SDIO interrupt");
50320 }
50321diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50322index 144d1f8..7030936 100644
50323--- a/drivers/net/wireless/ti/wl12xx/main.c
50324+++ b/drivers/net/wireless/ti/wl12xx/main.c
50325@@ -657,7 +657,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50326 sizeof(wl->conf.mem));
50327
50328 /* read data preparation is only needed by wl127x */
50329- wl->ops->prepare_read = wl127x_prepare_read;
50330+ pax_open_kernel();
50331+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50332+ pax_close_kernel();
50333
50334 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50335 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50336@@ -682,7 +684,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50337 sizeof(wl->conf.mem));
50338
50339 /* read data preparation is only needed by wl127x */
50340- wl->ops->prepare_read = wl127x_prepare_read;
50341+ pax_open_kernel();
50342+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50343+ pax_close_kernel();
50344
50345 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50346 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50347diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50348index 717c4f5..a813aeb 100644
50349--- a/drivers/net/wireless/ti/wl18xx/main.c
50350+++ b/drivers/net/wireless/ti/wl18xx/main.c
50351@@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50352 }
50353
50354 if (!checksum_param) {
50355- wl18xx_ops.set_rx_csum = NULL;
50356- wl18xx_ops.init_vif = NULL;
50357+ pax_open_kernel();
50358+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50359+ *(void **)&wl18xx_ops.init_vif = NULL;
50360+ pax_close_kernel();
50361 }
50362
50363 /* Enable 11a Band only if we have 5G antennas */
50364diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50365index a912dc0..a8225ba 100644
50366--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50367+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50368@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50369 {
50370 struct zd_usb *usb = urb->context;
50371 struct zd_usb_interrupt *intr = &usb->intr;
50372- int len;
50373+ unsigned int len;
50374 u16 int_num;
50375
50376 ZD_ASSERT(in_interrupt());
50377diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50378index ce2e2cf..f81e500 100644
50379--- a/drivers/nfc/nfcwilink.c
50380+++ b/drivers/nfc/nfcwilink.c
50381@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50382
50383 static int nfcwilink_probe(struct platform_device *pdev)
50384 {
50385- static struct nfcwilink *drv;
50386+ struct nfcwilink *drv;
50387 int rc;
50388 __u32 protocols;
50389
50390diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
50391index 24d3d24..ff70d28 100644
50392--- a/drivers/nfc/st21nfca/st21nfca.c
50393+++ b/drivers/nfc/st21nfca/st21nfca.c
50394@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
50395 goto exit;
50396 }
50397
50398- gate = uid_skb->data;
50399+ memcpy(gate, uid_skb->data, uid_skb->len);
50400 *len = uid_skb->len;
50401 exit:
50402 kfree_skb(uid_skb);
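The st21nfca change is a genuine bug fix: `gate = uid_skb->data` only retargeted the local parameter, so the caller's UID buffer was never filled; memcpy() actually copies the bytes out. The pattern in miniature:

#include <stdio.h>
#include <string.h>

static void fill_broken(char *out) { out = (char *)"uid"; }	/* caller sees nothing */
static void fill_fixed(char *out)  { memcpy(out, "uid", 4); }

int main(void)
{
	char a[8] = "none", b[8] = "none";
	fill_broken(a);
	fill_fixed(b);
	printf("broken=%s fixed=%s\n", a, b);	/* broken=none fixed=uid */
	return 0;
}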
50403diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
50404index 3a896c9..ac7b1c8 100644
50405--- a/drivers/of/fdt.c
50406+++ b/drivers/of/fdt.c
50407@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
50408 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
50409 return 0;
50410 }
50411- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50412+ pax_open_kernel();
50413+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50414+ pax_close_kernel();
50415 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
50416 }
50417 late_initcall(of_fdt_raw_init);
50418diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50419index d93b2b6..ae50401 100644
50420--- a/drivers/oprofile/buffer_sync.c
50421+++ b/drivers/oprofile/buffer_sync.c
50422@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50423 if (cookie == NO_COOKIE)
50424 offset = pc;
50425 if (cookie == INVALID_COOKIE) {
50426- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50427+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50428 offset = pc;
50429 }
50430 if (cookie != last_cookie) {
50431@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50432 /* add userspace sample */
50433
50434 if (!mm) {
50435- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50436+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50437 return 0;
50438 }
50439
50440 cookie = lookup_dcookie(mm, s->eip, &offset);
50441
50442 if (cookie == INVALID_COOKIE) {
50443- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50444+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50445 return 0;
50446 }
50447
50448@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50449 /* ignore backtraces if failed to add a sample */
50450 if (state == sb_bt_start) {
50451 state = sb_bt_ignore;
50452- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50453+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50454 }
50455 }
50456 release_mm(mm);
50457diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50458index c0cc4e7..44d4e54 100644
50459--- a/drivers/oprofile/event_buffer.c
50460+++ b/drivers/oprofile/event_buffer.c
50461@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50462 }
50463
50464 if (buffer_pos == buffer_size) {
50465- atomic_inc(&oprofile_stats.event_lost_overflow);
50466+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50467 return;
50468 }
50469
50470diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50471index ed2c3ec..deda85a 100644
50472--- a/drivers/oprofile/oprof.c
50473+++ b/drivers/oprofile/oprof.c
50474@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50475 if (oprofile_ops.switch_events())
50476 return;
50477
50478- atomic_inc(&oprofile_stats.multiplex_counter);
50479+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50480 start_switch_worker();
50481 }
50482
50483diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50484index ee2cfce..7f8f699 100644
50485--- a/drivers/oprofile/oprofile_files.c
50486+++ b/drivers/oprofile/oprofile_files.c
50487@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50488
50489 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50490
50491-static ssize_t timeout_read(struct file *file, char __user *buf,
50492+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50493 size_t count, loff_t *offset)
50494 {
50495 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50496diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50497index 59659ce..6c860a0 100644
50498--- a/drivers/oprofile/oprofile_stats.c
50499+++ b/drivers/oprofile/oprofile_stats.c
50500@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50501 cpu_buf->sample_invalid_eip = 0;
50502 }
50503
50504- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50505- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50506- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50507- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50508- atomic_set(&oprofile_stats.multiplex_counter, 0);
50509+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50510+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50511+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50512+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50513+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50514 }
50515
50516
50517diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50518index 1fc622b..8c48fc3 100644
50519--- a/drivers/oprofile/oprofile_stats.h
50520+++ b/drivers/oprofile/oprofile_stats.h
50521@@ -13,11 +13,11 @@
50522 #include <linux/atomic.h>
50523
50524 struct oprofile_stat_struct {
50525- atomic_t sample_lost_no_mm;
50526- atomic_t sample_lost_no_mapping;
50527- atomic_t bt_lost_no_mapping;
50528- atomic_t event_lost_overflow;
50529- atomic_t multiplex_counter;
50530+ atomic_unchecked_t sample_lost_no_mm;
50531+ atomic_unchecked_t sample_lost_no_mapping;
50532+ atomic_unchecked_t bt_lost_no_mapping;
50533+ atomic_unchecked_t event_lost_overflow;
50534+ atomic_unchecked_t multiplex_counter;
50535 };
50536
50537 extern struct oprofile_stat_struct oprofile_stats;
50538diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50539index 3f49345..c750d0b 100644
50540--- a/drivers/oprofile/oprofilefs.c
50541+++ b/drivers/oprofile/oprofilefs.c
50542@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50543
50544 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50545 {
50546- atomic_t *val = file->private_data;
50547- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50548+ atomic_unchecked_t *val = file->private_data;
50549+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50550 }
50551
50552
50553@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50554
50555
50556 int oprofilefs_create_ro_atomic(struct dentry *root,
50557- char const *name, atomic_t *val)
50558+ char const *name, atomic_unchecked_t *val)
50559 {
50560 return __oprofilefs_create_file(root, name,
50561 &atomic_ro_fops, 0444, val);
50562diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50563index bdef916..88c7dee 100644
50564--- a/drivers/oprofile/timer_int.c
50565+++ b/drivers/oprofile/timer_int.c
50566@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50567 return NOTIFY_OK;
50568 }
50569
50570-static struct notifier_block __refdata oprofile_cpu_notifier = {
50571+static struct notifier_block oprofile_cpu_notifier = {
50572 .notifier_call = oprofile_cpu_notify,
50573 };
50574
50575diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50576index 3b47080..6cd05dd 100644
50577--- a/drivers/parport/procfs.c
50578+++ b/drivers/parport/procfs.c
50579@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50580
50581 *ppos += len;
50582
50583- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50584+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50585 }
50586
50587 #ifdef CONFIG_PARPORT_1284
50588@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50589
50590 *ppos += len;
50591
50592- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50593+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50594 }
50595 #endif /* IEEE1284.3 support. */
50596
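Both procfs handlers gain a `len > sizeof buffer` guard before copy_to_user(), failing with -EFAULT rather than reading past the stack buffer if len were ever computed too large. The shape of the check, with memcpy() standing in for copy_to_user() and invented names throughout:

#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, const char *buffer, size_t buf_sz, size_t len)
{
	if (len > buf_sz)
		return -1;		/* refuse the over-read (-EFAULT)  */
	memcpy(dst, buffer, len);	/* stands in for copy_to_user()    */
	return 0;
}

int main(void)
{
	char buffer[16] = "parport0\n";
	char out[64];

	printf("len=9:  %d\n", copy_out(out, buffer, sizeof(buffer), 9));
	printf("len=40: %d\n", copy_out(out, buffer, sizeof(buffer), 40));
	return 0;
}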
50597diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
50598index ba46e58..90cfc24 100644
50599--- a/drivers/pci/host/pci-host-generic.c
50600+++ b/drivers/pci/host/pci-host-generic.c
50601@@ -26,9 +26,9 @@
50602 #include <linux/platform_device.h>
50603
50604 struct gen_pci_cfg_bus_ops {
50605+ struct pci_ops ops;
50606 u32 bus_shift;
50607- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
50608-};
50609+} __do_const;
50610
50611 struct gen_pci_cfg_windows {
50612 struct resource res;
50613@@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
50614 }
50615
50616 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
50617+ .ops = {
50618+ .map_bus = gen_pci_map_cfg_bus_cam,
50619+ .read = pci_generic_config_read,
50620+ .write = pci_generic_config_write,
50621+ },
50622 .bus_shift = 16,
50623- .map_bus = gen_pci_map_cfg_bus_cam,
50624 };
50625
50626 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50627@@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50628 }
50629
50630 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
50631+ .ops = {
50632+ .map_bus = gen_pci_map_cfg_bus_ecam,
50633+ .read = pci_generic_config_read,
50634+ .write = pci_generic_config_write,
50635+ },
50636 .bus_shift = 20,
50637- .map_bus = gen_pci_map_cfg_bus_ecam,
50638-};
50639-
50640-static struct pci_ops gen_pci_ops = {
50641- .read = pci_generic_config_read,
50642- .write = pci_generic_config_write,
50643 };
50644
50645 static const struct of_device_id gen_pci_of_match[] = {
50646@@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
50647 .private_data = (void **)&pci,
50648 .setup = gen_pci_setup,
50649 .map_irq = of_irq_parse_and_map_pci,
50650- .ops = &gen_pci_ops,
50651 };
50652
50653 if (!pci)
50654@@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
50655
50656 of_id = of_match_node(gen_pci_of_match, np);
50657 pci->cfg.ops = of_id->data;
50658- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
50659+ hw.ops = &pci->cfg.ops->ops;
50660 pci->host.dev.parent = dev;
50661 INIT_LIST_HEAD(&pci->host.windows);
50662 INIT_LIST_HEAD(&pci->resources);
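pci-host-generic is restructured so each config-space variant carries a complete pci_ops inside its own const (__do_const) descriptor, instead of patching map_bus into one mutable global at probe time. The shape of that refactor, with names invented for illustration:

#include <stdio.h>

struct cfg_ops {
	unsigned int bus_shift;
	const char *(*map_bus)(unsigned int devfn);
};

static const char *map_cam(unsigned int devfn)  { return "cam";  }
static const char *map_ecam(unsigned int devfn) { return "ecam"; }

/* Fully initialized up front, so both tables can live in rodata. */
static const struct cfg_ops cam_ops  = { .bus_shift = 16, .map_bus = map_cam  };
static const struct cfg_ops ecam_ops = { .bus_shift = 20, .map_bus = map_ecam };

int main(void)
{
	const struct cfg_ops *ops = &ecam_ops;	/* selected via OF match data */
	printf("%s, shift %u\n", ops->map_bus(0), ops->bus_shift);
	return 0;
}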
50663diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50664index 6ca2399..68d866b 100644
50665--- a/drivers/pci/hotplug/acpiphp_ibm.c
50666+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50667@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50668 goto init_cleanup;
50669 }
50670
50671- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50672+ pax_open_kernel();
50673+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50674+ pax_close_kernel();
50675 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50676
50677 return retval;
50678diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50679index 66b7bbe..26bee78 100644
50680--- a/drivers/pci/hotplug/cpcihp_generic.c
50681+++ b/drivers/pci/hotplug/cpcihp_generic.c
50682@@ -73,7 +73,6 @@ static u16 port;
50683 static unsigned int enum_bit;
50684 static u8 enum_mask;
50685
50686-static struct cpci_hp_controller_ops generic_hpc_ops;
50687 static struct cpci_hp_controller generic_hpc;
50688
50689 static int __init validate_parameters(void)
50690@@ -139,6 +138,10 @@ static int query_enum(void)
50691 return ((value & enum_mask) == enum_mask);
50692 }
50693
50694+static struct cpci_hp_controller_ops generic_hpc_ops = {
50695+ .query_enum = query_enum,
50696+};
50697+
50698 static int __init cpcihp_generic_init(void)
50699 {
50700 int status;
50701@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50702 pci_dev_put(dev);
50703
50704 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50705- generic_hpc_ops.query_enum = query_enum;
50706 generic_hpc.ops = &generic_hpc_ops;
50707
50708 status = cpci_hp_register_controller(&generic_hpc);
50709diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50710index 7ecf34e..effed62 100644
50711--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50712+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50713@@ -59,7 +59,6 @@
50714 /* local variables */
50715 static bool debug;
50716 static bool poll;
50717-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50718 static struct cpci_hp_controller zt5550_hpc;
50719
50720 /* Primary cPCI bus bridge device */
50721@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
50722 return 0;
50723 }
50724
50725+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50726+ .query_enum = zt5550_hc_query_enum,
50727+};
50728+
50729 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50730 {
50731 int status;
50732@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50733 dbg("returned from zt5550_hc_config");
50734
50735 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50736- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50737 zt5550_hpc.ops = &zt5550_hpc_ops;
50738 if (!poll) {
50739 zt5550_hpc.irq = hc_dev->irq;
50740 zt5550_hpc.irq_flags = IRQF_SHARED;
50741 zt5550_hpc.dev_id = hc_dev;
50742
50743- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50744- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50745- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50746+ pax_open_kernel();
50747+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50748+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50749+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50750+ pax_close_kernel();
50751 } else {
50752 info("using ENUM# polling mode");
50753 }
50754diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50755index 1e08ff8c..3cd145f 100644
50756--- a/drivers/pci/hotplug/cpqphp_nvram.c
50757+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50758@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
50759
50760 void compaq_nvram_init (void __iomem *rom_start)
50761 {
50762+#ifndef CONFIG_PAX_KERNEXEC
50763 if (rom_start)
50764 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50765+#endif
50766
50767 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50768
50769diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50770index 56d8486..f26113f 100644
50771--- a/drivers/pci/hotplug/pci_hotplug_core.c
50772+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50773@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50774 return -EINVAL;
50775 }
50776
50777- slot->ops->owner = owner;
50778- slot->ops->mod_name = mod_name;
50779+ pax_open_kernel();
50780+ *(struct module **)&slot->ops->owner = owner;
50781+ *(const char **)&slot->ops->mod_name = mod_name;
50782+ pax_close_kernel();
50783
50784 mutex_lock(&pci_hp_mutex);
50785 /*
50786diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
50787index 07aa722..84514b4 100644
50788--- a/drivers/pci/hotplug/pciehp_core.c
50789+++ b/drivers/pci/hotplug/pciehp_core.c
50790@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
50791 struct slot *slot = ctrl->slot;
50792 struct hotplug_slot *hotplug = NULL;
50793 struct hotplug_slot_info *info = NULL;
50794- struct hotplug_slot_ops *ops = NULL;
50795+ hotplug_slot_ops_no_const *ops = NULL;
50796 char name[SLOT_NAME_SIZE];
50797 int retval = -ENOMEM;
50798
50799diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
50800index c3e7dfc..cbd9625 100644
50801--- a/drivers/pci/msi.c
50802+++ b/drivers/pci/msi.c
50803@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
50804 {
50805 struct attribute **msi_attrs;
50806 struct attribute *msi_attr;
50807- struct device_attribute *msi_dev_attr;
50808- struct attribute_group *msi_irq_group;
50809+ device_attribute_no_const *msi_dev_attr;
50810+ attribute_group_no_const *msi_irq_group;
50811 const struct attribute_group **msi_irq_groups;
50812 struct msi_desc *entry;
50813 int ret = -ENOMEM;
50814@@ -573,7 +573,7 @@ error_attrs:
50815 count = 0;
50816 msi_attr = msi_attrs[count];
50817 while (msi_attr) {
50818- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
50819+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
50820 kfree(msi_attr->name);
50821 kfree(msi_dev_attr);
50822 ++count;
50823diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
50824index 312f23a..d21181c 100644
50825--- a/drivers/pci/pci-sysfs.c
50826+++ b/drivers/pci/pci-sysfs.c
50827@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
50828 {
50829 /* allocate attribute structure, piggyback attribute name */
50830 int name_len = write_combine ? 13 : 10;
50831- struct bin_attribute *res_attr;
50832+ bin_attribute_no_const *res_attr;
50833 int retval;
50834
50835 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
50836@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
50837 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
50838 {
50839 int retval;
50840- struct bin_attribute *attr;
50841+ bin_attribute_no_const *attr;
50842
50843 /* If the device has VPD, try to expose it in sysfs. */
50844 if (dev->vpd) {
50845@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
50846 {
50847 int retval;
50848 int rom_size = 0;
50849- struct bin_attribute *attr;
50850+ bin_attribute_no_const *attr;
50851
50852 if (!sysfs_initialized)
50853 return -EACCES;
50854diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
50855index 4091f82..7d98eef 100644
50856--- a/drivers/pci/pci.h
50857+++ b/drivers/pci/pci.h
50858@@ -99,7 +99,7 @@ struct pci_vpd_ops {
50859 struct pci_vpd {
50860 unsigned int len;
50861 const struct pci_vpd_ops *ops;
50862- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
50863+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
50864 };
50865
50866 int pci_vpd_pci22_init(struct pci_dev *dev);
50867diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
50868index 820740a..8b1c673 100644
50869--- a/drivers/pci/pcie/aspm.c
50870+++ b/drivers/pci/pcie/aspm.c
50871@@ -27,9 +27,9 @@
50872 #define MODULE_PARAM_PREFIX "pcie_aspm."
50873
50874 /* Note: those are not register definitions */
50875-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
50876-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
50877-#define ASPM_STATE_L1 (4) /* L1 state */
50878+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
50879+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
50880+#define ASPM_STATE_L1 (4U) /* L1 state */
50881 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
50882 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
50883
50884diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
50885index be35da2..ec16cdb 100644
50886--- a/drivers/pci/pcie/portdrv_pci.c
50887+++ b/drivers/pci/pcie/portdrv_pci.c
50888@@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
50889 return 0;
50890 }
50891
50892-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
50893+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
50894 /*
50895 * Boxes that should not use MSI for PCIe PME signaling.
50896 */
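This and the later DMI-table hunks retag boot-only tables from __initdata to __initconst to match their new const qualifier. Paraphrased from include/linux/init.h (verify there), the two annotations differ only in target section:

/* Both sections are discarded after boot; .init.rodata is
 * additionally read-only, which is what a const table wants. */
#define __initdata  __attribute__((__section__(".init.data")))
#define __initconst __attribute__((__section__(".init.rodata")))

static const int boot_only_table[] __initconst = { 1, 2, 3 };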
50897diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50898index 8d2f400..c97cc91 100644
50899--- a/drivers/pci/probe.c
50900+++ b/drivers/pci/probe.c
50901@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50902 u16 orig_cmd;
50903 struct pci_bus_region region, inverted_region;
50904
50905- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50906+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
50907
50908 /* No printks while decoding is disabled! */
50909 if (!dev->mmio_always_on) {
50910diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50911index 3f155e7..0f4b1f0 100644
50912--- a/drivers/pci/proc.c
50913+++ b/drivers/pci/proc.c
50914@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50915 static int __init pci_proc_init(void)
50916 {
50917 struct pci_dev *dev = NULL;
50918+
50919+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50920+#ifdef CONFIG_GRKERNSEC_PROC_USER
50921+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50922+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50923+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50924+#endif
50925+#else
50926 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50927+#endif
50928 proc_create("devices", 0, proc_bus_pci_dir,
50929 &proc_bus_pci_dev_operations);
50930 proc_initialized = 1;
50931diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50932index b84fdd6..b89d829 100644
50933--- a/drivers/platform/chrome/chromeos_laptop.c
50934+++ b/drivers/platform/chrome/chromeos_laptop.c
50935@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50936 .callback = chromeos_laptop_dmi_matched, \
50937 .driver_data = (void *)&board_
50938
50939-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50940+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50941 {
50942 .ident = "Samsung Series 5 550",
50943 .matches = {
50944diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
50945index 3474920..acc9581 100644
50946--- a/drivers/platform/chrome/chromeos_pstore.c
50947+++ b/drivers/platform/chrome/chromeos_pstore.c
50948@@ -13,7 +13,7 @@
50949 #include <linux/platform_device.h>
50950 #include <linux/pstore_ram.h>
50951
50952-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
50953+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
50954 {
50955 /*
50956 * Today all Chromebooks/boxes ship with Google_* as version and
50957diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50958index 1e1e594..8fe59c5 100644
50959--- a/drivers/platform/x86/alienware-wmi.c
50960+++ b/drivers/platform/x86/alienware-wmi.c
50961@@ -150,7 +150,7 @@ struct wmax_led_args {
50962 } __packed;
50963
50964 static struct platform_device *platform_device;
50965-static struct device_attribute *zone_dev_attrs;
50966+static device_attribute_no_const *zone_dev_attrs;
50967 static struct attribute **zone_attrs;
50968 static struct platform_zone *zone_data;
50969
50970@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
50971 }
50972 };
50973
50974-static struct attribute_group zone_attribute_group = {
50975+static attribute_group_no_const zone_attribute_group = {
50976 .name = "rgb_zones",
50977 };
50978
50979diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50980index 7543a56..367ca8ed 100644
50981--- a/drivers/platform/x86/asus-wmi.c
50982+++ b/drivers/platform/x86/asus-wmi.c
50983@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
50984 int err;
50985 u32 retval = -1;
50986
50987+#ifdef CONFIG_GRKERNSEC_KMEM
50988+ return -EPERM;
50989+#endif
50990+
50991 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50992
50993 if (err < 0)
50994@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
50995 int err;
50996 u32 retval = -1;
50997
50998+#ifdef CONFIG_GRKERNSEC_KMEM
50999+ return -EPERM;
51000+#endif
51001+
51002 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51003 &retval);
51004
51005@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
51006 union acpi_object *obj;
51007 acpi_status status;
51008
51009+#ifdef CONFIG_GRKERNSEC_KMEM
51010+ return -EPERM;
51011+#endif
51012+
51013 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51014 1, asus->debug.method_id,
51015 &input, &output);
51016diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
51017index bceb30b..bf063d4 100644
51018--- a/drivers/platform/x86/compal-laptop.c
51019+++ b/drivers/platform/x86/compal-laptop.c
51020@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
51021 return 1;
51022 }
51023
51024-static struct dmi_system_id __initdata compal_dmi_table[] = {
51025+static const struct dmi_system_id __initconst compal_dmi_table[] = {
51026 {
51027 .ident = "FL90/IFL90",
51028 .matches = {
51029diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
51030index 458e6c9..089aee7 100644
51031--- a/drivers/platform/x86/hdaps.c
51032+++ b/drivers/platform/x86/hdaps.c
51033@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
51034 "ThinkPad T42p", so the order of the entries matters.
51035 If your ThinkPad is not recognized, please update to latest
51036 BIOS. This is especially the case for some R52 ThinkPads. */
51037-static struct dmi_system_id __initdata hdaps_whitelist[] = {
51038+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
51039 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
51040 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
51041 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
51042diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
51043index 97c2be1..2ee50ce 100644
51044--- a/drivers/platform/x86/ibm_rtl.c
51045+++ b/drivers/platform/x86/ibm_rtl.c
51046@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
51047 }
51048
51049
51050-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
51051+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
51052 { \
51053 .matches = { \
51054 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
51055diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
51056index a4a4258..a58a04c 100644
51057--- a/drivers/platform/x86/intel_oaktrail.c
51058+++ b/drivers/platform/x86/intel_oaktrail.c
51059@@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
51060 return 0;
51061 }
51062
51063-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
51064+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
51065 {
51066 .ident = "OakTrail platform",
51067 .matches = {
51068diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51069index 0859877..59d596d 100644
51070--- a/drivers/platform/x86/msi-laptop.c
51071+++ b/drivers/platform/x86/msi-laptop.c
51072@@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
51073 return 1;
51074 }
51075
51076-static struct dmi_system_id __initdata msi_dmi_table[] = {
51077+static const struct dmi_system_id __initconst msi_dmi_table[] = {
51078 {
51079 .ident = "MSI S270",
51080 .matches = {
51081@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51082
51083 if (!quirks->ec_read_only) {
51084 /* allow userland write sysfs file */
51085- dev_attr_bluetooth.store = store_bluetooth;
51086- dev_attr_wlan.store = store_wlan;
51087- dev_attr_threeg.store = store_threeg;
51088- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51089- dev_attr_wlan.attr.mode |= S_IWUSR;
51090- dev_attr_threeg.attr.mode |= S_IWUSR;
51091+ pax_open_kernel();
51092+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51093+ *(void **)&dev_attr_wlan.store = store_wlan;
51094+ *(void **)&dev_attr_threeg.store = store_threeg;
51095+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51096+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51097+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51098+ pax_close_kernel();
51099 }
51100
51101 /* disable hardware control by fn key */
51102diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51103index 6d2bac0..ec2b029 100644
51104--- a/drivers/platform/x86/msi-wmi.c
51105+++ b/drivers/platform/x86/msi-wmi.c
51106@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51107 static void msi_wmi_notify(u32 value, void *context)
51108 {
51109 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51110- static struct key_entry *key;
51111+ struct key_entry *key;
51112 union acpi_object *obj;
51113 acpi_status status;
51114
51115diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
51116index 9e701b2..c68a7b5 100644
51117--- a/drivers/platform/x86/samsung-laptop.c
51118+++ b/drivers/platform/x86/samsung-laptop.c
51119@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
51120 return 0;
51121 }
51122
51123-static struct dmi_system_id __initdata samsung_dmi_table[] = {
51124+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
51125 {
51126 .matches = {
51127 DMI_MATCH(DMI_SYS_VENDOR,
51128diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
51129index e6aac72..e11ff24 100644
51130--- a/drivers/platform/x86/samsung-q10.c
51131+++ b/drivers/platform/x86/samsung-q10.c
51132@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
51133 return 1;
51134 }
51135
51136-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
51137+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
51138 {
51139 .ident = "Samsung Q10",
51140 .matches = {
51141diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51142index e51c1e7..71bb385 100644
51143--- a/drivers/platform/x86/sony-laptop.c
51144+++ b/drivers/platform/x86/sony-laptop.c
51145@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51146 }
51147
51148 /* High speed charging function */
51149-static struct device_attribute *hsc_handle;
51150+static device_attribute_no_const *hsc_handle;
51151
51152 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51153 struct device_attribute *attr,
51154@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51155 }
51156
51157 /* low battery function */
51158-static struct device_attribute *lowbatt_handle;
51159+static device_attribute_no_const *lowbatt_handle;
51160
51161 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51162 struct device_attribute *attr,
51163@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51164 }
51165
51166 /* fan speed function */
51167-static struct device_attribute *fan_handle, *hsf_handle;
51168+static device_attribute_no_const *fan_handle, *hsf_handle;
51169
51170 static ssize_t sony_nc_hsfan_store(struct device *dev,
51171 struct device_attribute *attr,
51172@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51173 }
51174
51175 /* USB charge function */
51176-static struct device_attribute *uc_handle;
51177+static device_attribute_no_const *uc_handle;
51178
51179 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51180 struct device_attribute *attr,
51181@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51182 }
51183
51184 /* Panel ID function */
51185-static struct device_attribute *panel_handle;
51186+static device_attribute_no_const *panel_handle;
51187
51188 static ssize_t sony_nc_panelid_show(struct device *dev,
51189 struct device_attribute *attr, char *buffer)
51190@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51191 }
51192
51193 /* smart connect function */
51194-static struct device_attribute *sc_handle;
51195+static device_attribute_no_const *sc_handle;
51196
51197 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51198 struct device_attribute *attr,
51199@@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
51200 .drv.pm = &sony_pic_pm,
51201 };
51202
51203-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
51204+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
51205 {
51206 .ident = "Sony Vaio",
51207 .matches = {
51208diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51209index 3b8ceee..e18652c 100644
51210--- a/drivers/platform/x86/thinkpad_acpi.c
51211+++ b/drivers/platform/x86/thinkpad_acpi.c
51212@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
51213 return 0;
51214 }
51215
51216-void static hotkey_mask_warn_incomplete_mask(void)
51217+static void hotkey_mask_warn_incomplete_mask(void)
51218 {
51219 /* log only what the user can fix... */
51220 const u32 wantedmask = hotkey_driver_mask &
51221@@ -2437,10 +2437,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51222 && !tp_features.bright_unkfw)
51223 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51224 }
51225+}
51226
51227 #undef TPACPI_COMPARE_KEY
51228 #undef TPACPI_MAY_SEND_KEY
51229-}
51230
51231 /*
51232 * Polling driver
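
[Two small C cleanups above: "void static" compiles, but C99/C11 §6.11.5 marks a storage-class specifier that is not first in the declaration as obsolescent, hence the swap to "static void"; and the closing brace of hotkey_compare_and_issue_event() moves above the #undef lines so the directives no longer sit visually inside the function body (the preprocessor itself never cared). The conventional shape, for reference:]

static void demo_warn(void)     /* storage-class specifier leads */
{
        /* ... */
}

#undef TPACPI_COMPARE_KEY       /* directives sit outside the body */
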
51233diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51234index 438d4c7..ca8a2fb 100644
51235--- a/drivers/pnp/pnpbios/bioscalls.c
51236+++ b/drivers/pnp/pnpbios/bioscalls.c
51237@@ -59,7 +59,7 @@ do { \
51238 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51239 } while(0)
51240
51241-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51242+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51243 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51244
51245 /*
51246@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51247
51248 cpu = get_cpu();
51249 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51250+
51251+ pax_open_kernel();
51252 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51253+ pax_close_kernel();
51254
51255 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51256 spin_lock_irqsave(&pnp_bios_lock, flags);
51257@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51258 :"memory");
51259 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51260
51261+ pax_open_kernel();
51262 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51263+ pax_close_kernel();
51264+
51265 put_cpu();
51266
51267 /* If we get here and this is set then the PnP BIOS faulted on us. */
51268@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51269 return status;
51270 }
51271
51272-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51273+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51274 {
51275 int i;
51276
51277@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51278 pnp_bios_callpoint.offset = header->fields.pm16offset;
51279 pnp_bios_callpoint.segment = PNP_CS16;
51280
51281+ pax_open_kernel();
51282+
51283 for_each_possible_cpu(i) {
51284 struct desc_struct *gdt = get_cpu_gdt_table(i);
51285 if (!gdt)
51286@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51287 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51288 (unsigned long)__va(header->fields.pm16dseg));
51289 }
51290+
51291+ pax_close_kernel();
51292 }
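
[pax_open_kernel()/pax_close_kernel(), bracketing the GDT writes above, are the patch's gateway for legitimate writes to data its memory protections keep read-only. A hedged sketch of the underlying idea on x86, with hypothetical helper names; the real implementation shipped in this patch handles preemption, paging modes and per-cpu state more carefully:]

#include <asm/processor-flags.h>        /* X86_CR0_WP */
#include <asm/special_insns.h>          /* read_cr0()/write_cr0() */
#include <linux/preempt.h>

static inline unsigned long demo_open_kernel(void)
{
        unsigned long cr0;

        preempt_disable();
        cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP);   /* let ring 0 write RO pages */
        return cr0;
}

static inline void demo_close_kernel(unsigned long cr0)
{
        write_cr0(cr0);                 /* re-arm write protection */
        preempt_enable();
}
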
51293diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
51294index facd43b..b291260 100644
51295--- a/drivers/pnp/pnpbios/core.c
51296+++ b/drivers/pnp/pnpbios/core.c
51297@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
51298 return 0;
51299 }
51300
51301-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
51302+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
51303 { /* PnPBIOS GPF on boot */
51304 .callback = exploding_pnp_bios,
51305 .ident = "Higraded P14H",
51306diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51307index 0c52e2a..3421ab7 100644
51308--- a/drivers/power/pda_power.c
51309+++ b/drivers/power/pda_power.c
51310@@ -37,7 +37,11 @@ static int polling;
51311
51312 #if IS_ENABLED(CONFIG_USB_PHY)
51313 static struct usb_phy *transceiver;
51314-static struct notifier_block otg_nb;
51315+static int otg_handle_notification(struct notifier_block *nb,
51316+ unsigned long event, void *unused);
51317+static struct notifier_block otg_nb = {
51318+ .notifier_call = otg_handle_notification
51319+};
51320 #endif
51321
51322 static struct regulator *ac_draw;
51323@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51324
51325 #if IS_ENABLED(CONFIG_USB_PHY)
51326 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51327- otg_nb.notifier_call = otg_handle_notification;
51328 ret = usb_register_notifier(transceiver, &otg_nb);
51329 if (ret) {
51330 dev_err(dev, "failure to register otg notifier\n");
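
[The pda_power change above is a recurring refactor in this patch: instead of filling otg_nb.notifier_call at probe time, the callback is forward-declared and the notifier_block is fully initialized at compile time, so the constify plugin can keep it read-only. A minimal sketch with illustrative names:]

#include <linux/notifier.h>

/* forward declaration lets the block be initialized statically */
static int demo_notify(struct notifier_block *nb,
                       unsigned long event, void *data);

static struct notifier_block demo_nb = {
        .notifier_call = demo_notify,
};

static int demo_notify(struct notifier_block *nb,
                       unsigned long event, void *data)
{
        /* a real handler would switch on 'event' */
        return NOTIFY_OK;
}
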
51331diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51332index cc439fd..8fa30df 100644
51333--- a/drivers/power/power_supply.h
51334+++ b/drivers/power/power_supply.h
51335@@ -16,12 +16,12 @@ struct power_supply;
51336
51337 #ifdef CONFIG_SYSFS
51338
51339-extern void power_supply_init_attrs(struct device_type *dev_type);
51340+extern void power_supply_init_attrs(void);
51341 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51342
51343 #else
51344
51345-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51346+static inline void power_supply_init_attrs(void) {}
51347 #define power_supply_uevent NULL
51348
51349 #endif /* CONFIG_SYSFS */
51350diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51351index 694e8cd..9f03483 100644
51352--- a/drivers/power/power_supply_core.c
51353+++ b/drivers/power/power_supply_core.c
51354@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51355 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51356 EXPORT_SYMBOL_GPL(power_supply_notifier);
51357
51358-static struct device_type power_supply_dev_type;
51359+extern const struct attribute_group *power_supply_attr_groups[];
51360+static struct device_type power_supply_dev_type = {
51361+ .groups = power_supply_attr_groups,
51362+};
51363
51364 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51365 struct power_supply *supply)
51366@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
51367 return PTR_ERR(power_supply_class);
51368
51369 power_supply_class->dev_uevent = power_supply_uevent;
51370- power_supply_init_attrs(&power_supply_dev_type);
51371+ power_supply_init_attrs();
51372
51373 return 0;
51374 }
51375diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51376index 62653f5..d0bb485 100644
51377--- a/drivers/power/power_supply_sysfs.c
51378+++ b/drivers/power/power_supply_sysfs.c
51379@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
51380 .is_visible = power_supply_attr_is_visible,
51381 };
51382
51383-static const struct attribute_group *power_supply_attr_groups[] = {
51384+const struct attribute_group *power_supply_attr_groups[] = {
51385 &power_supply_attr_group,
51386 NULL,
51387 };
51388
51389-void power_supply_init_attrs(struct device_type *dev_type)
51390+void power_supply_init_attrs(void)
51391 {
51392 int i;
51393
51394- dev_type->groups = power_supply_attr_groups;
51395-
51396 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51397 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51398 }
51399diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51400index 84419af..268ede8 100644
51401--- a/drivers/powercap/powercap_sys.c
51402+++ b/drivers/powercap/powercap_sys.c
51403@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51404 struct device_attribute name_attr;
51405 };
51406
51407+static ssize_t show_constraint_name(struct device *dev,
51408+ struct device_attribute *dev_attr,
51409+ char *buf);
51410+
51411 static struct powercap_constraint_attr
51412- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51413+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51414+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51415+ .power_limit_attr = {
51416+ .attr = {
51417+ .name = NULL,
51418+ .mode = S_IWUSR | S_IRUGO
51419+ },
51420+ .show = show_constraint_power_limit_uw,
51421+ .store = store_constraint_power_limit_uw
51422+ },
51423+
51424+ .time_window_attr = {
51425+ .attr = {
51426+ .name = NULL,
51427+ .mode = S_IWUSR | S_IRUGO
51428+ },
51429+ .show = show_constraint_time_window_us,
51430+ .store = store_constraint_time_window_us
51431+ },
51432+
51433+ .max_power_attr = {
51434+ .attr = {
51435+ .name = NULL,
51436+ .mode = S_IRUGO
51437+ },
51438+ .show = show_constraint_max_power_uw,
51439+ .store = NULL
51440+ },
51441+
51442+ .min_power_attr = {
51443+ .attr = {
51444+ .name = NULL,
51445+ .mode = S_IRUGO
51446+ },
51447+ .show = show_constraint_min_power_uw,
51448+ .store = NULL
51449+ },
51450+
51451+ .max_time_window_attr = {
51452+ .attr = {
51453+ .name = NULL,
51454+ .mode = S_IRUGO
51455+ },
51456+ .show = show_constraint_max_time_window_us,
51457+ .store = NULL
51458+ },
51459+
51460+ .min_time_window_attr = {
51461+ .attr = {
51462+ .name = NULL,
51463+ .mode = S_IRUGO
51464+ },
51465+ .show = show_constraint_min_time_window_us,
51466+ .store = NULL
51467+ },
51468+
51469+ .name_attr = {
51470+ .attr = {
51471+ .name = NULL,
51472+ .mode = S_IRUGO
51473+ },
51474+ .show = show_constraint_name,
51475+ .store = NULL
51476+ }
51477+ }
51478+};
51479
51480 /* A list of powercap control_types */
51481 static LIST_HEAD(powercap_cntrl_list);
51482@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51483 }
51484
51485 static int create_constraint_attribute(int id, const char *name,
51486- int mode,
51487- struct device_attribute *dev_attr,
51488- ssize_t (*show)(struct device *,
51489- struct device_attribute *, char *),
51490- ssize_t (*store)(struct device *,
51491- struct device_attribute *,
51492- const char *, size_t)
51493- )
51494+ struct device_attribute *dev_attr)
51495 {
51496+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51497
51498- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51499- id, name);
51500- if (!dev_attr->attr.name)
51501+ if (!name)
51502 return -ENOMEM;
51503- dev_attr->attr.mode = mode;
51504- dev_attr->show = show;
51505- dev_attr->store = store;
51506+
51507+ pax_open_kernel();
51508+ *(const char **)&dev_attr->attr.name = name;
51509+ pax_close_kernel();
51510
51511 return 0;
51512 }
51513@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51514
51515 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51516 ret = create_constraint_attribute(i, "power_limit_uw",
51517- S_IWUSR | S_IRUGO,
51518- &constraint_attrs[i].power_limit_attr,
51519- show_constraint_power_limit_uw,
51520- store_constraint_power_limit_uw);
51521+ &constraint_attrs[i].power_limit_attr);
51522 if (ret)
51523 goto err_alloc;
51524 ret = create_constraint_attribute(i, "time_window_us",
51525- S_IWUSR | S_IRUGO,
51526- &constraint_attrs[i].time_window_attr,
51527- show_constraint_time_window_us,
51528- store_constraint_time_window_us);
51529+ &constraint_attrs[i].time_window_attr);
51530 if (ret)
51531 goto err_alloc;
51532- ret = create_constraint_attribute(i, "name", S_IRUGO,
51533- &constraint_attrs[i].name_attr,
51534- show_constraint_name,
51535- NULL);
51536+ ret = create_constraint_attribute(i, "name",
51537+ &constraint_attrs[i].name_attr);
51538 if (ret)
51539 goto err_alloc;
51540- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51541- &constraint_attrs[i].max_power_attr,
51542- show_constraint_max_power_uw,
51543- NULL);
51544+ ret = create_constraint_attribute(i, "max_power_uw",
51545+ &constraint_attrs[i].max_power_attr);
51546 if (ret)
51547 goto err_alloc;
51548- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51549- &constraint_attrs[i].min_power_attr,
51550- show_constraint_min_power_uw,
51551- NULL);
51552+ ret = create_constraint_attribute(i, "min_power_uw",
51553+ &constraint_attrs[i].min_power_attr);
51554 if (ret)
51555 goto err_alloc;
51556 ret = create_constraint_attribute(i, "max_time_window_us",
51557- S_IRUGO,
51558- &constraint_attrs[i].max_time_window_attr,
51559- show_constraint_max_time_window_us,
51560- NULL);
51561+ &constraint_attrs[i].max_time_window_attr);
51562 if (ret)
51563 goto err_alloc;
51564 ret = create_constraint_attribute(i, "min_time_window_us",
51565- S_IRUGO,
51566- &constraint_attrs[i].min_time_window_attr,
51567- show_constraint_min_time_window_us,
51568- NULL);
51569+ &constraint_attrs[i].min_time_window_attr);
51570 if (ret)
51571 goto err_alloc;
51572
51573@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51574 power_zone->zone_dev_attrs[count++] =
51575 &dev_attr_max_energy_range_uj.attr;
51576 if (power_zone->ops->get_energy_uj) {
51577+ pax_open_kernel();
51578 if (power_zone->ops->reset_energy_uj)
51579- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51580+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51581 else
51582- dev_attr_energy_uj.attr.mode = S_IRUGO;
51583+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51584+ pax_close_kernel();
51585 power_zone->zone_dev_attrs[count++] =
51586 &dev_attr_energy_uj.attr;
51587 }
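
[The powercap rework above carries the constify logic end to end: the whole constraint_attrs[] array is pre-populated at compile time with a GNU range designator ("[0 ... MAX-1] = { ... }"), so the only thing left to compute at runtime is the generated attribute name, written through an explicit const-cast inside a pax_open_kernel() window. A hedged sketch of that residual runtime step, with an illustrative array and helper:]

#include <linux/device.h>
#include <linux/kernel.h>       /* kasprintf() */
#include <linux/slab.h>
#include <linux/stat.h>

static ssize_t demo_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return 0;
}

static struct device_attribute demo_attrs[8] = {
        [0 ... 7] = {           /* GNU range designator */
                .attr = { .name = NULL, .mode = S_IRUGO },
                .show = demo_show,
        },
};

static int demo_name_attr(int id)
{
        char *name = kasprintf(GFP_KERNEL, "constraint_%d_name", id);

        if (!name)
                return -ENOMEM;
        /* under grsecurity, bracketed by pax_open_kernel()/_close_kernel() */
        *(const char **)&demo_attrs[id].attr.name = name;
        return 0;
}
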
51588diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51589index 9c5d414..c7900ce 100644
51590--- a/drivers/ptp/ptp_private.h
51591+++ b/drivers/ptp/ptp_private.h
51592@@ -51,7 +51,7 @@ struct ptp_clock {
51593 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51594 wait_queue_head_t tsev_wq;
51595 int defunct; /* tells readers to go away when clock is being removed */
51596- struct device_attribute *pin_dev_attr;
51597+ device_attribute_no_const *pin_dev_attr;
51598 struct attribute **pin_attr;
51599 struct attribute_group pin_attr_group;
51600 };
51601diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51602index 302e626..12579af 100644
51603--- a/drivers/ptp/ptp_sysfs.c
51604+++ b/drivers/ptp/ptp_sysfs.c
51605@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51606 goto no_pin_attr;
51607
51608 for (i = 0; i < n_pins; i++) {
51609- struct device_attribute *da = &ptp->pin_dev_attr[i];
51610+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51611 sysfs_attr_init(&da->attr);
51612 da->attr.name = info->pin_config[i].name;
51613 da->attr.mode = 0644;
51614diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51615index a4a8a6d..a3456f4 100644
51616--- a/drivers/regulator/core.c
51617+++ b/drivers/regulator/core.c
51618@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51619 const struct regulation_constraints *constraints = NULL;
51620 const struct regulator_init_data *init_data;
51621 struct regulator_config *config = NULL;
51622- static atomic_t regulator_no = ATOMIC_INIT(-1);
51623+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
51624 struct regulator_dev *rdev;
51625 struct device *dev;
51626 int ret, i;
51627@@ -3613,7 +3613,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51628 rdev->dev.class = &regulator_class;
51629 rdev->dev.parent = dev;
51630 dev_set_name(&rdev->dev, "regulator.%lu",
51631- (unsigned long) atomic_inc_return(&regulator_no));
51632+ (unsigned long) atomic_inc_return_unchecked(&regulator_no));
51633 ret = device_register(&rdev->dev);
51634 if (ret != 0) {
51635 put_device(&rdev->dev);
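
[atomic_unchecked_t, introduced above for regulator_no and used for dozens of counters later in this section (fcoe ctlr/fcf ids, the scsi host number, libfc/lpfc/pmcraid statistics, qla4xxx relogin counts), is the escape hatch from PaX's REFCOUNT hardening: plain atomic_t operations trap on overflow to neutralize reference-count bugs, while IDs and statistics that may legitimately wrap use the parallel *_unchecked API the patch itself provides. A sketch of the idiom:]

#include <linux/atomic.h>

/* atomic_unchecked_t and the *_unchecked ops come with this patch */
static atomic_unchecked_t demo_no = ATOMIC_INIT(-1);

static unsigned long demo_next_id(void)
{
        /* wraps silently instead of triggering the REFCOUNT trap */
        return (unsigned long)atomic_inc_return_unchecked(&demo_no);
}
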
51636diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51637index 7eee2ca..4024513 100644
51638--- a/drivers/regulator/max8660.c
51639+++ b/drivers/regulator/max8660.c
51640@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51641 max8660->shadow_regs[MAX8660_OVER1] = 5;
51642 } else {
51643 /* Otherwise devices can be toggled via software */
51644- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51645- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51646+ pax_open_kernel();
51647+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51648+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51649+ pax_close_kernel();
51650 }
51651
51652 /*
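
[The "*(void **)&ops.member = fn;" casts above (repeated in max8973, mc13892, lpfc_init and qla_os below) are the counterpart of constification: the ops structures become const through the plugin, so the rare one-time runtime override writes through a plain pointer inside a pax_open_kernel() window. A hedged sketch with illustrative types:]

struct demo_ops {
        int (*enable)(void);
        int (*disable)(void);
};

static struct demo_ops demo_ops;        /* constified by the plugin */

static int demo_enable(void)
{
        return 0;
}

static void demo_patch_ops(void)
{
        pax_open_kernel();
        *(void **)&demo_ops.enable = demo_enable;       /* const-cast write */
        pax_close_kernel();
}
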
51653diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51654index c3d55c2..0dddfe6 100644
51655--- a/drivers/regulator/max8973-regulator.c
51656+++ b/drivers/regulator/max8973-regulator.c
51657@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51658 if (!pdata || !pdata->enable_ext_control) {
51659 max->desc.enable_reg = MAX8973_VOUT;
51660 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51661- max->ops.enable = regulator_enable_regmap;
51662- max->ops.disable = regulator_disable_regmap;
51663- max->ops.is_enabled = regulator_is_enabled_regmap;
51664+ pax_open_kernel();
51665+ *(void **)&max->ops.enable = regulator_enable_regmap;
51666+ *(void **)&max->ops.disable = regulator_disable_regmap;
51667+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51668+ pax_close_kernel();
51669 }
51670
51671 if (pdata) {
51672diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51673index 0d17c92..a29f627 100644
51674--- a/drivers/regulator/mc13892-regulator.c
51675+++ b/drivers/regulator/mc13892-regulator.c
51676@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51677 mc13xxx_unlock(mc13892);
51678
51679 /* update mc13892_vcam ops */
51680- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51681+ pax_open_kernel();
51682+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51683 sizeof(struct regulator_ops));
51684- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51685- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51686+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51687+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51688+ pax_close_kernel();
51689 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
51690
51691 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51692diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51693index 5b2e761..c8c8a4a 100644
51694--- a/drivers/rtc/rtc-cmos.c
51695+++ b/drivers/rtc/rtc-cmos.c
51696@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51697 hpet_rtc_timer_init();
51698
51699 /* export at least the first block of NVRAM */
51700- nvram.size = address_space - NVRAM_OFFSET;
51701+ pax_open_kernel();
51702+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51703+ pax_close_kernel();
51704 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51705 if (retval < 0) {
51706 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51707diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51708index 799c34b..8e9786a 100644
51709--- a/drivers/rtc/rtc-dev.c
51710+++ b/drivers/rtc/rtc-dev.c
51711@@ -16,6 +16,7 @@
51712 #include <linux/module.h>
51713 #include <linux/rtc.h>
51714 #include <linux/sched.h>
51715+#include <linux/grsecurity.h>
51716 #include "rtc-core.h"
51717
51718 static dev_t rtc_devt;
51719@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51720 if (copy_from_user(&tm, uarg, sizeof(tm)))
51721 return -EFAULT;
51722
51723+ gr_log_timechange();
51724+
51725 return rtc_set_time(rtc, &tm);
51726
51727 case RTC_PIE_ON:
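
[gr_log_timechange(), hooked in above before rtc_set_time(), is grsecurity's audit point for clock changes; it records which task asked to move the clock. A hedged sketch of what such a hook typically logs, under a hypothetical name (the real function lives in the patch's grsecurity/ directory and is gated on its config):]

#include <linux/cred.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

static void demo_log_timechange(void)
{
        pr_info("time change attempted by %s[%d] uid %u\n",
                current->comm, task_pid_nr(current),
                from_kuid(&init_user_ns, current_uid()));
}
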
51728diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51729index 4ffabb3..1f87fca 100644
51730--- a/drivers/rtc/rtc-ds1307.c
51731+++ b/drivers/rtc/rtc-ds1307.c
51732@@ -107,7 +107,7 @@ struct ds1307 {
51733 u8 offset; /* register's offset */
51734 u8 regs[11];
51735 u16 nvram_offset;
51736- struct bin_attribute *nvram;
51737+ bin_attribute_no_const *nvram;
51738 enum ds_type type;
51739 unsigned long flags;
51740 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51741diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51742index 90abb5b..e0bf6dd 100644
51743--- a/drivers/rtc/rtc-m48t59.c
51744+++ b/drivers/rtc/rtc-m48t59.c
51745@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51746 if (IS_ERR(m48t59->rtc))
51747 return PTR_ERR(m48t59->rtc);
51748
51749- m48t59_nvram_attr.size = pdata->offset;
51750+ pax_open_kernel();
51751+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51752+ pax_close_kernel();
51753
51754 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51755 if (ret)
51756diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51757index e693af6..2e525b6 100644
51758--- a/drivers/scsi/bfa/bfa_fcpim.h
51759+++ b/drivers/scsi/bfa/bfa_fcpim.h
51760@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51761
51762 struct bfa_itn_s {
51763 bfa_isr_func_t isr;
51764-};
51765+} __no_const;
51766
51767 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51768 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
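
[__no_const, attached above to bfa_itn_s (and below to bfa_ioc_cbfn_s and bfa_ioc_hwif_s), is the type-level opt-out: the constify plugin automatically treats structures consisting only of function pointers as read-only, and this attribute from the patch exempts types whose instances genuinely must be written at runtime, such as per-rport ISR slots. A sketch of the shape:]

struct demo_callbacks {
        void (*on_event)(void *ctx);
} __no_const;   /* plugin attribute from the patch: keep writable */

static struct demo_callbacks demo_cb;

static void demo_register(void (*fn)(void *ctx))
{
        demo_cb.on_event = fn;  /* plain write, no pax_open_kernel() */
}
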
51769diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51770index 0f19455..ef7adb5 100644
51771--- a/drivers/scsi/bfa/bfa_fcs.c
51772+++ b/drivers/scsi/bfa/bfa_fcs.c
51773@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51774 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51775
51776 static struct bfa_fcs_mod_s fcs_modules[] = {
51777- { bfa_fcs_port_attach, NULL, NULL },
51778- { bfa_fcs_uf_attach, NULL, NULL },
51779- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51780- bfa_fcs_fabric_modexit },
51781+ {
51782+ .attach = bfa_fcs_port_attach,
51783+ .modinit = NULL,
51784+ .modexit = NULL
51785+ },
51786+ {
51787+ .attach = bfa_fcs_uf_attach,
51788+ .modinit = NULL,
51789+ .modexit = NULL
51790+ },
51791+ {
51792+ .attach = bfa_fcs_fabric_attach,
51793+ .modinit = bfa_fcs_fabric_modinit,
51794+ .modexit = bfa_fcs_fabric_modexit
51795+ },
51796 };
51797
51798 /*
51799diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51800index ff75ef8..2dfe00a 100644
51801--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51802+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51803@@ -89,15 +89,26 @@ static struct {
51804 void (*offline) (struct bfa_fcs_lport_s *port);
51805 } __port_action[] = {
51806 {
51807- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51808- bfa_fcs_lport_unknown_offline}, {
51809- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51810- bfa_fcs_lport_fab_offline}, {
51811- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51812- bfa_fcs_lport_n2n_offline}, {
51813- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51814- bfa_fcs_lport_loop_offline},
51815- };
51816+ .init = bfa_fcs_lport_unknown_init,
51817+ .online = bfa_fcs_lport_unknown_online,
51818+ .offline = bfa_fcs_lport_unknown_offline
51819+ },
51820+ {
51821+ .init = bfa_fcs_lport_fab_init,
51822+ .online = bfa_fcs_lport_fab_online,
51823+ .offline = bfa_fcs_lport_fab_offline
51824+ },
51825+ {
51826+ .init = bfa_fcs_lport_n2n_init,
51827+ .online = bfa_fcs_lport_n2n_online,
51828+ .offline = bfa_fcs_lport_n2n_offline
51829+ },
51830+ {
51831+ .init = bfa_fcs_lport_loop_init,
51832+ .online = bfa_fcs_lport_loop_online,
51833+ .offline = bfa_fcs_lport_loop_offline
51834+ },
51835+};
51836
51837 /*
51838 * fcs_port_sm FCS logical port state machine
51839diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51840index a38aafa0..fe8f03b 100644
51841--- a/drivers/scsi/bfa/bfa_ioc.h
51842+++ b/drivers/scsi/bfa/bfa_ioc.h
51843@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51844 bfa_ioc_disable_cbfn_t disable_cbfn;
51845 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51846 bfa_ioc_reset_cbfn_t reset_cbfn;
51847-};
51848+} __no_const;
51849
51850 /*
51851 * IOC event notification mechanism.
51852@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51853 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51854 enum bfi_ioc_state fwstate);
51855 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51856-};
51857+} __no_const;
51858
51859 /*
51860 * Queue element to wait for room in request queue. FIFO order is
51861diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51862index a14c784..6de6790 100644
51863--- a/drivers/scsi/bfa/bfa_modules.h
51864+++ b/drivers/scsi/bfa/bfa_modules.h
51865@@ -78,12 +78,12 @@ enum {
51866 \
51867 extern struct bfa_module_s hal_mod_ ## __mod; \
51868 struct bfa_module_s hal_mod_ ## __mod = { \
51869- bfa_ ## __mod ## _meminfo, \
51870- bfa_ ## __mod ## _attach, \
51871- bfa_ ## __mod ## _detach, \
51872- bfa_ ## __mod ## _start, \
51873- bfa_ ## __mod ## _stop, \
51874- bfa_ ## __mod ## _iocdisable, \
51875+ .meminfo = bfa_ ## __mod ## _meminfo, \
51876+ .attach = bfa_ ## __mod ## _attach, \
51877+ .detach = bfa_ ## __mod ## _detach, \
51878+ .start = bfa_ ## __mod ## _start, \
51879+ .stop = bfa_ ## __mod ## _stop, \
51880+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51881 }
51882
51883 #define BFA_CACHELINE_SZ (256)
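
[The three BFA conversions above (fcs_modules[], __port_action[] and the BFA_MODULE() macro) all replace positional initializers with designated ones: the binding survives field reordering, NULL slots are explicit, and the constify plugin's analysis is kept straightforward. The macro case, sketched with illustrative names:]

struct demo_module {
        void (*start)(void);
        void (*stop)(void);
};

#define DEMO_MODULE(mod)                        \
        struct demo_module demo_mod_##mod = {   \
                .start = mod##_start,           \
                .stop  = mod##_stop,            \
        }

static void fcdiag_start(void) { }
static void fcdiag_stop(void) { }

DEMO_MODULE(fcdiag);    /* defines demo_mod_fcdiag with explicit bindings */
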
51884diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51885index 045c4e1..13de803 100644
51886--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51887+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51888@@ -33,8 +33,8 @@
51889 */
51890 #include "libfcoe.h"
51891
51892-static atomic_t ctlr_num;
51893-static atomic_t fcf_num;
51894+static atomic_unchecked_t ctlr_num;
51895+static atomic_unchecked_t fcf_num;
51896
51897 /*
51898 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51899@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51900 if (!ctlr)
51901 goto out;
51902
51903- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51904+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51905 ctlr->f = f;
51906 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51907 INIT_LIST_HEAD(&ctlr->fcfs);
51908@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51909 fcf->dev.parent = &ctlr->dev;
51910 fcf->dev.bus = &fcoe_bus_type;
51911 fcf->dev.type = &fcoe_fcf_device_type;
51912- fcf->id = atomic_inc_return(&fcf_num) - 1;
51913+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51914 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51915
51916 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51917@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51918 {
51919 int error;
51920
51921- atomic_set(&ctlr_num, 0);
51922- atomic_set(&fcf_num, 0);
51923+ atomic_set_unchecked(&ctlr_num, 0);
51924+ atomic_set_unchecked(&fcf_num, 0);
51925
51926 error = bus_register(&fcoe_bus_type);
51927 if (error)
51928diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
51929index 8bb173e..20236b4 100644
51930--- a/drivers/scsi/hosts.c
51931+++ b/drivers/scsi/hosts.c
51932@@ -42,7 +42,7 @@
51933 #include "scsi_logging.h"
51934
51935
51936-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51937+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51938
51939
51940 static void scsi_host_cls_release(struct device *dev)
51941@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
51942 * subtract one because we increment first then return, but we need to
51943 * know what the next host number was before increment
51944 */
51945- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
51946+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
51947 shost->dma_channel = 0xff;
51948
51949 /* These three are default values which can be overridden */
51950diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
51951index a1cfbd3..d7f8ebc 100644
51952--- a/drivers/scsi/hpsa.c
51953+++ b/drivers/scsi/hpsa.c
51954@@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
51955 struct reply_queue_buffer *rq = &h->reply_queue[q];
51956
51957 if (h->transMethod & CFGTBL_Trans_io_accel1)
51958- return h->access.command_completed(h, q);
51959+ return h->access->command_completed(h, q);
51960
51961 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
51962- return h->access.command_completed(h, q);
51963+ return h->access->command_completed(h, q);
51964
51965 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
51966 a = rq->head[rq->current_entry];
51967@@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
51968 break;
51969 default:
51970 set_performant_mode(h, c);
51971- h->access.submit_command(h, c);
51972+ h->access->submit_command(h, c);
51973 }
51974 }
51975
51976@@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
51977
51978 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
51979 {
51980- return h->access.command_completed(h, q);
51981+ return h->access->command_completed(h, q);
51982 }
51983
51984 static inline bool interrupt_pending(struct ctlr_info *h)
51985 {
51986- return h->access.intr_pending(h);
51987+ return h->access->intr_pending(h);
51988 }
51989
51990 static inline long interrupt_not_for_us(struct ctlr_info *h)
51991 {
51992- return (h->access.intr_pending(h) == 0) ||
51993+ return (h->access->intr_pending(h) == 0) ||
51994 (h->interrupts_enabled == 0);
51995 }
51996
51997@@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
51998 if (prod_index < 0)
51999 return prod_index;
52000 h->product_name = products[prod_index].product_name;
52001- h->access = *(products[prod_index].access);
52002+ h->access = products[prod_index].access;
52003
52004 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52005 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52006@@ -6649,7 +6649,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52007 unsigned long flags;
52008 u32 lockup_detected;
52009
52010- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52011+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52012 spin_lock_irqsave(&h->lock, flags);
52013 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52014 if (!lockup_detected) {
52015@@ -6924,7 +6924,7 @@ reinit_after_soft_reset:
52016 }
52017
52018 /* make sure the board interrupts are off */
52019- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52020+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52021
52022 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52023 goto clean2;
52024@@ -6960,7 +6960,7 @@ reinit_after_soft_reset:
52025 * fake ones to scoop up any residual completions.
52026 */
52027 spin_lock_irqsave(&h->lock, flags);
52028- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52029+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52030 spin_unlock_irqrestore(&h->lock, flags);
52031 hpsa_free_irqs(h);
52032 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
52033@@ -6979,9 +6979,9 @@ reinit_after_soft_reset:
52034 dev_info(&h->pdev->dev, "Board READY.\n");
52035 dev_info(&h->pdev->dev,
52036 "Waiting for stale completions to drain.\n");
52037- h->access.set_intr_mask(h, HPSA_INTR_ON);
52038+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52039 msleep(10000);
52040- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52041+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52042
52043 rc = controller_reset_failed(h->cfgtable);
52044 if (rc)
52045@@ -7006,7 +7006,7 @@ reinit_after_soft_reset:
52046
52047
52048 /* Turn the interrupts on so we can service requests */
52049- h->access.set_intr_mask(h, HPSA_INTR_ON);
52050+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52051
52052 hpsa_hba_inquiry(h);
52053 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52054@@ -7079,7 +7079,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52055 * To write all data in the battery backed cache to disks
52056 */
52057 hpsa_flush_cache(h);
52058- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52059+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52060 hpsa_free_irqs_and_disable_msix(h);
52061 }
52062
52063@@ -7200,7 +7200,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52064 CFGTBL_Trans_enable_directed_msix |
52065 (trans_support & (CFGTBL_Trans_io_accel1 |
52066 CFGTBL_Trans_io_accel2));
52067- struct access_method access = SA5_performant_access;
52068+ struct access_method *access = &SA5_performant_access;
52069
52070 /* This is a bit complicated. There are 8 registers on
52071 * the controller which we write to to tell it 8 different
52072@@ -7242,7 +7242,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52073 * perform the superfluous readl() after each command submission.
52074 */
52075 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52076- access = SA5_performant_access_no_read;
52077+ access = &SA5_performant_access_no_read;
52078
52079 /* Controller spec: zero out this buffer. */
52080 for (i = 0; i < h->nreply_queues; i++)
52081@@ -7272,12 +7272,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52082 * enable outbound interrupt coalescing in accelerator mode;
52083 */
52084 if (trans_support & CFGTBL_Trans_io_accel1) {
52085- access = SA5_ioaccel_mode1_access;
52086+ access = &SA5_ioaccel_mode1_access;
52087 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52088 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52089 } else {
52090 if (trans_support & CFGTBL_Trans_io_accel2) {
52091- access = SA5_ioaccel_mode2_access;
52092+ access = &SA5_ioaccel_mode2_access;
52093 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52094 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52095 }
52096diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52097index 6577130..955f9a4 100644
52098--- a/drivers/scsi/hpsa.h
52099+++ b/drivers/scsi/hpsa.h
52100@@ -143,7 +143,7 @@ struct ctlr_info {
52101 unsigned int msix_vector;
52102 unsigned int msi_vector;
52103 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52104- struct access_method access;
52105+ struct access_method *access;
52106 char hba_mode_enabled;
52107
52108 /* queue and queue Info */
52109@@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52110 }
52111
52112 static struct access_method SA5_access = {
52113- SA5_submit_command,
52114- SA5_intr_mask,
52115- SA5_intr_pending,
52116- SA5_completed,
52117+ .submit_command = SA5_submit_command,
52118+ .set_intr_mask = SA5_intr_mask,
52119+ .intr_pending = SA5_intr_pending,
52120+ .command_completed = SA5_completed,
52121 };
52122
52123 static struct access_method SA5_ioaccel_mode1_access = {
52124- SA5_submit_command,
52125- SA5_performant_intr_mask,
52126- SA5_ioaccel_mode1_intr_pending,
52127- SA5_ioaccel_mode1_completed,
52128+ .submit_command = SA5_submit_command,
52129+ .set_intr_mask = SA5_performant_intr_mask,
52130+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52131+ .command_completed = SA5_ioaccel_mode1_completed,
52132 };
52133
52134 static struct access_method SA5_ioaccel_mode2_access = {
52135- SA5_submit_command_ioaccel2,
52136- SA5_performant_intr_mask,
52137- SA5_performant_intr_pending,
52138- SA5_performant_completed,
52139+ .submit_command = SA5_submit_command_ioaccel2,
52140+ .set_intr_mask = SA5_performant_intr_mask,
52141+ .intr_pending = SA5_performant_intr_pending,
52142+ .command_completed = SA5_performant_completed,
52143 };
52144
52145 static struct access_method SA5_performant_access = {
52146- SA5_submit_command,
52147- SA5_performant_intr_mask,
52148- SA5_performant_intr_pending,
52149- SA5_performant_completed,
52150+ .submit_command = SA5_submit_command,
52151+ .set_intr_mask = SA5_performant_intr_mask,
52152+ .intr_pending = SA5_performant_intr_pending,
52153+ .command_completed = SA5_performant_completed,
52154 };
52155
52156 static struct access_method SA5_performant_access_no_read = {
52157- SA5_submit_command_no_read,
52158- SA5_performant_intr_mask,
52159- SA5_performant_intr_pending,
52160- SA5_performant_completed,
52161+ .submit_command = SA5_submit_command_no_read,
52162+ .set_intr_mask = SA5_performant_intr_mask,
52163+ .intr_pending = SA5_performant_intr_pending,
52164+ .command_completed = SA5_performant_completed,
52165 };
52166
52167 struct board_type {
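
[The hpsa change above is structural rather than cosmetic: "struct access_method access" embedded in ctlr_info was a per-controller copy, which forces every copy to be writable; replacing it with a pointer lets all controllers share the (now designated-initialized and constifiable) SA5_* tables, at the cost of the h->access. to h->access-> churn seen throughout the driver. A minimal sketch of the shape, with illustrative types:]

struct demo_ctlr;       /* forward declaration for the ops signature */

struct demo_access {
        void (*submit)(struct demo_ctlr *h);
};

struct demo_ctlr {
        const struct demo_access *access; /* was: struct demo_access access; */
};

static void demo_submit(struct demo_ctlr *h) { }

static const struct demo_access demo_access_tbl = {
        .submit = demo_submit,
};

static void demo_init(struct demo_ctlr *h)
{
        h->access = &demo_access_tbl;   /* share one read-only table */
}
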
52168diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52169index 1b3a094..068e683 100644
52170--- a/drivers/scsi/libfc/fc_exch.c
52171+++ b/drivers/scsi/libfc/fc_exch.c
52172@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52173 u16 pool_max_index;
52174
52175 struct {
52176- atomic_t no_free_exch;
52177- atomic_t no_free_exch_xid;
52178- atomic_t xid_not_found;
52179- atomic_t xid_busy;
52180- atomic_t seq_not_found;
52181- atomic_t non_bls_resp;
52182+ atomic_unchecked_t no_free_exch;
52183+ atomic_unchecked_t no_free_exch_xid;
52184+ atomic_unchecked_t xid_not_found;
52185+ atomic_unchecked_t xid_busy;
52186+ atomic_unchecked_t seq_not_found;
52187+ atomic_unchecked_t non_bls_resp;
52188 } stats;
52189 };
52190
52191@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52192 /* allocate memory for exchange */
52193 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52194 if (!ep) {
52195- atomic_inc(&mp->stats.no_free_exch);
52196+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52197 goto out;
52198 }
52199 memset(ep, 0, sizeof(*ep));
52200@@ -874,7 +874,7 @@ out:
52201 return ep;
52202 err:
52203 spin_unlock_bh(&pool->lock);
52204- atomic_inc(&mp->stats.no_free_exch_xid);
52205+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52206 mempool_free(ep, mp->ep_pool);
52207 return NULL;
52208 }
52209@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52210 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52211 ep = fc_exch_find(mp, xid);
52212 if (!ep) {
52213- atomic_inc(&mp->stats.xid_not_found);
52214+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52215 reject = FC_RJT_OX_ID;
52216 goto out;
52217 }
52218@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52219 ep = fc_exch_find(mp, xid);
52220 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52221 if (ep) {
52222- atomic_inc(&mp->stats.xid_busy);
52223+ atomic_inc_unchecked(&mp->stats.xid_busy);
52224 reject = FC_RJT_RX_ID;
52225 goto rel;
52226 }
52227@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52228 }
52229 xid = ep->xid; /* get our XID */
52230 } else if (!ep) {
52231- atomic_inc(&mp->stats.xid_not_found);
52232+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52233 reject = FC_RJT_RX_ID; /* XID not found */
52234 goto out;
52235 }
52236@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52237 } else {
52238 sp = &ep->seq;
52239 if (sp->id != fh->fh_seq_id) {
52240- atomic_inc(&mp->stats.seq_not_found);
52241+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52242 if (f_ctl & FC_FC_END_SEQ) {
52243 /*
52244 * Update sequence_id based on incoming last
52245@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52246
52247 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52248 if (!ep) {
52249- atomic_inc(&mp->stats.xid_not_found);
52250+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52251 goto out;
52252 }
52253 if (ep->esb_stat & ESB_ST_COMPLETE) {
52254- atomic_inc(&mp->stats.xid_not_found);
52255+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52256 goto rel;
52257 }
52258 if (ep->rxid == FC_XID_UNKNOWN)
52259 ep->rxid = ntohs(fh->fh_rx_id);
52260 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52261- atomic_inc(&mp->stats.xid_not_found);
52262+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52263 goto rel;
52264 }
52265 if (ep->did != ntoh24(fh->fh_s_id) &&
52266 ep->did != FC_FID_FLOGI) {
52267- atomic_inc(&mp->stats.xid_not_found);
52268+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52269 goto rel;
52270 }
52271 sof = fr_sof(fp);
52272@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52273 sp->ssb_stat |= SSB_ST_RESP;
52274 sp->id = fh->fh_seq_id;
52275 } else if (sp->id != fh->fh_seq_id) {
52276- atomic_inc(&mp->stats.seq_not_found);
52277+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52278 goto rel;
52279 }
52280
52281@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52282 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52283
52284 if (!sp)
52285- atomic_inc(&mp->stats.xid_not_found);
52286+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52287 else
52288- atomic_inc(&mp->stats.non_bls_resp);
52289+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52290
52291 fc_frame_free(fp);
52292 }
52293@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52294
52295 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52296 mp = ema->mp;
52297- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52298+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52299 st->fc_no_free_exch_xid +=
52300- atomic_read(&mp->stats.no_free_exch_xid);
52301- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52302- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52303- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52304- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52305+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52306+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52307+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52308+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52309+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52310 }
52311 }
52312 EXPORT_SYMBOL(fc_exch_update_stats);
52313diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52314index 9c706d8..d3e3ed2 100644
52315--- a/drivers/scsi/libsas/sas_ata.c
52316+++ b/drivers/scsi/libsas/sas_ata.c
52317@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
52318 .postreset = ata_std_postreset,
52319 .error_handler = ata_std_error_handler,
52320 .post_internal_cmd = sas_ata_post_internal,
52321- .qc_defer = ata_std_qc_defer,
52322+ .qc_defer = ata_std_qc_defer,
52323 .qc_prep = ata_noop_qc_prep,
52324 .qc_issue = sas_ata_qc_issue,
52325 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52326diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52327index 434e903..5a4a79b 100644
52328--- a/drivers/scsi/lpfc/lpfc.h
52329+++ b/drivers/scsi/lpfc/lpfc.h
52330@@ -430,7 +430,7 @@ struct lpfc_vport {
52331 struct dentry *debug_nodelist;
52332 struct dentry *vport_debugfs_root;
52333 struct lpfc_debugfs_trc *disc_trc;
52334- atomic_t disc_trc_cnt;
52335+ atomic_unchecked_t disc_trc_cnt;
52336 #endif
52337 uint8_t stat_data_enabled;
52338 uint8_t stat_data_blocked;
52339@@ -880,8 +880,8 @@ struct lpfc_hba {
52340 struct timer_list fabric_block_timer;
52341 unsigned long bit_flags;
52342 #define FABRIC_COMANDS_BLOCKED 0
52343- atomic_t num_rsrc_err;
52344- atomic_t num_cmd_success;
52345+ atomic_unchecked_t num_rsrc_err;
52346+ atomic_unchecked_t num_cmd_success;
52347 unsigned long last_rsrc_error_time;
52348 unsigned long last_ramp_down_time;
52349 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52350@@ -916,7 +916,7 @@ struct lpfc_hba {
52351
52352 struct dentry *debug_slow_ring_trc;
52353 struct lpfc_debugfs_trc *slow_ring_trc;
52354- atomic_t slow_ring_trc_cnt;
52355+ atomic_unchecked_t slow_ring_trc_cnt;
52356 /* iDiag debugfs sub-directory */
52357 struct dentry *idiag_root;
52358 struct dentry *idiag_pci_cfg;
52359diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52360index 5633e7d..8272114 100644
52361--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52362+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52363@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52364
52365 #include <linux/debugfs.h>
52366
52367-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52368+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52369 static unsigned long lpfc_debugfs_start_time = 0L;
52370
52371 /* iDiag */
52372@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52373 lpfc_debugfs_enable = 0;
52374
52375 len = 0;
52376- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52377+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52378 (lpfc_debugfs_max_disc_trc - 1);
52379 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52380 dtp = vport->disc_trc + i;
52381@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52382 lpfc_debugfs_enable = 0;
52383
52384 len = 0;
52385- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52386+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52387 (lpfc_debugfs_max_slow_ring_trc - 1);
52388 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52389 dtp = phba->slow_ring_trc + i;
52390@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52391 !vport || !vport->disc_trc)
52392 return;
52393
52394- index = atomic_inc_return(&vport->disc_trc_cnt) &
52395+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52396 (lpfc_debugfs_max_disc_trc - 1);
52397 dtp = vport->disc_trc + index;
52398 dtp->fmt = fmt;
52399 dtp->data1 = data1;
52400 dtp->data2 = data2;
52401 dtp->data3 = data3;
52402- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52403+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52404 dtp->jif = jiffies;
52405 #endif
52406 return;
52407@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52408 !phba || !phba->slow_ring_trc)
52409 return;
52410
52411- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52412+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52413 (lpfc_debugfs_max_slow_ring_trc - 1);
52414 dtp = phba->slow_ring_trc + index;
52415 dtp->fmt = fmt;
52416 dtp->data1 = data1;
52417 dtp->data2 = data2;
52418 dtp->data3 = data3;
52419- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52420+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52421 dtp->jif = jiffies;
52422 #endif
52423 return;
52424@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52425 "slow_ring buffer\n");
52426 goto debug_failed;
52427 }
52428- atomic_set(&phba->slow_ring_trc_cnt, 0);
52429+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52430 memset(phba->slow_ring_trc, 0,
52431 (sizeof(struct lpfc_debugfs_trc) *
52432 lpfc_debugfs_max_slow_ring_trc));
52433@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52434 "buffer\n");
52435 goto debug_failed;
52436 }
52437- atomic_set(&vport->disc_trc_cnt, 0);
52438+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52439
52440 snprintf(name, sizeof(name), "discovery_trace");
52441 vport->debug_disc_trc =
52442diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52443index 0b2c53a..aec2b45 100644
52444--- a/drivers/scsi/lpfc/lpfc_init.c
52445+++ b/drivers/scsi/lpfc/lpfc_init.c
52446@@ -11290,8 +11290,10 @@ lpfc_init(void)
52447 "misc_register returned with status %d", error);
52448
52449 if (lpfc_enable_npiv) {
52450- lpfc_transport_functions.vport_create = lpfc_vport_create;
52451- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52452+ pax_open_kernel();
52453+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52454+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52455+ pax_close_kernel();
52456 }
52457 lpfc_transport_template =
52458 fc_attach_transport(&lpfc_transport_functions);
52459diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52460index 4f9222e..f1850e3 100644
52461--- a/drivers/scsi/lpfc/lpfc_scsi.c
52462+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52463@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52464 unsigned long expires;
52465
52466 spin_lock_irqsave(&phba->hbalock, flags);
52467- atomic_inc(&phba->num_rsrc_err);
52468+ atomic_inc_unchecked(&phba->num_rsrc_err);
52469 phba->last_rsrc_error_time = jiffies;
52470
52471 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
52472@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52473 unsigned long num_rsrc_err, num_cmd_success;
52474 int i;
52475
52476- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52477- num_cmd_success = atomic_read(&phba->num_cmd_success);
52478+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52479+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52480
52481 /*
52482 * The error and success command counters are global per
52483@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52484 }
52485 }
52486 lpfc_destroy_vport_work_array(phba, vports);
52487- atomic_set(&phba->num_rsrc_err, 0);
52488- atomic_set(&phba->num_cmd_success, 0);
52489+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52490+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52491 }
52492
52493 /**
52494diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52495index 3f26147..ee8efd1 100644
52496--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52497+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52498@@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
52499 {
52500 struct scsi_device *sdev = to_scsi_device(dev);
52501 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52502- static struct _raid_device *raid_device;
52503+ struct _raid_device *raid_device;
52504 unsigned long flags;
52505 Mpi2RaidVolPage0_t vol_pg0;
52506 Mpi2ConfigReply_t mpi_reply;
52507@@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
52508 {
52509 struct scsi_device *sdev = to_scsi_device(dev);
52510 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52511- static struct _raid_device *raid_device;
52512+ struct _raid_device *raid_device;
52513 unsigned long flags;
52514 Mpi2RaidVolPage0_t vol_pg0;
52515 Mpi2ConfigReply_t mpi_reply;
52516@@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52517 Mpi2EventDataIrOperationStatus_t *event_data =
52518 (Mpi2EventDataIrOperationStatus_t *)
52519 fw_event->event_data;
52520- static struct _raid_device *raid_device;
52521+ struct _raid_device *raid_device;
52522 unsigned long flags;
52523 u16 handle;
52524
52525@@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52526 u64 sas_address;
52527 struct _sas_device *sas_device;
52528 struct _sas_node *expander_device;
52529- static struct _raid_device *raid_device;
52530+ struct _raid_device *raid_device;
52531 u8 retry_count;
52532 unsigned long flags;
52533
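[Unlike most hunks in this section, the mpt2sas change above fixes an outright bug rather than enabling hardening: "static" on a local pointer makes it a single object shared by every concurrent caller, so two CPUs inside _scsih_get_resync() would race on the same raid_device slot. A sketch of the failure mode and the fix:]

#include <linux/slab.h>

static void buggy(void)
{
        static int *p;          /* one shared instance for all callers */

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        /* another CPU can overwrite p here: leak or double-free below */
        kfree(p);
}

static void fixed(void)
{
        int *p = kmalloc(sizeof(*p), GFP_KERNEL);       /* per-call */

        kfree(p);
}
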
52534diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52535index ed31d8c..ab856b3 100644
52536--- a/drivers/scsi/pmcraid.c
52537+++ b/drivers/scsi/pmcraid.c
52538@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52539 res->scsi_dev = scsi_dev;
52540 scsi_dev->hostdata = res;
52541 res->change_detected = 0;
52542- atomic_set(&res->read_failures, 0);
52543- atomic_set(&res->write_failures, 0);
52544+ atomic_set_unchecked(&res->read_failures, 0);
52545+ atomic_set_unchecked(&res->write_failures, 0);
52546 rc = 0;
52547 }
52548 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52549@@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52550
52551 /* If this was a SCSI read/write command keep count of errors */
52552 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52553- atomic_inc(&res->read_failures);
52554+ atomic_inc_unchecked(&res->read_failures);
52555 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52556- atomic_inc(&res->write_failures);
52557+ atomic_inc_unchecked(&res->write_failures);
52558
52559 if (!RES_IS_GSCSI(res->cfg_entry) &&
52560 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52561@@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
52562 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52563 * hrrq_id assigned here in queuecommand
52564 */
52565- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52566+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52567 pinstance->num_hrrq;
52568 cmd->cmd_done = pmcraid_io_done;
52569
52570@@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
52571 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52572 * hrrq_id assigned here in queuecommand
52573 */
52574- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52575+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52576 pinstance->num_hrrq;
52577
52578 if (request_size) {
52579@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52580
52581 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52582 /* add resources only after host is added into system */
52583- if (!atomic_read(&pinstance->expose_resources))
52584+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52585 return;
52586
52587 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52588@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52589 init_waitqueue_head(&pinstance->reset_wait_q);
52590
52591 atomic_set(&pinstance->outstanding_cmds, 0);
52592- atomic_set(&pinstance->last_message_id, 0);
52593- atomic_set(&pinstance->expose_resources, 0);
52594+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52595+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52596
52597 INIT_LIST_HEAD(&pinstance->free_res_q);
52598 INIT_LIST_HEAD(&pinstance->used_res_q);
52599@@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52600 /* Schedule worker thread to handle CCN and take care of adding and
52601 * removing devices to OS
52602 */
52603- atomic_set(&pinstance->expose_resources, 1);
52604+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52605 schedule_work(&pinstance->worker_q);
52606 return rc;
52607
52608diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52609index e1d150f..6c6df44 100644
52610--- a/drivers/scsi/pmcraid.h
52611+++ b/drivers/scsi/pmcraid.h
52612@@ -748,7 +748,7 @@ struct pmcraid_instance {
52613 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52614
52615 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52616- atomic_t last_message_id;
52617+ atomic_unchecked_t last_message_id;
52618
52619 /* configuration table */
52620 struct pmcraid_config_table *cfg_table;
52621@@ -777,7 +777,7 @@ struct pmcraid_instance {
52622 atomic_t outstanding_cmds;
52623
52624 /* should add/delete resources to mid-layer now ?*/
52625- atomic_t expose_resources;
52626+ atomic_unchecked_t expose_resources;
52627
52628
52629
52630@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52631 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52632 };
52633 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52634- atomic_t read_failures; /* count of failed READ commands */
52635- atomic_t write_failures; /* count of failed WRITE commands */
52636+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52637+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52638
52639 /* To indicate add/delete/modify during CCN */
52640 u8 change_detected;
52641diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52642index 82b92c4..3178171 100644
52643--- a/drivers/scsi/qla2xxx/qla_attr.c
52644+++ b/drivers/scsi/qla2xxx/qla_attr.c
52645@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52646 return 0;
52647 }
52648
52649-struct fc_function_template qla2xxx_transport_functions = {
52650+fc_function_template_no_const qla2xxx_transport_functions = {
52651
52652 .show_host_node_name = 1,
52653 .show_host_port_name = 1,
52654@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52655 .bsg_timeout = qla24xx_bsg_timeout,
52656 };
52657
52658-struct fc_function_template qla2xxx_transport_vport_functions = {
52659+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52660
52661 .show_host_node_name = 1,
52662 .show_host_port_name = 1,
52663diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52664index 7686bfe..4710893 100644
52665--- a/drivers/scsi/qla2xxx/qla_gbl.h
52666+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52667@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
52668 struct device_attribute;
52669 extern struct device_attribute *qla2x00_host_attrs[];
52670 struct fc_function_template;
52671-extern struct fc_function_template qla2xxx_transport_functions;
52672-extern struct fc_function_template qla2xxx_transport_vport_functions;
52673+extern fc_function_template_no_const qla2xxx_transport_functions;
52674+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52675 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52676 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52677 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52678diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52679index cce1cbc..5b9f0fe 100644
52680--- a/drivers/scsi/qla2xxx/qla_os.c
52681+++ b/drivers/scsi/qla2xxx/qla_os.c
52682@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52683 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52684 /* Ok, a 64bit DMA mask is applicable. */
52685 ha->flags.enable_64bit_addressing = 1;
52686- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52687- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52688+ pax_open_kernel();
52689+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52690+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52691+ pax_close_kernel();
52692 return;
52693 }
52694 }
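
The qla2xxx hunk above shows the second recurring pattern: with the constify plugin, ops tables such as isp_ops end up in read-only memory, so a legitimate runtime update must be bracketed by pax_open_kernel()/pax_close_kernel() and written through a (void **) cast to get past the const qualifier. A hedged sketch, assuming those helpers as defined elsewhere in this patch (demo_ops and demo_fast are illustrative names, not qla2xxx code):

        struct demo_ops {
                int (*handler)(void);
        };

        static int demo_fast(void) { return 0; }

        static void demo_switch_to_fast_path(struct demo_ops *ops)
        {
                pax_open_kernel();      /* temporarily allow writes to .rodata */
                *(void **)&ops->handler = demo_fast;
                pax_close_kernel();     /* restore read-only protection */
        }
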
52695diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52696index 8f6d0fb..1b21097 100644
52697--- a/drivers/scsi/qla4xxx/ql4_def.h
52698+++ b/drivers/scsi/qla4xxx/ql4_def.h
52699@@ -305,7 +305,7 @@ struct ddb_entry {
52700 * (4000 only) */
52701 atomic_t relogin_timer; /* Max Time to wait for
52702 * relogin to complete */
52703- atomic_t relogin_retry_count; /* Num of times relogin has been
52704+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52705 * retried */
52706 uint32_t default_time2wait; /* Default Min time between
52707 * relogins (+aens) */
52708diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52709index 6d25879..3031a9f 100644
52710--- a/drivers/scsi/qla4xxx/ql4_os.c
52711+++ b/drivers/scsi/qla4xxx/ql4_os.c
52712@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52713 */
52714 if (!iscsi_is_session_online(cls_sess)) {
52715 /* Reset retry relogin timer */
52716- atomic_inc(&ddb_entry->relogin_retry_count);
52717+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52718 DEBUG2(ql4_printk(KERN_INFO, ha,
52719 "%s: index[%d] relogin timed out-retrying"
52720 " relogin (%d), retry (%d)\n", __func__,
52721 ddb_entry->fw_ddb_index,
52722- atomic_read(&ddb_entry->relogin_retry_count),
52723+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52724 ddb_entry->default_time2wait + 4));
52725 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52726 atomic_set(&ddb_entry->retry_relogin_timer,
52727@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52728
52729 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52730 atomic_set(&ddb_entry->relogin_timer, 0);
52731- atomic_set(&ddb_entry->relogin_retry_count, 0);
52732+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52733 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52734 ddb_entry->default_relogin_timeout =
52735 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52736diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52737index b1a2631..5bcd9c8 100644
52738--- a/drivers/scsi/scsi_lib.c
52739+++ b/drivers/scsi/scsi_lib.c
52740@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52741 shost = sdev->host;
52742 scsi_init_cmd_errh(cmd);
52743 cmd->result = DID_NO_CONNECT << 16;
52744- atomic_inc(&cmd->device->iorequest_cnt);
52745+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52746
52747 /*
52748 * SCSI request completion path will do scsi_device_unbusy(),
52749@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
52750
52751 INIT_LIST_HEAD(&cmd->eh_entry);
52752
52753- atomic_inc(&cmd->device->iodone_cnt);
52754+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52755 if (cmd->result)
52756- atomic_inc(&cmd->device->ioerr_cnt);
52757+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52758
52759 disposition = scsi_decide_disposition(cmd);
52760 if (disposition != SUCCESS &&
52761@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52762 struct Scsi_Host *host = cmd->device->host;
52763 int rtn = 0;
52764
52765- atomic_inc(&cmd->device->iorequest_cnt);
52766+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52767
52768 /* check if the device is still usable */
52769 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52770diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52771index 1ac38e7..6acc656 100644
52772--- a/drivers/scsi/scsi_sysfs.c
52773+++ b/drivers/scsi/scsi_sysfs.c
52774@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52775 char *buf) \
52776 { \
52777 struct scsi_device *sdev = to_scsi_device(dev); \
52778- unsigned long long count = atomic_read(&sdev->field); \
52779+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52780 return snprintf(buf, 20, "0x%llx\n", count); \
52781 } \
52782 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52783diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52784index 5d6f348..18778a6b 100644
52785--- a/drivers/scsi/scsi_transport_fc.c
52786+++ b/drivers/scsi/scsi_transport_fc.c
52787@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52788 * Netlink Infrastructure
52789 */
52790
52791-static atomic_t fc_event_seq;
52792+static atomic_unchecked_t fc_event_seq;
52793
52794 /**
52795 * fc_get_event_number - Obtain the next sequential FC event number
52796@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
52797 u32
52798 fc_get_event_number(void)
52799 {
52800- return atomic_add_return(1, &fc_event_seq);
52801+ return atomic_add_return_unchecked(1, &fc_event_seq);
52802 }
52803 EXPORT_SYMBOL(fc_get_event_number);
52804
52805@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
52806 {
52807 int error;
52808
52809- atomic_set(&fc_event_seq, 0);
52810+ atomic_set_unchecked(&fc_event_seq, 0);
52811
52812 error = transport_class_register(&fc_host_class);
52813 if (error)
52814@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52815 char *cp;
52816
52817 *val = simple_strtoul(buf, &cp, 0);
52818- if ((*cp && (*cp != '\n')) || (*val < 0))
52819+ if (*cp && (*cp != '\n'))
52820 return -EINVAL;
52821 /*
52822 * Check for overflow; dev_loss_tmo is u32
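
One non-PaX cleanup rides along in the scsi_transport_fc.c hunk: *val comes from simple_strtoul() and is an unsigned long, so the dropped (*val < 0) test could never be true. An illustrative fragment of why the comparison was dead:

        unsigned long val = simple_strtoul(buf, &cp, 0);
        if (val < 0)    /* always false for an unsigned type; gcc -Wtype-limits flags it */
                return -EINVAL;
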
52823diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52824index 67d43e3..8cee73c 100644
52825--- a/drivers/scsi/scsi_transport_iscsi.c
52826+++ b/drivers/scsi/scsi_transport_iscsi.c
52827@@ -79,7 +79,7 @@ struct iscsi_internal {
52828 struct transport_container session_cont;
52829 };
52830
52831-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52832+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52833 static struct workqueue_struct *iscsi_eh_timer_workq;
52834
52835 static DEFINE_IDA(iscsi_sess_ida);
52836@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52837 int err;
52838
52839 ihost = shost->shost_data;
52840- session->sid = atomic_add_return(1, &iscsi_session_nr);
52841+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52842
52843 if (target_id == ISCSI_MAX_TARGET) {
52844 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52845@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
52846 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52847 ISCSI_TRANSPORT_VERSION);
52848
52849- atomic_set(&iscsi_session_nr, 0);
52850+ atomic_set_unchecked(&iscsi_session_nr, 0);
52851
52852 err = class_register(&iscsi_transport_class);
52853 if (err)
52854diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52855index ae45bd9..c32a586 100644
52856--- a/drivers/scsi/scsi_transport_srp.c
52857+++ b/drivers/scsi/scsi_transport_srp.c
52858@@ -35,7 +35,7 @@
52859 #include "scsi_priv.h"
52860
52861 struct srp_host_attrs {
52862- atomic_t next_port_id;
52863+ atomic_unchecked_t next_port_id;
52864 };
52865 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52866
52867@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
52868 struct Scsi_Host *shost = dev_to_shost(dev);
52869 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
52870
52871- atomic_set(&srp_host->next_port_id, 0);
52872+ atomic_set_unchecked(&srp_host->next_port_id, 0);
52873 return 0;
52874 }
52875
52876@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52877 rport_fast_io_fail_timedout);
52878 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52879
52880- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52881+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52882 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52883
52884 transport_setup_device(&rport->dev);
52885diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52886index 3290a3e..d65ac1c 100644
52887--- a/drivers/scsi/sd.c
52888+++ b/drivers/scsi/sd.c
52889@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
52890 sdkp->disk = gd;
52891 sdkp->index = index;
52892 atomic_set(&sdkp->openers, 0);
52893- atomic_set(&sdkp->device->ioerr_cnt, 0);
52894+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52895
52896 if (!sdp->request_queue->rq_timeout) {
52897 if (sdp->type != TYPE_MOD)
52898diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52899index 2270bd5..98408a5 100644
52900--- a/drivers/scsi/sg.c
52901+++ b/drivers/scsi/sg.c
52902@@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52903 sdp->disk->disk_name,
52904 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
52905 NULL,
52906- (char *)arg);
52907+ (char __user *)arg);
52908 case BLKTRACESTART:
52909 return blk_trace_startstop(sdp->device->request_queue, 1);
52910 case BLKTRACESTOP:
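
The sg.c change is a sparse annotation fix: the ioctl argument is a userspace pointer, so it is cast to (char __user *) to keep the address-space checker honest. A small sketch of what __user buys, assuming the standard kernel uaccess helpers (demo_ioctl is an illustrative name):

        #include <linux/uaccess.h>

        static int demo_ioctl(unsigned long arg)
        {
                char __user *ubuf = (char __user *)arg;
                char kbuf[16];

                /* sparse (make C=1) would warn if ubuf were dereferenced directly */
                if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
                        return -EFAULT;
                return 0;
        }
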
52911diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
52912index c0d660f..24a5854 100644
52913--- a/drivers/soc/tegra/fuse/fuse-tegra.c
52914+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
52915@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
52916 return i;
52917 }
52918
52919-static struct bin_attribute fuse_bin_attr = {
52920+static bin_attribute_no_const fuse_bin_attr = {
52921 .attr = { .name = "fuse", .mode = S_IRUGO, },
52922 .read = fuse_read,
52923 };
52924diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52925index 57a1950..ae54e21 100644
52926--- a/drivers/spi/spi.c
52927+++ b/drivers/spi/spi.c
52928@@ -2307,7 +2307,7 @@ int spi_bus_unlock(struct spi_master *master)
52929 EXPORT_SYMBOL_GPL(spi_bus_unlock);
52930
52931 /* portable code must never pass more than 32 bytes */
52932-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
52933+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
52934
52935 static u8 *buf;
52936
52937diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
52938index b41429f..2de5373 100644
52939--- a/drivers/staging/android/timed_output.c
52940+++ b/drivers/staging/android/timed_output.c
52941@@ -25,7 +25,7 @@
52942 #include "timed_output.h"
52943
52944 static struct class *timed_output_class;
52945-static atomic_t device_count;
52946+static atomic_unchecked_t device_count;
52947
52948 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
52949 char *buf)
52950@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
52951 timed_output_class = class_create(THIS_MODULE, "timed_output");
52952 if (IS_ERR(timed_output_class))
52953 return PTR_ERR(timed_output_class);
52954- atomic_set(&device_count, 0);
52955+ atomic_set_unchecked(&device_count, 0);
52956 timed_output_class->dev_groups = timed_output_groups;
52957 }
52958
52959@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
52960 if (ret < 0)
52961 return ret;
52962
52963- tdev->index = atomic_inc_return(&device_count);
52964+ tdev->index = atomic_inc_return_unchecked(&device_count);
52965 tdev->dev = device_create(timed_output_class, NULL,
52966 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
52967 if (IS_ERR(tdev->dev))
52968diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
52969index 727640e..55bf61c 100644
52970--- a/drivers/staging/comedi/comedi_fops.c
52971+++ b/drivers/staging/comedi/comedi_fops.c
52972@@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
52973 }
52974 cfp->last_attached = dev->attached;
52975 cfp->last_detach_count = dev->detach_count;
52976- ACCESS_ONCE(cfp->read_subdev) = read_s;
52977- ACCESS_ONCE(cfp->write_subdev) = write_s;
52978+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
52979+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
52980 }
52981
52982 static void comedi_file_check(struct file *file)
52983@@ -1924,7 +1924,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52984 !(s_old->async->cmd.flags & CMDF_WRITE))
52985 return -EBUSY;
52986
52987- ACCESS_ONCE(cfp->read_subdev) = s_new;
52988+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
52989 return 0;
52990 }
52991
52992@@ -1966,7 +1966,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52993 (s_old->async->cmd.flags & CMDF_WRITE))
52994 return -EBUSY;
52995
52996- ACCESS_ONCE(cfp->write_subdev) = s_new;
52997+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
52998 return 0;
52999 }
53000
53001diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
53002index 37dcf7e..f3c2016 100644
53003--- a/drivers/staging/fbtft/fbtft-core.c
53004+++ b/drivers/staging/fbtft/fbtft-core.c
53005@@ -689,7 +689,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
53006 {
53007 struct fb_info *info;
53008 struct fbtft_par *par;
53009- struct fb_ops *fbops = NULL;
53010+ fb_ops_no_const *fbops = NULL;
53011 struct fb_deferred_io *fbdefio = NULL;
53012 struct fbtft_platform_data *pdata = dev->platform_data;
53013 u8 *vmem = NULL;
53014diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
53015index 0dbf3f9..fed0063 100644
53016--- a/drivers/staging/fbtft/fbtft.h
53017+++ b/drivers/staging/fbtft/fbtft.h
53018@@ -106,7 +106,7 @@ struct fbtft_ops {
53019
53020 int (*set_var)(struct fbtft_par *par);
53021 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
53022-};
53023+} __no_const;
53024
53025 /**
53026 * struct fbtft_display - Describes the display properties
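
The fbtft hunks illustrate the constify plugin's two escape hatches: a local fb_ops instance that must stay writable is retyped to fb_ops_no_const, and a struct made up entirely of function pointers that drivers fill in at probe time is tagged __no_const so the plugin does not force its instances into .rodata. A sketch of the opt-out, assuming the __no_const attribute wired up elsewhere in this patch:

        struct runtime_ops {
                int (*start)(void *priv);
                void (*stop)(void *priv);
        } __no_const;   /* members assigned at runtime, so instances
                         * must not be moved to read-only memory */
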
53027diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53028index 001348c..cfaac8a 100644
53029--- a/drivers/staging/gdm724x/gdm_tty.c
53030+++ b/drivers/staging/gdm724x/gdm_tty.c
53031@@ -44,7 +44,7 @@
53032 #define gdm_tty_send_control(n, r, v, d, l) (\
53033 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53034
53035-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53036+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53037
53038 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53039 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53040diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
53041index d23c3c2..eb63c81 100644
53042--- a/drivers/staging/i2o/i2o.h
53043+++ b/drivers/staging/i2o/i2o.h
53044@@ -565,7 +565,7 @@ struct i2o_controller {
53045 struct i2o_device *exec; /* Executive */
53046 #if BITS_PER_LONG == 64
53047 spinlock_t context_list_lock; /* lock for context_list */
53048- atomic_t context_list_counter; /* needed for unique contexts */
53049+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53050 struct list_head context_list; /* list of context id's
53051 and pointers */
53052 #endif
53053diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
53054index ad84f33..c5bdf65 100644
53055--- a/drivers/staging/i2o/i2o_proc.c
53056+++ b/drivers/staging/i2o/i2o_proc.c
53057@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
53058 "Array Controller Device"
53059 };
53060
53061-static char *chtostr(char *tmp, u8 *chars, int n)
53062-{
53063- tmp[0] = 0;
53064- return strncat(tmp, (char *)chars, n);
53065-}
53066-
53067 static int i2o_report_query_status(struct seq_file *seq, int block_status,
53068 char *group)
53069 {
53070@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
53071 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
53072 {
53073 struct i2o_controller *c = (struct i2o_controller *)seq->private;
53074- static u32 work32[5];
53075- static u8 *work8 = (u8 *) work32;
53076- static u16 *work16 = (u16 *) work32;
53077+ u32 work32[5];
53078+ u8 *work8 = (u8 *) work32;
53079+ u16 *work16 = (u16 *) work32;
53080 int token;
53081 u32 hwcap;
53082
53083@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53084 } *result;
53085
53086 i2o_exec_execute_ddm_table ddm_table;
53087- char tmp[28 + 1];
53088
53089 result = kmalloc(sizeof(*result), GFP_KERNEL);
53090 if (!result)
53091@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53092
53093 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
53094 seq_printf(seq, "%-#8x", ddm_table.module_id);
53095- seq_printf(seq, "%-29s",
53096- chtostr(tmp, ddm_table.module_name_version, 28));
53097+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
53098 seq_printf(seq, "%9d ", ddm_table.data_size);
53099 seq_printf(seq, "%8d", ddm_table.code_size);
53100
53101@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53102
53103 i2o_driver_result_table *result;
53104 i2o_driver_store_table *dst;
53105- char tmp[28 + 1];
53106
53107 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
53108 if (result == NULL)
53109@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53110
53111 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
53112 seq_printf(seq, "%-#8x", dst->module_id);
53113- seq_printf(seq, "%-29s",
53114- chtostr(tmp, dst->module_name_version, 28));
53115- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
53116+ seq_printf(seq, "%-.28s", dst->module_name_version);
53117+ seq_printf(seq, "%-.8s", dst->date);
53118 seq_printf(seq, "%8d ", dst->module_size);
53119 seq_printf(seq, "%8d ", dst->mpb_size);
53120 seq_printf(seq, "0x%04x", dst->module_flags);
53121@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
53122 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53123 {
53124 struct i2o_device *d = (struct i2o_device *)seq->private;
53125- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53126+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53127 // == (allow) 512d bytes (max)
53128- static u16 *work16 = (u16 *) work32;
53129+ u16 *work16 = (u16 *) work32;
53130 int token;
53131- char tmp[16 + 1];
53132
53133 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
53134
53135@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53136 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
53137 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
53138 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
53139- seq_printf(seq, "Vendor info : %s\n",
53140- chtostr(tmp, (u8 *) (work32 + 2), 16));
53141- seq_printf(seq, "Product info : %s\n",
53142- chtostr(tmp, (u8 *) (work32 + 6), 16));
53143- seq_printf(seq, "Description : %s\n",
53144- chtostr(tmp, (u8 *) (work32 + 10), 16));
53145- seq_printf(seq, "Product rev. : %s\n",
53146- chtostr(tmp, (u8 *) (work32 + 14), 8));
53147+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
53148+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
53149+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
53150+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
53151
53152 seq_printf(seq, "Serial number : ");
53153 print_serial_number(seq, (u8 *) (work32 + 16),
53154@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53155 u8 pad[256]; // allow up to 256 byte (max) serial number
53156 } result;
53157
53158- char tmp[24 + 1];
53159-
53160 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
53161
53162 if (token < 0) {
53163@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53164 }
53165
53166 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
53167- seq_printf(seq, "Module name : %s\n",
53168- chtostr(tmp, result.module_name, 24));
53169- seq_printf(seq, "Module revision : %s\n",
53170- chtostr(tmp, result.module_rev, 8));
53171+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
53172+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
53173
53174 seq_printf(seq, "Serial number : ");
53175 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
53176@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53177 u8 instance_number[4];
53178 } result;
53179
53180- char tmp[64 + 1];
53181-
53182 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
53183
53184 if (token < 0) {
53185@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53186 return 0;
53187 }
53188
53189- seq_printf(seq, "Device name : %s\n",
53190- chtostr(tmp, result.device_name, 64));
53191- seq_printf(seq, "Service name : %s\n",
53192- chtostr(tmp, result.service_name, 64));
53193- seq_printf(seq, "Physical name : %s\n",
53194- chtostr(tmp, result.physical_location, 64));
53195- seq_printf(seq, "Instance number : %s\n",
53196- chtostr(tmp, result.instance_number, 4));
53197+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
53198+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
53199+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
53200+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
53201
53202 return 0;
53203 }
53204@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53205 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
53206 {
53207 struct i2o_device *d = (struct i2o_device *)seq->private;
53208- static u32 work32[12];
53209- static u16 *work16 = (u16 *) work32;
53210- static u8 *work8 = (u8 *) work32;
53211+ u32 work32[12];
53212+ u16 *work16 = (u16 *) work32;
53213+ u8 *work8 = (u8 *) work32;
53214 int token;
53215
53216 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
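
Two independent fixes are folded into the i2o_proc.c hunk. First, the static work buffers become stack locals, so concurrent /proc readers no longer share (and race on) the same scratch memory. Second, the chtostr() bounce-buffer helper is replaced by printf precision: "%.28s" prints at most 28 bytes and is safe even when the source field is not NUL-terminated. Illustrative fragment (field name and width are hypothetical):

        char module_name[28];   /* fixed-width field, not necessarily NUL-terminated */
        seq_printf(seq, "%-.28s", module_name); /* precision caps the read at 28 bytes */
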
53217diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
53218index 52334fc..d7f40b3 100644
53219--- a/drivers/staging/i2o/iop.c
53220+++ b/drivers/staging/i2o/iop.c
53221@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
53222
53223 spin_lock_irqsave(&c->context_list_lock, flags);
53224
53225- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
53226- atomic_inc(&c->context_list_counter);
53227+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
53228+ atomic_inc_unchecked(&c->context_list_counter);
53229
53230- entry->context = atomic_read(&c->context_list_counter);
53231+ entry->context = atomic_read_unchecked(&c->context_list_counter);
53232
53233 list_add(&entry->list, &c->context_list);
53234
53235@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
53236
53237 #if BITS_PER_LONG == 64
53238 spin_lock_init(&c->context_list_lock);
53239- atomic_set(&c->context_list_counter, 0);
53240+ atomic_set_unchecked(&c->context_list_counter, 0);
53241 INIT_LIST_HEAD(&c->context_list);
53242 #endif
53243
53244diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53245index 463da07..e791ce9 100644
53246--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53247+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53248@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53249 return 0;
53250 }
53251
53252-sfw_test_client_ops_t brw_test_client;
53253-void brw_init_test_client(void)
53254-{
53255- brw_test_client.tso_init = brw_client_init;
53256- brw_test_client.tso_fini = brw_client_fini;
53257- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53258- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53259+sfw_test_client_ops_t brw_test_client = {
53260+ .tso_init = brw_client_init,
53261+ .tso_fini = brw_client_fini,
53262+ .tso_prep_rpc = brw_client_prep_rpc,
53263+ .tso_done_rpc = brw_client_done_rpc,
53264 };
53265
53266 srpc_service_t brw_test_service;
53267diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53268index 5709148..ccd9e0d 100644
53269--- a/drivers/staging/lustre/lnet/selftest/framework.c
53270+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53271@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
53272
53273 extern sfw_test_client_ops_t ping_test_client;
53274 extern srpc_service_t ping_test_service;
53275-extern void ping_init_test_client(void);
53276 extern void ping_init_test_service(void);
53277
53278 extern sfw_test_client_ops_t brw_test_client;
53279 extern srpc_service_t brw_test_service;
53280-extern void brw_init_test_client(void);
53281 extern void brw_init_test_service(void);
53282
53283
53284@@ -1675,12 +1673,10 @@ sfw_startup (void)
53285 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53286 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53287
53288- brw_init_test_client();
53289 brw_init_test_service();
53290 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53291 LASSERT (rc == 0);
53292
53293- ping_init_test_client();
53294 ping_init_test_service();
53295 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53296 LASSERT (rc == 0);
53297diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53298index d8c0df6..5041cbb 100644
53299--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53300+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53301@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53302 return 0;
53303 }
53304
53305-sfw_test_client_ops_t ping_test_client;
53306-void ping_init_test_client(void)
53307-{
53308- ping_test_client.tso_init = ping_client_init;
53309- ping_test_client.tso_fini = ping_client_fini;
53310- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53311- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53312-}
53313+sfw_test_client_ops_t ping_test_client = {
53314+ .tso_init = ping_client_init,
53315+ .tso_fini = ping_client_fini,
53316+ .tso_prep_rpc = ping_client_prep_rpc,
53317+ .tso_done_rpc = ping_client_done_rpc,
53318+};
53319
53320 srpc_service_t ping_test_service;
53321 void ping_init_test_service(void)
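
The brw/ping selftest hunks above replace runtime field-by-field assignment of the client ops with C99 designated initializers, so the objects are fully built at compile time and the now-unneeded *_init_test_client() constructors can be dropped; that is what lets such all-function-pointer objects be handled by the constify plugin. A generic sketch of the pattern, with illustrative names:

        struct test_ops {
                int (*init)(void);
                void (*fini)(void);
        };

        static int demo_init(void) { return 0; }
        static void demo_fini(void) { }

        static struct test_ops demo_ops = {
                .init = demo_init,
                .fini = demo_fini,
        };
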
53322diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53323index 83bc0a9..12ba00a 100644
53324--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53325+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53326@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
53327 ldlm_completion_callback lcs_completion;
53328 ldlm_blocking_callback lcs_blocking;
53329 ldlm_glimpse_callback lcs_glimpse;
53330-};
53331+} __no_const;
53332
53333 /* ldlm_lockd.c */
53334 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53335diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53336index 2a88b80..62e7e5f 100644
53337--- a/drivers/staging/lustre/lustre/include/obd.h
53338+++ b/drivers/staging/lustre/lustre/include/obd.h
53339@@ -1362,7 +1362,7 @@ struct md_ops {
53340 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53341 * wrapper function in include/linux/obd_class.h.
53342 */
53343-};
53344+} __no_const;
53345
53346 struct lsm_operations {
53347 void (*lsm_free)(struct lov_stripe_md *);
53348diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53349index a4c252f..b21acac 100644
53350--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53351+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53352@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53353 int added = (mode == LCK_NL);
53354 int overlaps = 0;
53355 int splitted = 0;
53356- const struct ldlm_callback_suite null_cbs = { NULL };
53357+ const struct ldlm_callback_suite null_cbs = { };
53358
53359 CDEBUG(D_DLMTRACE,
53360 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53361diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53362index c539e37..743b213 100644
53363--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53364+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53365@@ -237,7 +237,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
53366 loff_t *ppos)
53367 {
53368 int rc, max_delay_cs;
53369- struct ctl_table dummy = *table;
53370+ ctl_table_no_const dummy = *table;
53371 long d;
53372
53373 dummy.data = &max_delay_cs;
53374@@ -270,7 +270,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
53375 loff_t *ppos)
53376 {
53377 int rc, min_delay_cs;
53378- struct ctl_table dummy = *table;
53379+ ctl_table_no_const dummy = *table;
53380 long d;
53381
53382 dummy.data = &min_delay_cs;
53383@@ -302,7 +302,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
53384 void __user *buffer, size_t *lenp, loff_t *ppos)
53385 {
53386 int rc, backoff;
53387- struct ctl_table dummy = *table;
53388+ ctl_table_no_const dummy = *table;
53389
53390 dummy.data = &backoff;
53391 dummy.proc_handler = &proc_dointvec;
53392diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53393index 7dc77dd..289d03e 100644
53394--- a/drivers/staging/lustre/lustre/libcfs/module.c
53395+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53396@@ -313,11 +313,11 @@ out:
53397
53398
53399 struct cfs_psdev_ops libcfs_psdev_ops = {
53400- libcfs_psdev_open,
53401- libcfs_psdev_release,
53402- NULL,
53403- NULL,
53404- libcfs_ioctl
53405+ .p_open = libcfs_psdev_open,
53406+ .p_close = libcfs_psdev_release,
53407+ .p_read = NULL,
53408+ .p_write = NULL,
53409+ .p_ioctl = libcfs_ioctl
53410 };
53411
53412 extern int insert_proc(void);
53413diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53414index 22667db..8b703b6 100644
53415--- a/drivers/staging/octeon/ethernet-rx.c
53416+++ b/drivers/staging/octeon/ethernet-rx.c
53417@@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53418 /* Increment RX stats for virtual ports */
53419 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53420 #ifdef CONFIG_64BIT
53421- atomic64_add(1,
53422+ atomic64_add_unchecked(1,
53423 (atomic64_t *)&priv->stats.rx_packets);
53424- atomic64_add(skb->len,
53425+ atomic64_add_unchecked(skb->len,
53426 (atomic64_t *)&priv->stats.rx_bytes);
53427 #else
53428- atomic_add(1,
53429+ atomic_add_unchecked(1,
53430 (atomic_t *)&priv->stats.rx_packets);
53431- atomic_add(skb->len,
53432+ atomic_add_unchecked(skb->len,
53433 (atomic_t *)&priv->stats.rx_bytes);
53434 #endif
53435 }
53436@@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53437 dev->name);
53438 */
53439 #ifdef CONFIG_64BIT
53440- atomic64_add(1,
53441+ atomic64_add_unchecked(1,
53442 (atomic64_t *)&priv->stats.rx_dropped);
53443 #else
53444- atomic_add(1,
53445+ atomic_add_unchecked(1,
53446 (atomic_t *)&priv->stats.rx_dropped);
53447 #endif
53448 dev_kfree_skb_irq(skb);
53449diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53450index 460e854..f926452 100644
53451--- a/drivers/staging/octeon/ethernet.c
53452+++ b/drivers/staging/octeon/ethernet.c
53453@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53454 * since the RX tasklet also increments it.
53455 */
53456 #ifdef CONFIG_64BIT
53457- atomic64_add(rx_status.dropped_packets,
53458- (atomic64_t *)&priv->stats.rx_dropped);
53459+ atomic64_add_unchecked(rx_status.dropped_packets,
53460+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53461 #else
53462- atomic_add(rx_status.dropped_packets,
53463- (atomic_t *)&priv->stats.rx_dropped);
53464+ atomic_add_unchecked(rx_status.dropped_packets,
53465+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53466 #endif
53467 }
53468
53469diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53470index 3b476d8..f522d68 100644
53471--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53472+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53473@@ -225,7 +225,7 @@ struct hal_ops {
53474
53475 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53476 void (*hal_reset_security_engine)(struct adapter *adapter);
53477-};
53478+} __no_const;
53479
53480 enum rt_eeprom_type {
53481 EEPROM_93C46,
53482diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53483index 070cc03..6806e37 100644
53484--- a/drivers/staging/rtl8712/rtl871x_io.h
53485+++ b/drivers/staging/rtl8712/rtl871x_io.h
53486@@ -108,7 +108,7 @@ struct _io_ops {
53487 u8 *pmem);
53488 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53489 u8 *pmem);
53490-};
53491+} __no_const;
53492
53493 struct io_req {
53494 struct list_head list;
53495diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53496index 98f3ba4..c6a7fce 100644
53497--- a/drivers/staging/unisys/visorchipset/visorchipset.h
53498+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53499@@ -171,7 +171,7 @@ struct visorchipset_busdev_notifiers {
53500 void (*device_resume)(ulong bus_no, ulong dev_no);
53501 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
53502 ulong *max_size);
53503-};
53504+} __no_const;
53505
53506 /* These functions live inside visorchipset, and will be called to indicate
53507 * responses to specific events (by code outside of visorchipset).
53508@@ -186,7 +186,7 @@ struct visorchipset_busdev_responders {
53509 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
53510 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
53511 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
53512-};
53513+} __no_const;
53514
53515 /** Register functions (in the bus driver) to get called by visorchipset
53516 * whenever a bus or device appears for which this service partition is
53517diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53518index 9512af6..045bf5a 100644
53519--- a/drivers/target/sbp/sbp_target.c
53520+++ b/drivers/target/sbp/sbp_target.c
53521@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53522
53523 #define SESSION_MAINTENANCE_INTERVAL HZ
53524
53525-static atomic_t login_id = ATOMIC_INIT(0);
53526+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53527
53528 static void session_maintenance_work(struct work_struct *);
53529 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53530@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53531 login->lun = se_lun;
53532 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53533 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53534- login->login_id = atomic_inc_return(&login_id);
53535+ login->login_id = atomic_inc_return_unchecked(&login_id);
53536
53537 login->tgt_agt = sbp_target_agent_register(login);
53538 if (IS_ERR(login->tgt_agt)) {
53539diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53540index 7faa6ae..ae6c410 100644
53541--- a/drivers/target/target_core_device.c
53542+++ b/drivers/target/target_core_device.c
53543@@ -1495,7 +1495,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53544 spin_lock_init(&dev->se_tmr_lock);
53545 spin_lock_init(&dev->qf_cmd_lock);
53546 sema_init(&dev->caw_sem, 1);
53547- atomic_set(&dev->dev_ordered_id, 0);
53548+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53549 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53550 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53551 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53552diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53553index f786de0..04b643e 100644
53554--- a/drivers/target/target_core_transport.c
53555+++ b/drivers/target/target_core_transport.c
53556@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53557 * Used to determine when ORDERED commands should go from
53558 * Dormant to Active status.
53559 */
53560- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53561+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53562 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53563 cmd->se_ordered_id, cmd->sam_task_attr,
53564 dev->transport->name);
53565diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
53566index 031018e..90981a1 100644
53567--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
53568+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
53569@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
53570 platform_set_drvdata(pdev, priv);
53571
53572 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
53573- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53574- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53575+ pax_open_kernel();
53576+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53577+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53578+ pax_close_kernel();
53579 }
53580 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
53581 priv, &int3400_thermal_ops,
53582diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53583index 668fb1b..2737bbe 100644
53584--- a/drivers/thermal/of-thermal.c
53585+++ b/drivers/thermal/of-thermal.c
53586@@ -31,6 +31,7 @@
53587 #include <linux/export.h>
53588 #include <linux/string.h>
53589 #include <linux/thermal.h>
53590+#include <linux/mm.h>
53591
53592 #include "thermal_core.h"
53593
53594@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53595 tz->ops = ops;
53596 tz->sensor_data = data;
53597
53598- tzd->ops->get_temp = of_thermal_get_temp;
53599- tzd->ops->get_trend = of_thermal_get_trend;
53600- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53601+ pax_open_kernel();
53602+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53603+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53604+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53605+ pax_close_kernel();
53606 mutex_unlock(&tzd->lock);
53607
53608 return tzd;
53609@@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53610 return;
53611
53612 mutex_lock(&tzd->lock);
53613- tzd->ops->get_temp = NULL;
53614- tzd->ops->get_trend = NULL;
53615- tzd->ops->set_emul_temp = NULL;
53616+ pax_open_kernel();
53617+ *(void **)&tzd->ops->get_temp = NULL;
53618+ *(void **)&tzd->ops->get_trend = NULL;
53619+ *(void **)&tzd->ops->set_emul_temp = NULL;
53620+ pax_close_kernel();
53621
53622 tz->ops = NULL;
53623 tz->sensor_data = NULL;
53624diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
53625index 9ea3d9d..53e8792 100644
53626--- a/drivers/thermal/x86_pkg_temp_thermal.c
53627+++ b/drivers/thermal/x86_pkg_temp_thermal.c
53628@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
53629 return NOTIFY_OK;
53630 }
53631
53632-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
53633+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
53634 .notifier_call = pkg_temp_thermal_cpu_callback,
53635 };
53636
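
From here on, the tty hunks all apply one more recurring conversion: struct tty_port.count becomes an atomic_t (via the tty header changes elsewhere in this patch), so every driver that touched the field directly is rewritten with atomic_inc()/atomic_dec_return()/atomic_read(). A sketch of the open/close idiom after the conversion, assuming that header change (demo names only):

        #include <linux/tty.h>

        static int demo_port_open(struct tty_port *port)
        {
                if (atomic_inc_return(&port->count) == 1)
                        return 1;       /* first opener: replaces port->count++ == 0 */
                return 0;
        }

        static int demo_port_close(struct tty_port *port)
        {
                /* last close: replaces --port->count == 0 */
                return atomic_dec_return(&port->count) == 0;
        }
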
53637diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53638index fd66f57..48e6376 100644
53639--- a/drivers/tty/cyclades.c
53640+++ b/drivers/tty/cyclades.c
53641@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53642 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53643 info->port.count);
53644 #endif
53645- info->port.count++;
53646+ atomic_inc(&info->port.count);
53647 #ifdef CY_DEBUG_COUNT
53648 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53649- current->pid, info->port.count);
53650+ current->pid, atomic_read(&info->port.count));
53651 #endif
53652
53653 /*
53654@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53655 for (j = 0; j < cy_card[i].nports; j++) {
53656 info = &cy_card[i].ports[j];
53657
53658- if (info->port.count) {
53659+ if (atomic_read(&info->port.count)) {
53660 /* XXX is the ldisc num worth this? */
53661 struct tty_struct *tty;
53662 struct tty_ldisc *ld;
53663diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53664index 4fcec1d..5a036f7 100644
53665--- a/drivers/tty/hvc/hvc_console.c
53666+++ b/drivers/tty/hvc/hvc_console.c
53667@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53668
53669 spin_lock_irqsave(&hp->port.lock, flags);
53670 /* Check and then increment for fast path open. */
53671- if (hp->port.count++ > 0) {
53672+ if (atomic_inc_return(&hp->port.count) > 1) {
53673 spin_unlock_irqrestore(&hp->port.lock, flags);
53674 hvc_kick();
53675 return 0;
53676@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53677
53678 spin_lock_irqsave(&hp->port.lock, flags);
53679
53680- if (--hp->port.count == 0) {
53681+ if (atomic_dec_return(&hp->port.count) == 0) {
53682 spin_unlock_irqrestore(&hp->port.lock, flags);
53683 /* We are done with the tty pointer now. */
53684 tty_port_tty_set(&hp->port, NULL);
53685@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53686 */
53687 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
53688 } else {
53689- if (hp->port.count < 0)
53690+ if (atomic_read(&hp->port.count) < 0)
53691 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
53692- hp->vtermno, hp->port.count);
53693+ hp->vtermno, atomic_read(&hp->port.count));
53694 spin_unlock_irqrestore(&hp->port.lock, flags);
53695 }
53696 }
53697@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
53698 * open->hangup case this can be called after the final close so prevent
53699 * that from happening for now.
53700 */
53701- if (hp->port.count <= 0) {
53702+ if (atomic_read(&hp->port.count) <= 0) {
53703 spin_unlock_irqrestore(&hp->port.lock, flags);
53704 return;
53705 }
53706
53707- hp->port.count = 0;
53708+ atomic_set(&hp->port.count, 0);
53709 spin_unlock_irqrestore(&hp->port.lock, flags);
53710 tty_port_tty_set(&hp->port, NULL);
53711
53712@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
53713 return -EPIPE;
53714
53715 /* FIXME what's this (unprotected) check for? */
53716- if (hp->port.count <= 0)
53717+ if (atomic_read(&hp->port.count) <= 0)
53718 return -EIO;
53719
53720 spin_lock_irqsave(&hp->lock, flags);
53721diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
53722index 81ff7e1..dfb7b71 100644
53723--- a/drivers/tty/hvc/hvcs.c
53724+++ b/drivers/tty/hvc/hvcs.c
53725@@ -83,6 +83,7 @@
53726 #include <asm/hvcserver.h>
53727 #include <asm/uaccess.h>
53728 #include <asm/vio.h>
53729+#include <asm/local.h>
53730
53731 /*
53732 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
53733@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
53734
53735 spin_lock_irqsave(&hvcsd->lock, flags);
53736
53737- if (hvcsd->port.count > 0) {
53738+ if (atomic_read(&hvcsd->port.count) > 0) {
53739 spin_unlock_irqrestore(&hvcsd->lock, flags);
53740 printk(KERN_INFO "HVCS: vterm state unchanged. "
53741 "The hvcs device node is still in use.\n");
53742@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
53743 }
53744 }
53745
53746- hvcsd->port.count = 0;
53747+ atomic_set(&hvcsd->port.count, 0);
53748 hvcsd->port.tty = tty;
53749 tty->driver_data = hvcsd;
53750
53751@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
53752 unsigned long flags;
53753
53754 spin_lock_irqsave(&hvcsd->lock, flags);
53755- hvcsd->port.count++;
53756+ atomic_inc(&hvcsd->port.count);
53757 hvcsd->todo_mask |= HVCS_SCHED_READ;
53758 spin_unlock_irqrestore(&hvcsd->lock, flags);
53759
53760@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53761 hvcsd = tty->driver_data;
53762
53763 spin_lock_irqsave(&hvcsd->lock, flags);
53764- if (--hvcsd->port.count == 0) {
53765+ if (atomic_dec_and_test(&hvcsd->port.count)) {
53766
53767 vio_disable_interrupts(hvcsd->vdev);
53768
53769@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53770
53771 free_irq(irq, hvcsd);
53772 return;
53773- } else if (hvcsd->port.count < 0) {
53774+ } else if (atomic_read(&hvcsd->port.count) < 0) {
53775 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
53776 " is missmanaged.\n",
53777- hvcsd->vdev->unit_address, hvcsd->port.count);
53778+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
53779 }
53780
53781 spin_unlock_irqrestore(&hvcsd->lock, flags);
53782@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53783
53784 spin_lock_irqsave(&hvcsd->lock, flags);
53785 /* Preserve this so that we know how many kref refs to put */
53786- temp_open_count = hvcsd->port.count;
53787+ temp_open_count = atomic_read(&hvcsd->port.count);
53788
53789 /*
53790 * Don't kref put inside the spinlock because the destruction
53791@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53792 tty->driver_data = NULL;
53793 hvcsd->port.tty = NULL;
53794
53795- hvcsd->port.count = 0;
53796+ atomic_set(&hvcsd->port.count, 0);
53797
53798 /* This will drop any buffered data on the floor which is OK in a hangup
53799 * scenario. */
53800@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
53801 * the middle of a write operation? This is a crummy place to do this
53802 * but we want to keep it all in the spinlock.
53803 */
53804- if (hvcsd->port.count <= 0) {
53805+ if (atomic_read(&hvcsd->port.count) <= 0) {
53806 spin_unlock_irqrestore(&hvcsd->lock, flags);
53807 return -ENODEV;
53808 }
53809@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
53810 {
53811 struct hvcs_struct *hvcsd = tty->driver_data;
53812
53813- if (!hvcsd || hvcsd->port.count <= 0)
53814+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
53815 return 0;
53816
53817 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
53818diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
53819index 4190199..06d5bfa 100644
53820--- a/drivers/tty/hvc/hvsi.c
53821+++ b/drivers/tty/hvc/hvsi.c
53822@@ -85,7 +85,7 @@ struct hvsi_struct {
53823 int n_outbuf;
53824 uint32_t vtermno;
53825 uint32_t virq;
53826- atomic_t seqno; /* HVSI packet sequence number */
53827+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
53828 uint16_t mctrl;
53829 uint8_t state; /* HVSI protocol state */
53830 uint8_t flags;
53831@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
53832
53833 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
53834 packet.hdr.len = sizeof(struct hvsi_query_response);
53835- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53836+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53837 packet.verb = VSV_SEND_VERSION_NUMBER;
53838 packet.u.version = HVSI_VERSION;
53839 packet.query_seqno = query_seqno+1;
53840@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
53841
53842 packet.hdr.type = VS_QUERY_PACKET_HEADER;
53843 packet.hdr.len = sizeof(struct hvsi_query);
53844- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53845+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53846 packet.verb = verb;
53847
53848 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
53849@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
53850 int wrote;
53851
53852 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
53853- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53854+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53855 packet.hdr.len = sizeof(struct hvsi_control);
53856 packet.verb = VSV_SET_MODEM_CTL;
53857 packet.mask = HVSI_TSDTR;
53858@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
53859 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
53860
53861 packet.hdr.type = VS_DATA_PACKET_HEADER;
53862- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53863+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53864 packet.hdr.len = count + sizeof(struct hvsi_header);
53865 memcpy(&packet.data, buf, count);
53866
53867@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
53868 struct hvsi_control packet __ALIGNED__;
53869
53870 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53871- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53872+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53873 packet.hdr.len = 6;
53874 packet.verb = VSV_CLOSE_PROTOCOL;
53875
53876@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53877
53878 tty_port_tty_set(&hp->port, tty);
53879 spin_lock_irqsave(&hp->lock, flags);
53880- hp->port.count++;
53881+ atomic_inc(&hp->port.count);
53882 atomic_set(&hp->seqno, 0);
53883 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53884 spin_unlock_irqrestore(&hp->lock, flags);
53885@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53886
53887 spin_lock_irqsave(&hp->lock, flags);
53888
53889- if (--hp->port.count == 0) {
53890+ if (atomic_dec_return(&hp->port.count) == 0) {
53891 tty_port_tty_set(&hp->port, NULL);
53892 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53893
53894@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53895
53896 spin_lock_irqsave(&hp->lock, flags);
53897 }
53898- } else if (hp->port.count < 0)
53899+ } else if (atomic_read(&hp->port.count) < 0)
53900 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53901- hp - hvsi_ports, hp->port.count);
53902+ hp - hvsi_ports, atomic_read(&hp->port.count));
53903
53904 spin_unlock_irqrestore(&hp->lock, flags);
53905 }
53906@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53907 tty_port_tty_set(&hp->port, NULL);
53908
53909 spin_lock_irqsave(&hp->lock, flags);
53910- hp->port.count = 0;
53911+ atomic_set(&hp->port.count, 0);
53912 hp->n_outbuf = 0;
53913 spin_unlock_irqrestore(&hp->lock, flags);
53914 }
53915diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53916index a270f04..7c77b5d 100644
53917--- a/drivers/tty/hvc/hvsi_lib.c
53918+++ b/drivers/tty/hvc/hvsi_lib.c
53919@@ -8,7 +8,7 @@
53920
53921 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53922 {
53923- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53924+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53925
53926 /* Assumes that always succeeds, works in practice */
53927 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53928@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53929
53930 /* Reset state */
53931 pv->established = 0;
53932- atomic_set(&pv->seqno, 0);
53933+ atomic_set_unchecked(&pv->seqno, 0);
53934
53935 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53936
53937diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53938index 345cebb..d5a1e9e 100644
53939--- a/drivers/tty/ipwireless/tty.c
53940+++ b/drivers/tty/ipwireless/tty.c
53941@@ -28,6 +28,7 @@
53942 #include <linux/tty_driver.h>
53943 #include <linux/tty_flip.h>
53944 #include <linux/uaccess.h>
53945+#include <asm/local.h>
53946
53947 #include "tty.h"
53948 #include "network.h"
53949@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53950 return -ENODEV;
53951
53952 mutex_lock(&tty->ipw_tty_mutex);
53953- if (tty->port.count == 0)
53954+ if (atomic_read(&tty->port.count) == 0)
53955 tty->tx_bytes_queued = 0;
53956
53957- tty->port.count++;
53958+ atomic_inc(&tty->port.count);
53959
53960 tty->port.tty = linux_tty;
53961 linux_tty->driver_data = tty;
53962@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53963
53964 static void do_ipw_close(struct ipw_tty *tty)
53965 {
53966- tty->port.count--;
53967-
53968- if (tty->port.count == 0) {
53969+ if (atomic_dec_return(&tty->port.count) == 0) {
53970 struct tty_struct *linux_tty = tty->port.tty;
53971
53972 if (linux_tty != NULL) {
53973@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53974 return;
53975
53976 mutex_lock(&tty->ipw_tty_mutex);
53977- if (tty->port.count == 0) {
53978+ if (atomic_read(&tty->port.count) == 0) {
53979 mutex_unlock(&tty->ipw_tty_mutex);
53980 return;
53981 }
53982@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53983
53984 mutex_lock(&tty->ipw_tty_mutex);
53985
53986- if (!tty->port.count) {
53987+ if (!atomic_read(&tty->port.count)) {
53988 mutex_unlock(&tty->ipw_tty_mutex);
53989 return;
53990 }
53991@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53992 return -ENODEV;
53993
53994 mutex_lock(&tty->ipw_tty_mutex);
53995- if (!tty->port.count) {
53996+ if (!atomic_read(&tty->port.count)) {
53997 mutex_unlock(&tty->ipw_tty_mutex);
53998 return -EINVAL;
53999 }
54000@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54001 if (!tty)
54002 return -ENODEV;
54003
54004- if (!tty->port.count)
54005+ if (!atomic_read(&tty->port.count))
54006 return -EINVAL;
54007
54008 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54009@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54010 if (!tty)
54011 return 0;
54012
54013- if (!tty->port.count)
54014+ if (!atomic_read(&tty->port.count))
54015 return 0;
54016
54017 return tty->tx_bytes_queued;
54018@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54019 if (!tty)
54020 return -ENODEV;
54021
54022- if (!tty->port.count)
54023+ if (!atomic_read(&tty->port.count))
54024 return -EINVAL;
54025
54026 return get_control_lines(tty);
54027@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54028 if (!tty)
54029 return -ENODEV;
54030
54031- if (!tty->port.count)
54032+ if (!atomic_read(&tty->port.count))
54033 return -EINVAL;
54034
54035 return set_control_lines(tty, set, clear);
54036@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54037 if (!tty)
54038 return -ENODEV;
54039
54040- if (!tty->port.count)
54041+ if (!atomic_read(&tty->port.count))
54042 return -EINVAL;
54043
54044 /* FIXME: Exactly how is the tty object locked here .. */
54045@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54046 * are gone */
54047 mutex_lock(&ttyj->ipw_tty_mutex);
54048 }
54049- while (ttyj->port.count)
54050+ while (atomic_read(&ttyj->port.count))
54051 do_ipw_close(ttyj);
54052 ipwireless_disassociate_network_ttys(network,
54053 ttyj->channel_idx);
54054diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54055index 14c54e0..1efd4f2 100644
54056--- a/drivers/tty/moxa.c
54057+++ b/drivers/tty/moxa.c
54058@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54059 }
54060
54061 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54062- ch->port.count++;
54063+ atomic_inc(&ch->port.count);
54064 tty->driver_data = ch;
54065 tty_port_tty_set(&ch->port, tty);
54066 mutex_lock(&ch->port.mutex);
54067diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54068index c434376..114ce13 100644
54069--- a/drivers/tty/n_gsm.c
54070+++ b/drivers/tty/n_gsm.c
54071@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54072 spin_lock_init(&dlci->lock);
54073 mutex_init(&dlci->mutex);
54074 dlci->fifo = &dlci->_fifo;
54075- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54076+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54077 kfree(dlci);
54078 return NULL;
54079 }
54080@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54081 struct gsm_dlci *dlci = tty->driver_data;
54082 struct tty_port *port = &dlci->port;
54083
54084- port->count++;
54085+ atomic_inc(&port->count);
54086 tty_port_tty_set(port, tty);
54087
54088 dlci->modem_rx = 0;
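The n_gsm change to the kfifo_alloc() test is behaviour-preserving: kfifo_alloc() returns 0 on success and a negative errno on failure, so testing the return for non-zero is equivalent to testing `< 0` and matches the usual kernel error idiom. A kernel-context sketch of the call:

    #include <linux/kfifo.h>
    #include <linux/slab.h>

    static struct kfifo demo_fifo;

    static int demo_init(void)
    {
            /* any non-zero return from kfifo_alloc() is an error */
            if (kfifo_alloc(&demo_fifo, 4096, GFP_KERNEL))
                    return -ENOMEM;
            return 0;
    }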
54089diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54090index cf6e0f2..4283167 100644
54091--- a/drivers/tty/n_tty.c
54092+++ b/drivers/tty/n_tty.c
54093@@ -116,7 +116,7 @@ struct n_tty_data {
54094 int minimum_to_wake;
54095
54096 /* consumer-published */
54097- size_t read_tail;
54098+ size_t read_tail __intentional_overflow(-1);
54099 size_t line_start;
54100
54101 /* protected by output lock */
54102@@ -2547,6 +2547,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54103 {
54104 *ops = tty_ldisc_N_TTY;
54105 ops->owner = NULL;
54106- ops->refcount = ops->flags = 0;
54107+ atomic_set(&ops->refcount, 0);
54108+ ops->flags = 0;
54109 }
54110 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54111diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54112index e72ee62..d977ad9 100644
54113--- a/drivers/tty/pty.c
54114+++ b/drivers/tty/pty.c
54115@@ -848,8 +848,10 @@ static void __init unix98_pty_init(void)
54116 panic("Couldn't register Unix98 pts driver");
54117
54118 /* Now create the /dev/ptmx special device */
54119+ pax_open_kernel();
54120 tty_default_fops(&ptmx_fops);
54121- ptmx_fops.open = ptmx_open;
54122+ *(void **)&ptmx_fops.open = ptmx_open;
54123+ pax_close_kernel();
54124
54125 cdev_init(&ptmx_cdev, &ptmx_fops);
54126 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
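The pty hunk is the canonical grsecurity recipe for patching a structure that the constify plugin has moved into read-only memory: open a short write window, store through a cast that strips the const qualifier, close the window. Annotated, the same three steps (the pax_* helpers exist only on PaX/grsecurity kernels):

    pax_open_kernel();                      /* lift kernel write protection */
    *(void **)&ptmx_fops.open = ptmx_open;  /* cast defeats the const check */
    pax_close_kernel();                     /* re-arm write protection */

A plain `ptmx_fops.open = ptmx_open;` would either fail to compile or fault at runtime once the structure is read-only.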
54127diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54128index c8dd8dc..dca6cfd 100644
54129--- a/drivers/tty/rocket.c
54130+++ b/drivers/tty/rocket.c
54131@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54132 tty->driver_data = info;
54133 tty_port_tty_set(port, tty);
54134
54135- if (port->count++ == 0) {
54136+ if (atomic_inc_return(&port->count) == 1) {
54137 atomic_inc(&rp_num_ports_open);
54138
54139 #ifdef ROCKET_DEBUG_OPEN
54140@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54141 #endif
54142 }
54143 #ifdef ROCKET_DEBUG_OPEN
54144- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54145+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
54146 #endif
54147
54148 /*
54149@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54150 spin_unlock_irqrestore(&info->port.lock, flags);
54151 return;
54152 }
54153- if (info->port.count)
54154+ if (atomic_read(&info->port.count))
54155 atomic_dec(&rp_num_ports_open);
54156 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54157 spin_unlock_irqrestore(&info->port.lock, flags);
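Note how rp_open()'s post-increment test is translated: `port->count++ == 0` (the old value was zero) becomes `atomic_inc_return(&port->count) == 1` (the new value is one). Both detect the first opener, but the atomic form is a single indivisible read-modify-write. A small sketch:

    #include <linux/atomic.h>
    #include <linux/printk.h>

    static atomic_t open_count = ATOMIC_INIT(0);

    static void demo_first_open(void)
    {
            /* was: if (count++ == 0); new value 1 means we got here first */
            if (atomic_inc_return(&open_count) == 1)
                    pr_info("first open: initialise hardware\n");
    }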
54158diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54159index aa28209..e08fb85 100644
54160--- a/drivers/tty/serial/ioc4_serial.c
54161+++ b/drivers/tty/serial/ioc4_serial.c
54162@@ -437,7 +437,7 @@ struct ioc4_soft {
54163 } is_intr_info[MAX_IOC4_INTR_ENTS];
54164
54165 /* Number of entries active in the above array */
54166- atomic_t is_num_intrs;
54167+ atomic_unchecked_t is_num_intrs;
54168 } is_intr_type[IOC4_NUM_INTR_TYPES];
54169
54170 /* is_ir_lock must be held while
54171@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54172 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54173 || (type == IOC4_OTHER_INTR_TYPE)));
54174
54175- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54176+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54177 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54178
54179 /* Save off the lower level interrupt handler */
54180@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54181
54182 soft = arg;
54183 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54184- num_intrs = (int)atomic_read(
54185+ num_intrs = (int)atomic_read_unchecked(
54186 &soft->is_intr_type[intr_type].is_num_intrs);
54187
54188 this_mir = this_ir = pending_intrs(soft, intr_type);
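is_num_intrs is a slot allocator, not an object lifetime refcount, so the ioc4 hunks convert it to atomic_unchecked_t: the _unchecked variants keep atomicity but opt out of PaX's REFCOUNT overflow trap, which would otherwise fire falsely on counters that may legitimately grow without guarding a free. The split, sketched (the _unchecked API is grsecurity-only):

    atomic_t refs;                  /* lifetime refcount: overflow is a bug */
    atomic_unchecked_t nslots;      /* bookkeeping: wrapping is not a UAF */

    atomic_inc(&refs);              /* saturates and warns on overflow */
    atomic_inc_unchecked(&nslots);  /* plain atomic increment */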
54189diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54190index 129dc5b..1da5bb8 100644
54191--- a/drivers/tty/serial/kgdb_nmi.c
54192+++ b/drivers/tty/serial/kgdb_nmi.c
54193@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54194 * I/O utilities that messages sent to the console will automatically
54195 * be displayed on the dbg_io.
54196 */
54197- dbg_io_ops->is_console = true;
54198+ pax_open_kernel();
54199+ *(int *)&dbg_io_ops->is_console = true;
54200+ pax_close_kernel();
54201
54202 return 0;
54203 }
54204diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54205index a260cde..6b2b5ce 100644
54206--- a/drivers/tty/serial/kgdboc.c
54207+++ b/drivers/tty/serial/kgdboc.c
54208@@ -24,8 +24,9 @@
54209 #define MAX_CONFIG_LEN 40
54210
54211 static struct kgdb_io kgdboc_io_ops;
54212+static struct kgdb_io kgdboc_io_ops_console;
54213
54214-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54215+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54216 static int configured = -1;
54217
54218 static char config[MAX_CONFIG_LEN];
54219@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54220 kgdboc_unregister_kbd();
54221 if (configured == 1)
54222 kgdb_unregister_io_module(&kgdboc_io_ops);
54223+ else if (configured == 2)
54224+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54225 }
54226
54227 static int configure_kgdboc(void)
54228@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54229 int err;
54230 char *cptr = config;
54231 struct console *cons;
54232+ int is_console = 0;
54233
54234 err = kgdboc_option_setup(config);
54235 if (err || !strlen(config) || isspace(config[0]))
54236 goto noconfig;
54237
54238 err = -ENODEV;
54239- kgdboc_io_ops.is_console = 0;
54240 kgdb_tty_driver = NULL;
54241
54242 kgdboc_use_kms = 0;
54243@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54244 int idx;
54245 if (cons->device && cons->device(cons, &idx) == p &&
54246 idx == tty_line) {
54247- kgdboc_io_ops.is_console = 1;
54248+ is_console = 1;
54249 break;
54250 }
54251 cons = cons->next;
54252@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54253 kgdb_tty_line = tty_line;
54254
54255 do_register:
54256- err = kgdb_register_io_module(&kgdboc_io_ops);
54257+ if (is_console) {
54258+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54259+ configured = 2;
54260+ } else {
54261+ err = kgdb_register_io_module(&kgdboc_io_ops);
54262+ configured = 1;
54263+ }
54264 if (err)
54265 goto noconfig;
54266
54267@@ -205,8 +214,6 @@ do_register:
54268 if (err)
54269 goto nmi_con_failed;
54270
54271- configured = 1;
54272-
54273 return 0;
54274
54275 nmi_con_failed:
54276@@ -223,7 +230,7 @@ noconfig:
54277 static int __init init_kgdboc(void)
54278 {
54279 /* Already configured? */
54280- if (configured == 1)
54281+ if (configured >= 1)
54282 return 0;
54283
54284 return configure_kgdboc();
54285@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54286 if (config[len - 1] == '\n')
54287 config[len - 1] = '\0';
54288
54289- if (configured == 1)
54290+ if (configured >= 1)
54291 cleanup_kgdboc();
54292
54293 /* Go and configure with the new params. */
54294@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54295 .post_exception = kgdboc_post_exp_handler,
54296 };
54297
54298+static struct kgdb_io kgdboc_io_ops_console = {
54299+ .name = "kgdboc",
54300+ .read_char = kgdboc_get_char,
54301+ .write_char = kgdboc_put_char,
54302+ .pre_exception = kgdboc_pre_exp_handler,
54303+ .post_exception = kgdboc_post_exp_handler,
54304+ .is_console = 1
54305+};
54306+
54307 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54308 /* This is only available if kgdboc is a built in for early debugging */
54309 static int __init kgdboc_early_init(char *opt)
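Rather than flipping kgdboc_io_ops.is_console at runtime, which forces the structure to stay writable, the kgdboc changes register one of two statically initialised kgdb_io instances and record the choice in `configured` (1 vs 2) so cleanup_kgdboc() can unregister the matching object. The design generalises to any method table; a self-contained sketch with hypothetical demo_ names:

    struct demo_io_ops {
            const char *name;
            int is_console;
    };

    /* two immutable variants; nothing is written into either at runtime */
    static const struct demo_io_ops demo_ops         = { "demo", 0 };
    static const struct demo_io_ops demo_ops_console = { "demo", 1 };

    static const struct demo_io_ops *demo_pick(int is_console)
    {
            return is_console ? &demo_ops_console : &demo_ops;
    }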
54310diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54311index b73889c..9f74f0a 100644
54312--- a/drivers/tty/serial/msm_serial.c
54313+++ b/drivers/tty/serial/msm_serial.c
54314@@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
54315 .cons = MSM_CONSOLE,
54316 };
54317
54318-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54319+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54320
54321 static const struct of_device_id msm_uartdm_table[] = {
54322 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54323@@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54324 line = pdev->id;
54325
54326 if (line < 0)
54327- line = atomic_inc_return(&msm_uart_next_id) - 1;
54328+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54329
54330 if (unlikely(line < 0 || line >= UART_NR))
54331 return -ENXIO;
54332diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54333index cf08876..711e0bf 100644
54334--- a/drivers/tty/serial/samsung.c
54335+++ b/drivers/tty/serial/samsung.c
54336@@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54337 ourport->tx_in_progress = 0;
54338 }
54339
54340+static int s3c64xx_serial_startup(struct uart_port *port);
54341 static int s3c24xx_serial_startup(struct uart_port *port)
54342 {
54343 struct s3c24xx_uart_port *ourport = to_ourport(port);
54344 int ret;
54345
54346+ /* Startup sequence is different for s3c64xx and higher SoC's */
54347+ if (s3c24xx_serial_has_interrupt_mask(port))
54348+ return s3c64xx_serial_startup(port);
54349+
54350 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54351 port, (unsigned long long)port->mapbase, port->membase);
54352
54353@@ -1697,10 +1702,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54354 /* setup info for port */
54355 port->dev = &platdev->dev;
54356
54357- /* Startup sequence is different for s3c64xx and higher SoC's */
54358- if (s3c24xx_serial_has_interrupt_mask(port))
54359- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54360-
54361 port->uartclk = 1;
54362
54363 if (cfg->uart_flags & UPF_CONS_FLOW) {
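The samsung hunk removes the last runtime store into s3c24xx_serial_ops (patching .startup for s3c64xx-class parts) and replaces it with a dispatch branch at the top of s3c24xx_serial_startup(), so the uart_ops table can be made const. The shape of the transformation, with hypothetical demo_ names:

    /* before: the method table itself was patched during init
     *     if (demo_has_interrupt_mask(port))
     *             demo_ops.startup = demo_s3c64xx_startup;
     *
     * after: one entry point dispatches, the table never changes */
    static int demo_startup(struct uart_port *port)
    {
            if (demo_has_interrupt_mask(port))
                    return demo_s3c64xx_startup(port);
            /* ... legacy s3c24xx path ... */
            return 0;
    }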
54364diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54365index 6a1055a..5ca9ad9 100644
54366--- a/drivers/tty/serial/serial_core.c
54367+++ b/drivers/tty/serial/serial_core.c
54368@@ -1377,7 +1377,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54369 state = drv->state + tty->index;
54370 port = &state->port;
54371 spin_lock_irq(&port->lock);
54372- --port->count;
54373+ atomic_dec(&port->count);
54374 spin_unlock_irq(&port->lock);
54375 return;
54376 }
54377@@ -1387,7 +1387,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54378
54379 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54380
54381- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54382+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54383 return;
54384
54385 /*
54386@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
54387 uart_flush_buffer(tty);
54388 uart_shutdown(tty, state);
54389 spin_lock_irqsave(&port->lock, flags);
54390- port->count = 0;
54391+ atomic_set(&port->count, 0);
54392 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54393 spin_unlock_irqrestore(&port->lock, flags);
54394 tty_port_tty_set(port, NULL);
54395@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54396 pr_debug("uart_open(%d) called\n", line);
54397
54398 spin_lock_irq(&port->lock);
54399- ++port->count;
54400+ atomic_inc(&port->count);
54401 spin_unlock_irq(&port->lock);
54402
54403 /*
54404diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54405index b799170..87dafd5 100644
54406--- a/drivers/tty/synclink.c
54407+++ b/drivers/tty/synclink.c
54408@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54409
54410 if (debug_level >= DEBUG_LEVEL_INFO)
54411 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54412- __FILE__,__LINE__, info->device_name, info->port.count);
54413+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54414
54415 if (tty_port_close_start(&info->port, tty, filp) == 0)
54416 goto cleanup;
54417@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54418 cleanup:
54419 if (debug_level >= DEBUG_LEVEL_INFO)
54420 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54421- tty->driver->name, info->port.count);
54422+ tty->driver->name, atomic_read(&info->port.count));
54423
54424 } /* end of mgsl_close() */
54425
54426@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54427
54428 mgsl_flush_buffer(tty);
54429 shutdown(info);
54430-
54431- info->port.count = 0;
54432+
54433+ atomic_set(&info->port.count, 0);
54434 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54435 info->port.tty = NULL;
54436
54437@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54438
54439 if (debug_level >= DEBUG_LEVEL_INFO)
54440 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54441- __FILE__,__LINE__, tty->driver->name, port->count );
54442+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54443
54444 spin_lock_irqsave(&info->irq_spinlock, flags);
54445- port->count--;
54446+ atomic_dec(&port->count);
54447 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54448 port->blocked_open++;
54449
54450@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54451
54452 if (debug_level >= DEBUG_LEVEL_INFO)
54453 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54454- __FILE__,__LINE__, tty->driver->name, port->count );
54455+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54456
54457 tty_unlock(tty);
54458 schedule();
54459@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54460
54461 /* FIXME: Racy on hangup during close wait */
54462 if (!tty_hung_up_p(filp))
54463- port->count++;
54464+ atomic_inc(&port->count);
54465 port->blocked_open--;
54466
54467 if (debug_level >= DEBUG_LEVEL_INFO)
54468 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54469- __FILE__,__LINE__, tty->driver->name, port->count );
54470+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54471
54472 if (!retval)
54473 port->flags |= ASYNC_NORMAL_ACTIVE;
54474@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54475
54476 if (debug_level >= DEBUG_LEVEL_INFO)
54477 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54478- __FILE__,__LINE__,tty->driver->name, info->port.count);
54479+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54480
54481 /* If port is closing, signal caller to try again */
54482 if (info->port.flags & ASYNC_CLOSING){
54483@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54484 spin_unlock_irqrestore(&info->netlock, flags);
54485 goto cleanup;
54486 }
54487- info->port.count++;
54488+ atomic_inc(&info->port.count);
54489 spin_unlock_irqrestore(&info->netlock, flags);
54490
54491- if (info->port.count == 1) {
54492+ if (atomic_read(&info->port.count) == 1) {
54493 /* 1st open on this device, init hardware */
54494 retval = startup(info);
54495 if (retval < 0)
54496@@ -3442,8 +3442,8 @@ cleanup:
54497 if (retval) {
54498 if (tty->count == 1)
54499 info->port.tty = NULL; /* tty layer will release tty struct */
54500- if(info->port.count)
54501- info->port.count--;
54502+ if (atomic_read(&info->port.count))
54503+ atomic_dec(&info->port.count);
54504 }
54505
54506 return retval;
54507@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54508 unsigned short new_crctype;
54509
54510 /* return error if TTY interface open */
54511- if (info->port.count)
54512+ if (atomic_read(&info->port.count))
54513 return -EBUSY;
54514
54515 switch (encoding)
54516@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
54517
54518 /* arbitrate between network and tty opens */
54519 spin_lock_irqsave(&info->netlock, flags);
54520- if (info->port.count != 0 || info->netcount != 0) {
54521+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54522 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54523 spin_unlock_irqrestore(&info->netlock, flags);
54524 return -EBUSY;
54525@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54526 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54527
54528 /* return error if TTY interface open */
54529- if (info->port.count)
54530+ if (atomic_read(&info->port.count))
54531 return -EBUSY;
54532
54533 if (cmd != SIOCWANDEV)
54534diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54535index 0e8c39b..e0cb171 100644
54536--- a/drivers/tty/synclink_gt.c
54537+++ b/drivers/tty/synclink_gt.c
54538@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54539 tty->driver_data = info;
54540 info->port.tty = tty;
54541
54542- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54543+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54544
54545 /* If port is closing, signal caller to try again */
54546 if (info->port.flags & ASYNC_CLOSING){
54547@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54548 mutex_unlock(&info->port.mutex);
54549 goto cleanup;
54550 }
54551- info->port.count++;
54552+ atomic_inc(&info->port.count);
54553 spin_unlock_irqrestore(&info->netlock, flags);
54554
54555- if (info->port.count == 1) {
54556+ if (atomic_read(&info->port.count) == 1) {
54557 /* 1st open on this device, init hardware */
54558 retval = startup(info);
54559 if (retval < 0) {
54560@@ -715,8 +715,8 @@ cleanup:
54561 if (retval) {
54562 if (tty->count == 1)
54563 info->port.tty = NULL; /* tty layer will release tty struct */
54564- if(info->port.count)
54565- info->port.count--;
54566+ if(atomic_read(&info->port.count))
54567+ atomic_dec(&info->port.count);
54568 }
54569
54570 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54571@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54572
54573 if (sanity_check(info, tty->name, "close"))
54574 return;
54575- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54576+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54577
54578 if (tty_port_close_start(&info->port, tty, filp) == 0)
54579 goto cleanup;
54580@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54581 tty_port_close_end(&info->port, tty);
54582 info->port.tty = NULL;
54583 cleanup:
54584- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54585+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54586 }
54587
54588 static void hangup(struct tty_struct *tty)
54589@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54590 shutdown(info);
54591
54592 spin_lock_irqsave(&info->port.lock, flags);
54593- info->port.count = 0;
54594+ atomic_set(&info->port.count, 0);
54595 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54596 info->port.tty = NULL;
54597 spin_unlock_irqrestore(&info->port.lock, flags);
54598@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54599 unsigned short new_crctype;
54600
54601 /* return error if TTY interface open */
54602- if (info->port.count)
54603+ if (atomic_read(&info->port.count))
54604 return -EBUSY;
54605
54606 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54607@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54608
54609 /* arbitrate between network and tty opens */
54610 spin_lock_irqsave(&info->netlock, flags);
54611- if (info->port.count != 0 || info->netcount != 0) {
54612+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54613 DBGINFO(("%s hdlc_open busy\n", dev->name));
54614 spin_unlock_irqrestore(&info->netlock, flags);
54615 return -EBUSY;
54616@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54617 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54618
54619 /* return error if TTY interface open */
54620- if (info->port.count)
54621+ if (atomic_read(&info->port.count))
54622 return -EBUSY;
54623
54624 if (cmd != SIOCWANDEV)
54625@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54626 if (port == NULL)
54627 continue;
54628 spin_lock(&port->lock);
54629- if ((port->port.count || port->netcount) &&
54630+ if ((atomic_read(&port->port.count) || port->netcount) &&
54631 port->pending_bh && !port->bh_running &&
54632 !port->bh_requested) {
54633 DBGISR(("%s bh queued\n", port->device_name));
54634@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54635 add_wait_queue(&port->open_wait, &wait);
54636
54637 spin_lock_irqsave(&info->lock, flags);
54638- port->count--;
54639+ atomic_dec(&port->count);
54640 spin_unlock_irqrestore(&info->lock, flags);
54641 port->blocked_open++;
54642
54643@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54644 remove_wait_queue(&port->open_wait, &wait);
54645
54646 if (!tty_hung_up_p(filp))
54647- port->count++;
54648+ atomic_inc(&port->count);
54649 port->blocked_open--;
54650
54651 if (!retval)
54652diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54653index c3f9091..abe4601 100644
54654--- a/drivers/tty/synclinkmp.c
54655+++ b/drivers/tty/synclinkmp.c
54656@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54657
54658 if (debug_level >= DEBUG_LEVEL_INFO)
54659 printk("%s(%d):%s open(), old ref count = %d\n",
54660- __FILE__,__LINE__,tty->driver->name, info->port.count);
54661+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54662
54663 /* If port is closing, signal caller to try again */
54664 if (info->port.flags & ASYNC_CLOSING){
54665@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54666 spin_unlock_irqrestore(&info->netlock, flags);
54667 goto cleanup;
54668 }
54669- info->port.count++;
54670+ atomic_inc(&info->port.count);
54671 spin_unlock_irqrestore(&info->netlock, flags);
54672
54673- if (info->port.count == 1) {
54674+ if (atomic_read(&info->port.count) == 1) {
54675 /* 1st open on this device, init hardware */
54676 retval = startup(info);
54677 if (retval < 0)
54678@@ -796,8 +796,8 @@ cleanup:
54679 if (retval) {
54680 if (tty->count == 1)
54681 info->port.tty = NULL; /* tty layer will release tty struct */
54682- if(info->port.count)
54683- info->port.count--;
54684+ if(atomic_read(&info->port.count))
54685+ atomic_dec(&info->port.count);
54686 }
54687
54688 return retval;
54689@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54690
54691 if (debug_level >= DEBUG_LEVEL_INFO)
54692 printk("%s(%d):%s close() entry, count=%d\n",
54693- __FILE__,__LINE__, info->device_name, info->port.count);
54694+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54695
54696 if (tty_port_close_start(&info->port, tty, filp) == 0)
54697 goto cleanup;
54698@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54699 cleanup:
54700 if (debug_level >= DEBUG_LEVEL_INFO)
54701 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
54702- tty->driver->name, info->port.count);
54703+ tty->driver->name, atomic_read(&info->port.count));
54704 }
54705
54706 /* Called by tty_hangup() when a hangup is signaled.
54707@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
54708 shutdown(info);
54709
54710 spin_lock_irqsave(&info->port.lock, flags);
54711- info->port.count = 0;
54712+ atomic_set(&info->port.count, 0);
54713 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54714 info->port.tty = NULL;
54715 spin_unlock_irqrestore(&info->port.lock, flags);
54716@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54717 unsigned short new_crctype;
54718
54719 /* return error if TTY interface open */
54720- if (info->port.count)
54721+ if (atomic_read(&info->port.count))
54722 return -EBUSY;
54723
54724 switch (encoding)
54725@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
54726
54727 /* arbitrate between network and tty opens */
54728 spin_lock_irqsave(&info->netlock, flags);
54729- if (info->port.count != 0 || info->netcount != 0) {
54730+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54731 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54732 spin_unlock_irqrestore(&info->netlock, flags);
54733 return -EBUSY;
54734@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54735 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54736
54737 /* return error if TTY interface open */
54738- if (info->port.count)
54739+ if (atomic_read(&info->port.count))
54740 return -EBUSY;
54741
54742 if (cmd != SIOCWANDEV)
54743@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
54744 * do not request bottom half processing if the
54745 * device is not open in a normal mode.
54746 */
54747- if ( port && (port->port.count || port->netcount) &&
54748+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
54749 port->pending_bh && !port->bh_running &&
54750 !port->bh_requested ) {
54751 if ( debug_level >= DEBUG_LEVEL_ISR )
54752@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54753
54754 if (debug_level >= DEBUG_LEVEL_INFO)
54755 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
54756- __FILE__,__LINE__, tty->driver->name, port->count );
54757+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54758
54759 spin_lock_irqsave(&info->lock, flags);
54760- port->count--;
54761+ atomic_dec(&port->count);
54762 spin_unlock_irqrestore(&info->lock, flags);
54763 port->blocked_open++;
54764
54765@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54766
54767 if (debug_level >= DEBUG_LEVEL_INFO)
54768 printk("%s(%d):%s block_til_ready() count=%d\n",
54769- __FILE__,__LINE__, tty->driver->name, port->count );
54770+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54771
54772 tty_unlock(tty);
54773 schedule();
54774@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54775 set_current_state(TASK_RUNNING);
54776 remove_wait_queue(&port->open_wait, &wait);
54777 if (!tty_hung_up_p(filp))
54778- port->count++;
54779+ atomic_inc(&port->count);
54780 port->blocked_open--;
54781
54782 if (debug_level >= DEBUG_LEVEL_INFO)
54783 printk("%s(%d):%s block_til_ready() after, count=%d\n",
54784- __FILE__,__LINE__, tty->driver->name, port->count );
54785+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54786
54787 if (!retval)
54788 port->flags |= ASYNC_NORMAL_ACTIVE;
54789diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
54790index 259a4d5..9b0c9e7 100644
54791--- a/drivers/tty/sysrq.c
54792+++ b/drivers/tty/sysrq.c
54793@@ -1085,7 +1085,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
54794 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
54795 size_t count, loff_t *ppos)
54796 {
54797- if (count) {
54798+ if (count && capable(CAP_SYS_ADMIN)) {
54799 char c;
54800
54801 if (get_user(c, buf))
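The sysrq hunk makes /proc/sysrq-trigger honour CAP_SYS_ADMIN: unprivileged writes are still accepted (the byte count is returned) but the key is never dispatched. A hedged sketch of a write handler with the same gate, using a hypothetical demo_handle_key() dispatcher:

    static ssize_t demo_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
    {
            if (count && capable(CAP_SYS_ADMIN)) {
                    char c;

                    if (get_user(c, buf))
                            return -EFAULT;
                    demo_handle_key(c);     /* hypothetical dispatcher */
            }
            return count;                   /* "succeeds" either way */
    }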
54802diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
54803index 2bb4dfc..a7f6e86 100644
54804--- a/drivers/tty/tty_io.c
54805+++ b/drivers/tty/tty_io.c
54806@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
54807
54808 void tty_default_fops(struct file_operations *fops)
54809 {
54810- *fops = tty_fops;
54811+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
54812 }
54813
54814 /*
54815diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
54816index 3737f55..7cef448 100644
54817--- a/drivers/tty/tty_ldisc.c
54818+++ b/drivers/tty/tty_ldisc.c
54819@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
54820 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54821 tty_ldiscs[disc] = new_ldisc;
54822 new_ldisc->num = disc;
54823- new_ldisc->refcount = 0;
54824+ atomic_set(&new_ldisc->refcount, 0);
54825 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54826
54827 return ret;
54828@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
54829 return -EINVAL;
54830
54831 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54832- if (tty_ldiscs[disc]->refcount)
54833+ if (atomic_read(&tty_ldiscs[disc]->refcount))
54834 ret = -EBUSY;
54835 else
54836 tty_ldiscs[disc] = NULL;
54837@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
54838 if (ldops) {
54839 ret = ERR_PTR(-EAGAIN);
54840 if (try_module_get(ldops->owner)) {
54841- ldops->refcount++;
54842+ atomic_inc(&ldops->refcount);
54843 ret = ldops;
54844 }
54845 }
54846@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
54847 unsigned long flags;
54848
54849 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54850- ldops->refcount--;
54851+ atomic_dec(&ldops->refcount);
54852 module_put(ldops->owner);
54853 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54854 }
54855diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
54856index 40b31835..94d92ae 100644
54857--- a/drivers/tty/tty_port.c
54858+++ b/drivers/tty/tty_port.c
54859@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
54860 unsigned long flags;
54861
54862 spin_lock_irqsave(&port->lock, flags);
54863- port->count = 0;
54864+ atomic_set(&port->count, 0);
54865 port->flags &= ~ASYNC_NORMAL_ACTIVE;
54866 tty = port->tty;
54867 if (tty)
54868@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54869
54870 /* The port lock protects the port counts */
54871 spin_lock_irqsave(&port->lock, flags);
54872- port->count--;
54873+ atomic_dec(&port->count);
54874 port->blocked_open++;
54875 spin_unlock_irqrestore(&port->lock, flags);
54876
54877@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54878 we must not mess that up further */
54879 spin_lock_irqsave(&port->lock, flags);
54880 if (!tty_hung_up_p(filp))
54881- port->count++;
54882+ atomic_inc(&port->count);
54883 port->blocked_open--;
54884 if (retval == 0)
54885 port->flags |= ASYNC_NORMAL_ACTIVE;
54886@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
54887 return 0;
54888
54889 spin_lock_irqsave(&port->lock, flags);
54890- if (tty->count == 1 && port->count != 1) {
54891+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54892 printk(KERN_WARNING
54893 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54894- port->count);
54895- port->count = 1;
54896+ atomic_read(&port->count));
54897+ atomic_set(&port->count, 1);
54898 }
54899- if (--port->count < 0) {
54900+ if (atomic_dec_return(&port->count) < 0) {
54901 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54902- port->count);
54903- port->count = 0;
54904+ atomic_read(&port->count));
54905+ atomic_set(&port->count, 0);
54906 }
54907
54908- if (port->count) {
54909+ if (atomic_read(&port->count)) {
54910 spin_unlock_irqrestore(&port->lock, flags);
54911 return 0;
54912 }
54913@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54914 struct file *filp)
54915 {
54916 spin_lock_irq(&port->lock);
54917- ++port->count;
54918+ atomic_inc(&port->count);
54919 spin_unlock_irq(&port->lock);
54920 tty_port_tty_set(port, tty);
54921
54922diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54923index 8a89f6e..50b32af 100644
54924--- a/drivers/tty/vt/keyboard.c
54925+++ b/drivers/tty/vt/keyboard.c
54926@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54927 kbd->kbdmode == VC_OFF) &&
54928 value != KVAL(K_SAK))
54929 return; /* SAK is allowed even in raw mode */
54930+
54931+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54932+ {
54933+ void *func = fn_handler[value];
54934+ if (func == fn_show_state || func == fn_show_ptregs ||
54935+ func == fn_show_mem)
54936+ return;
54937+ }
54938+#endif
54939+
54940 fn_handler[value](vc);
54941 }
54942
54943@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54944 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54945 return -EFAULT;
54946
54947- if (!capable(CAP_SYS_TTY_CONFIG))
54948- perm = 0;
54949-
54950 switch (cmd) {
54951 case KDGKBENT:
54952 /* Ensure another thread doesn't free it under us */
54953@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54954 spin_unlock_irqrestore(&kbd_event_lock, flags);
54955 return put_user(val, &user_kbe->kb_value);
54956 case KDSKBENT:
54957+ if (!capable(CAP_SYS_TTY_CONFIG))
54958+ perm = 0;
54959+
54960 if (!perm)
54961 return -EPERM;
54962 if (!i && v == K_NOSUCHMAP) {
54963@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54964 int i, j, k;
54965 int ret;
54966
54967- if (!capable(CAP_SYS_TTY_CONFIG))
54968- perm = 0;
54969-
54970 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54971 if (!kbs) {
54972 ret = -ENOMEM;
54973@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54974 kfree(kbs);
54975 return ((p && *p) ? -EOVERFLOW : 0);
54976 case KDSKBSENT:
54977+ if (!capable(CAP_SYS_TTY_CONFIG))
54978+ perm = 0;
54979+
54980 if (!perm) {
54981 ret = -EPERM;
54982 goto reterr;
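In both keyboard ioctl helpers the CAP_SYS_TTY_CONFIG test moves from the function prologue into the set-entry cases (KDSKBENT, KDSKBSENT), so reading keymap entries no longer demands the capability while writing still does. The per-command layout, sketched with hypothetical demo_ helpers:

    switch (cmd) {
    case KDGKBENT:                          /* read: unprivileged */
            return demo_get_entry(user_kbe);
    case KDSKBENT:                          /* write: privileged */
            if (!capable(CAP_SYS_TTY_CONFIG))
                    perm = 0;
            if (!perm)
                    return -EPERM;
            return demo_set_entry(user_kbe);
    }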
54983diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54984index 6276f13..84f2449 100644
54985--- a/drivers/uio/uio.c
54986+++ b/drivers/uio/uio.c
54987@@ -25,6 +25,7 @@
54988 #include <linux/kobject.h>
54989 #include <linux/cdev.h>
54990 #include <linux/uio_driver.h>
54991+#include <asm/local.h>
54992
54993 #define UIO_MAX_DEVICES (1U << MINORBITS)
54994
54995@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
54996 struct device_attribute *attr, char *buf)
54997 {
54998 struct uio_device *idev = dev_get_drvdata(dev);
54999- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55000+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55001 }
55002 static DEVICE_ATTR_RO(event);
55003
55004@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
55005 {
55006 struct uio_device *idev = info->uio_dev;
55007
55008- atomic_inc(&idev->event);
55009+ atomic_inc_unchecked(&idev->event);
55010 wake_up_interruptible(&idev->wait);
55011 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55012 }
55013@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55014 }
55015
55016 listener->dev = idev;
55017- listener->event_count = atomic_read(&idev->event);
55018+ listener->event_count = atomic_read_unchecked(&idev->event);
55019 filep->private_data = listener;
55020
55021 if (idev->info->open) {
55022@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55023 return -EIO;
55024
55025 poll_wait(filep, &idev->wait, wait);
55026- if (listener->event_count != atomic_read(&idev->event))
55027+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55028 return POLLIN | POLLRDNORM;
55029 return 0;
55030 }
55031@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55032 do {
55033 set_current_state(TASK_INTERRUPTIBLE);
55034
55035- event_count = atomic_read(&idev->event);
55036+ event_count = atomic_read_unchecked(&idev->event);
55037 if (event_count != listener->event_count) {
55038 if (copy_to_user(buf, &event_count, count))
55039 retval = -EFAULT;
55040@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55041 static int uio_find_mem_index(struct vm_area_struct *vma)
55042 {
55043 struct uio_device *idev = vma->vm_private_data;
55044+ unsigned long size;
55045
55046 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55047- if (idev->info->mem[vma->vm_pgoff].size == 0)
55048+ size = idev->info->mem[vma->vm_pgoff].size;
55049+ if (size == 0)
55050+ return -1;
55051+ if (vma->vm_end - vma->vm_start > size)
55052 return -1;
55053 return (int)vma->vm_pgoff;
55054 }
55055@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
55056 idev->owner = owner;
55057 idev->info = info;
55058 init_waitqueue_head(&idev->wait);
55059- atomic_set(&idev->event, 0);
55060+ atomic_set_unchecked(&idev->event, 0);
55061
55062 ret = uio_get_minor(idev);
55063 if (ret)
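The uio changes do two things: the event counter becomes atomic_unchecked_t (it only feeds poll/read wakeups), and uio_find_mem_index() now also rejects a vma that is longer than the region backing it, so userspace cannot mmap past the end of a UIO mapping. The strengthened check, essentially as in the hunk:

    static int demo_find_mem_index(struct vm_area_struct *vma,
                                   struct uio_device *idev)
    {
            unsigned long size;

            if (vma->vm_pgoff >= MAX_UIO_MAPS)
                    return -1;
            size = idev->info->mem[vma->vm_pgoff].size;
            if (size == 0)
                    return -1;      /* unused slot */
            if (vma->vm_end - vma->vm_start > size)
                    return -1;      /* vma larger than the region */
            return (int)vma->vm_pgoff;
    }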
55064diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55065index 813d4d3..a71934f 100644
55066--- a/drivers/usb/atm/cxacru.c
55067+++ b/drivers/usb/atm/cxacru.c
55068@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55069 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55070 if (ret < 2)
55071 return -EINVAL;
55072- if (index < 0 || index > 0x7f)
55073+ if (index > 0x7f)
55074 return -EINVAL;
55075 pos += tmp;
55076
55077diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55078index dada014..1d0d517 100644
55079--- a/drivers/usb/atm/usbatm.c
55080+++ b/drivers/usb/atm/usbatm.c
55081@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55082 if (printk_ratelimit())
55083 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55084 __func__, vpi, vci);
55085- atomic_inc(&vcc->stats->rx_err);
55086+ atomic_inc_unchecked(&vcc->stats->rx_err);
55087 return;
55088 }
55089
55090@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55091 if (length > ATM_MAX_AAL5_PDU) {
55092 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55093 __func__, length, vcc);
55094- atomic_inc(&vcc->stats->rx_err);
55095+ atomic_inc_unchecked(&vcc->stats->rx_err);
55096 goto out;
55097 }
55098
55099@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55100 if (sarb->len < pdu_length) {
55101 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55102 __func__, pdu_length, sarb->len, vcc);
55103- atomic_inc(&vcc->stats->rx_err);
55104+ atomic_inc_unchecked(&vcc->stats->rx_err);
55105 goto out;
55106 }
55107
55108 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55109 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55110 __func__, vcc);
55111- atomic_inc(&vcc->stats->rx_err);
55112+ atomic_inc_unchecked(&vcc->stats->rx_err);
55113 goto out;
55114 }
55115
55116@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55117 if (printk_ratelimit())
55118 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55119 __func__, length);
55120- atomic_inc(&vcc->stats->rx_drop);
55121+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55122 goto out;
55123 }
55124
55125@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55126
55127 vcc->push(vcc, skb);
55128
55129- atomic_inc(&vcc->stats->rx);
55130+ atomic_inc_unchecked(&vcc->stats->rx);
55131 out:
55132 skb_trim(sarb, 0);
55133 }
55134@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55135 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55136
55137 usbatm_pop(vcc, skb);
55138- atomic_inc(&vcc->stats->tx);
55139+ atomic_inc_unchecked(&vcc->stats->tx);
55140
55141 skb = skb_dequeue(&instance->sndqueue);
55142 }
55143@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55144 if (!left--)
55145 return sprintf(page,
55146 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55147- atomic_read(&atm_dev->stats.aal5.tx),
55148- atomic_read(&atm_dev->stats.aal5.tx_err),
55149- atomic_read(&atm_dev->stats.aal5.rx),
55150- atomic_read(&atm_dev->stats.aal5.rx_err),
55151- atomic_read(&atm_dev->stats.aal5.rx_drop));
55152+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55153+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55154+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55155+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55156+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55157
55158 if (!left--) {
55159 if (instance->disconnected)
55160diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55161index 2a3bbdf..91d72cf 100644
55162--- a/drivers/usb/core/devices.c
55163+++ b/drivers/usb/core/devices.c
55164@@ -126,7 +126,7 @@ static const char format_endpt[] =
55165 * time it gets called.
55166 */
55167 static struct device_connect_event {
55168- atomic_t count;
55169+ atomic_unchecked_t count;
55170 wait_queue_head_t wait;
55171 } device_event = {
55172 .count = ATOMIC_INIT(1),
55173@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55174
55175 void usbfs_conn_disc_event(void)
55176 {
55177- atomic_add(2, &device_event.count);
55178+ atomic_add_unchecked(2, &device_event.count);
55179 wake_up(&device_event.wait);
55180 }
55181
55182@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55183
55184 poll_wait(file, &device_event.wait, wait);
55185
55186- event_count = atomic_read(&device_event.count);
55187+ event_count = atomic_read_unchecked(&device_event.count);
55188 if (file->f_version != event_count) {
55189 file->f_version = event_count;
55190 return POLLIN | POLLRDNORM;
55191diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55192index 1163553..f292679 100644
55193--- a/drivers/usb/core/devio.c
55194+++ b/drivers/usb/core/devio.c
55195@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55196 struct usb_dev_state *ps = file->private_data;
55197 struct usb_device *dev = ps->dev;
55198 ssize_t ret = 0;
55199- unsigned len;
55200+ size_t len;
55201 loff_t pos;
55202 int i;
55203
55204@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55205 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55206 struct usb_config_descriptor *config =
55207 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55208- unsigned int length = le16_to_cpu(config->wTotalLength);
55209+ size_t length = le16_to_cpu(config->wTotalLength);
55210
55211 if (*ppos < pos + length) {
55212
55213 /* The descriptor may claim to be longer than it
55214 * really is. Here is the actual allocated length. */
55215- unsigned alloclen =
55216+ size_t alloclen =
55217 le16_to_cpu(dev->config[i].desc.wTotalLength);
55218
55219- len = length - (*ppos - pos);
55220+ len = length + pos - *ppos;
55221 if (len > nbytes)
55222 len = nbytes;
55223
55224 /* Simply don't write (skip over) unallocated parts */
55225 if (alloclen > (*ppos - pos)) {
55226- alloclen -= (*ppos - pos);
55227+ alloclen = alloclen + pos - *ppos;
55228 if (copy_to_user(buf,
55229 dev->rawdescriptors[i] + (*ppos - pos),
55230 min(len, alloclen))) {
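The devio.c hunk widens len and alloclen from unsigned int to size_t and reassociates the offset arithmetic: `length - (*ppos - pos)` and `length + pos - *ppos` are algebraically identical, but the second forms no intermediate difference and keeps every subterm non-negative, the style grsecurity's size_overflow plugin reasons about most cleanly. A contrived, runnable demonstration of the truncation class the widening guards against (real descriptor lengths are 16-bit, so this is purely defensive):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            long long length = 0x100000014LL;           /* contrived: > 4 GiB */
            unsigned int len32 = (unsigned int)length;  /* truncates to 0x14 */
            size_t len = (size_t)length;                /* full value on 64-bit */

            printf("len32=%u len=%zu\n", len32, len);
            return 0;
    }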
55231diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55232index 45a915c..09f9735 100644
55233--- a/drivers/usb/core/hcd.c
55234+++ b/drivers/usb/core/hcd.c
55235@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55236 */
55237 usb_get_urb(urb);
55238 atomic_inc(&urb->use_count);
55239- atomic_inc(&urb->dev->urbnum);
55240+ atomic_inc_unchecked(&urb->dev->urbnum);
55241 usbmon_urb_submit(&hcd->self, urb);
55242
55243 /* NOTE requirements on root-hub callers (usbfs and the hub
55244@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55245 urb->hcpriv = NULL;
55246 INIT_LIST_HEAD(&urb->urb_list);
55247 atomic_dec(&urb->use_count);
55248- atomic_dec(&urb->dev->urbnum);
55249+ atomic_dec_unchecked(&urb->dev->urbnum);
55250 if (atomic_read(&urb->reject))
55251 wake_up(&usb_kill_urb_queue);
55252 usb_put_urb(urb);
55253diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55254index 3b71516..1f26579 100644
55255--- a/drivers/usb/core/hub.c
55256+++ b/drivers/usb/core/hub.c
55257@@ -26,6 +26,7 @@
55258 #include <linux/mutex.h>
55259 #include <linux/random.h>
55260 #include <linux/pm_qos.h>
55261+#include <linux/grsecurity.h>
55262
55263 #include <asm/uaccess.h>
55264 #include <asm/byteorder.h>
55265@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55266 goto done;
55267 return;
55268 }
55269+
55270+ if (gr_handle_new_usb())
55271+ goto done;
55272+
55273 if (hub_is_superspeed(hub->hdev))
55274 unit_load = 150;
55275 else
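The hub.c hook wires in grsecurity's deny-new-USB feature: once the corresponding sysctl toggle is set, gr_handle_new_usb() returns non-zero and hub_port_connect() bails out before the device is enumerated. The helper's body is not part of this hunk; a hedged sketch of what such a hook plausibly looks like (the toggle name is an assumption):

    extern int grsec_deny_new_usb;      /* sysctl-backed toggle (assumed) */

    static int demo_handle_new_usb(void)
    {
            if (grsec_deny_new_usb) {
                    printk(KERN_ALERT "denied insert of new USB device\n");
                    return 1;           /* caller skips enumeration */
            }
            return 0;
    }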
55276diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55277index f368d20..0c30ac5 100644
55278--- a/drivers/usb/core/message.c
55279+++ b/drivers/usb/core/message.c
55280@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55281 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55282 * error number.
55283 */
55284-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55285+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55286 __u8 requesttype, __u16 value, __u16 index, void *data,
55287 __u16 size, int timeout)
55288 {
55289@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55290 * If successful, 0. Otherwise a negative error number. The number of actual
55291 * bytes transferred will be stored in the @actual_length parameter.
55292 */
55293-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55294+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55295 void *data, int len, int *actual_length, int timeout)
55296 {
55297 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55298@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55299 * bytes transferred will be stored in the @actual_length parameter.
55300 *
55301 */
55302-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55303+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55304 void *data, int len, int *actual_length, int timeout)
55305 {
55306 struct urb *urb;
55307diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55308index d269738..7340cd7 100644
55309--- a/drivers/usb/core/sysfs.c
55310+++ b/drivers/usb/core/sysfs.c
55311@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55312 struct usb_device *udev;
55313
55314 udev = to_usb_device(dev);
55315- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55316+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55317 }
55318 static DEVICE_ATTR_RO(urbnum);
55319
55320diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55321index b1fb9ae..4224885 100644
55322--- a/drivers/usb/core/usb.c
55323+++ b/drivers/usb/core/usb.c
55324@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55325 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55326 dev->state = USB_STATE_ATTACHED;
55327 dev->lpm_disable_count = 1;
55328- atomic_set(&dev->urbnum, 0);
55329+ atomic_set_unchecked(&dev->urbnum, 0);
55330
55331 INIT_LIST_HEAD(&dev->ep0.urb_list);
55332 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55333diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55334index 8cfc319..4868255 100644
55335--- a/drivers/usb/early/ehci-dbgp.c
55336+++ b/drivers/usb/early/ehci-dbgp.c
55337@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55338
55339 #ifdef CONFIG_KGDB
55340 static struct kgdb_io kgdbdbgp_io_ops;
55341-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55342+static struct kgdb_io kgdbdbgp_io_ops_console;
55343+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55344 #else
55345 #define dbgp_kgdb_mode (0)
55346 #endif
55347@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55348 .write_char = kgdbdbgp_write_char,
55349 };
55350
55351+static struct kgdb_io kgdbdbgp_io_ops_console = {
55352+ .name = "kgdbdbgp",
55353+ .read_char = kgdbdbgp_read_char,
55354+ .write_char = kgdbdbgp_write_char,
55355+ .is_console = 1
55356+};
55357+
55358 static int kgdbdbgp_wait_time;
55359
55360 static int __init kgdbdbgp_parse_config(char *str)
55361@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55362 ptr++;
55363 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55364 }
55365- kgdb_register_io_module(&kgdbdbgp_io_ops);
55366- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55367+ if (early_dbgp_console.index != -1)
55368+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55369+ else
55370+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55371
55372 return 0;
55373 }
55374diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55375index 9719abf..789d5d9 100644
55376--- a/drivers/usb/gadget/function/f_uac1.c
55377+++ b/drivers/usb/gadget/function/f_uac1.c
55378@@ -14,6 +14,7 @@
55379 #include <linux/module.h>
55380 #include <linux/device.h>
55381 #include <linux/atomic.h>
55382+#include <linux/module.h>
55383
55384 #include "u_uac1.h"
55385
55386diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55387index 491082a..dfd7d17 100644
55388--- a/drivers/usb/gadget/function/u_serial.c
55389+++ b/drivers/usb/gadget/function/u_serial.c
55390@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55391 spin_lock_irq(&port->port_lock);
55392
55393 /* already open? Great. */
55394- if (port->port.count) {
55395+ if (atomic_read(&port->port.count)) {
55396 status = 0;
55397- port->port.count++;
55398+ atomic_inc(&port->port.count);
55399
55400 /* currently opening/closing? wait ... */
55401 } else if (port->openclose) {
55402@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55403 tty->driver_data = port;
55404 port->port.tty = tty;
55405
55406- port->port.count = 1;
55407+ atomic_set(&port->port.count, 1);
55408 port->openclose = false;
55409
55410 /* if connected, start the I/O stream */
55411@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55412
55413 spin_lock_irq(&port->port_lock);
55414
55415- if (port->port.count != 1) {
55416- if (port->port.count == 0)
55417+ if (atomic_read(&port->port.count) != 1) {
55418+ if (atomic_read(&port->port.count) == 0)
55419 WARN_ON(1);
55420 else
55421- --port->port.count;
55422+ atomic_dec(&port->port.count);
55423 goto exit;
55424 }
55425
55426@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55427 * and sleep if necessary
55428 */
55429 port->openclose = true;
55430- port->port.count = 0;
55431+ atomic_set(&port->port.count, 0);
55432
55433 gser = port->port_usb;
55434 if (gser && gser->disconnect)
55435@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
55436 int cond;
55437
55438 spin_lock_irq(&port->port_lock);
55439- cond = (port->port.count == 0) && !port->openclose;
55440+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55441 spin_unlock_irq(&port->port_lock);
55442 return cond;
55443 }
55444@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55445 /* if it's already open, start I/O ... and notify the serial
55446 * protocol about open/close status (connect/disconnect).
55447 */
55448- if (port->port.count) {
55449+ if (atomic_read(&port->port.count)) {
55450 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55451 gs_start_io(port);
55452 if (gser->connect)
55453@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
55454
55455 port->port_usb = NULL;
55456 gser->ioport = NULL;
55457- if (port->port.count > 0 || port->openclose) {
55458+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55459 wake_up_interruptible(&port->drain_wait);
55460 if (port->port.tty)
55461 tty_hangup(port->port.tty);
55462@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
55463
55464 /* finally, free any unused/unusable I/O buffers */
55465 spin_lock_irqsave(&port->port_lock, flags);
55466- if (port->port.count == 0 && !port->openclose)
55467+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55468 gs_buf_free(&port->port_write_buf);
55469 gs_free_requests(gser->out, &port->read_pool, NULL);
55470 gs_free_requests(gser->out, &port->read_queue, NULL);
55471diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55472index c78c841..48fd281 100644
55473--- a/drivers/usb/gadget/function/u_uac1.c
55474+++ b/drivers/usb/gadget/function/u_uac1.c
55475@@ -17,6 +17,7 @@
55476 #include <linux/ctype.h>
55477 #include <linux/random.h>
55478 #include <linux/syscalls.h>
55479+#include <linux/module.h>
55480
55481 #include "u_uac1.h"
55482
55483diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55484index 7354d01..299478e 100644
55485--- a/drivers/usb/host/ehci-hub.c
55486+++ b/drivers/usb/host/ehci-hub.c
55487@@ -772,7 +772,7 @@ static struct urb *request_single_step_set_feature_urb(
55488 urb->transfer_flags = URB_DIR_IN;
55489 usb_get_urb(urb);
55490 atomic_inc(&urb->use_count);
55491- atomic_inc(&urb->dev->urbnum);
55492+ atomic_inc_unchecked(&urb->dev->urbnum);
55493 urb->setup_dma = dma_map_single(
55494 hcd->self.controller,
55495 urb->setup_packet,
55496@@ -839,7 +839,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55497 urb->status = -EINPROGRESS;
55498 usb_get_urb(urb);
55499 atomic_inc(&urb->use_count);
55500- atomic_inc(&urb->dev->urbnum);
55501+ atomic_inc_unchecked(&urb->dev->urbnum);
55502 retval = submit_single_step_set_feature(hcd, urb, 0);
55503 if (!retval && !wait_for_completion_timeout(&done,
55504 msecs_to_jiffies(2000))) {
55505diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55506index 1db0626..4948782 100644
55507--- a/drivers/usb/host/hwa-hc.c
55508+++ b/drivers/usb/host/hwa-hc.c
55509@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55510 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55511 struct wahc *wa = &hwahc->wa;
55512 struct device *dev = &wa->usb_iface->dev;
55513- u8 mas_le[UWB_NUM_MAS/8];
55514+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55515+
55516+ if (mas_le == NULL)
55517+ return -ENOMEM;
55518
55519 /* Set the stream index */
55520 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55521@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55522 WUSB_REQ_SET_WUSB_MAS,
55523 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55524 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55525- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55526+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55527 if (result < 0)
55528 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55529 out:
55530+ kfree(mas_le);
55531+
55532 return result;
55533 }
55534
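__hwahc_op_bwa_set() gets two related fixes: the MAS bitmap moves from the kernel stack to kmalloc(), since buffers handed to usb_control_msg() must be suitable for DMA and stack memory is not guaranteed to be, and the hard-coded transfer length 32 is replaced by the same UWB_NUM_MAS/8 expression used for the allocation (currently also 32), so the two can never drift apart. The allocate/use/free shape:

    u8 *mas_le = kmalloc(UWB_NUM_MAS / 8, GFP_KERNEL);

    if (!mas_le)
            return -ENOMEM;
    /* ... fill the bitmap, pass (mas_le, UWB_NUM_MAS / 8) to
     * usb_control_msg() ... */
    kfree(mas_le);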
55535diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55536index b3d245e..99549ed 100644
55537--- a/drivers/usb/misc/appledisplay.c
55538+++ b/drivers/usb/misc/appledisplay.c
55539@@ -84,7 +84,7 @@ struct appledisplay {
55540 struct mutex sysfslock; /* concurrent read and write */
55541 };
55542
55543-static atomic_t count_displays = ATOMIC_INIT(0);
55544+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55545 static struct workqueue_struct *wq;
55546
55547 static void appledisplay_complete(struct urb *urb)
55548@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55549
55550 /* Register backlight device */
55551 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55552- atomic_inc_return(&count_displays) - 1);
55553+ atomic_inc_return_unchecked(&count_displays) - 1);
55554 memset(&props, 0, sizeof(struct backlight_properties));
55555 props.type = BACKLIGHT_RAW;
55556 props.max_brightness = 0xff;
55557diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55558index 3806e70..55c508b 100644
55559--- a/drivers/usb/serial/console.c
55560+++ b/drivers/usb/serial/console.c
55561@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
55562
55563 info->port = port;
55564
55565- ++port->port.count;
55566+ atomic_inc(&port->port.count);
55567 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55568 if (serial->type->set_termios) {
55569 /*
55570@@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
55571 }
55572 /* Now that any required fake tty operations are completed restore
55573 * the tty port count */
55574- --port->port.count;
55575+ atomic_dec(&port->port.count);
55576 /* The console is special in terms of closing the device so
55577 * indicate this port is now acting as a system console. */
55578 port->port.console = 1;
55579@@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
55580 put_tty:
55581 tty_kref_put(tty);
55582 reset_open_count:
55583- port->port.count = 0;
55584+ atomic_set(&port->port.count, 0);
55585 usb_autopm_put_interface(serial->interface);
55586 error_get_interface:
55587 usb_serial_put(serial);
55588@@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
55589 static void usb_console_write(struct console *co,
55590 const char *buf, unsigned count)
55591 {
55592- static struct usbcons_info *info = &usbcons_info;
55593+ struct usbcons_info *info = &usbcons_info;
55594 struct usb_serial_port *port = info->port;
55595 struct usb_serial *serial;
55596 int retval = -ENODEV;
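The console hunks above follow from a tree-wide change: struct tty_port::count becomes an atomic_t so the open count can be adjusted without relying on a lock being held on every path, and all direct ++/--/= accesses are rewritten through the atomic API. The shape of the conversion, on a hypothetical struct:

	struct my_port {
		atomic_t count;			/* was: int count; */
	};

	static void my_open(struct my_port *p)
	{
		atomic_inc(&p->count);		/* was: ++p->count; */
	}

	static void my_drop(struct my_port *p)
	{
		atomic_set(&p->count, 0);	/* was: p->count = 0; */
	}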
55597diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55598index 307e339..6aa97cb 100644
55599--- a/drivers/usb/storage/usb.h
55600+++ b/drivers/usb/storage/usb.h
55601@@ -63,7 +63,7 @@ struct us_unusual_dev {
55602 __u8 useProtocol;
55603 __u8 useTransport;
55604 int (*initFunction)(struct us_data *);
55605-};
55606+} __do_const;
55607
55608
55609 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
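__do_const is an annotation consumed by grsecurity's constify GCC plugin. The plugin automatically moves structure types that contain only function pointers into read-only memory so the pointers cannot be overwritten at runtime; __do_const forces the same treatment for types like us_unusual_dev that mix function pointers with data fields. A sketch of the effect, under that assumption:

	static int my_init(void *priv) { return 0; }

	struct my_ops {
		int (*init)(void *);
		int flags;	/* a data member would normally block auto-constify */
	} __do_const;

	/* every instance is treated as const, i.e. placed in .rodata: */
	static struct my_ops ops = { .init = my_init, .flags = 1 };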
55610diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
55611index a863a98..d272795 100644
55612--- a/drivers/usb/usbip/vhci.h
55613+++ b/drivers/usb/usbip/vhci.h
55614@@ -83,7 +83,7 @@ struct vhci_hcd {
55615 unsigned resuming:1;
55616 unsigned long re_timeout;
55617
55618- atomic_t seqnum;
55619+ atomic_unchecked_t seqnum;
55620
55621 /*
55622 * NOTE:
55623diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
55624index 11f6f61..1087910 100644
55625--- a/drivers/usb/usbip/vhci_hcd.c
55626+++ b/drivers/usb/usbip/vhci_hcd.c
55627@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
55628
55629 spin_lock(&vdev->priv_lock);
55630
55631- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
55632+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55633 if (priv->seqnum == 0xffff)
55634 dev_info(&urb->dev->dev, "seqnum max\n");
55635
55636@@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
55637 return -ENOMEM;
55638 }
55639
55640- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
55641+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55642 if (unlink->seqnum == 0xffff)
55643 pr_info("seqnum max\n");
55644
55645@@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
55646 vdev->rhport = rhport;
55647 }
55648
55649- atomic_set(&vhci->seqnum, 0);
55650+ atomic_set_unchecked(&vhci->seqnum, 0);
55651 spin_lock_init(&vhci->lock);
55652
55653 hcd->power_budget = 0; /* no limit */
55654diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
55655index 00e4a54..d676f85 100644
55656--- a/drivers/usb/usbip/vhci_rx.c
55657+++ b/drivers/usb/usbip/vhci_rx.c
55658@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
55659 if (!urb) {
55660 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
55661 pr_info("max seqnum %d\n",
55662- atomic_read(&the_controller->seqnum));
55663+ atomic_read_unchecked(&the_controller->seqnum));
55664 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
55665 return;
55666 }
55667diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55668index edc7267..9f65ce2 100644
55669--- a/drivers/usb/wusbcore/wa-hc.h
55670+++ b/drivers/usb/wusbcore/wa-hc.h
55671@@ -240,7 +240,7 @@ struct wahc {
55672 spinlock_t xfer_list_lock;
55673 struct work_struct xfer_enqueue_work;
55674 struct work_struct xfer_error_work;
55675- atomic_t xfer_id_count;
55676+ atomic_unchecked_t xfer_id_count;
55677
55678 kernel_ulong_t quirks;
55679 };
55680@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55681 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55682 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55683 wa->dto_in_use = 0;
55684- atomic_set(&wa->xfer_id_count, 1);
55685+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55686 /* init the buf in URBs */
55687 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55688 usb_init_urb(&(wa->buf_in_urbs[index]));
55689diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55690index 69af4fd..da390d7 100644
55691--- a/drivers/usb/wusbcore/wa-xfer.c
55692+++ b/drivers/usb/wusbcore/wa-xfer.c
55693@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
55694 */
55695 static void wa_xfer_id_init(struct wa_xfer *xfer)
55696 {
55697- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
55698+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
55699 }
55700
55701 /* Return the xfer's ID. */
55702diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
55703index 837d177..170724af 100644
55704--- a/drivers/vfio/vfio.c
55705+++ b/drivers/vfio/vfio.c
55706@@ -518,7 +518,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
55707 return 0;
55708
55709 /* TODO Prevent device auto probing */
55710- WARN("Device %s added to live group %d!\n", dev_name(dev),
55711+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
55712 iommu_group_id(group->iommu_group));
55713
55714 return 0;
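The vfio change is a plain bug fix that the patch carries: WARN() takes a trigger condition as its first argument and the format string second, so the original call passed the format string as the condition (always true) and dev_name(dev) as the format. Correct usage:

	/* unconditional warning: */
	WARN(1, "Device %s added to live group %d!\n",
	     dev_name(dev), iommu_group_id(group->iommu_group));

	/* conditional warning: */
	WARN(ret < 0, "setup failed: %d\n", ret);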
55715diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
55716index 3bb02c6..a01ff38 100644
55717--- a/drivers/vhost/vringh.c
55718+++ b/drivers/vhost/vringh.c
55719@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
55720 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
55721 {
55722 __virtio16 v = 0;
55723- int rc = get_user(v, (__force __virtio16 __user *)p);
55724+ int rc = get_user(v, (__force_user __virtio16 *)p);
55725 *val = vringh16_to_cpu(vrh, v);
55726 return rc;
55727 }
55728@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
55729 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
55730 {
55731 __virtio16 v = cpu_to_vringh16(vrh, val);
55732- return put_user(v, (__force __virtio16 __user *)p);
55733+ return put_user(v, (__force_user __virtio16 *)p);
55734 }
55735
55736 static inline int copydesc_user(void *dst, const void *src, size_t len)
55737 {
55738- return copy_from_user(dst, (__force void __user *)src, len) ?
55739+ return copy_from_user(dst, (void __force_user *)src, len) ?
55740 -EFAULT : 0;
55741 }
55742
55743@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
55744 const struct vring_used_elem *src,
55745 unsigned int num)
55746 {
55747- return copy_to_user((__force void __user *)dst, src,
55748+ return copy_to_user((void __force_user *)dst, src,
55749 sizeof(*dst) * num) ? -EFAULT : 0;
55750 }
55751
55752 static inline int xfer_from_user(void *src, void *dst, size_t len)
55753 {
55754- return copy_from_user(dst, (__force void __user *)src, len) ?
55755+ return copy_from_user(dst, (void __force_user *)src, len) ?
55756 -EFAULT : 0;
55757 }
55758
55759 static inline int xfer_to_user(void *dst, void *src, size_t len)
55760 {
55761- return copy_to_user((__force void __user *)dst, src, len) ?
55762+ return copy_to_user((void __force_user *)dst, src, len) ?
55763 -EFAULT : 0;
55764 }
55765
55766@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
55767 vrh->last_used_idx = 0;
55768 vrh->vring.num = num;
55769 /* vring expects kernel addresses, but only used via accessors. */
55770- vrh->vring.desc = (__force struct vring_desc *)desc;
55771- vrh->vring.avail = (__force struct vring_avail *)avail;
55772- vrh->vring.used = (__force struct vring_used *)used;
55773+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
55774+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
55775+ vrh->vring.used = (__force_kernel struct vring_used *)used;
55776 return 0;
55777 }
55778 EXPORT_SYMBOL(vringh_init_user);
55779@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
55780
55781 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
55782 {
55783- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
55784+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
55785 return 0;
55786 }
55787
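The vringh rewrites above are checker hygiene for PaX's split user/kernel address spaces: a cast between the two must state its direction, so a kernel-typed pointer that really refers to userspace is cast with __force_user (and the reverse with __force_kernel) instead of the stock __force __user spelling. ACCESS_ONCE() is likewise made const-only in this tree, so the intentional write in putu16_kern() must go through ACCESS_ONCE_RW(). A sketch, assuming simplified definitions of the annotations:

	/* Assumption: roughly how the patched compiler.h spells these. */
	#define __force_user	__force __user
	#define __force_kernel	__force __kernel

	static int read_u16_from_user(const __virtio16 *p, u16 *out)
	{
		__virtio16 v;

		/* p actually points into userspace; say so explicitly: */
		if (get_user(v, (__force_user __virtio16 *)p))
			return -EFAULT;
		*out = v;	/* byte-swapping omitted in this sketch */
		return 0;
	}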
55788diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
55789index 84a110a..96312c3 100644
55790--- a/drivers/video/backlight/kb3886_bl.c
55791+++ b/drivers/video/backlight/kb3886_bl.c
55792@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
55793 static unsigned long kb3886bl_flags;
55794 #define KB3886BL_SUSPENDED 0x01
55795
55796-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
55797+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
55798 {
55799 .ident = "Sahara Touch-iT",
55800 .matches = {
55801diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
55802index 1b0b233..6f34c2c 100644
55803--- a/drivers/video/fbdev/arcfb.c
55804+++ b/drivers/video/fbdev/arcfb.c
55805@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
55806 return -ENOSPC;
55807
55808 err = 0;
55809- if ((count + p) > fbmemlength) {
55810+ if (count > (fbmemlength - p)) {
55811 count = fbmemlength - p;
55812 err = -ENOSPC;
55813 }
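The arcfb_write() change fixes an integer-overflow hole in a bounds check: with a huge count, count + p wraps and the old comparison passes even though the write would run past fbmemlength. Rewriting it as count > fbmemlength - p removes the addition; the subtraction cannot underflow because p was already checked against fbmemlength earlier in the function. The idiom in isolation:

	/* Overflow-safe clamp of (off, len) against a buffer of 'size' bytes.
	 * Precondition (as in arcfb_write): off <= size. */
	static size_t clamp_len(size_t off, size_t len, size_t size)
	{
		/* BAD:  if (off + len > size)  -- off + len may wrap */
		if (len > size - off)		/* never overflows given off <= size */
			len = size - off;
		return len;
	}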
55814diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
55815index aedf2fb..47c9aca 100644
55816--- a/drivers/video/fbdev/aty/aty128fb.c
55817+++ b/drivers/video/fbdev/aty/aty128fb.c
55818@@ -149,7 +149,7 @@ enum {
55819 };
55820
55821 /* Must match above enum */
55822-static char * const r128_family[] = {
55823+static const char * const r128_family[] = {
55824 "AGP",
55825 "PCI",
55826 "PRO AGP",
55827diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
55828index 8789e48..698fe4c 100644
55829--- a/drivers/video/fbdev/aty/atyfb_base.c
55830+++ b/drivers/video/fbdev/aty/atyfb_base.c
55831@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
55832 par->accel_flags = var->accel_flags; /* hack */
55833
55834 if (var->accel_flags) {
55835- info->fbops->fb_sync = atyfb_sync;
55836+ pax_open_kernel();
55837+ *(void **)&info->fbops->fb_sync = atyfb_sync;
55838+ pax_close_kernel();
55839 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55840 } else {
55841- info->fbops->fb_sync = NULL;
55842+ pax_open_kernel();
55843+ *(void **)&info->fbops->fb_sync = NULL;
55844+ pax_close_kernel();
55845 info->flags |= FBINFO_HWACCEL_DISABLED;
55846 }
55847
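The pax_open_kernel()/pax_close_kernel() pattern above recurs through the rest of the fbdev hunks (mach64_cursor, fb_defio, mb862xx, nvidia, omap2 dss, s1d13xxx, smscufx, udlfb, uvesafb, vesafb): with KERNEXEC and the constify plugin, method tables such as struct fb_ops live in read-only memory, so a driver that legitimately swaps a method at runtime must lift write protection around the store and cast through void ** to get past the const-qualified type. In outline, assuming x86 where the helpers toggle CR0.WP:

	/* my_sync is a hypothetical replacement handler. */
	pax_open_kernel();				/* allow writes to .rodata */
	*(void **)&info->fbops->fb_sync = my_sync;
	pax_close_kernel();				/* re-arm write protection */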
55848diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
55849index 2fa0317..4983f2a 100644
55850--- a/drivers/video/fbdev/aty/mach64_cursor.c
55851+++ b/drivers/video/fbdev/aty/mach64_cursor.c
55852@@ -8,6 +8,7 @@
55853 #include "../core/fb_draw.h"
55854
55855 #include <asm/io.h>
55856+#include <asm/pgtable.h>
55857
55858 #ifdef __sparc__
55859 #include <asm/fbio.h>
55860@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
55861 info->sprite.buf_align = 16; /* and 64 lines tall. */
55862 info->sprite.flags = FB_PIXMAP_IO;
55863
55864- info->fbops->fb_cursor = atyfb_cursor;
55865+ pax_open_kernel();
55866+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55867+ pax_close_kernel();
55868
55869 return 0;
55870 }
55871diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55872index d6cab1f..112f680 100644
55873--- a/drivers/video/fbdev/core/fb_defio.c
55874+++ b/drivers/video/fbdev/core/fb_defio.c
55875@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
55876
55877 BUG_ON(!fbdefio);
55878 mutex_init(&fbdefio->lock);
55879- info->fbops->fb_mmap = fb_deferred_io_mmap;
55880+ pax_open_kernel();
55881+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55882+ pax_close_kernel();
55883 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55884 INIT_LIST_HEAD(&fbdefio->pagelist);
55885 if (fbdefio->delay == 0) /* set a default of 1 s */
55886@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55887 page->mapping = NULL;
55888 }
55889
55890- info->fbops->fb_mmap = NULL;
55891+ *(void **)&info->fbops->fb_mmap = NULL;
55892 mutex_destroy(&fbdefio->lock);
55893 }
55894 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55895diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55896index 0705d88..d9429bf 100644
55897--- a/drivers/video/fbdev/core/fbmem.c
55898+++ b/drivers/video/fbdev/core/fbmem.c
55899@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55900 __u32 data;
55901 int err;
55902
55903- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55904+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55905
55906 data = (__u32) (unsigned long) fix->smem_start;
55907 err |= put_user(data, &fix32->smem_start);
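In do_fscreeninfo_to_user(), id is a char array, so fix32->id already decays to char __user * -- the same address that &fix32->id yields, but with the element-pointer type rather than pointer-to-array, which is what the patched tree's instrumented user-copy routines expect to reason about. The spurious address-of is therefore dropped. Illustrated on a hypothetical struct:

	struct s { char id[16]; };

	static int copy_id(struct s __user *u, const char *src)
	{
		/* u->id  : char __user *        (decayed, preferred)
		 * &u->id : char (__user *)[16]  (same address, odd type) */
		return copy_to_user(u->id, src, sizeof(u->id)) ? -EFAULT : 0;
	}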
55908diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55909index 4254336..282567e 100644
55910--- a/drivers/video/fbdev/hyperv_fb.c
55911+++ b/drivers/video/fbdev/hyperv_fb.c
55912@@ -240,7 +240,7 @@ static uint screen_fb_size;
55913 static inline int synthvid_send(struct hv_device *hdev,
55914 struct synthvid_msg *msg)
55915 {
55916- static atomic64_t request_id = ATOMIC64_INIT(0);
55917+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55918 int ret;
55919
55920 msg->pipe_hdr.type = PIPE_MSG_DATA;
55921@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55922
55923 ret = vmbus_sendpacket(hdev->channel, msg,
55924 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55925- atomic64_inc_return(&request_id),
55926+ atomic64_inc_return_unchecked(&request_id),
55927 VM_PKT_DATA_INBAND, 0);
55928
55929 if (ret)
55930diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55931index 7672d2e..b56437f 100644
55932--- a/drivers/video/fbdev/i810/i810_accel.c
55933+++ b/drivers/video/fbdev/i810/i810_accel.c
55934@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55935 }
55936 }
55937 printk("ringbuffer lockup!!!\n");
55938+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55939 i810_report_error(mmio);
55940 par->dev_flags |= LOCKUP;
55941 info->pixmap.scan_align = 1;
55942diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55943index a01147f..5d896f8 100644
55944--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55945+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55946@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55947
55948 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55949 struct matrox_switch matrox_mystique = {
55950- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55951+ .preinit = MGA1064_preinit,
55952+ .reset = MGA1064_reset,
55953+ .init = MGA1064_init,
55954+ .restore = MGA1064_restore,
55955 };
55956 EXPORT_SYMBOL(matrox_mystique);
55957 #endif
55958
55959 #ifdef CONFIG_FB_MATROX_G
55960 struct matrox_switch matrox_G100 = {
55961- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55962+ .preinit = MGAG100_preinit,
55963+ .reset = MGAG100_reset,
55964+ .init = MGAG100_init,
55965+ .restore = MGAG100_restore,
55966 };
55967 EXPORT_SYMBOL(matrox_G100);
55968 #endif
55969diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55970index 195ad7c..09743fc 100644
55971--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55972+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55973@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55974 }
55975
55976 struct matrox_switch matrox_millennium = {
55977- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55978+ .preinit = Ti3026_preinit,
55979+ .reset = Ti3026_reset,
55980+ .init = Ti3026_init,
55981+ .restore = Ti3026_restore
55982 };
55983 EXPORT_SYMBOL(matrox_millennium);
55984 #endif
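The matrox (and later sh_mobile_lcdc) ops tables are converted from positional to designated initializers. Positional initializers bind by declaration order, which silently mis-assigns members if the struct layout ever changes, and is guaranteed to break under grsecurity's structure-layout randomization. Designated initializers bind by name and survive reordering:

	static int  my_preinit(void *p) { return 0; }
	static void my_reset(void *p)   { }

	struct my_switch {
		int  (*preinit)(void *);
		void (*reset)(void *);
	};

	/* fragile: depends on field order */
	static struct my_switch a = { my_preinit, my_reset };

	/* robust: survives reordering, e.g. under randomize_layout */
	static struct my_switch b = {
		.preinit = my_preinit,
		.reset   = my_reset,
	};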
55985diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55986index fe92eed..106e085 100644
55987--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55988+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55989@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55990 struct mb862xxfb_par *par = info->par;
55991
55992 if (info->var.bits_per_pixel == 32) {
55993- info->fbops->fb_fillrect = cfb_fillrect;
55994- info->fbops->fb_copyarea = cfb_copyarea;
55995- info->fbops->fb_imageblit = cfb_imageblit;
55996+ pax_open_kernel();
55997+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55998+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55999+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56000+ pax_close_kernel();
56001 } else {
56002 outreg(disp, GC_L0EM, 3);
56003- info->fbops->fb_fillrect = mb86290fb_fillrect;
56004- info->fbops->fb_copyarea = mb86290fb_copyarea;
56005- info->fbops->fb_imageblit = mb86290fb_imageblit;
56006+ pax_open_kernel();
56007+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56008+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56009+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56010+ pax_close_kernel();
56011 }
56012 outreg(draw, GDC_REG_DRAW_BASE, 0);
56013 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56014diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56015index def0412..fed6529 100644
56016--- a/drivers/video/fbdev/nvidia/nvidia.c
56017+++ b/drivers/video/fbdev/nvidia/nvidia.c
56018@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56019 info->fix.line_length = (info->var.xres_virtual *
56020 info->var.bits_per_pixel) >> 3;
56021 if (info->var.accel_flags) {
56022- info->fbops->fb_imageblit = nvidiafb_imageblit;
56023- info->fbops->fb_fillrect = nvidiafb_fillrect;
56024- info->fbops->fb_copyarea = nvidiafb_copyarea;
56025- info->fbops->fb_sync = nvidiafb_sync;
56026+ pax_open_kernel();
56027+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56028+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56029+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56030+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56031+ pax_close_kernel();
56032 info->pixmap.scan_align = 4;
56033 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56034 info->flags |= FBINFO_READS_FAST;
56035 NVResetGraphics(info);
56036 } else {
56037- info->fbops->fb_imageblit = cfb_imageblit;
56038- info->fbops->fb_fillrect = cfb_fillrect;
56039- info->fbops->fb_copyarea = cfb_copyarea;
56040- info->fbops->fb_sync = NULL;
56041+ pax_open_kernel();
56042+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56043+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56044+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56045+ *(void **)&info->fbops->fb_sync = NULL;
56046+ pax_close_kernel();
56047 info->pixmap.scan_align = 1;
56048 info->flags |= FBINFO_HWACCEL_DISABLED;
56049 info->flags &= ~FBINFO_READS_FAST;
56050@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56051 info->pixmap.size = 8 * 1024;
56052 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56053
56054- if (!hwcur)
56055- info->fbops->fb_cursor = NULL;
56056+ if (!hwcur) {
56057+ pax_open_kernel();
56058+ *(void **)&info->fbops->fb_cursor = NULL;
56059+ pax_close_kernel();
56060+ }
56061
56062 info->var.accel_flags = (!noaccel);
56063
56064diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56065index 2412a0d..294215b 100644
56066--- a/drivers/video/fbdev/omap2/dss/display.c
56067+++ b/drivers/video/fbdev/omap2/dss/display.c
56068@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56069 if (dssdev->name == NULL)
56070 dssdev->name = dssdev->alias;
56071
56072+ pax_open_kernel();
56073 if (drv && drv->get_resolution == NULL)
56074- drv->get_resolution = omapdss_default_get_resolution;
56075+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56076 if (drv && drv->get_recommended_bpp == NULL)
56077- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56078+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56079 if (drv && drv->get_timings == NULL)
56080- drv->get_timings = omapdss_default_get_timings;
56081+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56082+ pax_close_kernel();
56083
56084 mutex_lock(&panel_list_mutex);
56085 list_add_tail(&dssdev->panel_list, &panel_list);
56086diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56087index 83433cb..71e9b98 100644
56088--- a/drivers/video/fbdev/s1d13xxxfb.c
56089+++ b/drivers/video/fbdev/s1d13xxxfb.c
56090@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56091
56092 switch(prod_id) {
56093 case S1D13506_PROD_ID: /* activate acceleration */
56094- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56095- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56096+ pax_open_kernel();
56097+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56098+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56099+ pax_close_kernel();
56100 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56101 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56102 break;
56103diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56104index d3013cd..95b8285 100644
56105--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56106+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56107@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56108 }
56109
56110 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56111- lcdc_sys_write_index,
56112- lcdc_sys_write_data,
56113- lcdc_sys_read_data,
56114+ .write_index = lcdc_sys_write_index,
56115+ .write_data = lcdc_sys_write_data,
56116+ .read_data = lcdc_sys_read_data,
56117 };
56118
56119 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56120diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56121index 9279e5f..d5f5276 100644
56122--- a/drivers/video/fbdev/smscufx.c
56123+++ b/drivers/video/fbdev/smscufx.c
56124@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56125 fb_deferred_io_cleanup(info);
56126 kfree(info->fbdefio);
56127 info->fbdefio = NULL;
56128- info->fbops->fb_mmap = ufx_ops_mmap;
56129+ pax_open_kernel();
56130+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56131+ pax_close_kernel();
56132 }
56133
56134 pr_debug("released /dev/fb%d user=%d count=%d",
56135diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56136index ff2b873..626a8d5 100644
56137--- a/drivers/video/fbdev/udlfb.c
56138+++ b/drivers/video/fbdev/udlfb.c
56139@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56140 dlfb_urb_completion(urb);
56141
56142 error:
56143- atomic_add(bytes_sent, &dev->bytes_sent);
56144- atomic_add(bytes_identical, &dev->bytes_identical);
56145- atomic_add(width*height*2, &dev->bytes_rendered);
56146+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56147+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56148+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56149 end_cycles = get_cycles();
56150- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56151+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56152 >> 10)), /* Kcycles */
56153 &dev->cpu_kcycles_used);
56154
56155@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56156 dlfb_urb_completion(urb);
56157
56158 error:
56159- atomic_add(bytes_sent, &dev->bytes_sent);
56160- atomic_add(bytes_identical, &dev->bytes_identical);
56161- atomic_add(bytes_rendered, &dev->bytes_rendered);
56162+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56163+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56164+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56165 end_cycles = get_cycles();
56166- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56167+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56168 >> 10)), /* Kcycles */
56169 &dev->cpu_kcycles_used);
56170 }
56171@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56172 fb_deferred_io_cleanup(info);
56173 kfree(info->fbdefio);
56174 info->fbdefio = NULL;
56175- info->fbops->fb_mmap = dlfb_ops_mmap;
56176+ pax_open_kernel();
56177+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56178+ pax_close_kernel();
56179 }
56180
56181 pr_warn("released /dev/fb%d user=%d count=%d\n",
56182@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56183 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56184 struct dlfb_data *dev = fb_info->par;
56185 return snprintf(buf, PAGE_SIZE, "%u\n",
56186- atomic_read(&dev->bytes_rendered));
56187+ atomic_read_unchecked(&dev->bytes_rendered));
56188 }
56189
56190 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56191@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56192 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56193 struct dlfb_data *dev = fb_info->par;
56194 return snprintf(buf, PAGE_SIZE, "%u\n",
56195- atomic_read(&dev->bytes_identical));
56196+ atomic_read_unchecked(&dev->bytes_identical));
56197 }
56198
56199 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56200@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56201 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56202 struct dlfb_data *dev = fb_info->par;
56203 return snprintf(buf, PAGE_SIZE, "%u\n",
56204- atomic_read(&dev->bytes_sent));
56205+ atomic_read_unchecked(&dev->bytes_sent));
56206 }
56207
56208 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56209@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56210 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56211 struct dlfb_data *dev = fb_info->par;
56212 return snprintf(buf, PAGE_SIZE, "%u\n",
56213- atomic_read(&dev->cpu_kcycles_used));
56214+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56215 }
56216
56217 static ssize_t edid_show(
56218@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56219 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56220 struct dlfb_data *dev = fb_info->par;
56221
56222- atomic_set(&dev->bytes_rendered, 0);
56223- atomic_set(&dev->bytes_identical, 0);
56224- atomic_set(&dev->bytes_sent, 0);
56225- atomic_set(&dev->cpu_kcycles_used, 0);
56226+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56227+ atomic_set_unchecked(&dev->bytes_identical, 0);
56228+ atomic_set_unchecked(&dev->bytes_sent, 0);
56229+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56230
56231 return count;
56232 }
56233diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56234index d32d1c4..46722e6 100644
56235--- a/drivers/video/fbdev/uvesafb.c
56236+++ b/drivers/video/fbdev/uvesafb.c
56237@@ -19,6 +19,7 @@
56238 #include <linux/io.h>
56239 #include <linux/mutex.h>
56240 #include <linux/slab.h>
56241+#include <linux/moduleloader.h>
56242 #include <video/edid.h>
56243 #include <video/uvesafb.h>
56244 #ifdef CONFIG_X86
56245@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56246 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56247 par->pmi_setpal = par->ypan = 0;
56248 } else {
56249+
56250+#ifdef CONFIG_PAX_KERNEXEC
56251+#ifdef CONFIG_MODULES
56252+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56253+#endif
56254+ if (!par->pmi_code) {
56255+ par->pmi_setpal = par->ypan = 0;
56256+ return 0;
56257+ }
56258+#endif
56259+
56260 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56261 + task->t.regs.edi);
56262+
56263+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56264+ pax_open_kernel();
56265+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56266+ pax_close_kernel();
56267+
56268+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56269+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56270+#else
56271 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56272 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56273+#endif
56274+
56275 printk(KERN_INFO "uvesafb: protected mode interface info at "
56276 "%04x:%04x\n",
56277 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56278@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56279 par->ypan = ypan;
56280
56281 if (par->pmi_setpal || par->ypan) {
56282+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56283 if (__supported_pte_mask & _PAGE_NX) {
56284 par->pmi_setpal = par->ypan = 0;
56285 printk(KERN_WARNING "uvesafb: NX protection is active, "
56286 "better not use the PMI.\n");
56287- } else {
56288+ } else
56289+#endif
56290 uvesafb_vbe_getpmi(task, par);
56291- }
56292 }
56293 #else
56294 /* The protected mode interface is not available on non-x86. */
56295@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56296 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56297
56298 /* Disable blanking if the user requested so. */
56299- if (!blank)
56300- info->fbops->fb_blank = NULL;
56301+ if (!blank) {
56302+ pax_open_kernel();
56303+ *(void **)&info->fbops->fb_blank = NULL;
56304+ pax_close_kernel();
56305+ }
56306
56307 /*
56308 * Find out how much IO memory is required for the mode with
56309@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56310 info->flags = FBINFO_FLAG_DEFAULT |
56311 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56312
56313- if (!par->ypan)
56314- info->fbops->fb_pan_display = NULL;
56315+ if (!par->ypan) {
56316+ pax_open_kernel();
56317+ *(void **)&info->fbops->fb_pan_display = NULL;
56318+ pax_close_kernel();
56319+ }
56320 }
56321
56322 static void uvesafb_init_mtrr(struct fb_info *info)
56323@@ -1786,6 +1816,11 @@ out_mode:
56324 out:
56325 kfree(par->vbe_modes);
56326
56327+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56328+ if (par->pmi_code)
56329+ module_memfree_exec(par->pmi_code);
56330+#endif
56331+
56332 framebuffer_release(info);
56333 return err;
56334 }
56335@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
56336 kfree(par->vbe_state_orig);
56337 kfree(par->vbe_state_saved);
56338
56339+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56340+ if (par->pmi_code)
56341+ module_memfree_exec(par->pmi_code);
56342+#endif
56343+
56344 framebuffer_release(info);
56345 }
56346 return 0;
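The uvesafb hunks above and the vesafb hunks below share one KERNEXEC accommodation: the VESA protected-mode interface (PMI) is BIOS code that the stock driver calls in place in low memory, but KERNEXEC maps that region non-executable, so the code is first copied into an executable module allocation and the entry points recomputed (falling back to disabling the PMI when that fails, much as the existing NX check already does). A condensed fragment using the helpers the hunks introduce (module_alloc_exec, ktva_ktla; error handling trimmed):

	u16 *pmi_base;		/* mapped BIOS PMI table, as in the driver */
	void *pmi_code, *pmi_start, *pmi_pal;

	pmi_code = module_alloc_exec(screen_info.vesapm_size);
	if (pmi_code) {
		pax_open_kernel();
		memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
		pax_close_kernel();
		/* entry points now live in the executable copy: */
		pmi_start = ktva_ktla((char *)pmi_code + pmi_base[1]);
		pmi_pal   = ktva_ktla((char *)pmi_code + pmi_base[2]);
	}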
56347diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56348index d79a0ac..2d0c3d4 100644
56349--- a/drivers/video/fbdev/vesafb.c
56350+++ b/drivers/video/fbdev/vesafb.c
56351@@ -9,6 +9,7 @@
56352 */
56353
56354 #include <linux/module.h>
56355+#include <linux/moduleloader.h>
56356 #include <linux/kernel.h>
56357 #include <linux/errno.h>
56358 #include <linux/string.h>
56359@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56360 static int vram_total; /* Set total amount of memory */
56361 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56362 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56363-static void (*pmi_start)(void) __read_mostly;
56364-static void (*pmi_pal) (void) __read_mostly;
56365+static void (*pmi_start)(void) __read_only;
56366+static void (*pmi_pal) (void) __read_only;
56367 static int depth __read_mostly;
56368 static int vga_compat __read_mostly;
56369 /* --------------------------------------------------------------------- */
56370@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56371 unsigned int size_remap;
56372 unsigned int size_total;
56373 char *option = NULL;
56374+ void *pmi_code = NULL;
56375
56376 /* ignore error return of fb_get_options */
56377 fb_get_options("vesafb", &option);
56378@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56379 size_remap = size_total;
56380 vesafb_fix.smem_len = size_remap;
56381
56382-#ifndef __i386__
56383- screen_info.vesapm_seg = 0;
56384-#endif
56385-
56386 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56387 printk(KERN_WARNING
56388 "vesafb: cannot reserve video memory at 0x%lx\n",
56389@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56390 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56391 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56392
56393+#ifdef __i386__
56394+
56395+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56396+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56397+ if (!pmi_code)
56398+#elif !defined(CONFIG_PAX_KERNEXEC)
56399+ if (0)
56400+#endif
56401+
56402+#endif
56403+ screen_info.vesapm_seg = 0;
56404+
56405 if (screen_info.vesapm_seg) {
56406- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56407- screen_info.vesapm_seg,screen_info.vesapm_off);
56408+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56409+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56410 }
56411
56412 if (screen_info.vesapm_seg < 0xc000)
56413@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56414
56415 if (ypan || pmi_setpal) {
56416 unsigned short *pmi_base;
56417+
56418 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56419- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56420- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56421+
56422+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56423+ pax_open_kernel();
56424+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56425+#else
56426+ pmi_code = pmi_base;
56427+#endif
56428+
56429+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56430+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56431+
56432+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56433+ pmi_start = ktva_ktla(pmi_start);
56434+ pmi_pal = ktva_ktla(pmi_pal);
56435+ pax_close_kernel();
56436+#endif
56437+
56438 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56439 if (pmi_base[3]) {
56440 printk(KERN_INFO "vesafb: pmi: ports = ");
56441@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56442 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56443 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56444
56445- if (!ypan)
56446- info->fbops->fb_pan_display = NULL;
56447+ if (!ypan) {
56448+ pax_open_kernel();
56449+ *(void **)&info->fbops->fb_pan_display = NULL;
56450+ pax_close_kernel();
56451+ }
56452
56453 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56454 err = -ENOMEM;
56455@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56456 fb_info(info, "%s frame buffer device\n", info->fix.id);
56457 return 0;
56458 err:
56459+
56460+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56461+ module_memfree_exec(pmi_code);
56462+#endif
56463+
56464 if (info->screen_base)
56465 iounmap(info->screen_base);
56466 framebuffer_release(info);
56467diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56468index 88714ae..16c2e11 100644
56469--- a/drivers/video/fbdev/via/via_clock.h
56470+++ b/drivers/video/fbdev/via/via_clock.h
56471@@ -56,7 +56,7 @@ struct via_clock {
56472
56473 void (*set_engine_pll_state)(u8 state);
56474 void (*set_engine_pll)(struct via_pll_config config);
56475-};
56476+} __no_const;
56477
56478
56479 static inline u32 get_pll_internal_frequency(u32 ref_freq,
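__no_const is the inverse of the __do_const annotation seen earlier: it tells the constify plugin to leave an all-function-pointer struct writable because instances are legitimately populated at runtime -- the via_clock methods are installed per-chipset during probe. Sketched under that assumption:

	static void my_set_state(u8 state) { }

	struct my_clock_ops {
		void (*set_state)(u8 state);
	} __no_const;			/* would otherwise be auto-constified */

	static struct my_clock_ops clk;

	static void my_probe(void)
	{
		clk.set_state = my_set_state;	/* runtime install needs writability */
	}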
56480diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56481index 3c14e43..2630570 100644
56482--- a/drivers/video/logo/logo_linux_clut224.ppm
56483+++ b/drivers/video/logo/logo_linux_clut224.ppm
56484@@ -2,1603 +2,1123 @@ P3
56485 # Standard 224-color Linux logo
56486 80 80
56487 255
[Raw PPM pixel data elided: per the hunk header above, this diff removes the 1,603 pixel-data rows of the standard 80x80, 224-color Linux boot logo and substitutes grsecurity's 1,123-row replacement artwork; the numeric RGB triplets carry no further information.]
57168- 0 0 0 0 0 0 0 0 0 0 0 0
57169- 0 0 0 0 0 0 0 0 0 0 0 0
57170- 0 0 0 0 0 0 0 0 0 0 0 0
57171- 0 0 0 0 0 0 0 0 0 0 0 0
57172- 0 0 0 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 10 10 10 26 26 26
57174- 58 58 58 90 90 90 18 18 18 2 2 6
57175- 2 2 6 110 110 110 253 253 253 253 253 253
57176-253 253 253 253 253 253 253 253 253 253 253 253
57177-250 250 250 253 253 253 253 253 253 253 253 253
57178-253 253 253 253 253 253 253 253 253 253 253 253
57179-253 253 253 253 253 253 253 253 253 253 253 253
57180-253 253 253 231 231 231 18 18 18 2 2 6
57181- 2 2 6 2 2 6 2 2 6 2 2 6
57182- 2 2 6 2 2 6 18 18 18 94 94 94
57183- 54 54 54 26 26 26 10 10 10 0 0 0
57184- 0 0 0 0 0 0 0 0 0 0 0 0
57185- 0 0 0 0 0 0 0 0 0 0 0 0
57186- 0 0 0 0 0 0 0 0 0 0 0 0
57187- 0 0 0 0 0 0 0 0 0 0 0 0
57188- 0 0 0 0 0 0 0 0 0 0 0 0
57189- 0 0 0 0 0 0 0 0 0 0 0 0
57190- 0 0 0 0 0 0 0 0 0 0 0 0
57191- 0 0 0 0 0 0 0 0 0 0 0 0
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 6 6 6 22 22 22 50 50 50
57194- 90 90 90 26 26 26 2 2 6 2 2 6
57195- 14 14 14 195 195 195 250 250 250 253 253 253
57196-253 253 253 253 253 253 253 253 253 253 253 253
57197-253 253 253 253 253 253 253 253 253 253 253 253
57198-253 253 253 253 253 253 253 253 253 253 253 253
57199-253 253 253 253 253 253 253 253 253 253 253 253
57200-250 250 250 242 242 242 54 54 54 2 2 6
57201- 2 2 6 2 2 6 2 2 6 2 2 6
57202- 2 2 6 2 2 6 2 2 6 38 38 38
57203- 86 86 86 50 50 50 22 22 22 6 6 6
57204- 0 0 0 0 0 0 0 0 0 0 0 0
57205- 0 0 0 0 0 0 0 0 0 0 0 0
57206- 0 0 0 0 0 0 0 0 0 0 0 0
57207- 0 0 0 0 0 0 0 0 0 0 0 0
57208- 0 0 0 0 0 0 0 0 0 0 0 0
57209- 0 0 0 0 0 0 0 0 0 0 0 0
57210- 0 0 0 0 0 0 0 0 0 0 0 0
57211- 0 0 0 0 0 0 0 0 0 0 0 0
57212- 0 0 0 0 0 0 0 0 0 0 0 0
57213- 6 6 6 14 14 14 38 38 38 82 82 82
57214- 34 34 34 2 2 6 2 2 6 2 2 6
57215- 42 42 42 195 195 195 246 246 246 253 253 253
57216-253 253 253 253 253 253 253 253 253 250 250 250
57217-242 242 242 242 242 242 250 250 250 253 253 253
57218-253 253 253 253 253 253 253 253 253 253 253 253
57219-253 253 253 250 250 250 246 246 246 238 238 238
57220-226 226 226 231 231 231 101 101 101 6 6 6
57221- 2 2 6 2 2 6 2 2 6 2 2 6
57222- 2 2 6 2 2 6 2 2 6 2 2 6
57223- 38 38 38 82 82 82 42 42 42 14 14 14
57224- 6 6 6 0 0 0 0 0 0 0 0 0
57225- 0 0 0 0 0 0 0 0 0 0 0 0
57226- 0 0 0 0 0 0 0 0 0 0 0 0
57227- 0 0 0 0 0 0 0 0 0 0 0 0
57228- 0 0 0 0 0 0 0 0 0 0 0 0
57229- 0 0 0 0 0 0 0 0 0 0 0 0
57230- 0 0 0 0 0 0 0 0 0 0 0 0
57231- 0 0 0 0 0 0 0 0 0 0 0 0
57232- 0 0 0 0 0 0 0 0 0 0 0 0
57233- 10 10 10 26 26 26 62 62 62 66 66 66
57234- 2 2 6 2 2 6 2 2 6 6 6 6
57235- 70 70 70 170 170 170 206 206 206 234 234 234
57236-246 246 246 250 250 250 250 250 250 238 238 238
57237-226 226 226 231 231 231 238 238 238 250 250 250
57238-250 250 250 250 250 250 246 246 246 231 231 231
57239-214 214 214 206 206 206 202 202 202 202 202 202
57240-198 198 198 202 202 202 182 182 182 18 18 18
57241- 2 2 6 2 2 6 2 2 6 2 2 6
57242- 2 2 6 2 2 6 2 2 6 2 2 6
57243- 2 2 6 62 62 62 66 66 66 30 30 30
57244- 10 10 10 0 0 0 0 0 0 0 0 0
57245- 0 0 0 0 0 0 0 0 0 0 0 0
57246- 0 0 0 0 0 0 0 0 0 0 0 0
57247- 0 0 0 0 0 0 0 0 0 0 0 0
57248- 0 0 0 0 0 0 0 0 0 0 0 0
57249- 0 0 0 0 0 0 0 0 0 0 0 0
57250- 0 0 0 0 0 0 0 0 0 0 0 0
57251- 0 0 0 0 0 0 0 0 0 0 0 0
57252- 0 0 0 0 0 0 0 0 0 0 0 0
57253- 14 14 14 42 42 42 82 82 82 18 18 18
57254- 2 2 6 2 2 6 2 2 6 10 10 10
57255- 94 94 94 182 182 182 218 218 218 242 242 242
57256-250 250 250 253 253 253 253 253 253 250 250 250
57257-234 234 234 253 253 253 253 253 253 253 253 253
57258-253 253 253 253 253 253 253 253 253 246 246 246
57259-238 238 238 226 226 226 210 210 210 202 202 202
57260-195 195 195 195 195 195 210 210 210 158 158 158
57261- 6 6 6 14 14 14 50 50 50 14 14 14
57262- 2 2 6 2 2 6 2 2 6 2 2 6
57263- 2 2 6 6 6 6 86 86 86 46 46 46
57264- 18 18 18 6 6 6 0 0 0 0 0 0
57265- 0 0 0 0 0 0 0 0 0 0 0 0
57266- 0 0 0 0 0 0 0 0 0 0 0 0
57267- 0 0 0 0 0 0 0 0 0 0 0 0
57268- 0 0 0 0 0 0 0 0 0 0 0 0
57269- 0 0 0 0 0 0 0 0 0 0 0 0
57270- 0 0 0 0 0 0 0 0 0 0 0 0
57271- 0 0 0 0 0 0 0 0 0 0 0 0
57272- 0 0 0 0 0 0 0 0 0 6 6 6
57273- 22 22 22 54 54 54 70 70 70 2 2 6
57274- 2 2 6 10 10 10 2 2 6 22 22 22
57275-166 166 166 231 231 231 250 250 250 253 253 253
57276-253 253 253 253 253 253 253 253 253 250 250 250
57277-242 242 242 253 253 253 253 253 253 253 253 253
57278-253 253 253 253 253 253 253 253 253 253 253 253
57279-253 253 253 253 253 253 253 253 253 246 246 246
57280-231 231 231 206 206 206 198 198 198 226 226 226
57281- 94 94 94 2 2 6 6 6 6 38 38 38
57282- 30 30 30 2 2 6 2 2 6 2 2 6
57283- 2 2 6 2 2 6 62 62 62 66 66 66
57284- 26 26 26 10 10 10 0 0 0 0 0 0
57285- 0 0 0 0 0 0 0 0 0 0 0 0
57286- 0 0 0 0 0 0 0 0 0 0 0 0
57287- 0 0 0 0 0 0 0 0 0 0 0 0
57288- 0 0 0 0 0 0 0 0 0 0 0 0
57289- 0 0 0 0 0 0 0 0 0 0 0 0
57290- 0 0 0 0 0 0 0 0 0 0 0 0
57291- 0 0 0 0 0 0 0 0 0 0 0 0
57292- 0 0 0 0 0 0 0 0 0 10 10 10
57293- 30 30 30 74 74 74 50 50 50 2 2 6
57294- 26 26 26 26 26 26 2 2 6 106 106 106
57295-238 238 238 253 253 253 253 253 253 253 253 253
57296-253 253 253 253 253 253 253 253 253 253 253 253
57297-253 253 253 253 253 253 253 253 253 253 253 253
57298-253 253 253 253 253 253 253 253 253 253 253 253
57299-253 253 253 253 253 253 253 253 253 253 253 253
57300-253 253 253 246 246 246 218 218 218 202 202 202
57301-210 210 210 14 14 14 2 2 6 2 2 6
57302- 30 30 30 22 22 22 2 2 6 2 2 6
57303- 2 2 6 2 2 6 18 18 18 86 86 86
57304- 42 42 42 14 14 14 0 0 0 0 0 0
57305- 0 0 0 0 0 0 0 0 0 0 0 0
57306- 0 0 0 0 0 0 0 0 0 0 0 0
57307- 0 0 0 0 0 0 0 0 0 0 0 0
57308- 0 0 0 0 0 0 0 0 0 0 0 0
57309- 0 0 0 0 0 0 0 0 0 0 0 0
57310- 0 0 0 0 0 0 0 0 0 0 0 0
57311- 0 0 0 0 0 0 0 0 0 0 0 0
57312- 0 0 0 0 0 0 0 0 0 14 14 14
57313- 42 42 42 90 90 90 22 22 22 2 2 6
57314- 42 42 42 2 2 6 18 18 18 218 218 218
57315-253 253 253 253 253 253 253 253 253 253 253 253
57316-253 253 253 253 253 253 253 253 253 253 253 253
57317-253 253 253 253 253 253 253 253 253 253 253 253
57318-253 253 253 253 253 253 253 253 253 253 253 253
57319-253 253 253 253 253 253 253 253 253 253 253 253
57320-253 253 253 253 253 253 250 250 250 221 221 221
57321-218 218 218 101 101 101 2 2 6 14 14 14
57322- 18 18 18 38 38 38 10 10 10 2 2 6
57323- 2 2 6 2 2 6 2 2 6 78 78 78
57324- 58 58 58 22 22 22 6 6 6 0 0 0
57325- 0 0 0 0 0 0 0 0 0 0 0 0
57326- 0 0 0 0 0 0 0 0 0 0 0 0
57327- 0 0 0 0 0 0 0 0 0 0 0 0
57328- 0 0 0 0 0 0 0 0 0 0 0 0
57329- 0 0 0 0 0 0 0 0 0 0 0 0
57330- 0 0 0 0 0 0 0 0 0 0 0 0
57331- 0 0 0 0 0 0 0 0 0 0 0 0
57332- 0 0 0 0 0 0 6 6 6 18 18 18
57333- 54 54 54 82 82 82 2 2 6 26 26 26
57334- 22 22 22 2 2 6 123 123 123 253 253 253
57335-253 253 253 253 253 253 253 253 253 253 253 253
57336-253 253 253 253 253 253 253 253 253 253 253 253
57337-253 253 253 253 253 253 253 253 253 253 253 253
57338-253 253 253 253 253 253 253 253 253 253 253 253
57339-253 253 253 253 253 253 253 253 253 253 253 253
57340-253 253 253 253 253 253 253 253 253 250 250 250
57341-238 238 238 198 198 198 6 6 6 38 38 38
57342- 58 58 58 26 26 26 38 38 38 2 2 6
57343- 2 2 6 2 2 6 2 2 6 46 46 46
57344- 78 78 78 30 30 30 10 10 10 0 0 0
57345- 0 0 0 0 0 0 0 0 0 0 0 0
57346- 0 0 0 0 0 0 0 0 0 0 0 0
57347- 0 0 0 0 0 0 0 0 0 0 0 0
57348- 0 0 0 0 0 0 0 0 0 0 0 0
57349- 0 0 0 0 0 0 0 0 0 0 0 0
57350- 0 0 0 0 0 0 0 0 0 0 0 0
57351- 0 0 0 0 0 0 0 0 0 0 0 0
57352- 0 0 0 0 0 0 10 10 10 30 30 30
57353- 74 74 74 58 58 58 2 2 6 42 42 42
57354- 2 2 6 22 22 22 231 231 231 253 253 253
57355-253 253 253 253 253 253 253 253 253 253 253 253
57356-253 253 253 253 253 253 253 253 253 250 250 250
57357-253 253 253 253 253 253 253 253 253 253 253 253
57358-253 253 253 253 253 253 253 253 253 253 253 253
57359-253 253 253 253 253 253 253 253 253 253 253 253
57360-253 253 253 253 253 253 253 253 253 253 253 253
57361-253 253 253 246 246 246 46 46 46 38 38 38
57362- 42 42 42 14 14 14 38 38 38 14 14 14
57363- 2 2 6 2 2 6 2 2 6 6 6 6
57364- 86 86 86 46 46 46 14 14 14 0 0 0
57365- 0 0 0 0 0 0 0 0 0 0 0 0
57366- 0 0 0 0 0 0 0 0 0 0 0 0
57367- 0 0 0 0 0 0 0 0 0 0 0 0
57368- 0 0 0 0 0 0 0 0 0 0 0 0
57369- 0 0 0 0 0 0 0 0 0 0 0 0
57370- 0 0 0 0 0 0 0 0 0 0 0 0
57371- 0 0 0 0 0 0 0 0 0 0 0 0
57372- 0 0 0 6 6 6 14 14 14 42 42 42
57373- 90 90 90 18 18 18 18 18 18 26 26 26
57374- 2 2 6 116 116 116 253 253 253 253 253 253
57375-253 253 253 253 253 253 253 253 253 253 253 253
57376-253 253 253 253 253 253 250 250 250 238 238 238
57377-253 253 253 253 253 253 253 253 253 253 253 253
57378-253 253 253 253 253 253 253 253 253 253 253 253
57379-253 253 253 253 253 253 253 253 253 253 253 253
57380-253 253 253 253 253 253 253 253 253 253 253 253
57381-253 253 253 253 253 253 94 94 94 6 6 6
57382- 2 2 6 2 2 6 10 10 10 34 34 34
57383- 2 2 6 2 2 6 2 2 6 2 2 6
57384- 74 74 74 58 58 58 22 22 22 6 6 6
57385- 0 0 0 0 0 0 0 0 0 0 0 0
57386- 0 0 0 0 0 0 0 0 0 0 0 0
57387- 0 0 0 0 0 0 0 0 0 0 0 0
57388- 0 0 0 0 0 0 0 0 0 0 0 0
57389- 0 0 0 0 0 0 0 0 0 0 0 0
57390- 0 0 0 0 0 0 0 0 0 0 0 0
57391- 0 0 0 0 0 0 0 0 0 0 0 0
57392- 0 0 0 10 10 10 26 26 26 66 66 66
57393- 82 82 82 2 2 6 38 38 38 6 6 6
57394- 14 14 14 210 210 210 253 253 253 253 253 253
57395-253 253 253 253 253 253 253 253 253 253 253 253
57396-253 253 253 253 253 253 246 246 246 242 242 242
57397-253 253 253 253 253 253 253 253 253 253 253 253
57398-253 253 253 253 253 253 253 253 253 253 253 253
57399-253 253 253 253 253 253 253 253 253 253 253 253
57400-253 253 253 253 253 253 253 253 253 253 253 253
57401-253 253 253 253 253 253 144 144 144 2 2 6
57402- 2 2 6 2 2 6 2 2 6 46 46 46
57403- 2 2 6 2 2 6 2 2 6 2 2 6
57404- 42 42 42 74 74 74 30 30 30 10 10 10
57405- 0 0 0 0 0 0 0 0 0 0 0 0
57406- 0 0 0 0 0 0 0 0 0 0 0 0
57407- 0 0 0 0 0 0 0 0 0 0 0 0
57408- 0 0 0 0 0 0 0 0 0 0 0 0
57409- 0 0 0 0 0 0 0 0 0 0 0 0
57410- 0 0 0 0 0 0 0 0 0 0 0 0
57411- 0 0 0 0 0 0 0 0 0 0 0 0
57412- 6 6 6 14 14 14 42 42 42 90 90 90
57413- 26 26 26 6 6 6 42 42 42 2 2 6
57414- 74 74 74 250 250 250 253 253 253 253 253 253
57415-253 253 253 253 253 253 253 253 253 253 253 253
57416-253 253 253 253 253 253 242 242 242 242 242 242
57417-253 253 253 253 253 253 253 253 253 253 253 253
57418-253 253 253 253 253 253 253 253 253 253 253 253
57419-253 253 253 253 253 253 253 253 253 253 253 253
57420-253 253 253 253 253 253 253 253 253 253 253 253
57421-253 253 253 253 253 253 182 182 182 2 2 6
57422- 2 2 6 2 2 6 2 2 6 46 46 46
57423- 2 2 6 2 2 6 2 2 6 2 2 6
57424- 10 10 10 86 86 86 38 38 38 10 10 10
57425- 0 0 0 0 0 0 0 0 0 0 0 0
57426- 0 0 0 0 0 0 0 0 0 0 0 0
57427- 0 0 0 0 0 0 0 0 0 0 0 0
57428- 0 0 0 0 0 0 0 0 0 0 0 0
57429- 0 0 0 0 0 0 0 0 0 0 0 0
57430- 0 0 0 0 0 0 0 0 0 0 0 0
57431- 0 0 0 0 0 0 0 0 0 0 0 0
57432- 10 10 10 26 26 26 66 66 66 82 82 82
57433- 2 2 6 22 22 22 18 18 18 2 2 6
57434-149 149 149 253 253 253 253 253 253 253 253 253
57435-253 253 253 253 253 253 253 253 253 253 253 253
57436-253 253 253 253 253 253 234 234 234 242 242 242
57437-253 253 253 253 253 253 253 253 253 253 253 253
57438-253 253 253 253 253 253 253 253 253 253 253 253
57439-253 253 253 253 253 253 253 253 253 253 253 253
57440-253 253 253 253 253 253 253 253 253 253 253 253
57441-253 253 253 253 253 253 206 206 206 2 2 6
57442- 2 2 6 2 2 6 2 2 6 38 38 38
57443- 2 2 6 2 2 6 2 2 6 2 2 6
57444- 6 6 6 86 86 86 46 46 46 14 14 14
57445- 0 0 0 0 0 0 0 0 0 0 0 0
57446- 0 0 0 0 0 0 0 0 0 0 0 0
57447- 0 0 0 0 0 0 0 0 0 0 0 0
57448- 0 0 0 0 0 0 0 0 0 0 0 0
57449- 0 0 0 0 0 0 0 0 0 0 0 0
57450- 0 0 0 0 0 0 0 0 0 0 0 0
57451- 0 0 0 0 0 0 0 0 0 6 6 6
57452- 18 18 18 46 46 46 86 86 86 18 18 18
57453- 2 2 6 34 34 34 10 10 10 6 6 6
57454-210 210 210 253 253 253 253 253 253 253 253 253
57455-253 253 253 253 253 253 253 253 253 253 253 253
57456-253 253 253 253 253 253 234 234 234 242 242 242
57457-253 253 253 253 253 253 253 253 253 253 253 253
57458-253 253 253 253 253 253 253 253 253 253 253 253
57459-253 253 253 253 253 253 253 253 253 253 253 253
57460-253 253 253 253 253 253 253 253 253 253 253 253
57461-253 253 253 253 253 253 221 221 221 6 6 6
57462- 2 2 6 2 2 6 6 6 6 30 30 30
57463- 2 2 6 2 2 6 2 2 6 2 2 6
57464- 2 2 6 82 82 82 54 54 54 18 18 18
57465- 6 6 6 0 0 0 0 0 0 0 0 0
57466- 0 0 0 0 0 0 0 0 0 0 0 0
57467- 0 0 0 0 0 0 0 0 0 0 0 0
57468- 0 0 0 0 0 0 0 0 0 0 0 0
57469- 0 0 0 0 0 0 0 0 0 0 0 0
57470- 0 0 0 0 0 0 0 0 0 0 0 0
57471- 0 0 0 0 0 0 0 0 0 10 10 10
57472- 26 26 26 66 66 66 62 62 62 2 2 6
57473- 2 2 6 38 38 38 10 10 10 26 26 26
57474-238 238 238 253 253 253 253 253 253 253 253 253
57475-253 253 253 253 253 253 253 253 253 253 253 253
57476-253 253 253 253 253 253 231 231 231 238 238 238
57477-253 253 253 253 253 253 253 253 253 253 253 253
57478-253 253 253 253 253 253 253 253 253 253 253 253
57479-253 253 253 253 253 253 253 253 253 253 253 253
57480-253 253 253 253 253 253 253 253 253 253 253 253
57481-253 253 253 253 253 253 231 231 231 6 6 6
57482- 2 2 6 2 2 6 10 10 10 30 30 30
57483- 2 2 6 2 2 6 2 2 6 2 2 6
57484- 2 2 6 66 66 66 58 58 58 22 22 22
57485- 6 6 6 0 0 0 0 0 0 0 0 0
57486- 0 0 0 0 0 0 0 0 0 0 0 0
57487- 0 0 0 0 0 0 0 0 0 0 0 0
57488- 0 0 0 0 0 0 0 0 0 0 0 0
57489- 0 0 0 0 0 0 0 0 0 0 0 0
57490- 0 0 0 0 0 0 0 0 0 0 0 0
57491- 0 0 0 0 0 0 0 0 0 10 10 10
57492- 38 38 38 78 78 78 6 6 6 2 2 6
57493- 2 2 6 46 46 46 14 14 14 42 42 42
57494-246 246 246 253 253 253 253 253 253 253 253 253
57495-253 253 253 253 253 253 253 253 253 253 253 253
57496-253 253 253 253 253 253 231 231 231 242 242 242
57497-253 253 253 253 253 253 253 253 253 253 253 253
57498-253 253 253 253 253 253 253 253 253 253 253 253
57499-253 253 253 253 253 253 253 253 253 253 253 253
57500-253 253 253 253 253 253 253 253 253 253 253 253
57501-253 253 253 253 253 253 234 234 234 10 10 10
57502- 2 2 6 2 2 6 22 22 22 14 14 14
57503- 2 2 6 2 2 6 2 2 6 2 2 6
57504- 2 2 6 66 66 66 62 62 62 22 22 22
57505- 6 6 6 0 0 0 0 0 0 0 0 0
57506- 0 0 0 0 0 0 0 0 0 0 0 0
57507- 0 0 0 0 0 0 0 0 0 0 0 0
57508- 0 0 0 0 0 0 0 0 0 0 0 0
57509- 0 0 0 0 0 0 0 0 0 0 0 0
57510- 0 0 0 0 0 0 0 0 0 0 0 0
57511- 0 0 0 0 0 0 6 6 6 18 18 18
57512- 50 50 50 74 74 74 2 2 6 2 2 6
57513- 14 14 14 70 70 70 34 34 34 62 62 62
57514-250 250 250 253 253 253 253 253 253 253 253 253
57515-253 253 253 253 253 253 253 253 253 253 253 253
57516-253 253 253 253 253 253 231 231 231 246 246 246
57517-253 253 253 253 253 253 253 253 253 253 253 253
57518-253 253 253 253 253 253 253 253 253 253 253 253
57519-253 253 253 253 253 253 253 253 253 253 253 253
57520-253 253 253 253 253 253 253 253 253 253 253 253
57521-253 253 253 253 253 253 234 234 234 14 14 14
57522- 2 2 6 2 2 6 30 30 30 2 2 6
57523- 2 2 6 2 2 6 2 2 6 2 2 6
57524- 2 2 6 66 66 66 62 62 62 22 22 22
57525- 6 6 6 0 0 0 0 0 0 0 0 0
57526- 0 0 0 0 0 0 0 0 0 0 0 0
57527- 0 0 0 0 0 0 0 0 0 0 0 0
57528- 0 0 0 0 0 0 0 0 0 0 0 0
57529- 0 0 0 0 0 0 0 0 0 0 0 0
57530- 0 0 0 0 0 0 0 0 0 0 0 0
57531- 0 0 0 0 0 0 6 6 6 18 18 18
57532- 54 54 54 62 62 62 2 2 6 2 2 6
57533- 2 2 6 30 30 30 46 46 46 70 70 70
57534-250 250 250 253 253 253 253 253 253 253 253 253
57535-253 253 253 253 253 253 253 253 253 253 253 253
57536-253 253 253 253 253 253 231 231 231 246 246 246
57537-253 253 253 253 253 253 253 253 253 253 253 253
57538-253 253 253 253 253 253 253 253 253 253 253 253
57539-253 253 253 253 253 253 253 253 253 253 253 253
57540-253 253 253 253 253 253 253 253 253 253 253 253
57541-253 253 253 253 253 253 226 226 226 10 10 10
57542- 2 2 6 6 6 6 30 30 30 2 2 6
57543- 2 2 6 2 2 6 2 2 6 2 2 6
57544- 2 2 6 66 66 66 58 58 58 22 22 22
57545- 6 6 6 0 0 0 0 0 0 0 0 0
57546- 0 0 0 0 0 0 0 0 0 0 0 0
57547- 0 0 0 0 0 0 0 0 0 0 0 0
57548- 0 0 0 0 0 0 0 0 0 0 0 0
57549- 0 0 0 0 0 0 0 0 0 0 0 0
57550- 0 0 0 0 0 0 0 0 0 0 0 0
57551- 0 0 0 0 0 0 6 6 6 22 22 22
57552- 58 58 58 62 62 62 2 2 6 2 2 6
57553- 2 2 6 2 2 6 30 30 30 78 78 78
57554-250 250 250 253 253 253 253 253 253 253 253 253
57555-253 253 253 253 253 253 253 253 253 253 253 253
57556-253 253 253 253 253 253 231 231 231 246 246 246
57557-253 253 253 253 253 253 253 253 253 253 253 253
57558-253 253 253 253 253 253 253 253 253 253 253 253
57559-253 253 253 253 253 253 253 253 253 253 253 253
57560-253 253 253 253 253 253 253 253 253 253 253 253
57561-253 253 253 253 253 253 206 206 206 2 2 6
57562- 22 22 22 34 34 34 18 14 6 22 22 22
57563- 26 26 26 18 18 18 6 6 6 2 2 6
57564- 2 2 6 82 82 82 54 54 54 18 18 18
57565- 6 6 6 0 0 0 0 0 0 0 0 0
57566- 0 0 0 0 0 0 0 0 0 0 0 0
57567- 0 0 0 0 0 0 0 0 0 0 0 0
57568- 0 0 0 0 0 0 0 0 0 0 0 0
57569- 0 0 0 0 0 0 0 0 0 0 0 0
57570- 0 0 0 0 0 0 0 0 0 0 0 0
57571- 0 0 0 0 0 0 6 6 6 26 26 26
57572- 62 62 62 106 106 106 74 54 14 185 133 11
57573-210 162 10 121 92 8 6 6 6 62 62 62
57574-238 238 238 253 253 253 253 253 253 253 253 253
57575-253 253 253 253 253 253 253 253 253 253 253 253
57576-253 253 253 253 253 253 231 231 231 246 246 246
57577-253 253 253 253 253 253 253 253 253 253 253 253
57578-253 253 253 253 253 253 253 253 253 253 253 253
57579-253 253 253 253 253 253 253 253 253 253 253 253
57580-253 253 253 253 253 253 253 253 253 253 253 253
57581-253 253 253 253 253 253 158 158 158 18 18 18
57582- 14 14 14 2 2 6 2 2 6 2 2 6
57583- 6 6 6 18 18 18 66 66 66 38 38 38
57584- 6 6 6 94 94 94 50 50 50 18 18 18
57585- 6 6 6 0 0 0 0 0 0 0 0 0
57586- 0 0 0 0 0 0 0 0 0 0 0 0
57587- 0 0 0 0 0 0 0 0 0 0 0 0
57588- 0 0 0 0 0 0 0 0 0 0 0 0
57589- 0 0 0 0 0 0 0 0 0 0 0 0
57590- 0 0 0 0 0 0 0 0 0 6 6 6
57591- 10 10 10 10 10 10 18 18 18 38 38 38
57592- 78 78 78 142 134 106 216 158 10 242 186 14
57593-246 190 14 246 190 14 156 118 10 10 10 10
57594- 90 90 90 238 238 238 253 253 253 253 253 253
57595-253 253 253 253 253 253 253 253 253 253 253 253
57596-253 253 253 253 253 253 231 231 231 250 250 250
57597-253 253 253 253 253 253 253 253 253 253 253 253
57598-253 253 253 253 253 253 253 253 253 253 253 253
57599-253 253 253 253 253 253 253 253 253 253 253 253
57600-253 253 253 253 253 253 253 253 253 246 230 190
57601-238 204 91 238 204 91 181 142 44 37 26 9
57602- 2 2 6 2 2 6 2 2 6 2 2 6
57603- 2 2 6 2 2 6 38 38 38 46 46 46
57604- 26 26 26 106 106 106 54 54 54 18 18 18
57605- 6 6 6 0 0 0 0 0 0 0 0 0
57606- 0 0 0 0 0 0 0 0 0 0 0 0
57607- 0 0 0 0 0 0 0 0 0 0 0 0
57608- 0 0 0 0 0 0 0 0 0 0 0 0
57609- 0 0 0 0 0 0 0 0 0 0 0 0
57610- 0 0 0 6 6 6 14 14 14 22 22 22
57611- 30 30 30 38 38 38 50 50 50 70 70 70
57612-106 106 106 190 142 34 226 170 11 242 186 14
57613-246 190 14 246 190 14 246 190 14 154 114 10
57614- 6 6 6 74 74 74 226 226 226 253 253 253
57615-253 253 253 253 253 253 253 253 253 253 253 253
57616-253 253 253 253 253 253 231 231 231 250 250 250
57617-253 253 253 253 253 253 253 253 253 253 253 253
57618-253 253 253 253 253 253 253 253 253 253 253 253
57619-253 253 253 253 253 253 253 253 253 253 253 253
57620-253 253 253 253 253 253 253 253 253 228 184 62
57621-241 196 14 241 208 19 232 195 16 38 30 10
57622- 2 2 6 2 2 6 2 2 6 2 2 6
57623- 2 2 6 6 6 6 30 30 30 26 26 26
57624-203 166 17 154 142 90 66 66 66 26 26 26
57625- 6 6 6 0 0 0 0 0 0 0 0 0
57626- 0 0 0 0 0 0 0 0 0 0 0 0
57627- 0 0 0 0 0 0 0 0 0 0 0 0
57628- 0 0 0 0 0 0 0 0 0 0 0 0
57629- 0 0 0 0 0 0 0 0 0 0 0 0
57630- 6 6 6 18 18 18 38 38 38 58 58 58
57631- 78 78 78 86 86 86 101 101 101 123 123 123
57632-175 146 61 210 150 10 234 174 13 246 186 14
57633-246 190 14 246 190 14 246 190 14 238 190 10
57634-102 78 10 2 2 6 46 46 46 198 198 198
57635-253 253 253 253 253 253 253 253 253 253 253 253
57636-253 253 253 253 253 253 234 234 234 242 242 242
57637-253 253 253 253 253 253 253 253 253 253 253 253
57638-253 253 253 253 253 253 253 253 253 253 253 253
57639-253 253 253 253 253 253 253 253 253 253 253 253
57640-253 253 253 253 253 253 253 253 253 224 178 62
57641-242 186 14 241 196 14 210 166 10 22 18 6
57642- 2 2 6 2 2 6 2 2 6 2 2 6
57643- 2 2 6 2 2 6 6 6 6 121 92 8
57644-238 202 15 232 195 16 82 82 82 34 34 34
57645- 10 10 10 0 0 0 0 0 0 0 0 0
57646- 0 0 0 0 0 0 0 0 0 0 0 0
57647- 0 0 0 0 0 0 0 0 0 0 0 0
57648- 0 0 0 0 0 0 0 0 0 0 0 0
57649- 0 0 0 0 0 0 0 0 0 0 0 0
57650- 14 14 14 38 38 38 70 70 70 154 122 46
57651-190 142 34 200 144 11 197 138 11 197 138 11
57652-213 154 11 226 170 11 242 186 14 246 190 14
57653-246 190 14 246 190 14 246 190 14 246 190 14
57654-225 175 15 46 32 6 2 2 6 22 22 22
57655-158 158 158 250 250 250 253 253 253 253 253 253
57656-253 253 253 253 253 253 253 253 253 253 253 253
57657-253 253 253 253 253 253 253 253 253 253 253 253
57658-253 253 253 253 253 253 253 253 253 253 253 253
57659-253 253 253 253 253 253 253 253 253 253 253 253
57660-253 253 253 250 250 250 242 242 242 224 178 62
57661-239 182 13 236 186 11 213 154 11 46 32 6
57662- 2 2 6 2 2 6 2 2 6 2 2 6
57663- 2 2 6 2 2 6 61 42 6 225 175 15
57664-238 190 10 236 186 11 112 100 78 42 42 42
57665- 14 14 14 0 0 0 0 0 0 0 0 0
57666- 0 0 0 0 0 0 0 0 0 0 0 0
57667- 0 0 0 0 0 0 0 0 0 0 0 0
57668- 0 0 0 0 0 0 0 0 0 0 0 0
57669- 0 0 0 0 0 0 0 0 0 6 6 6
57670- 22 22 22 54 54 54 154 122 46 213 154 11
57671-226 170 11 230 174 11 226 170 11 226 170 11
57672-236 178 12 242 186 14 246 190 14 246 190 14
57673-246 190 14 246 190 14 246 190 14 246 190 14
57674-241 196 14 184 144 12 10 10 10 2 2 6
57675- 6 6 6 116 116 116 242 242 242 253 253 253
57676-253 253 253 253 253 253 253 253 253 253 253 253
57677-253 253 253 253 253 253 253 253 253 253 253 253
57678-253 253 253 253 253 253 253 253 253 253 253 253
57679-253 253 253 253 253 253 253 253 253 253 253 253
57680-253 253 253 231 231 231 198 198 198 214 170 54
57681-236 178 12 236 178 12 210 150 10 137 92 6
57682- 18 14 6 2 2 6 2 2 6 2 2 6
57683- 6 6 6 70 47 6 200 144 11 236 178 12
57684-239 182 13 239 182 13 124 112 88 58 58 58
57685- 22 22 22 6 6 6 0 0 0 0 0 0
57686- 0 0 0 0 0 0 0 0 0 0 0 0
57687- 0 0 0 0 0 0 0 0 0 0 0 0
57688- 0 0 0 0 0 0 0 0 0 0 0 0
57689- 0 0 0 0 0 0 0 0 0 10 10 10
57690- 30 30 30 70 70 70 180 133 36 226 170 11
57691-239 182 13 242 186 14 242 186 14 246 186 14
57692-246 190 14 246 190 14 246 190 14 246 190 14
57693-246 190 14 246 190 14 246 190 14 246 190 14
57694-246 190 14 232 195 16 98 70 6 2 2 6
57695- 2 2 6 2 2 6 66 66 66 221 221 221
57696-253 253 253 253 253 253 253 253 253 253 253 253
57697-253 253 253 253 253 253 253 253 253 253 253 253
57698-253 253 253 253 253 253 253 253 253 253 253 253
57699-253 253 253 253 253 253 253 253 253 253 253 253
57700-253 253 253 206 206 206 198 198 198 214 166 58
57701-230 174 11 230 174 11 216 158 10 192 133 9
57702-163 110 8 116 81 8 102 78 10 116 81 8
57703-167 114 7 197 138 11 226 170 11 239 182 13
57704-242 186 14 242 186 14 162 146 94 78 78 78
57705- 34 34 34 14 14 14 6 6 6 0 0 0
57706- 0 0 0 0 0 0 0 0 0 0 0 0
57707- 0 0 0 0 0 0 0 0 0 0 0 0
57708- 0 0 0 0 0 0 0 0 0 0 0 0
57709- 0 0 0 0 0 0 0 0 0 6 6 6
57710- 30 30 30 78 78 78 190 142 34 226 170 11
57711-239 182 13 246 190 14 246 190 14 246 190 14
57712-246 190 14 246 190 14 246 190 14 246 190 14
57713-246 190 14 246 190 14 246 190 14 246 190 14
57714-246 190 14 241 196 14 203 166 17 22 18 6
57715- 2 2 6 2 2 6 2 2 6 38 38 38
57716-218 218 218 253 253 253 253 253 253 253 253 253
57717-253 253 253 253 253 253 253 253 253 253 253 253
57718-253 253 253 253 253 253 253 253 253 253 253 253
57719-253 253 253 253 253 253 253 253 253 253 253 253
57720-250 250 250 206 206 206 198 198 198 202 162 69
57721-226 170 11 236 178 12 224 166 10 210 150 10
57722-200 144 11 197 138 11 192 133 9 197 138 11
57723-210 150 10 226 170 11 242 186 14 246 190 14
57724-246 190 14 246 186 14 225 175 15 124 112 88
57725- 62 62 62 30 30 30 14 14 14 6 6 6
57726- 0 0 0 0 0 0 0 0 0 0 0 0
57727- 0 0 0 0 0 0 0 0 0 0 0 0
57728- 0 0 0 0 0 0 0 0 0 0 0 0
57729- 0 0 0 0 0 0 0 0 0 10 10 10
57730- 30 30 30 78 78 78 174 135 50 224 166 10
57731-239 182 13 246 190 14 246 190 14 246 190 14
57732-246 190 14 246 190 14 246 190 14 246 190 14
57733-246 190 14 246 190 14 246 190 14 246 190 14
57734-246 190 14 246 190 14 241 196 14 139 102 15
57735- 2 2 6 2 2 6 2 2 6 2 2 6
57736- 78 78 78 250 250 250 253 253 253 253 253 253
57737-253 253 253 253 253 253 253 253 253 253 253 253
57738-253 253 253 253 253 253 253 253 253 253 253 253
57739-253 253 253 253 253 253 253 253 253 253 253 253
57740-250 250 250 214 214 214 198 198 198 190 150 46
57741-219 162 10 236 178 12 234 174 13 224 166 10
57742-216 158 10 213 154 11 213 154 11 216 158 10
57743-226 170 11 239 182 13 246 190 14 246 190 14
57744-246 190 14 246 190 14 242 186 14 206 162 42
57745-101 101 101 58 58 58 30 30 30 14 14 14
57746- 6 6 6 0 0 0 0 0 0 0 0 0
57747- 0 0 0 0 0 0 0 0 0 0 0 0
57748- 0 0 0 0 0 0 0 0 0 0 0 0
57749- 0 0 0 0 0 0 0 0 0 10 10 10
57750- 30 30 30 74 74 74 174 135 50 216 158 10
57751-236 178 12 246 190 14 246 190 14 246 190 14
57752-246 190 14 246 190 14 246 190 14 246 190 14
57753-246 190 14 246 190 14 246 190 14 246 190 14
57754-246 190 14 246 190 14 241 196 14 226 184 13
57755- 61 42 6 2 2 6 2 2 6 2 2 6
57756- 22 22 22 238 238 238 253 253 253 253 253 253
57757-253 253 253 253 253 253 253 253 253 253 253 253
57758-253 253 253 253 253 253 253 253 253 253 253 253
57759-253 253 253 253 253 253 253 253 253 253 253 253
57760-253 253 253 226 226 226 187 187 187 180 133 36
57761-216 158 10 236 178 12 239 182 13 236 178 12
57762-230 174 11 226 170 11 226 170 11 230 174 11
57763-236 178 12 242 186 14 246 190 14 246 190 14
57764-246 190 14 246 190 14 246 186 14 239 182 13
57765-206 162 42 106 106 106 66 66 66 34 34 34
57766- 14 14 14 6 6 6 0 0 0 0 0 0
57767- 0 0 0 0 0 0 0 0 0 0 0 0
57768- 0 0 0 0 0 0 0 0 0 0 0 0
57769- 0 0 0 0 0 0 0 0 0 6 6 6
57770- 26 26 26 70 70 70 163 133 67 213 154 11
57771-236 178 12 246 190 14 246 190 14 246 190 14
57772-246 190 14 246 190 14 246 190 14 246 190 14
57773-246 190 14 246 190 14 246 190 14 246 190 14
57774-246 190 14 246 190 14 246 190 14 241 196 14
57775-190 146 13 18 14 6 2 2 6 2 2 6
57776- 46 46 46 246 246 246 253 253 253 253 253 253
57777-253 253 253 253 253 253 253 253 253 253 253 253
57778-253 253 253 253 253 253 253 253 253 253 253 253
57779-253 253 253 253 253 253 253 253 253 253 253 253
57780-253 253 253 221 221 221 86 86 86 156 107 11
57781-216 158 10 236 178 12 242 186 14 246 186 14
57782-242 186 14 239 182 13 239 182 13 242 186 14
57783-242 186 14 246 186 14 246 190 14 246 190 14
57784-246 190 14 246 190 14 246 190 14 246 190 14
57785-242 186 14 225 175 15 142 122 72 66 66 66
57786- 30 30 30 10 10 10 0 0 0 0 0 0
57787- 0 0 0 0 0 0 0 0 0 0 0 0
57788- 0 0 0 0 0 0 0 0 0 0 0 0
57789- 0 0 0 0 0 0 0 0 0 6 6 6
57790- 26 26 26 70 70 70 163 133 67 210 150 10
57791-236 178 12 246 190 14 246 190 14 246 190 14
57792-246 190 14 246 190 14 246 190 14 246 190 14
57793-246 190 14 246 190 14 246 190 14 246 190 14
57794-246 190 14 246 190 14 246 190 14 246 190 14
57795-232 195 16 121 92 8 34 34 34 106 106 106
57796-221 221 221 253 253 253 253 253 253 253 253 253
57797-253 253 253 253 253 253 253 253 253 253 253 253
57798-253 253 253 253 253 253 253 253 253 253 253 253
57799-253 253 253 253 253 253 253 253 253 253 253 253
57800-242 242 242 82 82 82 18 14 6 163 110 8
57801-216 158 10 236 178 12 242 186 14 246 190 14
57802-246 190 14 246 190 14 246 190 14 246 190 14
57803-246 190 14 246 190 14 246 190 14 246 190 14
57804-246 190 14 246 190 14 246 190 14 246 190 14
57805-246 190 14 246 190 14 242 186 14 163 133 67
57806- 46 46 46 18 18 18 6 6 6 0 0 0
57807- 0 0 0 0 0 0 0 0 0 0 0 0
57808- 0 0 0 0 0 0 0 0 0 0 0 0
57809- 0 0 0 0 0 0 0 0 0 10 10 10
57810- 30 30 30 78 78 78 163 133 67 210 150 10
57811-236 178 12 246 186 14 246 190 14 246 190 14
57812-246 190 14 246 190 14 246 190 14 246 190 14
57813-246 190 14 246 190 14 246 190 14 246 190 14
57814-246 190 14 246 190 14 246 190 14 246 190 14
57815-241 196 14 215 174 15 190 178 144 253 253 253
57816-253 253 253 253 253 253 253 253 253 253 253 253
57817-253 253 253 253 253 253 253 253 253 253 253 253
57818-253 253 253 253 253 253 253 253 253 253 253 253
57819-253 253 253 253 253 253 253 253 253 218 218 218
57820- 58 58 58 2 2 6 22 18 6 167 114 7
57821-216 158 10 236 178 12 246 186 14 246 190 14
57822-246 190 14 246 190 14 246 190 14 246 190 14
57823-246 190 14 246 190 14 246 190 14 246 190 14
57824-246 190 14 246 190 14 246 190 14 246 190 14
57825-246 190 14 246 186 14 242 186 14 190 150 46
57826- 54 54 54 22 22 22 6 6 6 0 0 0
57827- 0 0 0 0 0 0 0 0 0 0 0 0
57828- 0 0 0 0 0 0 0 0 0 0 0 0
57829- 0 0 0 0 0 0 0 0 0 14 14 14
57830- 38 38 38 86 86 86 180 133 36 213 154 11
57831-236 178 12 246 186 14 246 190 14 246 190 14
57832-246 190 14 246 190 14 246 190 14 246 190 14
57833-246 190 14 246 190 14 246 190 14 246 190 14
57834-246 190 14 246 190 14 246 190 14 246 190 14
57835-246 190 14 232 195 16 190 146 13 214 214 214
57836-253 253 253 253 253 253 253 253 253 253 253 253
57837-253 253 253 253 253 253 253 253 253 253 253 253
57838-253 253 253 253 253 253 253 253 253 253 253 253
57839-253 253 253 250 250 250 170 170 170 26 26 26
57840- 2 2 6 2 2 6 37 26 9 163 110 8
57841-219 162 10 239 182 13 246 186 14 246 190 14
57842-246 190 14 246 190 14 246 190 14 246 190 14
57843-246 190 14 246 190 14 246 190 14 246 190 14
57844-246 190 14 246 190 14 246 190 14 246 190 14
57845-246 186 14 236 178 12 224 166 10 142 122 72
57846- 46 46 46 18 18 18 6 6 6 0 0 0
57847- 0 0 0 0 0 0 0 0 0 0 0 0
57848- 0 0 0 0 0 0 0 0 0 0 0 0
57849- 0 0 0 0 0 0 6 6 6 18 18 18
57850- 50 50 50 109 106 95 192 133 9 224 166 10
57851-242 186 14 246 190 14 246 190 14 246 190 14
57852-246 190 14 246 190 14 246 190 14 246 190 14
57853-246 190 14 246 190 14 246 190 14 246 190 14
57854-246 190 14 246 190 14 246 190 14 246 190 14
57855-242 186 14 226 184 13 210 162 10 142 110 46
57856-226 226 226 253 253 253 253 253 253 253 253 253
57857-253 253 253 253 253 253 253 253 253 253 253 253
57858-253 253 253 253 253 253 253 253 253 253 253 253
57859-198 198 198 66 66 66 2 2 6 2 2 6
57860- 2 2 6 2 2 6 50 34 6 156 107 11
57861-219 162 10 239 182 13 246 186 14 246 190 14
57862-246 190 14 246 190 14 246 190 14 246 190 14
57863-246 190 14 246 190 14 246 190 14 246 190 14
57864-246 190 14 246 190 14 246 190 14 242 186 14
57865-234 174 13 213 154 11 154 122 46 66 66 66
57866- 30 30 30 10 10 10 0 0 0 0 0 0
57867- 0 0 0 0 0 0 0 0 0 0 0 0
57868- 0 0 0 0 0 0 0 0 0 0 0 0
57869- 0 0 0 0 0 0 6 6 6 22 22 22
57870- 58 58 58 154 121 60 206 145 10 234 174 13
57871-242 186 14 246 186 14 246 190 14 246 190 14
57872-246 190 14 246 190 14 246 190 14 246 190 14
57873-246 190 14 246 190 14 246 190 14 246 190 14
57874-246 190 14 246 190 14 246 190 14 246 190 14
57875-246 186 14 236 178 12 210 162 10 163 110 8
57876- 61 42 6 138 138 138 218 218 218 250 250 250
57877-253 253 253 253 253 253 253 253 253 250 250 250
57878-242 242 242 210 210 210 144 144 144 66 66 66
57879- 6 6 6 2 2 6 2 2 6 2 2 6
57880- 2 2 6 2 2 6 61 42 6 163 110 8
57881-216 158 10 236 178 12 246 190 14 246 190 14
57882-246 190 14 246 190 14 246 190 14 246 190 14
57883-246 190 14 246 190 14 246 190 14 246 190 14
57884-246 190 14 239 182 13 230 174 11 216 158 10
57885-190 142 34 124 112 88 70 70 70 38 38 38
57886- 18 18 18 6 6 6 0 0 0 0 0 0
57887- 0 0 0 0 0 0 0 0 0 0 0 0
57888- 0 0 0 0 0 0 0 0 0 0 0 0
57889- 0 0 0 0 0 0 6 6 6 22 22 22
57890- 62 62 62 168 124 44 206 145 10 224 166 10
57891-236 178 12 239 182 13 242 186 14 242 186 14
57892-246 186 14 246 190 14 246 190 14 246 190 14
57893-246 190 14 246 190 14 246 190 14 246 190 14
57894-246 190 14 246 190 14 246 190 14 246 190 14
57895-246 190 14 236 178 12 216 158 10 175 118 6
57896- 80 54 7 2 2 6 6 6 6 30 30 30
57897- 54 54 54 62 62 62 50 50 50 38 38 38
57898- 14 14 14 2 2 6 2 2 6 2 2 6
57899- 2 2 6 2 2 6 2 2 6 2 2 6
57900- 2 2 6 6 6 6 80 54 7 167 114 7
57901-213 154 11 236 178 12 246 190 14 246 190 14
57902-246 190 14 246 190 14 246 190 14 246 190 14
57903-246 190 14 242 186 14 239 182 13 239 182 13
57904-230 174 11 210 150 10 174 135 50 124 112 88
57905- 82 82 82 54 54 54 34 34 34 18 18 18
57906- 6 6 6 0 0 0 0 0 0 0 0 0
57907- 0 0 0 0 0 0 0 0 0 0 0 0
57908- 0 0 0 0 0 0 0 0 0 0 0 0
57909- 0 0 0 0 0 0 6 6 6 18 18 18
57910- 50 50 50 158 118 36 192 133 9 200 144 11
57911-216 158 10 219 162 10 224 166 10 226 170 11
57912-230 174 11 236 178 12 239 182 13 239 182 13
57913-242 186 14 246 186 14 246 190 14 246 190 14
57914-246 190 14 246 190 14 246 190 14 246 190 14
57915-246 186 14 230 174 11 210 150 10 163 110 8
57916-104 69 6 10 10 10 2 2 6 2 2 6
57917- 2 2 6 2 2 6 2 2 6 2 2 6
57918- 2 2 6 2 2 6 2 2 6 2 2 6
57919- 2 2 6 2 2 6 2 2 6 2 2 6
57920- 2 2 6 6 6 6 91 60 6 167 114 7
57921-206 145 10 230 174 11 242 186 14 246 190 14
57922-246 190 14 246 190 14 246 186 14 242 186 14
57923-239 182 13 230 174 11 224 166 10 213 154 11
57924-180 133 36 124 112 88 86 86 86 58 58 58
57925- 38 38 38 22 22 22 10 10 10 6 6 6
57926- 0 0 0 0 0 0 0 0 0 0 0 0
57927- 0 0 0 0 0 0 0 0 0 0 0 0
57928- 0 0 0 0 0 0 0 0 0 0 0 0
57929- 0 0 0 0 0 0 0 0 0 14 14 14
57930- 34 34 34 70 70 70 138 110 50 158 118 36
57931-167 114 7 180 123 7 192 133 9 197 138 11
57932-200 144 11 206 145 10 213 154 11 219 162 10
57933-224 166 10 230 174 11 239 182 13 242 186 14
57934-246 186 14 246 186 14 246 186 14 246 186 14
57935-239 182 13 216 158 10 185 133 11 152 99 6
57936-104 69 6 18 14 6 2 2 6 2 2 6
57937- 2 2 6 2 2 6 2 2 6 2 2 6
57938- 2 2 6 2 2 6 2 2 6 2 2 6
57939- 2 2 6 2 2 6 2 2 6 2 2 6
57940- 2 2 6 6 6 6 80 54 7 152 99 6
57941-192 133 9 219 162 10 236 178 12 239 182 13
57942-246 186 14 242 186 14 239 182 13 236 178 12
57943-224 166 10 206 145 10 192 133 9 154 121 60
57944- 94 94 94 62 62 62 42 42 42 22 22 22
57945- 14 14 14 6 6 6 0 0 0 0 0 0
57946- 0 0 0 0 0 0 0 0 0 0 0 0
57947- 0 0 0 0 0 0 0 0 0 0 0 0
57948- 0 0 0 0 0 0 0 0 0 0 0 0
57949- 0 0 0 0 0 0 0 0 0 6 6 6
57950- 18 18 18 34 34 34 58 58 58 78 78 78
57951-101 98 89 124 112 88 142 110 46 156 107 11
57952-163 110 8 167 114 7 175 118 6 180 123 7
57953-185 133 11 197 138 11 210 150 10 219 162 10
57954-226 170 11 236 178 12 236 178 12 234 174 13
57955-219 162 10 197 138 11 163 110 8 130 83 6
57956- 91 60 6 10 10 10 2 2 6 2 2 6
57957- 18 18 18 38 38 38 38 38 38 38 38 38
57958- 38 38 38 38 38 38 38 38 38 38 38 38
57959- 38 38 38 38 38 38 26 26 26 2 2 6
57960- 2 2 6 6 6 6 70 47 6 137 92 6
57961-175 118 6 200 144 11 219 162 10 230 174 11
57962-234 174 13 230 174 11 219 162 10 210 150 10
57963-192 133 9 163 110 8 124 112 88 82 82 82
57964- 50 50 50 30 30 30 14 14 14 6 6 6
57965- 0 0 0 0 0 0 0 0 0 0 0 0
57966- 0 0 0 0 0 0 0 0 0 0 0 0
57967- 0 0 0 0 0 0 0 0 0 0 0 0
57968- 0 0 0 0 0 0 0 0 0 0 0 0
57969- 0 0 0 0 0 0 0 0 0 0 0 0
57970- 6 6 6 14 14 14 22 22 22 34 34 34
57971- 42 42 42 58 58 58 74 74 74 86 86 86
57972-101 98 89 122 102 70 130 98 46 121 87 25
57973-137 92 6 152 99 6 163 110 8 180 123 7
57974-185 133 11 197 138 11 206 145 10 200 144 11
57975-180 123 7 156 107 11 130 83 6 104 69 6
57976- 50 34 6 54 54 54 110 110 110 101 98 89
57977- 86 86 86 82 82 82 78 78 78 78 78 78
57978- 78 78 78 78 78 78 78 78 78 78 78 78
57979- 78 78 78 82 82 82 86 86 86 94 94 94
57980-106 106 106 101 101 101 86 66 34 124 80 6
57981-156 107 11 180 123 7 192 133 9 200 144 11
57982-206 145 10 200 144 11 192 133 9 175 118 6
57983-139 102 15 109 106 95 70 70 70 42 42 42
57984- 22 22 22 10 10 10 0 0 0 0 0 0
57985- 0 0 0 0 0 0 0 0 0 0 0 0
57986- 0 0 0 0 0 0 0 0 0 0 0 0
57987- 0 0 0 0 0 0 0 0 0 0 0 0
57988- 0 0 0 0 0 0 0 0 0 0 0 0
57989- 0 0 0 0 0 0 0 0 0 0 0 0
57990- 0 0 0 0 0 0 6 6 6 10 10 10
57991- 14 14 14 22 22 22 30 30 30 38 38 38
57992- 50 50 50 62 62 62 74 74 74 90 90 90
57993-101 98 89 112 100 78 121 87 25 124 80 6
57994-137 92 6 152 99 6 152 99 6 152 99 6
57995-138 86 6 124 80 6 98 70 6 86 66 30
57996-101 98 89 82 82 82 58 58 58 46 46 46
57997- 38 38 38 34 34 34 34 34 34 34 34 34
57998- 34 34 34 34 34 34 34 34 34 34 34 34
57999- 34 34 34 34 34 34 38 38 38 42 42 42
58000- 54 54 54 82 82 82 94 86 76 91 60 6
58001-134 86 6 156 107 11 167 114 7 175 118 6
58002-175 118 6 167 114 7 152 99 6 121 87 25
58003-101 98 89 62 62 62 34 34 34 18 18 18
58004- 6 6 6 0 0 0 0 0 0 0 0 0
58005- 0 0 0 0 0 0 0 0 0 0 0 0
58006- 0 0 0 0 0 0 0 0 0 0 0 0
58007- 0 0 0 0 0 0 0 0 0 0 0 0
58008- 0 0 0 0 0 0 0 0 0 0 0 0
58009- 0 0 0 0 0 0 0 0 0 0 0 0
58010- 0 0 0 0 0 0 0 0 0 0 0 0
58011- 0 0 0 6 6 6 6 6 6 10 10 10
58012- 18 18 18 22 22 22 30 30 30 42 42 42
58013- 50 50 50 66 66 66 86 86 86 101 98 89
58014-106 86 58 98 70 6 104 69 6 104 69 6
58015-104 69 6 91 60 6 82 62 34 90 90 90
58016- 62 62 62 38 38 38 22 22 22 14 14 14
58017- 10 10 10 10 10 10 10 10 10 10 10 10
58018- 10 10 10 10 10 10 6 6 6 10 10 10
58019- 10 10 10 10 10 10 10 10 10 14 14 14
58020- 22 22 22 42 42 42 70 70 70 89 81 66
58021- 80 54 7 104 69 6 124 80 6 137 92 6
58022-134 86 6 116 81 8 100 82 52 86 86 86
58023- 58 58 58 30 30 30 14 14 14 6 6 6
58024- 0 0 0 0 0 0 0 0 0 0 0 0
58025- 0 0 0 0 0 0 0 0 0 0 0 0
58026- 0 0 0 0 0 0 0 0 0 0 0 0
58027- 0 0 0 0 0 0 0 0 0 0 0 0
58028- 0 0 0 0 0 0 0 0 0 0 0 0
58029- 0 0 0 0 0 0 0 0 0 0 0 0
58030- 0 0 0 0 0 0 0 0 0 0 0 0
58031- 0 0 0 0 0 0 0 0 0 0 0 0
58032- 0 0 0 6 6 6 10 10 10 14 14 14
58033- 18 18 18 26 26 26 38 38 38 54 54 54
58034- 70 70 70 86 86 86 94 86 76 89 81 66
58035- 89 81 66 86 86 86 74 74 74 50 50 50
58036- 30 30 30 14 14 14 6 6 6 0 0 0
58037- 0 0 0 0 0 0 0 0 0 0 0 0
58038- 0 0 0 0 0 0 0 0 0 0 0 0
58039- 0 0 0 0 0 0 0 0 0 0 0 0
58040- 6 6 6 18 18 18 34 34 34 58 58 58
58041- 82 82 82 89 81 66 89 81 66 89 81 66
58042- 94 86 66 94 86 76 74 74 74 50 50 50
58043- 26 26 26 14 14 14 6 6 6 0 0 0
58044- 0 0 0 0 0 0 0 0 0 0 0 0
58045- 0 0 0 0 0 0 0 0 0 0 0 0
58046- 0 0 0 0 0 0 0 0 0 0 0 0
58047- 0 0 0 0 0 0 0 0 0 0 0 0
58048- 0 0 0 0 0 0 0 0 0 0 0 0
58049- 0 0 0 0 0 0 0 0 0 0 0 0
58050- 0 0 0 0 0 0 0 0 0 0 0 0
58051- 0 0 0 0 0 0 0 0 0 0 0 0
58052- 0 0 0 0 0 0 0 0 0 0 0 0
58053- 6 6 6 6 6 6 14 14 14 18 18 18
58054- 30 30 30 38 38 38 46 46 46 54 54 54
58055- 50 50 50 42 42 42 30 30 30 18 18 18
58056- 10 10 10 0 0 0 0 0 0 0 0 0
58057- 0 0 0 0 0 0 0 0 0 0 0 0
58058- 0 0 0 0 0 0 0 0 0 0 0 0
58059- 0 0 0 0 0 0 0 0 0 0 0 0
58060- 0 0 0 6 6 6 14 14 14 26 26 26
58061- 38 38 38 50 50 50 58 58 58 58 58 58
58062- 54 54 54 42 42 42 30 30 30 18 18 18
58063- 10 10 10 0 0 0 0 0 0 0 0 0
58064- 0 0 0 0 0 0 0 0 0 0 0 0
58065- 0 0 0 0 0 0 0 0 0 0 0 0
58066- 0 0 0 0 0 0 0 0 0 0 0 0
58067- 0 0 0 0 0 0 0 0 0 0 0 0
58068- 0 0 0 0 0 0 0 0 0 0 0 0
58069- 0 0 0 0 0 0 0 0 0 0 0 0
58070- 0 0 0 0 0 0 0 0 0 0 0 0
58071- 0 0 0 0 0 0 0 0 0 0 0 0
58072- 0 0 0 0 0 0 0 0 0 0 0 0
58073- 0 0 0 0 0 0 0 0 0 6 6 6
58074- 6 6 6 10 10 10 14 14 14 18 18 18
58075- 18 18 18 14 14 14 10 10 10 6 6 6
58076- 0 0 0 0 0 0 0 0 0 0 0 0
58077- 0 0 0 0 0 0 0 0 0 0 0 0
58078- 0 0 0 0 0 0 0 0 0 0 0 0
58079- 0 0 0 0 0 0 0 0 0 0 0 0
58080- 0 0 0 0 0 0 0 0 0 6 6 6
58081- 14 14 14 18 18 18 22 22 22 22 22 22
58082- 18 18 18 14 14 14 10 10 10 6 6 6
58083- 0 0 0 0 0 0 0 0 0 0 0 0
58084- 0 0 0 0 0 0 0 0 0 0 0 0
58085- 0 0 0 0 0 0 0 0 0 0 0 0
58086- 0 0 0 0 0 0 0 0 0 0 0 0
58087- 0 0 0 0 0 0 0 0 0 0 0 0
+[... image payload elided: the replacement pixel rows added by this hunk, ASCII RGB triplets dominated by a uniform "4 4 4" background with sparse detail values; the added rows continue past the end of this excerpt ...]
58451+4 4 4 4 4 4
58452+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58453+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58454+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58455+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58456+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58457+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58458+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58459+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58460+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58461+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58462+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58463+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58464+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58465+4 4 4 4 4 4
58466+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58467+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58468+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58469+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58470+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58471+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58472+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58473+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58474+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58475+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58476+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58477+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58478+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58479+5 5 5 5 5 5
58480+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58481+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58482+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58483+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58484+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58485+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58486+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58487+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58488+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58489+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58490+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58491+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58492+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58493+5 5 5 4 4 4
58494+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58495+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58496+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58497+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58498+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58499+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58500+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58501+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58502+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58503+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58504+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58505+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58507+4 4 4 4 4 4
58508+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58509+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58510+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58511+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58512+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58513+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58514+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58515+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58516+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58517+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58518+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58519+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58521+4 4 4 4 4 4
58522+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58523+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58524+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58525+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58526+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58527+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58528+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58529+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58530+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58531+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58532+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58535+4 4 4 4 4 4
58536+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58537+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58538+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58539+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58540+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58541+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58542+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58543+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58544+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58545+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58546+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58549+4 4 4 4 4 4
58550+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58551+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58552+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58553+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58554+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58555+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58556+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58557+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58558+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58559+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58560+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58563+4 4 4 4 4 4
58564+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58565+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58566+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58567+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58568+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58569+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58570+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58571+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58572+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58573+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58574+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58577+4 4 4 4 4 4
58578+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58579+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58580+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58581+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58582+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58583+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58584+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58585+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58586+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58587+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58588+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58591+4 4 4 4 4 4
58592+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58593+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58594+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58595+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58596+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58597+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58598+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58599+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58600+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58601+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58602+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58605+4 4 4 4 4 4
58606+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58607+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58608+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58609+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58610+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58611+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58612+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58613+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58614+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58615+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58616+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58619+4 4 4 4 4 4
58620+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58621+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58622+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58623+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58624+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58625+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58626+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58627+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58628+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58629+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58630+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58633+4 4 4 4 4 4
58634+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58635+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58636+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58637+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58638+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58639+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58640+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58641+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58642+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58643+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58644+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58647+4 4 4 4 4 4
58648+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58649+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58650+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58651+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58652+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58653+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58654+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58655+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58656+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58657+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58658+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58661+4 4 4 4 4 4
58662+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58663+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
58664+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58665+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
58666+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58667+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
58668+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
58669+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
58670+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58671+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58672+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58675+4 4 4 4 4 4
58676+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
58677+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
58678+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58679+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
58680+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
58681+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
58682+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
58683+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
58684+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58685+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58686+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58689+4 4 4 4 4 4
58690+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
58691+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
58692+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58693+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
58694+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
58695+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
58696+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
58697+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
58698+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58699+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58700+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58703+4 4 4 4 4 4
58704+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58705+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
58706+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58707+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
58708+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
58709+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
58710+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
58711+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
58712+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58713+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58714+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58717+4 4 4 4 4 4
58718+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
58719+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
58720+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58721+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
58722+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
58723+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
58724+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
58725+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
58726+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
58727+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58728+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58731+4 4 4 4 4 4
58732+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58733+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
58734+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
58735+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
58736+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
58737+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
58738+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
58739+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
58740+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58741+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58742+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58745+4 4 4 4 4 4
58746+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58747+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
58748+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58749+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
58750+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
58751+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
58752+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
58753+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
58754+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58755+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58756+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58759+4 4 4 4 4 4
58760+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58761+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
58762+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
58763+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
58764+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
58765+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
58766+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58767+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
58768+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58769+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58770+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58773+4 4 4 4 4 4
58774+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58775+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
58776+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
58777+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58778+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
58779+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
58780+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58781+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
58782+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58783+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58784+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58787+4 4 4 4 4 4
58788+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58789+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
58790+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
58791+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
58792+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
58793+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
58794+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
58795+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
58796+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
58797+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58798+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58801+4 4 4 4 4 4
58802+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58803+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
58804+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
58805+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
58806+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
58807+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
58808+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
58809+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
58810+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
58811+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58812+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58815+4 4 4 4 4 4
58816+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
58817+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
58818+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
58819+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
58820+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58821+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
58822+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
58823+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
58824+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
58825+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58826+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58829+4 4 4 4 4 4
58830+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58831+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
58832+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
58833+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
58834+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
58835+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
58836+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
58837+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
58838+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
58839+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58840+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58843+4 4 4 4 4 4
58844+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
58845+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
58846+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
58847+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
58848+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
58849+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
58850+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
58851+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
58852+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
58853+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
58854+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58857+4 4 4 4 4 4
58858+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
58859+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58860+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
58861+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
58862+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
58863+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
58864+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
58865+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
58866+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
58867+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
58868+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58871+4 4 4 4 4 4
58872+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
58873+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58874+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
58875+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
58876+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
58877+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
58878+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58879+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
58880+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
58881+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
58882+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58885+4 4 4 4 4 4
58886+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
58887+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
58888+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
58889+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
58890+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58891+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58892+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58893+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58894+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58895+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58896+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58899+4 4 4 4 4 4
58900+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58901+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58902+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58903+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58904+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58905+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58906+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58907+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58908+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58909+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58913+4 4 4 4 4 4
58914+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58915+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58916+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58917+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58918+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58919+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58920+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58921+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58922+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58923+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58927+4 4 4 4 4 4
58928+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
58929+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
58930+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
58931+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
58932+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
58933+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
58934+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
58935+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
58936+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
58937+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58941+4 4 4 4 4 4
58942+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
58943+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
58944+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
58945+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
58946+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
58947+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
58948+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
58949+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
58950+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58951+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58955+4 4 4 4 4 4
58956+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
58957+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58958+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
58959+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58960+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
58961+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
58962+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
58963+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
58964+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
58965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58969+4 4 4 4 4 4
58970+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
58971+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
58972+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
58973+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
58974+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
58975+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
58976+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
58977+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
58978+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
58979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58983+4 4 4 4 4 4
58984+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58985+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
58986+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
58987+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
58988+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
58989+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
58990+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
58991+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
58992+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58997+4 4 4 4 4 4
58998+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
58999+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
59000+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59001+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
59002+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
59003+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
59004+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
59005+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
59006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59011+4 4 4 4 4 4
59012+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59013+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
59014+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
59015+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
59016+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
59017+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
59018+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
59019+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
59020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59025+4 4 4 4 4 4
59026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59027+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
59028+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59029+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
59030+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
59031+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
59032+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
59033+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
59034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59039+4 4 4 4 4 4
59040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59041+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
59042+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
59043+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
59044+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
59045+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
59046+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
59047+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
59048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59053+4 4 4 4 4 4
59054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59055+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
59056+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
59057+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59058+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
59059+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
59060+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
59061+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59067+4 4 4 4 4 4
59068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59070+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59071+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
59072+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
59073+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
59074+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
59075+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59081+4 4 4 4 4 4
59082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59085+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59086+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
59087+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
59088+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
59089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59095+4 4 4 4 4 4
59096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59099+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59100+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59101+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
59102+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
59103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59109+4 4 4 4 4 4
59110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59113+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59114+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59115+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59116+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
59117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59123+4 4 4 4 4 4
59124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59127+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
59128+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
59129+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
59130+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
59131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59137+4 4 4 4 4 4
59138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59142+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
59143+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59144+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59151+4 4 4 4 4 4
59152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59156+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
59157+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
59158+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
59159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59165+4 4 4 4 4 4
59166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59170+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
59171+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
59172+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59179+4 4 4 4 4 4
59180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59184+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
59185+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
59186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59193+4 4 4 4 4 4
59194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59198+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59199+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
59200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59207+4 4 4 4 4 4
59208diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
59209index 2b8553b..e1a482b 100644
59210--- a/drivers/xen/events/events_base.c
59211+++ b/drivers/xen/events/events_base.c
59212@@ -1564,7 +1564,7 @@ void xen_irq_resume(void)
59213 restore_pirqs();
59214 }
59215
59216-static struct irq_chip xen_dynamic_chip __read_mostly = {
59217+static struct irq_chip xen_dynamic_chip = {
59218 .name = "xen-dyn",
59219
59220 .irq_disable = disable_dynirq,
59221@@ -1578,7 +1578,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
59222 .irq_retrigger = retrigger_dynirq,
59223 };
59224
59225-static struct irq_chip xen_pirq_chip __read_mostly = {
59226+static struct irq_chip xen_pirq_chip = {
59227 .name = "xen-pirq",
59228
59229 .irq_startup = startup_pirq,
59230@@ -1598,7 +1598,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
59231 .irq_retrigger = retrigger_dynirq,
59232 };
59233
59234-static struct irq_chip xen_percpu_chip __read_mostly = {
59235+static struct irq_chip xen_percpu_chip = {
59236 .name = "xen-percpu",
59237
59238 .irq_disable = disable_dynirq,
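The three hunks above drop __read_mostly from the xen irq_chip definitions. A plausible reading, not stated in the patch itself: grsecurity's constify plugin turns ops-like structures such as irq_chip into const objects placed in .rodata, and __read_mostly forces placement in a writable data section, so the two annotations conflict. A minimal sketch of that conflict, using the mainline section name (the macro and struct names below are invented for illustration):

/* Sketch, not from the patch: a constified object cannot also live in a
 * writable section.  __read_mostly-style placement puts the variable in
 * .data..read_mostly, while const data lands in .rodata. */
#define __sketch_read_mostly __attribute__((__section__(".data..read_mostly")))

struct sketch_chip { const char *name; };

static struct sketch_chip writable_chip __sketch_read_mostly = { "xen-dyn" };
static const struct sketch_chip readonly_chip = { "xen-dyn" }; /* -> .rodata */

Combining const with the writable-section attribute on the same object would provoke a section type conflict, which is presumably why the annotation is removed rather than kept alongside the constification.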
59239diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59240index fef20db..d28b1ab 100644
59241--- a/drivers/xen/xenfs/xenstored.c
59242+++ b/drivers/xen/xenfs/xenstored.c
59243@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59244 static int xsd_kva_open(struct inode *inode, struct file *file)
59245 {
59246 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59247+#ifdef CONFIG_GRKERNSEC_HIDESYM
59248+ NULL);
59249+#else
59250 xen_store_interface);
59251+#endif
59252+
59253 if (!file->private_data)
59254 return -ENOMEM;
59255 return 0;
59256diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59257index eb14e05..5156de7 100644
59258--- a/fs/9p/vfs_addr.c
59259+++ b/fs/9p/vfs_addr.c
59260@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59261
59262 retval = v9fs_file_write_internal(inode,
59263 v9inode->writeback_fid,
59264- (__force const char __user *)buffer,
59265+ (const char __force_user *)buffer,
59266 len, &offset, 0);
59267 if (retval > 0)
59268 retval = 0;
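The cast change above swaps a bare __force for PaX's __force_user: under grsecurity the kernel and user address spaces carry distinct annotations (and, with UDEREF, distinct hardware views), so a kernel buffer fed into a __user-typed write path must say explicitly which space it is being forced into. Assuming the mainline sparse convention, the plumbing reduces to something like this sketch:

/* Sketch of the annotation macros; only sparse (__CHECKER__) sees them. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* grsecurity composes the two so the intent is explicit at the cast site: */
#define __force_user __force __user

The net effect is unchanged object code but a stricter static check: a cast that silently strips the address-space marker becomes a cast that names its destination space.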
59269diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59270index 3662f1d..90558b5 100644
59271--- a/fs/9p/vfs_inode.c
59272+++ b/fs/9p/vfs_inode.c
59273@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59274 void
59275 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59276 {
59277- char *s = nd_get_link(nd);
59278+ const char *s = nd_get_link(nd);
59279
59280 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
59281 dentry, IS_ERR(s) ? "<error>" : s);
59282diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59283index 270c481..0d8a962 100644
59284--- a/fs/Kconfig.binfmt
59285+++ b/fs/Kconfig.binfmt
59286@@ -106,7 +106,7 @@ config HAVE_AOUT
59287
59288 config BINFMT_AOUT
59289 tristate "Kernel support for a.out and ECOFF binaries"
59290- depends on HAVE_AOUT
59291+ depends on HAVE_AOUT && BROKEN
59292 ---help---
59293 A.out (Assembler.OUTput) is a set of formats for libraries and
59294 executables used in the earliest versions of UNIX. Linux used
59295diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59296index 8a1d38e..300a14e 100644
59297--- a/fs/afs/inode.c
59298+++ b/fs/afs/inode.c
59299@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59300 struct afs_vnode *vnode;
59301 struct super_block *sb;
59302 struct inode *inode;
59303- static atomic_t afs_autocell_ino;
59304+ static atomic_unchecked_t afs_autocell_ino;
59305
59306 _enter("{%x:%u},%*.*s,",
59307 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59308@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59309 data.fid.unique = 0;
59310 data.fid.vnode = 0;
59311
59312- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59313+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59314 afs_iget5_autocell_test, afs_iget5_set,
59315 &data);
59316 if (!inode) {
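afs_autocell_ino is only a generator for synthetic inode numbers, so wraparound is harmless. Switching it to atomic_unchecked_t with the matching *_unchecked accessor exempts it from PAX_REFCOUNT's overflow detection, which would otherwise treat a wrapping atomic_t as a reference-count bug. A plain C11 analogue of the idea:

#include <stdatomic.h>
#include <stdio.h>

/* unsigned wraparound is well defined, mirroring atomic_unchecked_t */
static atomic_uint afs_autocell_ino_sketch;

int main(void)
{
    /* equivalent of atomic_inc_return_unchecked(&afs_autocell_ino) */
    unsigned int ino = atomic_fetch_add(&afs_autocell_ino_sketch, 1) + 1;
    printf("synthetic inode #%u\n", ino);
    return 0;
}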
59317diff --git a/fs/aio.c b/fs/aio.c
59318index a793f70..46f45af 100644
59319--- a/fs/aio.c
59320+++ b/fs/aio.c
59321@@ -404,7 +404,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59322 size += sizeof(struct io_event) * nr_events;
59323
59324 nr_pages = PFN_UP(size);
59325- if (nr_pages < 0)
59326+ if (nr_pages <= 0)
59327 return -EINVAL;
59328
59329 file = aio_private_file(ctx, nr_pages);
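One reading of the widened check above: size is computed from caller-supplied nr_events, so the multiplication can wrap and leave PFN_UP(size) truncating to zero in the int nr_pages; "< 0" alone would let a zero-page ring reach aio_private_file(). A standalone illustration of the zero case, with the page-size constant assumed and a 64-bit size_t (this is a sketch of the failure mode, not the kernel's code path):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PFN_UP(x)  (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

int main(void)
{
    size_t size = (size_t)1 << 63;    /* stand-in for an overflowed total */
    size += size;                     /* unsigned wrap: size becomes 0 */
    int nr_pages = (int)PFN_UP(size); /* 0: passes "< 0", caught by "<= 0" */
    printf("nr_pages = %d\n", nr_pages);
    return 0;
}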
59330diff --git a/fs/attr.c b/fs/attr.c
59331index 6530ced..4a827e2 100644
59332--- a/fs/attr.c
59333+++ b/fs/attr.c
59334@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59335 unsigned long limit;
59336
59337 limit = rlimit(RLIMIT_FSIZE);
59338+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59339 if (limit != RLIM_INFINITY && offset > limit)
59340 goto out_sig;
59341 if (offset > inode->i_sb->s_maxbytes)
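gr_learn_resource() is grsecurity's RBAC learning hook: in learning mode it records the resource (here RLIMIT_FSIZE) and the value just requested against the task's subject, so that gradm can later generate per-subject limits; otherwise it returns immediately. Under that assumption, the hook's shape is roughly:

struct task_struct; /* opaque here; the kernel's definition is assumed */

void gr_learn_resource(const struct task_struct *task, const int res,
                       const unsigned long wanted, const int gt)
{
    /* In learning mode the real implementation (in grsecurity/) logs
     * (res, wanted, gt) for the task's RBAC subject; when learning is
     * disabled this is effectively a no-op. */
}

The same hook recurs throughout the patch wherever the kernel compares a request against an rlimit, which is why it appears again in the binfmt loaders below.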
59342diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59343index 116fd38..c04182da 100644
59344--- a/fs/autofs4/waitq.c
59345+++ b/fs/autofs4/waitq.c
59346@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59347 {
59348 unsigned long sigpipe, flags;
59349 mm_segment_t fs;
59350- const char *data = (const char *)addr;
59351+ const char __user *data = (const char __force_user *)addr;
59352 ssize_t wr = 0;
59353
59354 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59355@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59356 return 1;
59357 }
59358
59359+#ifdef CONFIG_GRKERNSEC_HIDESYM
59360+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59361+#endif
59362+
59363 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59364 enum autofs_notify notify)
59365 {
59366@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59367
59368 /* If this is a direct mount request create a dummy name */
59369 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59370+#ifdef CONFIG_GRKERNSEC_HIDESYM
59371+ /* this name does get written to userland via autofs4_write() */
59372+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59373+#else
59374 qstr.len = sprintf(name, "%p", dentry);
59375+#endif
59376 else {
59377 qstr.len = autofs4_getpath(sbi, dentry, &name);
59378 if (!qstr.len) {
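Because autofs4_write() copies the generated name out to the automount daemon in userspace, printing the dentry with "%p" would disclose a kernel address. The HIDESYM branch substitutes a monotonically increasing per-boot id, formatted to the same eight hex digits a 32-bit pointer would occupy. Roughly, as a userspace sketch:

#include <stdio.h>

static unsigned int autofs_dummy_name_id; /* atomic_unchecked_t in the patch */

int main(void)
{
    char name[16];
    /* qstr.len = sprintf(name, "%08x",
     *                    atomic_inc_return_unchecked(&autofs_dummy_name_id)); */
    int len = sprintf(name, "%08x", ++autofs_dummy_name_id);
    printf("dummy mount name: %s (len %d)\n", name, len);
    return 0;
}

The daemon only needs a unique token to match the wait-queue entry, so a counter serves as well as the pointer did.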
59379diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59380index 2722387..56059b5 100644
59381--- a/fs/befs/endian.h
59382+++ b/fs/befs/endian.h
59383@@ -11,7 +11,7 @@
59384
59385 #include <asm/byteorder.h>
59386
59387-static inline u64
59388+static inline u64 __intentional_overflow(-1)
59389 fs64_to_cpu(const struct super_block *sb, fs64 n)
59390 {
59391 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59392@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59393 return (__force fs64)cpu_to_be64(n);
59394 }
59395
59396-static inline u32
59397+static inline u32 __intentional_overflow(-1)
59398 fs32_to_cpu(const struct super_block *sb, fs32 n)
59399 {
59400 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59401@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59402 return (__force fs32)cpu_to_be32(n);
59403 }
59404
59405-static inline u16
59406+static inline u16 __intentional_overflow(-1)
59407 fs16_to_cpu(const struct super_block *sb, fs16 n)
59408 {
59409 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
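__intentional_overflow(-1) is an annotation consumed by PaX's size_overflow GCC plugin: it marks the befs endian helpers as functions whose arithmetic on attacker-controlled on-disk values may legitimately wrap, so the plugin does not instrument them as overflow errors. When the plugin is not built, the macro must expand to nothing, along the lines of this sketch (the fallback below is assumed, not quoted from the patch):

/* Sketch of the fallback; the plugin-enabled variant maps to a GCC
 * attribute that the size_overflow plugin recognises. */
#ifndef __intentional_overflow
# define __intentional_overflow(...)
#endif

static inline unsigned long long __intentional_overflow(-1)
fs64_to_cpu_sketch(unsigned long long n)
{
    return n; /* the real helper byte-swaps based on superblock byte order */
}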
59410diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59411index 4c55668..eeae150 100644
59412--- a/fs/binfmt_aout.c
59413+++ b/fs/binfmt_aout.c
59414@@ -16,6 +16,7 @@
59415 #include <linux/string.h>
59416 #include <linux/fs.h>
59417 #include <linux/file.h>
59418+#include <linux/security.h>
59419 #include <linux/stat.h>
59420 #include <linux/fcntl.h>
59421 #include <linux/ptrace.h>
59422@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59423 #endif
59424 # define START_STACK(u) ((void __user *)u.start_stack)
59425
59426+ memset(&dump, 0, sizeof(dump));
59427+
59428 fs = get_fs();
59429 set_fs(KERNEL_DS);
59430 has_dumped = 1;
59431@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59432
59433 /* If the size of the dump file exceeds the rlimit, then see what would happen
59434 if we wrote the stack, but not the data area. */
59435+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59436 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59437 dump.u_dsize = 0;
59438
59439 /* Make sure we have enough room to write the stack and data areas. */
59440+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59441 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59442 dump.u_ssize = 0;
59443
59444@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59445 rlim = rlimit(RLIMIT_DATA);
59446 if (rlim >= RLIM_INFINITY)
59447 rlim = ~0;
59448+
59449+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59450 if (ex.a_data + ex.a_bss > rlim)
59451 return -ENOMEM;
59452
59453@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59454
59455 install_exec_creds(bprm);
59456
59457+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59458+ current->mm->pax_flags = 0UL;
59459+#endif
59460+
59461+#ifdef CONFIG_PAX_PAGEEXEC
59462+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59463+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59464+
59465+#ifdef CONFIG_PAX_EMUTRAMP
59466+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59467+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59468+#endif
59469+
59470+#ifdef CONFIG_PAX_MPROTECT
59471+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59472+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59473+#endif
59474+
59475+ }
59476+#endif
59477+
59478 if (N_MAGIC(ex) == OMAGIC) {
59479 unsigned long text_addr, map_size;
59480 loff_t pos;
59481@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59482 return error;
59483
59484 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59485- PROT_READ | PROT_WRITE | PROT_EXEC,
59486+ PROT_READ | PROT_WRITE,
59487 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59488 fd_offset + ex.a_text);
59489 if (error != N_DATADDR(ex))
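Taken together, the a.out loader hunks do three things: zero the coredump header so uninitialized padding cannot leak kernel stack into the dump, feed the RLIMIT_CORE and RLIMIT_DATA decisions through the RBAC learning hook, and derive the task's PaX flags from the executable's N_FLAGS bits, with the data segment additionally mapped without PROT_EXEC. The flag derivation is an opt-out model: protections are enabled unless the binary carries the corresponding marker. A sketch of that pattern (bit values invented for illustration; the real ones come from the PaX headers):

#define F_PAX_PAGEEXEC  0x01 /* illustrative values, not the patch's */
#define F_PAX_MPROTECT  0x02
#define MF_PAX_PAGEEXEC 0x01
#define MF_PAX_MPROTECT 0x02

static unsigned long pax_flags_from_aout(unsigned long n_flags)
{
    unsigned long pax_flags = 0UL;

    /* opt-out: the protection is on unless the marker bit is set */
    if (!(n_flags & F_PAX_PAGEEXEC))
        pax_flags |= MF_PAX_PAGEEXEC;
    if (!(n_flags & F_PAX_MPROTECT))
        pax_flags |= MF_PAX_MPROTECT;
    return pax_flags;
}

The ELF loader that follows applies the same idea, but reads the markers from a dedicated PT_PAX_FLAGS program header instead of a.out's N_FLAGS.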
59490diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59491index d925f55..d31f527 100644
59492--- a/fs/binfmt_elf.c
59493+++ b/fs/binfmt_elf.c
59494@@ -34,6 +34,7 @@
59495 #include <linux/utsname.h>
59496 #include <linux/coredump.h>
59497 #include <linux/sched.h>
59498+#include <linux/xattr.h>
59499 #include <asm/uaccess.h>
59500 #include <asm/param.h>
59501 #include <asm/page.h>
59502@@ -47,7 +48,7 @@
59503
59504 static int load_elf_binary(struct linux_binprm *bprm);
59505 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59506- int, int, unsigned long);
59507+ int, int, unsigned long) __intentional_overflow(-1);
59508
59509 #ifdef CONFIG_USELIB
59510 static int load_elf_library(struct file *);
59511@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59512 #define elf_core_dump NULL
59513 #endif
59514
59515+#ifdef CONFIG_PAX_MPROTECT
59516+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59517+#endif
59518+
59519+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59520+static void elf_handle_mmap(struct file *file);
59521+#endif
59522+
59523 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59524 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59525 #else
59526@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59527 .load_binary = load_elf_binary,
59528 .load_shlib = load_elf_library,
59529 .core_dump = elf_core_dump,
59530+
59531+#ifdef CONFIG_PAX_MPROTECT
59532+ .handle_mprotect= elf_handle_mprotect,
59533+#endif
59534+
59535+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59536+ .handle_mmap = elf_handle_mmap,
59537+#endif
59538+
59539 .min_coredump = ELF_EXEC_PAGESIZE,
59540 };
59541
59542@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59543
59544 static int set_brk(unsigned long start, unsigned long end)
59545 {
59546+ unsigned long e = end;
59547+
59548 start = ELF_PAGEALIGN(start);
59549 end = ELF_PAGEALIGN(end);
59550 if (end > start) {
59551@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
59552 if (BAD_ADDR(addr))
59553 return addr;
59554 }
59555- current->mm->start_brk = current->mm->brk = end;
59556+ current->mm->start_brk = current->mm->brk = e;
59557 return 0;
59558 }
59559
59560@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59561 elf_addr_t __user *u_rand_bytes;
59562 const char *k_platform = ELF_PLATFORM;
59563 const char *k_base_platform = ELF_BASE_PLATFORM;
59564- unsigned char k_rand_bytes[16];
59565+ u32 k_rand_bytes[4];
59566 int items;
59567 elf_addr_t *elf_info;
59568 int ei_index = 0;
59569 const struct cred *cred = current_cred();
59570 struct vm_area_struct *vma;
59571+ unsigned long saved_auxv[AT_VECTOR_SIZE];
59572
59573 /*
59574 * In some cases (e.g. Hyper-Threading), we want to avoid L1
59575@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59576 * Generate 16 random bytes for userspace PRNG seeding.
59577 */
59578 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
59579- u_rand_bytes = (elf_addr_t __user *)
59580- STACK_ALLOC(p, sizeof(k_rand_bytes));
59581+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
59582+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
59583+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
59584+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
59585+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
59586+ u_rand_bytes = (elf_addr_t __user *) p;
59587 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
59588 return -EFAULT;
59589
59590@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59591 return -EFAULT;
59592 current->mm->env_end = p;
59593
59594+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
59595+
59596 /* Put the elf_info on the stack in the right place. */
59597 sp = (elf_addr_t __user *)envp + 1;
59598- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
59599+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
59600 return -EFAULT;
59601 return 0;
59602 }
59603@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
59604 an ELF header */
59605
59606 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59607- struct file *interpreter, unsigned long *interp_map_addr,
59608+ struct file *interpreter,
59609 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
59610 {
59611 struct elf_phdr *eppnt;
59612- unsigned long load_addr = 0;
59613+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
59614 int load_addr_set = 0;
59615 unsigned long last_bss = 0, elf_bss = 0;
59616- unsigned long error = ~0UL;
59617+ unsigned long error = -EINVAL;
59618 unsigned long total_size;
59619 int i;
59620
59621@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59622 goto out;
59623 }
59624
59625+#ifdef CONFIG_PAX_SEGMEXEC
59626+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
59627+ pax_task_size = SEGMEXEC_TASK_SIZE;
59628+#endif
59629+
59630 eppnt = interp_elf_phdata;
59631 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
59632 if (eppnt->p_type == PT_LOAD) {
59633@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59634 map_addr = elf_map(interpreter, load_addr + vaddr,
59635 eppnt, elf_prot, elf_type, total_size);
59636 total_size = 0;
59637- if (!*interp_map_addr)
59638- *interp_map_addr = map_addr;
59639 error = map_addr;
59640 if (BAD_ADDR(map_addr))
59641 goto out;
59642@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59643 k = load_addr + eppnt->p_vaddr;
59644 if (BAD_ADDR(k) ||
59645 eppnt->p_filesz > eppnt->p_memsz ||
59646- eppnt->p_memsz > TASK_SIZE ||
59647- TASK_SIZE - eppnt->p_memsz < k) {
59648+ eppnt->p_memsz > pax_task_size ||
59649+ pax_task_size - eppnt->p_memsz < k) {
59650 error = -ENOMEM;
59651 goto out;
59652 }
59653@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59654 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
59655
59656 /* Map the last of the bss segment */
59657- error = vm_brk(elf_bss, last_bss - elf_bss);
59658- if (BAD_ADDR(error))
59659- goto out;
59660+ if (last_bss > elf_bss) {
59661+ error = vm_brk(elf_bss, last_bss - elf_bss);
59662+ if (BAD_ADDR(error))
59663+ goto out;
59664+ }
59665 }
59666
59667 error = load_addr;
59668@@ -634,6 +666,336 @@ out:
59669 return error;
59670 }
59671
59672+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59673+#ifdef CONFIG_PAX_SOFTMODE
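+/* Softmode PT_PAX_FLAGS parsing: features are off by default and the
+ * positive PF_* bits opt them in.
+ */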
59674+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
59675+{
59676+ unsigned long pax_flags = 0UL;
59677+
59678+#ifdef CONFIG_PAX_PAGEEXEC
59679+ if (elf_phdata->p_flags & PF_PAGEEXEC)
59680+ pax_flags |= MF_PAX_PAGEEXEC;
59681+#endif
59682+
59683+#ifdef CONFIG_PAX_SEGMEXEC
59684+ if (elf_phdata->p_flags & PF_SEGMEXEC)
59685+ pax_flags |= MF_PAX_SEGMEXEC;
59686+#endif
59687+
59688+#ifdef CONFIG_PAX_EMUTRAMP
59689+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59690+ pax_flags |= MF_PAX_EMUTRAMP;
59691+#endif
59692+
59693+#ifdef CONFIG_PAX_MPROTECT
59694+ if (elf_phdata->p_flags & PF_MPROTECT)
59695+ pax_flags |= MF_PAX_MPROTECT;
59696+#endif
59697+
59698+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59699+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
59700+ pax_flags |= MF_PAX_RANDMMAP;
59701+#endif
59702+
59703+ return pax_flags;
59704+}
59705+#endif
59706+
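+/* Hardmode PT_PAX_FLAGS parsing: features are on by default and the
+ * PF_NO* bits opt them out.
+ */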
59707+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
59708+{
59709+ unsigned long pax_flags = 0UL;
59710+
59711+#ifdef CONFIG_PAX_PAGEEXEC
59712+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
59713+ pax_flags |= MF_PAX_PAGEEXEC;
59714+#endif
59715+
59716+#ifdef CONFIG_PAX_SEGMEXEC
59717+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
59718+ pax_flags |= MF_PAX_SEGMEXEC;
59719+#endif
59720+
59721+#ifdef CONFIG_PAX_EMUTRAMP
59722+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
59723+ pax_flags |= MF_PAX_EMUTRAMP;
59724+#endif
59725+
59726+#ifdef CONFIG_PAX_MPROTECT
59727+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
59728+ pax_flags |= MF_PAX_MPROTECT;
59729+#endif
59730+
59731+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59732+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
59733+ pax_flags |= MF_PAX_RANDMMAP;
59734+#endif
59735+
59736+ return pax_flags;
59737+}
59738+#endif
59739+
59740+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59741+#ifdef CONFIG_PAX_SOFTMODE
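+/* Softmode xattr parsing: only the features whose uppercase letter was
+ * present in the xattr are enabled.
+ */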
59742+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
59743+{
59744+ unsigned long pax_flags = 0UL;
59745+
59746+#ifdef CONFIG_PAX_PAGEEXEC
59747+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
59748+ pax_flags |= MF_PAX_PAGEEXEC;
59749+#endif
59750+
59751+#ifdef CONFIG_PAX_SEGMEXEC
59752+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
59753+ pax_flags |= MF_PAX_SEGMEXEC;
59754+#endif
59755+
59756+#ifdef CONFIG_PAX_EMUTRAMP
59757+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59758+ pax_flags |= MF_PAX_EMUTRAMP;
59759+#endif
59760+
59761+#ifdef CONFIG_PAX_MPROTECT
59762+ if (pax_flags_softmode & MF_PAX_MPROTECT)
59763+ pax_flags |= MF_PAX_MPROTECT;
59764+#endif
59765+
59766+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59767+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
59768+ pax_flags |= MF_PAX_RANDMMAP;
59769+#endif
59770+
59771+ return pax_flags;
59772+}
59773+#endif
59774+
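+/* Hardmode xattr parsing: features default to on and a lowercase letter
+ * in the xattr opts the corresponding feature out.
+ */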
59775+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
59776+{
59777+ unsigned long pax_flags = 0UL;
59778+
59779+#ifdef CONFIG_PAX_PAGEEXEC
59780+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
59781+ pax_flags |= MF_PAX_PAGEEXEC;
59782+#endif
59783+
59784+#ifdef CONFIG_PAX_SEGMEXEC
59785+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
59786+ pax_flags |= MF_PAX_SEGMEXEC;
59787+#endif
59788+
59789+#ifdef CONFIG_PAX_EMUTRAMP
59790+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
59791+ pax_flags |= MF_PAX_EMUTRAMP;
59792+#endif
59793+
59794+#ifdef CONFIG_PAX_MPROTECT
59795+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
59796+ pax_flags |= MF_PAX_MPROTECT;
59797+#endif
59798+
59799+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59800+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
59801+ pax_flags |= MF_PAX_RANDMMAP;
59802+#endif
59803+
59804+ return pax_flags;
59805+}
59806+#endif
59807+
59808+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
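+/* Compile-time defaults: enable every configured feature unless global
+ * softmode is in effect.
+ */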
59809+static unsigned long pax_parse_defaults(void)
59810+{
59811+ unsigned long pax_flags = 0UL;
59812+
59813+#ifdef CONFIG_PAX_SOFTMODE
59814+ if (pax_softmode)
59815+ return pax_flags;
59816+#endif
59817+
59818+#ifdef CONFIG_PAX_PAGEEXEC
59819+ pax_flags |= MF_PAX_PAGEEXEC;
59820+#endif
59821+
59822+#ifdef CONFIG_PAX_SEGMEXEC
59823+ pax_flags |= MF_PAX_SEGMEXEC;
59824+#endif
59825+
59826+#ifdef CONFIG_PAX_MPROTECT
59827+ pax_flags |= MF_PAX_MPROTECT;
59828+#endif
59829+
59830+#ifdef CONFIG_PAX_RANDMMAP
59831+ if (randomize_va_space)
59832+ pax_flags |= MF_PAX_RANDMMAP;
59833+#endif
59834+
59835+ return pax_flags;
59836+}
59837+
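+/* Legacy EI_PAX marking in e_ident: a set EF_PAX_* bit disables the
+ * corresponding feature, except EMUTRAMP where a set bit enables it.
+ */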
59838+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
59839+{
59840+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
59841+
59842+#ifdef CONFIG_PAX_EI_PAX
59843+
59844+#ifdef CONFIG_PAX_SOFTMODE
59845+ if (pax_softmode)
59846+ return pax_flags;
59847+#endif
59848+
59849+ pax_flags = 0UL;
59850+
59851+#ifdef CONFIG_PAX_PAGEEXEC
59852+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
59853+ pax_flags |= MF_PAX_PAGEEXEC;
59854+#endif
59855+
59856+#ifdef CONFIG_PAX_SEGMEXEC
59857+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
59858+ pax_flags |= MF_PAX_SEGMEXEC;
59859+#endif
59860+
59861+#ifdef CONFIG_PAX_EMUTRAMP
59862+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
59863+ pax_flags |= MF_PAX_EMUTRAMP;
59864+#endif
59865+
59866+#ifdef CONFIG_PAX_MPROTECT
59867+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
59868+ pax_flags |= MF_PAX_MPROTECT;
59869+#endif
59870+
59871+#ifdef CONFIG_PAX_ASLR
59872+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
59873+ pax_flags |= MF_PAX_RANDMMAP;
59874+#endif
59875+
59876+#endif
59877+
59878+ return pax_flags;
59879+
59880+}
59881+
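+/* Scan the program headers for a PT_PAX_FLAGS entry; contradictory
+ * enable/disable bit pairs invalidate the marking.
+ */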
59882+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
59883+{
59884+
59885+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59886+ unsigned long i;
59887+
59888+ for (i = 0UL; i < elf_ex->e_phnum; i++)
59889+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
59890+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59891+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59892+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59893+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59894+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59895+ return PAX_PARSE_FLAGS_FALLBACK;
59896+
59897+#ifdef CONFIG_PAX_SOFTMODE
59898+ if (pax_softmode)
59899+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59900+ else
59901+#endif
59902+
59903+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59904+ break;
59905+ }
59906+#endif
59907+
59908+ return PAX_PARSE_FLAGS_FALLBACK;
59909+}
59910+
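+/* Parse the PAX flags extended attribute: one letter per feature, at most
+ * once each, lowercase for hardmode and uppercase for softmode semantics.
+ */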
59911+static unsigned long pax_parse_xattr_pax(struct file * const file)
59912+{
59913+
59914+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59915+ ssize_t xattr_size, i;
59916+ unsigned char xattr_value[sizeof("pemrs") - 1];
59917+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59918+
59919+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59920+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59921+ return PAX_PARSE_FLAGS_FALLBACK;
59922+
59923+ for (i = 0; i < xattr_size; i++)
59924+ switch (xattr_value[i]) {
59925+ default:
59926+ return PAX_PARSE_FLAGS_FALLBACK;
59927+
59928+#define parse_flag(option1, option2, flag) \
59929+ case option1: \
59930+ if (pax_flags_hardmode & MF_PAX_##flag) \
59931+ return PAX_PARSE_FLAGS_FALLBACK;\
59932+ pax_flags_hardmode |= MF_PAX_##flag; \
59933+ break; \
59934+ case option2: \
59935+ if (pax_flags_softmode & MF_PAX_##flag) \
59936+ return PAX_PARSE_FLAGS_FALLBACK;\
59937+ pax_flags_softmode |= MF_PAX_##flag; \
59938+ break;
59939+
59940+ parse_flag('p', 'P', PAGEEXEC);
59941+ parse_flag('e', 'E', EMUTRAMP);
59942+ parse_flag('m', 'M', MPROTECT);
59943+ parse_flag('r', 'R', RANDMMAP);
59944+ parse_flag('s', 'S', SEGMEXEC);
59945+
59946+#undef parse_flag
59947+ }
59948+
59949+ if (pax_flags_hardmode & pax_flags_softmode)
59950+ return PAX_PARSE_FLAGS_FALLBACK;
59951+
59952+#ifdef CONFIG_PAX_SOFTMODE
59953+ if (pax_softmode)
59954+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59955+ else
59956+#endif
59957+
59958+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59959+#else
59960+ return PAX_PARSE_FLAGS_FALLBACK;
59961+#endif
59962+
59963+}
59964+
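+/* Merge all flag sources; precedence is xattr, then PT_PAX_FLAGS, then
+ * EI_PAX, then the compile-time defaults. Conflicting xattr and
+ * PT_PAX_FLAGS markings are rejected outright.
+ */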
59965+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59966+{
59967+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59968+
59969+ pax_flags = pax_parse_defaults();
59970+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59971+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59972+ xattr_pax_flags = pax_parse_xattr_pax(file);
59973+
59974+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59975+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59976+ pt_pax_flags != xattr_pax_flags)
59977+ return -EINVAL;
59978+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59979+ pax_flags = xattr_pax_flags;
59980+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59981+ pax_flags = pt_pax_flags;
59982+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59983+ pax_flags = ei_pax_flags;
59984+
59985+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59986+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59987+ if ((__supported_pte_mask & _PAGE_NX))
59988+ pax_flags &= ~MF_PAX_SEGMEXEC;
59989+ else
59990+ pax_flags &= ~MF_PAX_PAGEEXEC;
59991+ }
59992+#endif
59993+
59994+ if (0 > pax_check_flags(&pax_flags))
59995+ return -EINVAL;
59996+
59997+ current->mm->pax_flags = pax_flags;
59998+ return 0;
59999+}
60000+#endif
60001+
60002 /*
60003 * These are the functions used to load ELF style executables and shared
60004 * libraries. There is no binary dependent code anywhere else.
60005@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
60006 {
60007 unsigned long random_variable = 0;
60008
60009+#ifdef CONFIG_PAX_RANDUSTACK
60010+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
60011+ return stack_top - current->mm->delta_stack;
60012+#endif
60013+
60014 if ((current->flags & PF_RANDOMIZE) &&
60015 !(current->personality & ADDR_NO_RANDOMIZE)) {
60016 random_variable = (unsigned long) get_random_int();
60017@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60018 unsigned long load_addr = 0, load_bias = 0;
60019 int load_addr_set = 0;
60020 char * elf_interpreter = NULL;
60021- unsigned long error;
60022+ unsigned long error = 0;
60023 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
60024 unsigned long elf_bss, elf_brk;
60025 int retval, i;
60026@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60027 struct elfhdr interp_elf_ex;
60028 } *loc;
60029 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
60030+ unsigned long pax_task_size;
60031
60032 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
60033 if (!loc) {
60034@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
60035 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
60036 may depend on the personality. */
60037 SET_PERSONALITY2(loc->elf_ex, &arch_state);
60038+
60039+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60040+ current->mm->pax_flags = 0UL;
60041+#endif
60042+
60043+#ifdef CONFIG_PAX_DLRESOLVE
60044+ current->mm->call_dl_resolve = 0UL;
60045+#endif
60046+
60047+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60048+ current->mm->call_syscall = 0UL;
60049+#endif
60050+
60051+#ifdef CONFIG_PAX_ASLR
60052+ current->mm->delta_mmap = 0UL;
60053+ current->mm->delta_stack = 0UL;
60054+#endif
60055+
60056+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60057+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
60058+ send_sig(SIGKILL, current, 0);
60059+ goto out_free_dentry;
60060+ }
60061+#endif
60062+
60063+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60064+ pax_set_initial_flags(bprm);
60065+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60066+ if (pax_set_initial_flags_func)
60067+ (pax_set_initial_flags_func)(bprm);
60068+#endif
60069+
60070+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60071+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
60072+ current->mm->context.user_cs_limit = PAGE_SIZE;
60073+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
60074+ }
60075+#endif
60076+
60077+#ifdef CONFIG_PAX_SEGMEXEC
60078+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60079+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
60080+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
60081+ pax_task_size = SEGMEXEC_TASK_SIZE;
60082+ current->mm->def_flags |= VM_NOHUGEPAGE;
60083+ } else
60084+#endif
60085+
60086+ pax_task_size = TASK_SIZE;
60087+
60088+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
60089+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60090+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
60091+ put_cpu();
60092+ }
60093+#endif
60094+
60095+#ifdef CONFIG_PAX_ASLR
60096+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60097+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
60098+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
60099+ }
60100+#endif
60101+
60102+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60103+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60104+ executable_stack = EXSTACK_DISABLE_X;
60105+ current->personality &= ~READ_IMPLIES_EXEC;
60106+ } else
60107+#endif
60108+
60109 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
60110 current->personality |= READ_IMPLIES_EXEC;
60111
60112@@ -925,12 +1364,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
60113 #else
60114 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
60115 #endif
60116- total_size = total_mapping_size(elf_phdata,
60117- loc->elf_ex.e_phnum);
60118- if (!total_size) {
60119- error = -EINVAL;
60120- goto out_free_dentry;
60121+
60122+#ifdef CONFIG_PAX_RANDMMAP
60123+ /* PaX: randomize base address at the default exe base if requested */
60124+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
60125+#ifdef CONFIG_SPARC64
60126+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
60127+#else
60128+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
60129+#endif
60130+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
60131+ elf_flags |= MAP_FIXED;
60132 }
60133+#endif
60134+
60135+ total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
60136 }
60137
60138 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
60139@@ -962,9 +1410,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
60140 * allowed task size. Note that p_filesz must always be
60141 * <= p_memsz so it is only necessary to check p_memsz.
60142 */
60143- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60144- elf_ppnt->p_memsz > TASK_SIZE ||
60145- TASK_SIZE - elf_ppnt->p_memsz < k) {
60146+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60147+ elf_ppnt->p_memsz > pax_task_size ||
60148+ pax_task_size - elf_ppnt->p_memsz < k) {
60149 /* set_brk can never work. Avoid overflows. */
60150 retval = -EINVAL;
60151 goto out_free_dentry;
60152@@ -1000,16 +1448,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
60153 if (retval)
60154 goto out_free_dentry;
60155 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
60156- retval = -EFAULT; /* Nobody gets to see this, but.. */
60157- goto out_free_dentry;
60158+ /*
60159+ * This bss-zeroing can fail if the ELF
60160+ * file specifies odd protections, so we
60161+ * deliberately ignore the return value.
60162+ */
60163 }
60164
60165+#ifdef CONFIG_PAX_RANDMMAP
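+ /* PaX: reserve a randomly sized gap above the ELF brk so the heap
+ * base becomes unpredictable */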
60166+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60167+ unsigned long start, size, flags;
60168+ vm_flags_t vm_flags;
60169+
60170+ start = ELF_PAGEALIGN(elf_brk);
60171+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60172+ flags = MAP_FIXED | MAP_PRIVATE;
60173+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60174+
60175+ down_write(&current->mm->mmap_sem);
60176+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60177+ retval = -ENOMEM;
60178+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60179+// if (current->personality & ADDR_NO_RANDOMIZE)
60180+// vm_flags |= VM_READ | VM_MAYREAD;
60181+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60182+ retval = IS_ERR_VALUE(start) ? start : 0;
60183+ }
60184+ up_write(&current->mm->mmap_sem);
60185+ if (retval == 0)
60186+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60187+ if (retval < 0)
60188+ goto out_free_dentry;
60189+ }
60190+#endif
60191+
60192 if (elf_interpreter) {
60193- unsigned long interp_map_addr = 0;
60194-
60195 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60196 interpreter,
60197- &interp_map_addr,
60198 load_bias, interp_elf_phdata);
60199 if (!IS_ERR((void *)elf_entry)) {
60200 /*
60201@@ -1237,7 +1712,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60202 * Decide what to dump of a segment, part, all or none.
60203 */
60204 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60205- unsigned long mm_flags)
60206+ unsigned long mm_flags, long signr)
60207 {
60208 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60209
60210@@ -1275,7 +1750,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60211 if (vma->vm_file == NULL)
60212 return 0;
60213
60214- if (FILTER(MAPPED_PRIVATE))
60215+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60216 goto whole;
60217
60218 /*
60219@@ -1482,9 +1957,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60220 {
60221 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60222 int i = 0;
60223- do
60224+ do {
60225 i += 2;
60226- while (auxv[i - 2] != AT_NULL);
60227+ } while (auxv[i - 2] != AT_NULL);
60228 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60229 }
60230
60231@@ -1493,7 +1968,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60232 {
60233 mm_segment_t old_fs = get_fs();
60234 set_fs(KERNEL_DS);
60235- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60236+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60237 set_fs(old_fs);
60238 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60239 }
60240@@ -2213,7 +2688,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60241 vma = next_vma(vma, gate_vma)) {
60242 unsigned long dump_size;
60243
60244- dump_size = vma_dump_size(vma, cprm->mm_flags);
60245+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60246 vma_filesz[i++] = dump_size;
60247 vma_data_size += dump_size;
60248 }
60249@@ -2321,6 +2796,167 @@ out:
60250
60251 #endif /* CONFIG_ELF_CORE */
60252
60253+#ifdef CONFIG_PAX_MPROTECT
60254+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
60255+ * so we grant them VM_MAYWRITE once during their lifetime. Similarly,
60256+ * we remove VM_MAYWRITE for good on RELRO segments.
60257+ *
60258+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
60259+ * basis, because we want to allow the common case rather than the special ones.
60260+ */
60261+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60262+{
60263+ struct elfhdr elf_h;
60264+ struct elf_phdr elf_p;
60265+ unsigned long i;
60266+ unsigned long oldflags;
60267+ bool is_textrel_rw, is_textrel_rx, is_relro;
60268+
60269+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60270+ return;
60271+
60272+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60273+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60274+
60275+#ifdef CONFIG_PAX_ELFRELOCS
60276+ /* possible TEXTREL */
60277+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60278+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60279+#else
60280+ is_textrel_rw = false;
60281+ is_textrel_rx = false;
60282+#endif
60283+
60284+ /* possible RELRO */
60285+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60286+
60287+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60288+ return;
60289+
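+ /* re-read the ELF header from the backing file and validate it before
+ * trusting the mapping */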
60290+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60291+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60292+
60293+#ifdef CONFIG_PAX_ETEXECRELOCS
60294+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60295+#else
60296+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60297+#endif
60298+
60299+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60300+ !elf_check_arch(&elf_h) ||
60301+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60302+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60303+ return;
60304+
60305+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60306+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60307+ return;
60308+ switch (elf_p.p_type) {
60309+ case PT_DYNAMIC:
60310+ if (!is_textrel_rw && !is_textrel_rx)
60311+ continue;
60312+ i = 0UL;
60313+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60314+ elf_dyn dyn;
60315+
60316+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60317+ break;
60318+ if (dyn.d_tag == DT_NULL)
60319+ break;
60320+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60321+ gr_log_textrel(vma);
60322+ if (is_textrel_rw)
60323+ vma->vm_flags |= VM_MAYWRITE;
60324+ else
60325+ /* PaX: disallow write access after relocs are done; hopefully no one else needs it... */
60326+ vma->vm_flags &= ~VM_MAYWRITE;
60327+ break;
60328+ }
60329+ i++;
60330+ }
60331+ is_textrel_rw = false;
60332+ is_textrel_rx = false;
60333+ continue;
60334+
60335+ case PT_GNU_RELRO:
60336+ if (!is_relro)
60337+ continue;
60338+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60339+ vma->vm_flags &= ~VM_MAYWRITE;
60340+ is_relro = false;
60341+ continue;
60342+
60343+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60344+ case PT_PAX_FLAGS: {
60345+ const char *msg_mprotect = "", *msg_emutramp = "";
60346+ char *buffer_lib, *buffer_exe;
60347+
60348+ if (elf_p.p_flags & PF_NOMPROTECT)
60349+ msg_mprotect = "MPROTECT disabled";
60350+
60351+#ifdef CONFIG_PAX_EMUTRAMP
60352+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60353+ msg_emutramp = "EMUTRAMP enabled";
60354+#endif
60355+
60356+ if (!msg_mprotect[0] && !msg_emutramp[0])
60357+ continue;
60358+
60359+ if (!printk_ratelimit())
60360+ continue;
60361+
60362+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60363+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60364+ if (buffer_lib && buffer_exe) {
60365+ char *path_lib, *path_exe;
60366+
60367+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60368+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60369+
60370+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60371+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60372+
60373+ }
60374+ free_page((unsigned long)buffer_exe);
60375+ free_page((unsigned long)buffer_lib);
60376+ continue;
60377+ }
60378+#endif
60379+
60380+ }
60381+ }
60382+}
60383+#endif
60384+
60385+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60386+
60387+extern int grsec_enable_log_rwxmaps;
60388+
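+/* GRKERNSEC_RWXMAP_LOG: flag ELF images that request an executable
+ * PT_GNU_STACK at mmap time.
+ */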
60389+static void elf_handle_mmap(struct file *file)
60390+{
60391+ struct elfhdr elf_h;
60392+ struct elf_phdr elf_p;
60393+ unsigned long i;
60394+
60395+ if (!grsec_enable_log_rwxmaps)
60396+ return;
60397+
60398+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60399+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60400+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60401+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60402+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60403+ return;
60404+
60405+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60406+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60407+ return;
60408+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60409+ gr_log_ptgnustack(file);
60410+ }
60411+}
60412+#endif
60413+
60414 static int __init init_elf_binfmt(void)
60415 {
60416 register_binfmt(&elf_format);
60417diff --git a/fs/block_dev.c b/fs/block_dev.c
60418index 975266b..c3d1856 100644
60419--- a/fs/block_dev.c
60420+++ b/fs/block_dev.c
60421@@ -734,7 +734,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60422 else if (bdev->bd_contains == bdev)
60423 return true; /* is a whole device which isn't held */
60424
60425- else if (whole->bd_holder == bd_may_claim)
60426+ else if (whole->bd_holder == (void *)bd_may_claim)
60427 return true; /* is a partition of a device that is being partitioned */
60428 else if (whole->bd_holder != NULL)
60429 return false; /* is a partition of a held device */
60430diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60431index 6d67f32..8f33187 100644
60432--- a/fs/btrfs/ctree.c
60433+++ b/fs/btrfs/ctree.c
60434@@ -1181,9 +1181,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60435 free_extent_buffer(buf);
60436 add_root_to_dirty_list(root);
60437 } else {
60438- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60439- parent_start = parent->start;
60440- else
60441+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60442+ if (parent)
60443+ parent_start = parent->start;
60444+ else
60445+ parent_start = 0;
60446+ } else
60447 parent_start = 0;
60448
60449 WARN_ON(trans->transid != btrfs_header_generation(parent));
60450diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60451index 82f0c7c..dff78a8 100644
60452--- a/fs/btrfs/delayed-inode.c
60453+++ b/fs/btrfs/delayed-inode.c
60454@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60455
60456 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60457 {
60458- int seq = atomic_inc_return(&delayed_root->items_seq);
60459+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60460 if ((atomic_dec_return(&delayed_root->items) <
60461 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60462 waitqueue_active(&delayed_root->wait))
60463@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60464
60465 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60466 {
60467- int val = atomic_read(&delayed_root->items_seq);
60468+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60469
60470 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60471 return 1;
60472@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60473 int seq;
60474 int ret;
60475
60476- seq = atomic_read(&delayed_root->items_seq);
60477+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60478
60479 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60480 if (ret)
60481diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60482index f70119f..ab5894d 100644
60483--- a/fs/btrfs/delayed-inode.h
60484+++ b/fs/btrfs/delayed-inode.h
60485@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60486 */
60487 struct list_head prepare_list;
60488 atomic_t items; /* for delayed items */
60489- atomic_t items_seq; /* for delayed items */
60490+ atomic_unchecked_t items_seq; /* for delayed items */
60491 int nodes; /* for delayed nodes */
60492 wait_queue_head_t wait;
60493 };
60494@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60495 struct btrfs_delayed_root *delayed_root)
60496 {
60497 atomic_set(&delayed_root->items, 0);
60498- atomic_set(&delayed_root->items_seq, 0);
60499+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60500 delayed_root->nodes = 0;
60501 spin_lock_init(&delayed_root->lock);
60502 init_waitqueue_head(&delayed_root->wait);
60503diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
60504index 05fef19..f3774b8 100644
60505--- a/fs/btrfs/super.c
60506+++ b/fs/btrfs/super.c
60507@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
60508 function, line, errstr);
60509 return;
60510 }
60511- ACCESS_ONCE(trans->transaction->aborted) = errno;
60512+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
60513 /* Wake up anybody who may be waiting on this transaction */
60514 wake_up(&root->fs_info->transaction_wait);
60515 wake_up(&root->fs_info->transaction_blocked_wait);
60516diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
60517index 94edb0a..e94dc93 100644
60518--- a/fs/btrfs/sysfs.c
60519+++ b/fs/btrfs/sysfs.c
60520@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
60521 for (set = 0; set < FEAT_MAX; set++) {
60522 int i;
60523 struct attribute *attrs[2];
60524- struct attribute_group agroup = {
60525+ attribute_group_no_const agroup = {
60526 .name = "features",
60527 .attrs = attrs,
60528 };
60529diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
60530index 2299bfd..4098e72 100644
60531--- a/fs/btrfs/tests/free-space-tests.c
60532+++ b/fs/btrfs/tests/free-space-tests.c
60533@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
60534 * extent entry.
60535 */
60536 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
60537- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
60538+ pax_open_kernel();
60539+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
60540+ pax_close_kernel();
60541
60542 /*
60543 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
60544@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
60545 if (ret)
60546 return ret;
60547
60548- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
60549+ pax_open_kernel();
60550+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
60551+ pax_close_kernel();
60552 __btrfs_remove_free_space_cache(cache->free_space_ctl);
60553
60554 return 0;
60555diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
60556index 154990c..d0cf699 100644
60557--- a/fs/btrfs/tree-log.h
60558+++ b/fs/btrfs/tree-log.h
60559@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
60560 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
60561 struct btrfs_trans_handle *trans)
60562 {
60563- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
60564+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
60565 }
60566
60567 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
60568diff --git a/fs/buffer.c b/fs/buffer.c
60569index 20805db..2e8fc69 100644
60570--- a/fs/buffer.c
60571+++ b/fs/buffer.c
60572@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
60573 bh_cachep = kmem_cache_create("buffer_head",
60574 sizeof(struct buffer_head), 0,
60575 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
60576- SLAB_MEM_SPREAD),
60577+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
60578 NULL);
60579
60580 /*
60581diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
60582index fbb08e9..0fda764 100644
60583--- a/fs/cachefiles/bind.c
60584+++ b/fs/cachefiles/bind.c
60585@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
60586 args);
60587
60588 /* start by checking things over */
60589- ASSERT(cache->fstop_percent >= 0 &&
60590- cache->fstop_percent < cache->fcull_percent &&
60591+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
60592 cache->fcull_percent < cache->frun_percent &&
60593 cache->frun_percent < 100);
60594
60595- ASSERT(cache->bstop_percent >= 0 &&
60596- cache->bstop_percent < cache->bcull_percent &&
60597+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
60598 cache->bcull_percent < cache->brun_percent &&
60599 cache->brun_percent < 100);
60600
60601diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
60602index f601def..b2cf704 100644
60603--- a/fs/cachefiles/daemon.c
60604+++ b/fs/cachefiles/daemon.c
60605@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
60606 if (n > buflen)
60607 return -EMSGSIZE;
60608
60609- if (copy_to_user(_buffer, buffer, n) != 0)
60610+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
60611 return -EFAULT;
60612
60613 return n;
60614@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
60615 if (test_bit(CACHEFILES_DEAD, &cache->flags))
60616 return -EIO;
60617
60618- if (datalen < 0 || datalen > PAGE_SIZE - 1)
60619+ if (datalen > PAGE_SIZE - 1)
60620 return -EOPNOTSUPP;
60621
60622 /* drag the command string into the kernel so we can parse it */
60623@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
60624 if (args[0] != '%' || args[1] != '\0')
60625 return -EINVAL;
60626
60627- if (fstop < 0 || fstop >= cache->fcull_percent)
60628+ if (fstop >= cache->fcull_percent)
60629 return cachefiles_daemon_range_error(cache, args);
60630
60631 cache->fstop_percent = fstop;
60632@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
60633 if (args[0] != '%' || args[1] != '\0')
60634 return -EINVAL;
60635
60636- if (bstop < 0 || bstop >= cache->bcull_percent)
60637+ if (bstop >= cache->bcull_percent)
60638 return cachefiles_daemon_range_error(cache, args);
60639
60640 cache->bstop_percent = bstop;
60641diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
60642index 8c52472..c4e3a69 100644
60643--- a/fs/cachefiles/internal.h
60644+++ b/fs/cachefiles/internal.h
60645@@ -66,7 +66,7 @@ struct cachefiles_cache {
60646 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
60647 struct rb_root active_nodes; /* active nodes (can't be culled) */
60648 rwlock_t active_lock; /* lock for active_nodes */
60649- atomic_t gravecounter; /* graveyard uniquifier */
60650+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
60651 unsigned frun_percent; /* when to stop culling (% files) */
60652 unsigned fcull_percent; /* when to start culling (% files) */
60653 unsigned fstop_percent; /* when to stop allocating (% files) */
60654@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
60655 * proc.c
60656 */
60657 #ifdef CONFIG_CACHEFILES_HISTOGRAM
60658-extern atomic_t cachefiles_lookup_histogram[HZ];
60659-extern atomic_t cachefiles_mkdir_histogram[HZ];
60660-extern atomic_t cachefiles_create_histogram[HZ];
60661+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60662+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60663+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
60664
60665 extern int __init cachefiles_proc_init(void);
60666 extern void cachefiles_proc_cleanup(void);
60667 static inline
60668-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
60669+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
60670 {
60671 unsigned long jif = jiffies - start_jif;
60672 if (jif >= HZ)
60673 jif = HZ - 1;
60674- atomic_inc(&histogram[jif]);
60675+ atomic_inc_unchecked(&histogram[jif]);
60676 }
60677
60678 #else
60679diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
60680index 1e51714..411eded 100644
60681--- a/fs/cachefiles/namei.c
60682+++ b/fs/cachefiles/namei.c
60683@@ -309,7 +309,7 @@ try_again:
60684 /* first step is to make up a grave dentry in the graveyard */
60685 sprintf(nbuffer, "%08x%08x",
60686 (uint32_t) get_seconds(),
60687- (uint32_t) atomic_inc_return(&cache->gravecounter));
60688+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
60689
60690 /* do the multiway lock magic */
60691 trap = lock_rename(cache->graveyard, dir);
60692diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
60693index eccd339..4c1d995 100644
60694--- a/fs/cachefiles/proc.c
60695+++ b/fs/cachefiles/proc.c
60696@@ -14,9 +14,9 @@
60697 #include <linux/seq_file.h>
60698 #include "internal.h"
60699
60700-atomic_t cachefiles_lookup_histogram[HZ];
60701-atomic_t cachefiles_mkdir_histogram[HZ];
60702-atomic_t cachefiles_create_histogram[HZ];
60703+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60704+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60705+atomic_unchecked_t cachefiles_create_histogram[HZ];
60706
60707 /*
60708 * display the latency histogram
60709@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
60710 return 0;
60711 default:
60712 index = (unsigned long) v - 3;
60713- x = atomic_read(&cachefiles_lookup_histogram[index]);
60714- y = atomic_read(&cachefiles_mkdir_histogram[index]);
60715- z = atomic_read(&cachefiles_create_histogram[index]);
60716+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
60717+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
60718+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
60719 if (x == 0 && y == 0 && z == 0)
60720 return 0;
60721
60722diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
60723index 83e9976..bfd1eee 100644
60724--- a/fs/ceph/dir.c
60725+++ b/fs/ceph/dir.c
60726@@ -127,6 +127,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
60727 struct dentry *dentry, *last;
60728 struct ceph_dentry_info *di;
60729 int err = 0;
60730+ char d_name[DNAME_INLINE_LEN];
60731+ const unsigned char *name;
60732
60733 /* claim ref on last dentry we returned */
60734 last = fi->dentry;
60735@@ -190,7 +192,12 @@ more:
60736
60737 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
60738 dentry, dentry, dentry->d_inode);
60739- if (!dir_emit(ctx, dentry->d_name.name,
60740+ name = dentry->d_name.name;
60741+ if (name == dentry->d_iname) {
60742+ memcpy(d_name, name, dentry->d_name.len);
60743+ name = d_name;
60744+ }
60745+ if (!dir_emit(ctx, name,
60746 dentry->d_name.len,
60747 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
60748 dentry->d_inode->i_mode >> 12)) {
60749@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
60750 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
60751 struct ceph_mds_client *mdsc = fsc->mdsc;
60752 unsigned frag = fpos_frag(ctx->pos);
60753- int off = fpos_off(ctx->pos);
60754+ unsigned int off = fpos_off(ctx->pos);
60755 int err;
60756 u32 ftype;
60757 struct ceph_mds_reply_info_parsed *rinfo;
60758diff --git a/fs/ceph/super.c b/fs/ceph/super.c
60759index a63997b..ddc0577 100644
60760--- a/fs/ceph/super.c
60761+++ b/fs/ceph/super.c
60762@@ -889,7 +889,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
60763 /*
60764 * construct our own bdi so we can control readahead, etc.
60765 */
60766-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
60767+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
60768
60769 static int ceph_register_bdi(struct super_block *sb,
60770 struct ceph_fs_client *fsc)
60771@@ -906,7 +906,7 @@ static int ceph_register_bdi(struct super_block *sb,
60772 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
60773
60774 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
60775- atomic_long_inc_return(&bdi_seq));
60776+ atomic_long_inc_return_unchecked(&bdi_seq));
60777 if (!err)
60778 sb->s_bdi = &fsc->backing_dev_info;
60779 return err;
60780diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
60781index 7febcf2..62a5721 100644
60782--- a/fs/cifs/cifs_debug.c
60783+++ b/fs/cifs/cifs_debug.c
60784@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60785
60786 if (strtobool(&c, &bv) == 0) {
60787 #ifdef CONFIG_CIFS_STATS2
60788- atomic_set(&totBufAllocCount, 0);
60789- atomic_set(&totSmBufAllocCount, 0);
60790+ atomic_set_unchecked(&totBufAllocCount, 0);
60791+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60792 #endif /* CONFIG_CIFS_STATS2 */
60793 spin_lock(&cifs_tcp_ses_lock);
60794 list_for_each(tmp1, &cifs_tcp_ses_list) {
60795@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60796 tcon = list_entry(tmp3,
60797 struct cifs_tcon,
60798 tcon_list);
60799- atomic_set(&tcon->num_smbs_sent, 0);
60800+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
60801 if (server->ops->clear_stats)
60802 server->ops->clear_stats(tcon);
60803 }
60804@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60805 smBufAllocCount.counter, cifs_min_small);
60806 #ifdef CONFIG_CIFS_STATS2
60807 seq_printf(m, "Total Large %d Small %d Allocations\n",
60808- atomic_read(&totBufAllocCount),
60809- atomic_read(&totSmBufAllocCount));
60810+ atomic_read_unchecked(&totBufAllocCount),
60811+ atomic_read_unchecked(&totSmBufAllocCount));
60812 #endif /* CONFIG_CIFS_STATS2 */
60813
60814 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
60815@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60816 if (tcon->need_reconnect)
60817 seq_puts(m, "\tDISCONNECTED ");
60818 seq_printf(m, "\nSMBs: %d",
60819- atomic_read(&tcon->num_smbs_sent));
60820+ atomic_read_unchecked(&tcon->num_smbs_sent));
60821 if (server->ops->print_stats)
60822 server->ops->print_stats(m, tcon);
60823 }
60824diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
60825index d72fe37..ded5511 100644
60826--- a/fs/cifs/cifsfs.c
60827+++ b/fs/cifs/cifsfs.c
60828@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
60829 */
60830 cifs_req_cachep = kmem_cache_create("cifs_request",
60831 CIFSMaxBufSize + max_hdr_size, 0,
60832- SLAB_HWCACHE_ALIGN, NULL);
60833+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
60834 if (cifs_req_cachep == NULL)
60835 return -ENOMEM;
60836
60837@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
60838 efficient to alloc 1 per page off the slab compared to 17K (5page)
60839 alloc of large cifs buffers even when page debugging is on */
60840 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
60841- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
60842+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
60843 NULL);
60844 if (cifs_sm_req_cachep == NULL) {
60845 mempool_destroy(cifs_req_poolp);
60846@@ -1204,8 +1204,8 @@ init_cifs(void)
60847 atomic_set(&bufAllocCount, 0);
60848 atomic_set(&smBufAllocCount, 0);
60849 #ifdef CONFIG_CIFS_STATS2
60850- atomic_set(&totBufAllocCount, 0);
60851- atomic_set(&totSmBufAllocCount, 0);
60852+ atomic_set_unchecked(&totBufAllocCount, 0);
60853+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60854 #endif /* CONFIG_CIFS_STATS2 */
60855
60856 atomic_set(&midCount, 0);
60857diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
60858index 22b289a..bbbba08 100644
60859--- a/fs/cifs/cifsglob.h
60860+++ b/fs/cifs/cifsglob.h
60861@@ -823,35 +823,35 @@ struct cifs_tcon {
60862 __u16 Flags; /* optional support bits */
60863 enum statusEnum tidStatus;
60864 #ifdef CONFIG_CIFS_STATS
60865- atomic_t num_smbs_sent;
60866+ atomic_unchecked_t num_smbs_sent;
60867 union {
60868 struct {
60869- atomic_t num_writes;
60870- atomic_t num_reads;
60871- atomic_t num_flushes;
60872- atomic_t num_oplock_brks;
60873- atomic_t num_opens;
60874- atomic_t num_closes;
60875- atomic_t num_deletes;
60876- atomic_t num_mkdirs;
60877- atomic_t num_posixopens;
60878- atomic_t num_posixmkdirs;
60879- atomic_t num_rmdirs;
60880- atomic_t num_renames;
60881- atomic_t num_t2renames;
60882- atomic_t num_ffirst;
60883- atomic_t num_fnext;
60884- atomic_t num_fclose;
60885- atomic_t num_hardlinks;
60886- atomic_t num_symlinks;
60887- atomic_t num_locks;
60888- atomic_t num_acl_get;
60889- atomic_t num_acl_set;
60890+ atomic_unchecked_t num_writes;
60891+ atomic_unchecked_t num_reads;
60892+ atomic_unchecked_t num_flushes;
60893+ atomic_unchecked_t num_oplock_brks;
60894+ atomic_unchecked_t num_opens;
60895+ atomic_unchecked_t num_closes;
60896+ atomic_unchecked_t num_deletes;
60897+ atomic_unchecked_t num_mkdirs;
60898+ atomic_unchecked_t num_posixopens;
60899+ atomic_unchecked_t num_posixmkdirs;
60900+ atomic_unchecked_t num_rmdirs;
60901+ atomic_unchecked_t num_renames;
60902+ atomic_unchecked_t num_t2renames;
60903+ atomic_unchecked_t num_ffirst;
60904+ atomic_unchecked_t num_fnext;
60905+ atomic_unchecked_t num_fclose;
60906+ atomic_unchecked_t num_hardlinks;
60907+ atomic_unchecked_t num_symlinks;
60908+ atomic_unchecked_t num_locks;
60909+ atomic_unchecked_t num_acl_get;
60910+ atomic_unchecked_t num_acl_set;
60911 } cifs_stats;
60912 #ifdef CONFIG_CIFS_SMB2
60913 struct {
60914- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60915- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60916+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60917+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60918 } smb2_stats;
60919 #endif /* CONFIG_CIFS_SMB2 */
60920 } stats;
60921@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
60922 }
60923
60924 #ifdef CONFIG_CIFS_STATS
60925-#define cifs_stats_inc atomic_inc
60926+#define cifs_stats_inc atomic_inc_unchecked
60927
60928 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60929 unsigned int bytes)
60930@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60931 /* Various Debug counters */
60932 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60933 #ifdef CONFIG_CIFS_STATS2
60934-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60935-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60936+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60937+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60938 #endif
60939 GLOBAL_EXTERN atomic_t smBufAllocCount;
60940 GLOBAL_EXTERN atomic_t midCount;
60941diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60942index ca30c39..570fb94 100644
60943--- a/fs/cifs/file.c
60944+++ b/fs/cifs/file.c
60945@@ -2055,10 +2055,14 @@ static int cifs_writepages(struct address_space *mapping,
60946 index = mapping->writeback_index; /* Start from prev offset */
60947 end = -1;
60948 } else {
60949- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60950- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60951- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60952+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60953 range_whole = true;
60954+ index = 0;
60955+ end = ULONG_MAX;
60956+ } else {
60957+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60958+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60959+ }
60960 scanned = true;
60961 }
60962 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
60963diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60964index 3379463..3af418a 100644
60965--- a/fs/cifs/misc.c
60966+++ b/fs/cifs/misc.c
60967@@ -170,7 +170,7 @@ cifs_buf_get(void)
60968 memset(ret_buf, 0, buf_size + 3);
60969 atomic_inc(&bufAllocCount);
60970 #ifdef CONFIG_CIFS_STATS2
60971- atomic_inc(&totBufAllocCount);
60972+ atomic_inc_unchecked(&totBufAllocCount);
60973 #endif /* CONFIG_CIFS_STATS2 */
60974 }
60975
60976@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60977 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60978 atomic_inc(&smBufAllocCount);
60979 #ifdef CONFIG_CIFS_STATS2
60980- atomic_inc(&totSmBufAllocCount);
60981+ atomic_inc_unchecked(&totSmBufAllocCount);
60982 #endif /* CONFIG_CIFS_STATS2 */
60983
60984 }
60985diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60986index d297903..1cb7516 100644
60987--- a/fs/cifs/smb1ops.c
60988+++ b/fs/cifs/smb1ops.c
60989@@ -622,27 +622,27 @@ static void
60990 cifs_clear_stats(struct cifs_tcon *tcon)
60991 {
60992 #ifdef CONFIG_CIFS_STATS
60993- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60994- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60995- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
60996- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60997- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
60998- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
60999- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61000- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
61001- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
61002- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
61003- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
61004- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
61005- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
61006- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
61007- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
61008- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
61009- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
61010- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
61011- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
61012- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
61013- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
61014+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
61015+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
61016+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
61017+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61018+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
61019+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
61020+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61021+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
61022+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
61023+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
61024+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
61025+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
61026+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
61027+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
61028+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
61029+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
61030+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
61031+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
61032+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
61033+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
61034+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
61035 #endif
61036 }
61037
61038@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61039 {
61040 #ifdef CONFIG_CIFS_STATS
61041 seq_printf(m, " Oplocks breaks: %d",
61042- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
61043+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
61044 seq_printf(m, "\nReads: %d Bytes: %llu",
61045- atomic_read(&tcon->stats.cifs_stats.num_reads),
61046+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
61047 (long long)(tcon->bytes_read));
61048 seq_printf(m, "\nWrites: %d Bytes: %llu",
61049- atomic_read(&tcon->stats.cifs_stats.num_writes),
61050+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
61051 (long long)(tcon->bytes_written));
61052 seq_printf(m, "\nFlushes: %d",
61053- atomic_read(&tcon->stats.cifs_stats.num_flushes));
61054+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
61055 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
61056- atomic_read(&tcon->stats.cifs_stats.num_locks),
61057- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
61058- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
61059+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
61060+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
61061+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
61062 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
61063- atomic_read(&tcon->stats.cifs_stats.num_opens),
61064- atomic_read(&tcon->stats.cifs_stats.num_closes),
61065- atomic_read(&tcon->stats.cifs_stats.num_deletes));
61066+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
61067+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
61068+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
61069 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
61070- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
61071- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
61072+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
61073+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
61074 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
61075- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
61076- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
61077+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
61078+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
61079 seq_printf(m, "\nRenames: %d T2 Renames %d",
61080- atomic_read(&tcon->stats.cifs_stats.num_renames),
61081- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
61082+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
61083+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
61084 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
61085- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
61086- atomic_read(&tcon->stats.cifs_stats.num_fnext),
61087- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61088+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61089+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61090+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61091 #endif
61092 }
61093
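/*
 * Note on the conversions above: the CIFS statistics counters move from
 * atomic_t to atomic_unchecked_t so that PaX's REFCOUNT hardening does not
 * treat a wrapping statistic as a refcount overflow. A minimal sketch of
 * the assumed type and accessors (the patch's real definitions live in the
 * arch atomic headers; this is only the shape they take):
 */
typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return READ_ONCE(v->counter);	/* plain read, no overflow trap */
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	WRITE_ONCE(v->counter, i);	/* plain write, no overflow trap */
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* layout-compatible with atomic_t; the real version simply omits
	 * the overflow check the REFCOUNT plugin adds to atomic_inc() */
	atomic_inc((atomic_t *)v);
}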
61094diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61095index eab05e1..ffe5ea4 100644
61096--- a/fs/cifs/smb2ops.c
61097+++ b/fs/cifs/smb2ops.c
61098@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61099 #ifdef CONFIG_CIFS_STATS
61100 int i;
61101 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61102- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61103- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61104+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61105+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61106 }
61107 #endif
61108 }
61109@@ -459,65 +459,65 @@ static void
61110 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61111 {
61112 #ifdef CONFIG_CIFS_STATS
61113- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61114- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61115+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61116+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61117 seq_printf(m, "\nNegotiates: %d sent %d failed",
61118- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61119- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61120+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61121+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61122 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61123- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61124- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61125+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61126+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61127 seq_printf(m, "\nLogoffs: %d sent %d failed",
61128- atomic_read(&sent[SMB2_LOGOFF_HE]),
61129- atomic_read(&failed[SMB2_LOGOFF_HE]));
61130+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61131+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61132 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61133- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61134- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61135+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61136+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61137 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61138- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61139- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61140+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61141+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61142 seq_printf(m, "\nCreates: %d sent %d failed",
61143- atomic_read(&sent[SMB2_CREATE_HE]),
61144- atomic_read(&failed[SMB2_CREATE_HE]));
61145+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61146+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61147 seq_printf(m, "\nCloses: %d sent %d failed",
61148- atomic_read(&sent[SMB2_CLOSE_HE]),
61149- atomic_read(&failed[SMB2_CLOSE_HE]));
61150+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61151+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61152 seq_printf(m, "\nFlushes: %d sent %d failed",
61153- atomic_read(&sent[SMB2_FLUSH_HE]),
61154- atomic_read(&failed[SMB2_FLUSH_HE]));
61155+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61156+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61157 seq_printf(m, "\nReads: %d sent %d failed",
61158- atomic_read(&sent[SMB2_READ_HE]),
61159- atomic_read(&failed[SMB2_READ_HE]));
61160+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61161+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61162 seq_printf(m, "\nWrites: %d sent %d failed",
61163- atomic_read(&sent[SMB2_WRITE_HE]),
61164- atomic_read(&failed[SMB2_WRITE_HE]));
61165+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61166+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61167 seq_printf(m, "\nLocks: %d sent %d failed",
61168- atomic_read(&sent[SMB2_LOCK_HE]),
61169- atomic_read(&failed[SMB2_LOCK_HE]));
61170+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61171+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61172 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61173- atomic_read(&sent[SMB2_IOCTL_HE]),
61174- atomic_read(&failed[SMB2_IOCTL_HE]));
61175+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61176+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61177 seq_printf(m, "\nCancels: %d sent %d failed",
61178- atomic_read(&sent[SMB2_CANCEL_HE]),
61179- atomic_read(&failed[SMB2_CANCEL_HE]));
61180+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61181+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61182 seq_printf(m, "\nEchos: %d sent %d failed",
61183- atomic_read(&sent[SMB2_ECHO_HE]),
61184- atomic_read(&failed[SMB2_ECHO_HE]));
61185+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61186+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61187 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61188- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61189- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61190+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61191+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61192 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61193- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61194- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61195+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61196+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61197 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61198- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61199- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61200+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61201+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61202 seq_printf(m, "\nSetInfos: %d sent %d failed",
61203- atomic_read(&sent[SMB2_SET_INFO_HE]),
61204- atomic_read(&failed[SMB2_SET_INFO_HE]));
61205+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61206+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61207 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61208- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61209- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61210+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61211+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61212 #endif
61213 }
61214
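/*
 * smb2_print_stats indexes the sent/failed arrays by the host-endian *_HE
 * command codes. A hedged sketch of a compact dumper over the same
 * counters (hypothetical helper, not part of the patch; assumes
 * NUMBER_OF_SMB2_COMMANDS from smb2pdu.h):
 */
static void smb2_dump_all_counters(struct seq_file *m, struct cifs_tcon *tcon)
{
	atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
	atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
	int i;

	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++)
		seq_printf(m, "\ncmd %2d: %d sent %d failed", i,
			   atomic_read_unchecked(&sent[i]),
			   atomic_read_unchecked(&failed[i]));
}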
61215diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61216index 65cd7a8..3518676 100644
61217--- a/fs/cifs/smb2pdu.c
61218+++ b/fs/cifs/smb2pdu.c
61219@@ -2147,8 +2147,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61220 default:
61221 cifs_dbg(VFS, "info level %u isn't supported\n",
61222 srch_inf->info_level);
61223- rc = -EINVAL;
61224- goto qdir_exit;
61225+ return -EINVAL;
61226 }
61227
61228 req->FileIndex = cpu_to_le32(index);
61229diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61230index 46ee6f2..89a9e7f 100644
61231--- a/fs/coda/cache.c
61232+++ b/fs/coda/cache.c
61233@@ -24,7 +24,7 @@
61234 #include "coda_linux.h"
61235 #include "coda_cache.h"
61236
61237-static atomic_t permission_epoch = ATOMIC_INIT(0);
61238+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61239
61240 /* replace or extend an acl cache hit */
61241 void coda_cache_enter(struct inode *inode, int mask)
61242@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61243 struct coda_inode_info *cii = ITOC(inode);
61244
61245 spin_lock(&cii->c_lock);
61246- cii->c_cached_epoch = atomic_read(&permission_epoch);
61247+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61248 if (!uid_eq(cii->c_uid, current_fsuid())) {
61249 cii->c_uid = current_fsuid();
61250 cii->c_cached_perm = mask;
61251@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61252 {
61253 struct coda_inode_info *cii = ITOC(inode);
61254 spin_lock(&cii->c_lock);
61255- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61256+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61257 spin_unlock(&cii->c_lock);
61258 }
61259
61260 /* remove all acl caches */
61261 void coda_cache_clear_all(struct super_block *sb)
61262 {
61263- atomic_inc(&permission_epoch);
61264+ atomic_inc_unchecked(&permission_epoch);
61265 }
61266
61267
61268@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61269 spin_lock(&cii->c_lock);
61270 hit = (mask & cii->c_cached_perm) == mask &&
61271 uid_eq(cii->c_uid, current_fsuid()) &&
61272- cii->c_cached_epoch == atomic_read(&permission_epoch);
61273+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61274 spin_unlock(&cii->c_lock);
61275
61276 return hit;
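/*
 * The coda change above is an epoch-based invalidation scheme: each cached
 * permission records the epoch it was filled in, and bumping the global
 * epoch invalidates every entry in O(1). Minimal sketch of the pattern
 * with hypothetical names:
 */
static atomic_unchecked_t cache_epoch = ATOMIC_INIT(0);

struct cached_perm {
	int	epoch;	/* epoch at fill time */
	int	perm;	/* cached permission mask */
};

static bool cached_perm_valid(const struct cached_perm *c)
{
	return c->epoch == atomic_read_unchecked(&cache_epoch);
}

static void cached_perm_invalidate_all(void)
{
	atomic_inc_unchecked(&cache_epoch);	/* all existing entries go stale */
}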
61277diff --git a/fs/compat.c b/fs/compat.c
61278index 6fd272d..dd34ba2 100644
61279--- a/fs/compat.c
61280+++ b/fs/compat.c
61281@@ -54,7 +54,7 @@
61282 #include <asm/ioctls.h>
61283 #include "internal.h"
61284
61285-int compat_log = 1;
61286+int compat_log = 0;
61287
61288 int compat_printk(const char *fmt, ...)
61289 {
61290@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61291
61292 set_fs(KERNEL_DS);
61293 /* The __user pointer cast is valid because of the set_fs() */
61294- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61295+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61296 set_fs(oldfs);
61297 /* truncating is ok because it's a user address */
61298 if (!ret)
61299@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61300 goto out;
61301
61302 ret = -EINVAL;
61303- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61304+ if (nr_segs > UIO_MAXIOV)
61305 goto out;
61306 if (nr_segs > fast_segs) {
61307 ret = -ENOMEM;
61308@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
61309 struct compat_readdir_callback {
61310 struct dir_context ctx;
61311 struct compat_old_linux_dirent __user *dirent;
61312+ struct file * file;
61313 int result;
61314 };
61315
61316@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
61317 buf->result = -EOVERFLOW;
61318 return -EOVERFLOW;
61319 }
61320+
61321+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61322+ return 0;
61323+
61324 buf->result++;
61325 dirent = buf->dirent;
61326 if (!access_ok(VERIFY_WRITE, dirent,
61327@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61328 if (!f.file)
61329 return -EBADF;
61330
61331+ buf.file = f.file;
61332 error = iterate_dir(f.file, &buf.ctx);
61333 if (buf.result)
61334 error = buf.result;
61335@@ -913,6 +919,7 @@ struct compat_getdents_callback {
61336 struct dir_context ctx;
61337 struct compat_linux_dirent __user *current_dir;
61338 struct compat_linux_dirent __user *previous;
61339+ struct file * file;
61340 int count;
61341 int error;
61342 };
61343@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
61344 buf->error = -EOVERFLOW;
61345 return -EOVERFLOW;
61346 }
61347+
61348+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61349+ return 0;
61350+
61351 dirent = buf->previous;
61352 if (dirent) {
61353 if (__put_user(offset, &dirent->d_off))
61354@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61355 if (!f.file)
61356 return -EBADF;
61357
61358+ buf.file = f.file;
61359 error = iterate_dir(f.file, &buf.ctx);
61360 if (error >= 0)
61361 error = buf.error;
61362@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
61363 struct dir_context ctx;
61364 struct linux_dirent64 __user *current_dir;
61365 struct linux_dirent64 __user *previous;
61366+ struct file * file;
61367 int count;
61368 int error;
61369 };
61370@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
61371 buf->error = -EINVAL; /* only used if we fail.. */
61372 if (reclen > buf->count)
61373 return -EINVAL;
61374+
61375+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61376+ return 0;
61377+
61378 dirent = buf->previous;
61379
61380 if (dirent) {
61381@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61382 if (!f.file)
61383 return -EBADF;
61384
61385+ buf.file = f.file;
61386 error = iterate_dir(f.file, &buf.ctx);
61387 if (error >= 0)
61388 error = buf.error;
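/*
 * Each compat readdir callback above gains a struct file * member so the
 * fill function can ask gr_acl_handle_filldir() whether the entry may be
 * shown to this task; returning 0 from a filldir callback skips the entry
 * while letting iterate_dir() continue. Sketch of the filtering step
 * (hypothetical callback, same shape as compat_fillonedir):
 */
static int filtered_filldir(struct dir_context *ctx, const char *name,
			    int namlen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct compat_readdir_callback *buf =
		container_of(ctx, struct compat_readdir_callback, ctx);

	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
		return 0;	/* hide the entry, keep walking */

	/* ... emit the entry to userspace as the original callback does ... */
	return 0;
}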
61389diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61390index 4d24d17..4f8c09e 100644
61391--- a/fs/compat_binfmt_elf.c
61392+++ b/fs/compat_binfmt_elf.c
61393@@ -30,11 +30,13 @@
61394 #undef elf_phdr
61395 #undef elf_shdr
61396 #undef elf_note
61397+#undef elf_dyn
61398 #undef elf_addr_t
61399 #define elfhdr elf32_hdr
61400 #define elf_phdr elf32_phdr
61401 #define elf_shdr elf32_shdr
61402 #define elf_note elf32_note
61403+#define elf_dyn Elf32_Dyn
61404 #define elf_addr_t Elf32_Addr
61405
61406 /*
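/*
 * compat_binfmt_elf.c re-compiles fs/binfmt_elf.c with 32-bit types; the
 * added #undef/#define pair makes the PT_DYNAMIC handling introduced
 * elsewhere in this patch use 32-bit dynamic entries in the compat build:
 *
 *	#undef  elf_dyn
 *	#define elf_dyn	Elf32_Dyn
 */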
61407diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61408index afec645..9c65620 100644
61409--- a/fs/compat_ioctl.c
61410+++ b/fs/compat_ioctl.c
61411@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61412 return -EFAULT;
61413 if (__get_user(udata, &ss32->iomem_base))
61414 return -EFAULT;
61415- ss.iomem_base = compat_ptr(udata);
61416+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61417 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61418 __get_user(ss.port_high, &ss32->port_high))
61419 return -EFAULT;
61420@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61421 for (i = 0; i < nmsgs; i++) {
61422 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61423 return -EFAULT;
61424- if (get_user(datap, &umsgs[i].buf) ||
61425- put_user(compat_ptr(datap), &tmsgs[i].buf))
61426+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61427+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61428 return -EFAULT;
61429 }
61430 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61431@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61432 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61433 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61434 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61435- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61436+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61437 return -EFAULT;
61438
61439 return ioctl_preallocate(file, p);
61440@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61441 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61442 {
61443 unsigned int a, b;
61444- a = *(unsigned int *)p;
61445- b = *(unsigned int *)q;
61446+ a = *(const unsigned int *)p;
61447+ b = *(const unsigned int *)q;
61448 if (a > b)
61449 return 1;
61450 if (a < b)
61451diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61452index cf0db00..c7f70e8 100644
61453--- a/fs/configfs/dir.c
61454+++ b/fs/configfs/dir.c
61455@@ -1540,7 +1540,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61456 }
61457 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61458 struct configfs_dirent *next;
61459- const char *name;
61460+ const unsigned char * name;
61461+ char d_name[sizeof(next->s_dentry->d_iname)];
61462 int len;
61463 struct inode *inode = NULL;
61464
61465@@ -1549,7 +1550,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61466 continue;
61467
61468 name = configfs_get_name(next);
61469- len = strlen(name);
61470+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61471+ len = next->s_dentry->d_name.len;
61472+ memcpy(d_name, name, len);
61473+ name = d_name;
61474+ } else
61475+ len = strlen(name);
61476
61477 /*
61478 * We'll have a dentry and an inode for
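/*
 * The configfs change above snapshots an inline dentry name before use:
 * d_iname can be rewritten in place by a concurrent rename, so the stable
 * d_name.len is read once and the bytes are copied to a local buffer.
 * A hedged sketch of the idiom as a standalone helper (hypothetical, not
 * part of the patch):
 */
static size_t snapshot_dentry_name(const struct dentry *d, char *buf,
				   size_t buflen)
{
	size_t len = min_t(size_t, d->d_name.len, buflen);

	memcpy(buf, d->d_name.name, len);	/* bytes may be rewritten under us... */
	return len;				/* ...but len was read once, so no overrun */
}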
61479diff --git a/fs/coredump.c b/fs/coredump.c
61480index bbbe139..b76fae5 100644
61481--- a/fs/coredump.c
61482+++ b/fs/coredump.c
61483@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
61484 struct pipe_inode_info *pipe = file->private_data;
61485
61486 pipe_lock(pipe);
61487- pipe->readers++;
61488- pipe->writers--;
61489+ atomic_inc(&pipe->readers);
61490+ atomic_dec(&pipe->writers);
61491 wake_up_interruptible_sync(&pipe->wait);
61492 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61493 pipe_unlock(pipe);
61494@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
61495 * We actually want wait_event_freezable() but then we need
61496 * to clear TIF_SIGPENDING and improve dump_interrupted().
61497 */
61498- wait_event_interruptible(pipe->wait, pipe->readers == 1);
61499+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
61500
61501 pipe_lock(pipe);
61502- pipe->readers--;
61503- pipe->writers++;
61504+ atomic_dec(&pipe->readers);
61505+ atomic_inc(&pipe->writers);
61506 pipe_unlock(pipe);
61507 }
61508
61509@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
61510 struct files_struct *displaced;
61511 bool need_nonrelative = false;
61512 bool core_dumped = false;
61513- static atomic_t core_dump_count = ATOMIC_INIT(0);
61514+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
61515+ long signr = siginfo->si_signo;
61516+ int dumpable;
61517 struct coredump_params cprm = {
61518 .siginfo = siginfo,
61519 .regs = signal_pt_regs(),
61520@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
61521 .mm_flags = mm->flags,
61522 };
61523
61524- audit_core_dumps(siginfo->si_signo);
61525+ audit_core_dumps(signr);
61526+
61527+ dumpable = __get_dumpable(cprm.mm_flags);
61528+
61529+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
61530+ gr_handle_brute_attach(dumpable);
61531
61532 binfmt = mm->binfmt;
61533 if (!binfmt || !binfmt->core_dump)
61534 goto fail;
61535- if (!__get_dumpable(cprm.mm_flags))
61536+ if (!dumpable)
61537 goto fail;
61538
61539 cred = prepare_creds();
61540@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
61541 need_nonrelative = true;
61542 }
61543
61544- retval = coredump_wait(siginfo->si_signo, &core_state);
61545+ retval = coredump_wait(signr, &core_state);
61546 if (retval < 0)
61547 goto fail_creds;
61548
61549@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
61550 }
61551 cprm.limit = RLIM_INFINITY;
61552
61553- dump_count = atomic_inc_return(&core_dump_count);
61554+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
61555 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
61556 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
61557 task_tgid_vnr(current), current->comm);
61558@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
61559 } else {
61560 struct inode *inode;
61561
61562+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
61563+
61564 if (cprm.limit < binfmt->min_coredump)
61565 goto fail_unlock;
61566
61567@@ -681,7 +690,7 @@ close_fail:
61568 filp_close(cprm.file, NULL);
61569 fail_dropcount:
61570 if (ispipe)
61571- atomic_dec(&core_dump_count);
61572+ atomic_dec_unchecked(&core_dump_count);
61573 fail_unlock:
61574 kfree(cn.corename);
61575 coredump_finish(mm, core_dumped);
61576@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
61577 struct file *file = cprm->file;
61578 loff_t pos = file->f_pos;
61579 ssize_t n;
61580+
61581+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
61582 if (cprm->written + nr > cprm->limit)
61583 return 0;
61584 while (nr) {
61585diff --git a/fs/dcache.c b/fs/dcache.c
61586index c71e373..5c1f656 100644
61587--- a/fs/dcache.c
61588+++ b/fs/dcache.c
61589@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
61590 * dentry_iput drops the locks, at which point nobody (except
61591 * transient RCU lookups) can reach this dentry.
61592 */
61593- BUG_ON(dentry->d_lockref.count > 0);
61594+ BUG_ON(__lockref_read(&dentry->d_lockref) > 0);
61595 this_cpu_dec(nr_dentry);
61596 if (dentry->d_op && dentry->d_op->d_release)
61597 dentry->d_op->d_release(dentry);
61598@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
61599 struct dentry *parent = dentry->d_parent;
61600 if (IS_ROOT(dentry))
61601 return NULL;
61602- if (unlikely(dentry->d_lockref.count < 0))
61603+ if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
61604 return NULL;
61605 if (likely(spin_trylock(&parent->d_lock)))
61606 return parent;
61607@@ -626,8 +626,8 @@ static inline bool fast_dput(struct dentry *dentry)
61608 */
61609 if (unlikely(ret < 0)) {
61610 spin_lock(&dentry->d_lock);
61611- if (dentry->d_lockref.count > 1) {
61612- dentry->d_lockref.count--;
61613+ if (__lockref_read(&dentry->d_lockref) > 1) {
61614+ __lockref_dec(&dentry->d_lockref);
61615 spin_unlock(&dentry->d_lock);
61616 return 1;
61617 }
61618@@ -682,7 +682,7 @@ static inline bool fast_dput(struct dentry *dentry)
61619 * else could have killed it and marked it dead. Either way, we
61620 * don't need to do anything else.
61621 */
61622- if (dentry->d_lockref.count) {
61623+ if (__lockref_read(&dentry->d_lockref)) {
61624 spin_unlock(&dentry->d_lock);
61625 return 1;
61626 }
61627@@ -692,7 +692,7 @@ static inline bool fast_dput(struct dentry *dentry)
61628 * lock, and we just tested that it was zero, so we can just
61629 * set it to 1.
61630 */
61631- dentry->d_lockref.count = 1;
61632+ __lockref_set(&dentry->d_lockref, 1);
61633 return 0;
61634 }
61635
61636@@ -751,7 +751,7 @@ repeat:
61637 dentry->d_flags |= DCACHE_REFERENCED;
61638 dentry_lru_add(dentry);
61639
61640- dentry->d_lockref.count--;
61641+ __lockref_dec(&dentry->d_lockref);
61642 spin_unlock(&dentry->d_lock);
61643 return;
61644
61645@@ -766,7 +766,7 @@ EXPORT_SYMBOL(dput);
61646 /* This must be called with d_lock held */
61647 static inline void __dget_dlock(struct dentry *dentry)
61648 {
61649- dentry->d_lockref.count++;
61650+ __lockref_inc(&dentry->d_lockref);
61651 }
61652
61653 static inline void __dget(struct dentry *dentry)
61654@@ -807,8 +807,8 @@ repeat:
61655 goto repeat;
61656 }
61657 rcu_read_unlock();
61658- BUG_ON(!ret->d_lockref.count);
61659- ret->d_lockref.count++;
61660+ BUG_ON(!__lockref_read(&ret->d_lockref));
61661+ __lockref_inc(&ret->d_lockref);
61662 spin_unlock(&ret->d_lock);
61663 return ret;
61664 }
61665@@ -886,9 +886,9 @@ restart:
61666 spin_lock(&inode->i_lock);
61667 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
61668 spin_lock(&dentry->d_lock);
61669- if (!dentry->d_lockref.count) {
61670+ if (!__lockref_read(&dentry->d_lockref)) {
61671 struct dentry *parent = lock_parent(dentry);
61672- if (likely(!dentry->d_lockref.count)) {
61673+ if (likely(!__lockref_read(&dentry->d_lockref))) {
61674 __dentry_kill(dentry);
61675 dput(parent);
61676 goto restart;
61677@@ -923,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
61678 * We found an inuse dentry which was not removed from
61679 * the LRU because of laziness during lookup. Do not free it.
61680 */
61681- if (dentry->d_lockref.count > 0) {
61682+ if (__lockref_read(&dentry->d_lockref) > 0) {
61683 spin_unlock(&dentry->d_lock);
61684 if (parent)
61685 spin_unlock(&parent->d_lock);
61686@@ -961,8 +961,8 @@ static void shrink_dentry_list(struct list_head *list)
61687 dentry = parent;
61688 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
61689 parent = lock_parent(dentry);
61690- if (dentry->d_lockref.count != 1) {
61691- dentry->d_lockref.count--;
61692+ if (__lockref_read(&dentry->d_lockref) != 1) {
61693+			__lockref_dec(&dentry->d_lockref);
61694 spin_unlock(&dentry->d_lock);
61695 if (parent)
61696 spin_unlock(&parent->d_lock);
61697@@ -1002,7 +1002,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
61698 * counts, just remove them from the LRU. Otherwise give them
61699 * another pass through the LRU.
61700 */
61701- if (dentry->d_lockref.count) {
61702+ if (__lockref_read(&dentry->d_lockref)) {
61703 d_lru_isolate(lru, dentry);
61704 spin_unlock(&dentry->d_lock);
61705 return LRU_REMOVED;
61706@@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
61707 } else {
61708 if (dentry->d_flags & DCACHE_LRU_LIST)
61709 d_lru_del(dentry);
61710- if (!dentry->d_lockref.count) {
61711+ if (!__lockref_read(&dentry->d_lockref)) {
61712 d_shrink_add(dentry, &data->dispose);
61713 data->found++;
61714 }
61715@@ -1384,7 +1384,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61716 return D_WALK_CONTINUE;
61717
61718 /* root with refcount 1 is fine */
61719- if (dentry == _data && dentry->d_lockref.count == 1)
61720+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
61721 return D_WALK_CONTINUE;
61722
61723 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
61724@@ -1393,7 +1393,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61725 dentry->d_inode ?
61726 dentry->d_inode->i_ino : 0UL,
61727 dentry,
61728- dentry->d_lockref.count,
61729+ __lockref_read(&dentry->d_lockref),
61730 dentry->d_sb->s_type->name,
61731 dentry->d_sb->s_id);
61732 WARN_ON(1);
61733@@ -1534,7 +1534,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61734 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
61735 if (name->len > DNAME_INLINE_LEN-1) {
61736 size_t size = offsetof(struct external_name, name[1]);
61737- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
61738+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
61739 if (!p) {
61740 kmem_cache_free(dentry_cache, dentry);
61741 return NULL;
61742@@ -1557,7 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61743 smp_wmb();
61744 dentry->d_name.name = dname;
61745
61746- dentry->d_lockref.count = 1;
61747+ __lockref_set(&dentry->d_lockref, 1);
61748 dentry->d_flags = 0;
61749 spin_lock_init(&dentry->d_lock);
61750 seqcount_init(&dentry->d_seq);
61751@@ -1566,6 +1566,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61752 dentry->d_sb = sb;
61753 dentry->d_op = NULL;
61754 dentry->d_fsdata = NULL;
61755+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
61756+ atomic_set(&dentry->chroot_refcnt, 0);
61757+#endif
61758 INIT_HLIST_BL_NODE(&dentry->d_hash);
61759 INIT_LIST_HEAD(&dentry->d_lru);
61760 INIT_LIST_HEAD(&dentry->d_subdirs);
61761@@ -2290,7 +2293,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
61762 goto next;
61763 }
61764
61765- dentry->d_lockref.count++;
61766+ __lockref_inc(&dentry->d_lockref);
61767 found = dentry;
61768 spin_unlock(&dentry->d_lock);
61769 break;
61770@@ -2358,7 +2361,7 @@ again:
61771 spin_lock(&dentry->d_lock);
61772 inode = dentry->d_inode;
61773 isdir = S_ISDIR(inode->i_mode);
61774- if (dentry->d_lockref.count == 1) {
61775+ if (__lockref_read(&dentry->d_lockref) == 1) {
61776 if (!spin_trylock(&inode->i_lock)) {
61777 spin_unlock(&dentry->d_lock);
61778 cpu_relax();
61779@@ -3311,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
61780
61781 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
61782 dentry->d_flags |= DCACHE_GENOCIDE;
61783- dentry->d_lockref.count--;
61784+ __lockref_dec(&dentry->d_lockref);
61785 }
61786 }
61787 return D_WALK_CONTINUE;
61788@@ -3427,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
61789 mempages -= reserve;
61790
61791 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
61792- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
61793+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
61794+ SLAB_NO_SANITIZE, NULL);
61795
61796 dcache_init();
61797 inode_init();
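/*
 * All direct d_lockref.count accesses in dcache.c are funnelled through
 * __lockref_read/__lockref_set/__lockref_inc/__lockref_dec so the count
 * can be retyped (hardened) in one place. A hedged sketch of what such
 * wrappers look like when the count stays a plain int (the patch's real
 * definitions live in the lockref headers):
 */
static inline int __lockref_read(const struct lockref *l)
{
	return l->count;
}

static inline void __lockref_set(struct lockref *l, int count)
{
	l->count = count;
}

static inline void __lockref_inc(struct lockref *l)
{
	l->count++;	/* callers hold d_lock, plain ops are safe */
}

static inline void __lockref_dec(struct lockref *l)
{
	l->count--;
}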
61798diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
61799index 96400ab..906103d 100644
61800--- a/fs/debugfs/inode.c
61801+++ b/fs/debugfs/inode.c
61802@@ -386,6 +386,10 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
61803 }
61804 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
61805
61806+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61807+extern int grsec_enable_sysfs_restrict;
61808+#endif
61809+
61810 /**
61811 * debugfs_create_dir - create a directory in the debugfs filesystem
61812 * @name: a pointer to a string containing the name of the directory to
61813@@ -404,6 +408,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
61814 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
61815 * returned.
61816 */
61817+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61818+extern int grsec_enable_sysfs_restrict;
61819+#endif
61820+
61821 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
61822 {
61823 struct dentry *dentry = start_creating(name, parent);
61824@@ -416,7 +424,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
61825 if (unlikely(!inode))
61826 return failed_creating(dentry);
61827
61828- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
61829+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61830+ if (grsec_enable_sysfs_restrict)
61831+ inode->i_mode = S_IFDIR | S_IRWXU;
61832+ else
61833+#endif
61834+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
61835 inode->i_op = &simple_dir_inode_operations;
61836 inode->i_fop = &simple_dir_operations;
61837
61838diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
61839index b08b518..d6acffa 100644
61840--- a/fs/ecryptfs/inode.c
61841+++ b/fs/ecryptfs/inode.c
61842@@ -663,7 +663,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
61843 old_fs = get_fs();
61844 set_fs(get_ds());
61845 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
61846- (char __user *)lower_buf,
61847+ (char __force_user *)lower_buf,
61848 PATH_MAX);
61849 set_fs(old_fs);
61850 if (rc < 0)
61851diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
61852index e4141f2..d8263e8 100644
61853--- a/fs/ecryptfs/miscdev.c
61854+++ b/fs/ecryptfs/miscdev.c
61855@@ -304,7 +304,7 @@ check_list:
61856 goto out_unlock_msg_ctx;
61857 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
61858 if (msg_ctx->msg) {
61859- if (copy_to_user(&buf[i], packet_length, packet_length_size))
61860+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
61861 goto out_unlock_msg_ctx;
61862 i += packet_length_size;
61863 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
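/*
 * The miscdev fix above bounds packet_length_size by the size of the
 * on-stack packet_length buffer before copy_to_user() runs, closing a
 * potential stack over-read. Generic shape of the guard (hypothetical
 * helper, not part of the patch):
 */
static ssize_t copy_bounded_to_user(char __user *dst, const void *src,
				    size_t n, size_t src_cap)
{
	if (n > src_cap || copy_to_user(dst, src, n))
		return -EFAULT;	/* never copy past the end of src */
	return n;
}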
61864diff --git a/fs/exec.c b/fs/exec.c
61865index 00400cf..b9dca28 100644
61866--- a/fs/exec.c
61867+++ b/fs/exec.c
61868@@ -56,8 +56,20 @@
61869 #include <linux/pipe_fs_i.h>
61870 #include <linux/oom.h>
61871 #include <linux/compat.h>
61872+#include <linux/random.h>
61873+#include <linux/seq_file.h>
61874+#include <linux/coredump.h>
61875+#include <linux/mman.h>
61876+
61877+#ifdef CONFIG_PAX_REFCOUNT
61878+#include <linux/kallsyms.h>
61879+#include <linux/kdebug.h>
61880+#endif
61881+
61882+#include <trace/events/fs.h>
61883
61884 #include <asm/uaccess.h>
61885+#include <asm/sections.h>
61886 #include <asm/mmu_context.h>
61887 #include <asm/tlb.h>
61888
61889@@ -66,19 +78,34 @@
61890
61891 #include <trace/events/sched.h>
61892
61893+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61894+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61895+{
61896+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61897+}
61898+#endif
61899+
61900+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61901+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61902+EXPORT_SYMBOL(pax_set_initial_flags_func);
61903+#endif
61904+
61905 int suid_dumpable = 0;
61906
61907 static LIST_HEAD(formats);
61908 static DEFINE_RWLOCK(binfmt_lock);
61909
61910+extern int gr_process_kernel_exec_ban(void);
61911+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61912+
61913 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61914 {
61915 BUG_ON(!fmt);
61916 if (WARN_ON(!fmt->load_binary))
61917 return;
61918 write_lock(&binfmt_lock);
61919- insert ? list_add(&fmt->lh, &formats) :
61920- list_add_tail(&fmt->lh, &formats);
61921+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61922+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61923 write_unlock(&binfmt_lock);
61924 }
61925
61926@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61927 void unregister_binfmt(struct linux_binfmt * fmt)
61928 {
61929 write_lock(&binfmt_lock);
61930- list_del(&fmt->lh);
61931+ pax_list_del((struct list_head *)&fmt->lh);
61932 write_unlock(&binfmt_lock);
61933 }
61934
61935@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61936 int write)
61937 {
61938 struct page *page;
61939- int ret;
61940
61941-#ifdef CONFIG_STACK_GROWSUP
61942- if (write) {
61943- ret = expand_downwards(bprm->vma, pos);
61944- if (ret < 0)
61945- return NULL;
61946- }
61947-#endif
61948- ret = get_user_pages(current, bprm->mm, pos,
61949- 1, write, 1, &page, NULL);
61950- if (ret <= 0)
61951+ if (0 > expand_downwards(bprm->vma, pos))
61952+ return NULL;
61953+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61954 return NULL;
61955
61956 if (write) {
61957@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61958 if (size <= ARG_MAX)
61959 return page;
61960
61961+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61962+ // only allow 512KB for argv+env on suid/sgid binaries
61963+ // to prevent easy ASLR exhaustion
61964+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61965+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61966+ (size > (512 * 1024))) {
61967+ put_page(page);
61968+ return NULL;
61969+ }
61970+#endif
61971+
61972 /*
61973 * Limit to 1/4-th the stack size for the argv+env strings.
61974 * This ensures that:
61975@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61976 vma->vm_end = STACK_TOP_MAX;
61977 vma->vm_start = vma->vm_end - PAGE_SIZE;
61978 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61979+
61980+#ifdef CONFIG_PAX_SEGMEXEC
61981+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61982+#endif
61983+
61984 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61985 INIT_LIST_HEAD(&vma->anon_vma_chain);
61986
61987@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61988 arch_bprm_mm_init(mm, vma);
61989 up_write(&mm->mmap_sem);
61990 bprm->p = vma->vm_end - sizeof(void *);
61991+
61992+#ifdef CONFIG_PAX_RANDUSTACK
61993+ if (randomize_va_space)
61994+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61995+#endif
61996+
61997 return 0;
61998 err:
61999 up_write(&mm->mmap_sem);
62000@@ -396,7 +437,7 @@ struct user_arg_ptr {
62001 } ptr;
62002 };
62003
62004-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62005+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62006 {
62007 const char __user *native;
62008
62009@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62010 compat_uptr_t compat;
62011
62012 if (get_user(compat, argv.ptr.compat + nr))
62013- return ERR_PTR(-EFAULT);
62014+ return (const char __force_user *)ERR_PTR(-EFAULT);
62015
62016 return compat_ptr(compat);
62017 }
62018 #endif
62019
62020 if (get_user(native, argv.ptr.native + nr))
62021- return ERR_PTR(-EFAULT);
62022+ return (const char __force_user *)ERR_PTR(-EFAULT);
62023
62024 return native;
62025 }
62026@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
62027 if (!p)
62028 break;
62029
62030- if (IS_ERR(p))
62031+ if (IS_ERR((const char __force_kernel *)p))
62032 return -EFAULT;
62033
62034 if (i >= max)
62035@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
62036
62037 ret = -EFAULT;
62038 str = get_user_arg_ptr(argv, argc);
62039- if (IS_ERR(str))
62040+ if (IS_ERR((const char __force_kernel *)str))
62041 goto out;
62042
62043 len = strnlen_user(str, MAX_ARG_STRLEN);
62044@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
62045 int r;
62046 mm_segment_t oldfs = get_fs();
62047 struct user_arg_ptr argv = {
62048- .ptr.native = (const char __user *const __user *)__argv,
62049+ .ptr.native = (const char __user * const __force_user *)__argv,
62050 };
62051
62052 set_fs(KERNEL_DS);
62053@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62054 unsigned long new_end = old_end - shift;
62055 struct mmu_gather tlb;
62056
62057- BUG_ON(new_start > new_end);
62058+ if (new_start >= new_end || new_start < mmap_min_addr)
62059+ return -ENOMEM;
62060
62061 /*
62062 * ensure there are no vmas between where we want to go
62063@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62064 if (vma != find_vma(mm, new_start))
62065 return -EFAULT;
62066
62067+#ifdef CONFIG_PAX_SEGMEXEC
62068+ BUG_ON(pax_find_mirror_vma(vma));
62069+#endif
62070+
62071 /*
62072 * cover the whole range: [new_start, old_end)
62073 */
62074@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62075 stack_top = arch_align_stack(stack_top);
62076 stack_top = PAGE_ALIGN(stack_top);
62077
62078- if (unlikely(stack_top < mmap_min_addr) ||
62079- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
62080- return -ENOMEM;
62081-
62082 stack_shift = vma->vm_end - stack_top;
62083
62084 bprm->p -= stack_shift;
62085@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
62086 bprm->exec -= stack_shift;
62087
62088 down_write(&mm->mmap_sem);
62089+
62090+ /* Move stack pages down in memory. */
62091+ if (stack_shift) {
62092+ ret = shift_arg_pages(vma, stack_shift);
62093+ if (ret)
62094+ goto out_unlock;
62095+ }
62096+
62097 vm_flags = VM_STACK_FLAGS;
62098
62099+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62100+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62101+ vm_flags &= ~VM_EXEC;
62102+
62103+#ifdef CONFIG_PAX_MPROTECT
62104+ if (mm->pax_flags & MF_PAX_MPROTECT)
62105+ vm_flags &= ~VM_MAYEXEC;
62106+#endif
62107+
62108+ }
62109+#endif
62110+
62111 /*
62112 * Adjust stack execute permissions; explicitly enable for
62113 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
62114@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62115 goto out_unlock;
62116 BUG_ON(prev != vma);
62117
62118- /* Move stack pages down in memory. */
62119- if (stack_shift) {
62120- ret = shift_arg_pages(vma, stack_shift);
62121- if (ret)
62122- goto out_unlock;
62123- }
62124-
62125 /* mprotect_fixup is overkill to remove the temporary stack flags */
62126 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
62127
62128@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
62129 #endif
62130 current->mm->start_stack = bprm->p;
62131 ret = expand_stack(vma, stack_base);
62132+
62133+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
62134+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
62135+ unsigned long size;
62136+ vm_flags_t vm_flags;
62137+
62138+ size = STACK_TOP - vma->vm_end;
62139+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
62140+
62141+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
62142+
62143+#ifdef CONFIG_X86
62144+ if (!ret) {
62145+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
62146+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
62147+ }
62148+#endif
62149+
62150+ }
62151+#endif
62152+
62153 if (ret)
62154 ret = -EFAULT;
62155
62156@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
62157 if (err)
62158 goto exit;
62159
62160- if (name->name[0] != '\0')
62161+ if (name->name[0] != '\0') {
62162 fsnotify_open(file);
62163+ trace_open_exec(name->name);
62164+ }
62165
62166 out:
62167 return file;
62168@@ -815,7 +893,7 @@ int kernel_read(struct file *file, loff_t offset,
62169 old_fs = get_fs();
62170 set_fs(get_ds());
62171 /* The cast to a user pointer is valid due to the set_fs() */
62172- result = vfs_read(file, (void __user *)addr, count, &pos);
62173+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62174 set_fs(old_fs);
62175 return result;
62176 }
62177@@ -860,6 +938,7 @@ static int exec_mmap(struct mm_struct *mm)
62178 tsk->mm = mm;
62179 tsk->active_mm = mm;
62180 activate_mm(active_mm, mm);
62181+ populate_stack();
62182 tsk->mm->vmacache_seqnum = 0;
62183 vmacache_flush(tsk);
62184 task_unlock(tsk);
62185@@ -926,10 +1005,14 @@ static int de_thread(struct task_struct *tsk)
62186 if (!thread_group_leader(tsk)) {
62187 struct task_struct *leader = tsk->group_leader;
62188
62189- sig->notify_count = -1; /* for exit_notify() */
62190 for (;;) {
62191 threadgroup_change_begin(tsk);
62192 write_lock_irq(&tasklist_lock);
62193+ /*
62194+ * Do this under tasklist_lock to ensure that
62195+ * exit_notify() can't miss ->group_exit_task
62196+ */
62197+ sig->notify_count = -1;
62198 if (likely(leader->exit_state))
62199 break;
62200 __set_current_state(TASK_KILLABLE);
62201@@ -1258,7 +1341,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62202 }
62203 rcu_read_unlock();
62204
62205- if (p->fs->users > n_fs)
62206+ if (atomic_read(&p->fs->users) > n_fs)
62207 bprm->unsafe |= LSM_UNSAFE_SHARE;
62208 else
62209 p->fs->in_exec = 1;
62210@@ -1459,6 +1542,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62211 return ret;
62212 }
62213
62214+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62215+static DEFINE_PER_CPU(u64, exec_counter);
62216+static int __init init_exec_counters(void)
62217+{
62218+ unsigned int cpu;
62219+
62220+ for_each_possible_cpu(cpu) {
62221+ per_cpu(exec_counter, cpu) = (u64)cpu;
62222+ }
62223+
62224+ return 0;
62225+}
62226+early_initcall(init_exec_counters);
62227+static inline void increment_exec_counter(void)
62228+{
62229+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62230+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62231+}
62232+#else
62233+static inline void increment_exec_counter(void) {}
62234+#endif
62235+
62236+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62237+ struct user_arg_ptr argv);
62238+
62239 /*
62240 * sys_execve() executes a new program.
62241 */
62242@@ -1467,6 +1575,11 @@ static int do_execveat_common(int fd, struct filename *filename,
62243 struct user_arg_ptr envp,
62244 int flags)
62245 {
62246+#ifdef CONFIG_GRKERNSEC
62247+ struct file *old_exec_file;
62248+ struct acl_subject_label *old_acl;
62249+ struct rlimit old_rlim[RLIM_NLIMITS];
62250+#endif
62251 char *pathbuf = NULL;
62252 struct linux_binprm *bprm;
62253 struct file *file;
62254@@ -1476,6 +1589,8 @@ static int do_execveat_common(int fd, struct filename *filename,
62255 if (IS_ERR(filename))
62256 return PTR_ERR(filename);
62257
62258+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62259+
62260 /*
62261 * We move the actual failure in case of RLIMIT_NPROC excess from
62262 * set*uid() to execve() because too many poorly written programs
62263@@ -1513,6 +1628,11 @@ static int do_execveat_common(int fd, struct filename *filename,
62264 if (IS_ERR(file))
62265 goto out_unmark;
62266
62267+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62268+ retval = -EPERM;
62269+ goto out_unmark;
62270+ }
62271+
62272 sched_exec();
62273
62274 bprm->file = file;
62275@@ -1539,6 +1659,11 @@ static int do_execveat_common(int fd, struct filename *filename,
62276 }
62277 bprm->interp = bprm->filename;
62278
62279+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62280+ retval = -EACCES;
62281+ goto out_unmark;
62282+ }
62283+
62284 retval = bprm_mm_init(bprm);
62285 if (retval)
62286 goto out_unmark;
62287@@ -1555,24 +1680,70 @@ static int do_execveat_common(int fd, struct filename *filename,
62288 if (retval < 0)
62289 goto out;
62290
62291+#ifdef CONFIG_GRKERNSEC
62292+ old_acl = current->acl;
62293+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62294+ old_exec_file = current->exec_file;
62295+ get_file(file);
62296+ current->exec_file = file;
62297+#endif
62298+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62299+ /* limit suid stack to 8MB
62300+ * we saved the old limits above and will restore them if this exec fails
62301+ */
62302+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62303+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62304+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62305+#endif
62306+
62307+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62308+ retval = -EPERM;
62309+ goto out_fail;
62310+ }
62311+
62312+ if (!gr_tpe_allow(file)) {
62313+ retval = -EACCES;
62314+ goto out_fail;
62315+ }
62316+
62317+ if (gr_check_crash_exec(file)) {
62318+ retval = -EACCES;
62319+ goto out_fail;
62320+ }
62321+
62322+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62323+ bprm->unsafe);
62324+ if (retval < 0)
62325+ goto out_fail;
62326+
62327 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62328 if (retval < 0)
62329- goto out;
62330+ goto out_fail;
62331
62332 bprm->exec = bprm->p;
62333 retval = copy_strings(bprm->envc, envp, bprm);
62334 if (retval < 0)
62335- goto out;
62336+ goto out_fail;
62337
62338 retval = copy_strings(bprm->argc, argv, bprm);
62339 if (retval < 0)
62340- goto out;
62341+ goto out_fail;
62342+
62343+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62344+
62345+ gr_handle_exec_args(bprm, argv);
62346
62347 retval = exec_binprm(bprm);
62348 if (retval < 0)
62349- goto out;
62350+ goto out_fail;
62351+#ifdef CONFIG_GRKERNSEC
62352+ if (old_exec_file)
62353+ fput(old_exec_file);
62354+#endif
62355
62356 /* execve succeeded */
62357+
62358+ increment_exec_counter();
62359 current->fs->in_exec = 0;
62360 current->in_execve = 0;
62361 acct_update_integrals(current);
62362@@ -1584,6 +1755,14 @@ static int do_execveat_common(int fd, struct filename *filename,
62363 put_files_struct(displaced);
62364 return retval;
62365
62366+out_fail:
62367+#ifdef CONFIG_GRKERNSEC
62368+ current->acl = old_acl;
62369+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62370+ fput(current->exec_file);
62371+ current->exec_file = old_exec_file;
62372+#endif
62373+
62374 out:
62375 if (bprm->mm) {
62376 acct_arg_size(bprm, 0);
62377@@ -1730,3 +1909,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
62378 argv, envp, flags);
62379 }
62380 #endif
62381+
62382+int pax_check_flags(unsigned long *flags)
62383+{
62384+ int retval = 0;
62385+
62386+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62387+ if (*flags & MF_PAX_SEGMEXEC)
62388+ {
62389+ *flags &= ~MF_PAX_SEGMEXEC;
62390+ retval = -EINVAL;
62391+ }
62392+#endif
62393+
62394+ if ((*flags & MF_PAX_PAGEEXEC)
62395+
62396+#ifdef CONFIG_PAX_PAGEEXEC
62397+ && (*flags & MF_PAX_SEGMEXEC)
62398+#endif
62399+
62400+ )
62401+ {
62402+ *flags &= ~MF_PAX_PAGEEXEC;
62403+ retval = -EINVAL;
62404+ }
62405+
62406+ if ((*flags & MF_PAX_MPROTECT)
62407+
62408+#ifdef CONFIG_PAX_MPROTECT
62409+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62410+#endif
62411+
62412+ )
62413+ {
62414+ *flags &= ~MF_PAX_MPROTECT;
62415+ retval = -EINVAL;
62416+ }
62417+
62418+ if ((*flags & MF_PAX_EMUTRAMP)
62419+
62420+#ifdef CONFIG_PAX_EMUTRAMP
62421+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62422+#endif
62423+
62424+ )
62425+ {
62426+ *flags &= ~MF_PAX_EMUTRAMP;
62427+ retval = -EINVAL;
62428+ }
62429+
62430+ return retval;
62431+}
62432+
62433+EXPORT_SYMBOL(pax_check_flags);
62434+
62435+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62436+char *pax_get_path(const struct path *path, char *buf, int buflen)
62437+{
62438+ char *pathname = d_path(path, buf, buflen);
62439+
62440+ if (IS_ERR(pathname))
62441+ goto toolong;
62442+
62443+ pathname = mangle_path(buf, pathname, "\t\n\\");
62444+ if (!pathname)
62445+ goto toolong;
62446+
62447+ *pathname = 0;
62448+ return buf;
62449+
62450+toolong:
62451+ return "<path too long>";
62452+}
62453+EXPORT_SYMBOL(pax_get_path);
62454+
62455+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62456+{
62457+ struct task_struct *tsk = current;
62458+ struct mm_struct *mm = current->mm;
62459+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62460+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62461+ char *path_exec = NULL;
62462+ char *path_fault = NULL;
62463+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62464+ siginfo_t info = { };
62465+
62466+ if (buffer_exec && buffer_fault) {
62467+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62468+
62469+ down_read(&mm->mmap_sem);
62470+ vma = mm->mmap;
62471+ while (vma && (!vma_exec || !vma_fault)) {
62472+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62473+ vma_exec = vma;
62474+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62475+ vma_fault = vma;
62476+ vma = vma->vm_next;
62477+ }
62478+ if (vma_exec)
62479+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62480+ if (vma_fault) {
62481+ start = vma_fault->vm_start;
62482+ end = vma_fault->vm_end;
62483+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62484+ if (vma_fault->vm_file)
62485+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62486+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62487+ path_fault = "<heap>";
62488+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62489+ path_fault = "<stack>";
62490+ else
62491+ path_fault = "<anonymous mapping>";
62492+ }
62493+ up_read(&mm->mmap_sem);
62494+ }
62495+ if (tsk->signal->curr_ip)
62496+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62497+ else
62498+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62499+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62500+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62501+ free_page((unsigned long)buffer_exec);
62502+ free_page((unsigned long)buffer_fault);
62503+ pax_report_insns(regs, pc, sp);
62504+ info.si_signo = SIGKILL;
62505+ info.si_errno = 0;
62506+ info.si_code = SI_KERNEL;
62507+ info.si_pid = 0;
62508+ info.si_uid = 0;
62509+ do_coredump(&info);
62510+}
62511+#endif
62512+
62513+#ifdef CONFIG_PAX_REFCOUNT
62514+void pax_report_refcount_overflow(struct pt_regs *regs)
62515+{
62516+ if (current->signal->curr_ip)
62517+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62518+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62519+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62520+ else
62521+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62522+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62523+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
62524+ preempt_disable();
62525+ show_regs(regs);
62526+ preempt_enable();
62527+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
62528+}
62529+#endif
62530+
62531+#ifdef CONFIG_PAX_USERCOPY
62532+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
62533+static noinline int check_stack_object(const void *obj, unsigned long len)
62534+{
62535+ const void * const stack = task_stack_page(current);
62536+ const void * const stackend = stack + THREAD_SIZE;
62537+
62538+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62539+ const void *frame = NULL;
62540+ const void *oldframe;
62541+#endif
62542+
62543+ if (obj + len < obj)
62544+ return -1;
62545+
62546+ if (obj + len <= stack || stackend <= obj)
62547+ return 0;
62548+
62549+ if (obj < stack || stackend < obj + len)
62550+ return -1;
62551+
62552+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62553+ oldframe = __builtin_frame_address(1);
62554+ if (oldframe)
62555+ frame = __builtin_frame_address(2);
62556+ /*
62557+ low ----------------------------------------------> high
62558+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
62559+ ^----------------^
62560+ allow copies only within here
62561+ */
62562+ while (stack <= frame && frame < stackend) {
62563+ /* if obj + len extends past the last frame, this
62564+ check won't pass and the next frame will be 0,
62565+ causing us to bail out and correctly report
62566+ the copy as invalid
62567+ */
62568+ if (obj + len <= frame)
62569+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
62570+ oldframe = frame;
62571+ frame = *(const void * const *)frame;
62572+ }
62573+ return -1;
62574+#else
62575+ return 1;
62576+#endif
62577+}
62578+
62579+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
62580+{
62581+ if (current->signal->curr_ip)
62582+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62583+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62584+ else
62585+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62586+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62587+ dump_stack();
62588+ gr_handle_kernel_exploit();
62589+ do_group_exit(SIGKILL);
62590+}
62591+#endif
62592+
62593+#ifdef CONFIG_PAX_USERCOPY
62594+
62595+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
62596+{
62597+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62598+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
62599+#ifdef CONFIG_MODULES
62600+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
62601+#else
62602+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
62603+#endif
62604+
62605+#else
62606+ unsigned long textlow = (unsigned long)_stext;
62607+ unsigned long texthigh = (unsigned long)_etext;
62608+
62609+#ifdef CONFIG_X86_64
62610+ /* check against linear mapping as well */
62611+ if (high > (unsigned long)__va(__pa(textlow)) &&
62612+ low < (unsigned long)__va(__pa(texthigh)))
62613+ return true;
62614+#endif
62615+
62616+#endif
62617+
62618+ if (high <= textlow || low >= texthigh)
62619+ return false;
62620+ else
62621+ return true;
62622+}
62623+#endif
62624+
62625+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
62626+{
62627+#ifdef CONFIG_PAX_USERCOPY
62628+ const char *type;
62629+#endif
62630+
62631+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
62632+ unsigned long stackstart = (unsigned long)task_stack_page(current);
62633+ unsigned long currentsp = (unsigned long)&stackstart;
62634+ if (unlikely((currentsp < stackstart + 512 ||
62635+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
62636+ BUG();
62637+#endif
62638+
62639+#ifndef CONFIG_PAX_USERCOPY_DEBUG
62640+ if (const_size)
62641+ return;
62642+#endif
62643+
62644+#ifdef CONFIG_PAX_USERCOPY
62645+ if (!n)
62646+ return;
62647+
62648+ type = check_heap_object(ptr, n);
62649+ if (!type) {
62650+ int ret = check_stack_object(ptr, n);
62651+ if (ret == 1 || ret == 2)
62652+ return;
62653+ if (ret == 0) {
62654+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
62655+ type = "<kernel text>";
62656+ else
62657+ return;
62658+ } else
62659+ type = "<process stack>";
62660+ }
62661+
62662+ pax_report_usercopy(ptr, n, to_user, type);
62663+#endif
62664+
62665+}
62666+EXPORT_SYMBOL(__check_object_size);
62667+
62668+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62669+void pax_track_stack(void)
62670+{
62671+ unsigned long sp = (unsigned long)&sp;
62672+ if (sp < current_thread_info()->lowest_stack &&
62673+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
62674+ current_thread_info()->lowest_stack = sp;
62675+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
62676+ BUG();
62677+}
62678+EXPORT_SYMBOL(pax_track_stack);
62679+#endif
62680+
62681+#ifdef CONFIG_PAX_SIZE_OVERFLOW
62682+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
62683+{
62684+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
62685+ dump_stack();
62686+ do_group_exit(SIGKILL);
62687+}
62688+EXPORT_SYMBOL(report_size_overflow);
62689+#endif
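/*
 * __check_object_size() above is the single choke point the USERCOPY
 * instrumentation routes every variable-sized user copy through; it
 * terminates the task when the span crosses a heap object, leaves the
 * valid stack frames, or overlaps kernel text. A hedged sketch of an
 * instrumented call site (hypothetical wrapper):
 */
static unsigned long checked_copy_to_user(void __user *dst, const void *src,
					  unsigned long n)
{
	__check_object_size(src, n, true, false);	/* kills the task on a bad span */
	return copy_to_user(dst, src, n);
}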
62690diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
62691index 9f9992b..8b59411 100644
62692--- a/fs/ext2/balloc.c
62693+++ b/fs/ext2/balloc.c
62694@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
62695
62696 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62697 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62698- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62699+ if (free_blocks < root_blocks + 1 &&
62700 !uid_eq(sbi->s_resuid, current_fsuid()) &&
62701 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62702- !in_group_p (sbi->s_resgid))) {
62703+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62704 return 0;
62705 }
62706 return 1;
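This ext2 hunk (mirrored for ext3 and ext4 below) reorders the reserved-block predicate so the capability test runs last, and swaps it for capable_nolog(), presumably so grsec's capability auditing stays quiet on this hot allocation path: with short-circuiting, the cheap uid/gid comparisons settle most calls before any logging probe fires. The ordering effect in miniature, with hypothetical helpers:

    #include <stdio.h>

    static int is_reserved_owner(void) { return 1; }              /* cheap */
    static int capable_logged(void) { puts("audit entry"); return 0; }

    int main(void)
    {
        /* The cheap grant is checked first, so the logging probe is
         * skipped whenever ownership already decides the outcome. */
        if (is_reserved_owner() || capable_logged())
            puts("allocation allowed");
        return 0;
    }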
62707diff --git a/fs/ext2/super.c b/fs/ext2/super.c
62708index d0e746e..82e06f0 100644
62709--- a/fs/ext2/super.c
62710+++ b/fs/ext2/super.c
62711@@ -267,10 +267,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
62712 #ifdef CONFIG_EXT2_FS_XATTR
62713 if (test_opt(sb, XATTR_USER))
62714 seq_puts(seq, ",user_xattr");
62715- if (!test_opt(sb, XATTR_USER) &&
62716- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
62717+ if (!test_opt(sb, XATTR_USER))
62718 seq_puts(seq, ",nouser_xattr");
62719- }
62720 #endif
62721
62722 #ifdef CONFIG_EXT2_FS_POSIX_ACL
62723@@ -856,8 +854,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
62724 if (def_mount_opts & EXT2_DEFM_UID16)
62725 set_opt(sbi->s_mount_opt, NO_UID32);
62726 #ifdef CONFIG_EXT2_FS_XATTR
62727- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
62728- set_opt(sbi->s_mount_opt, XATTR_USER);
62729+ /* always enable user xattrs */
62730+ set_opt(sbi->s_mount_opt, XATTR_USER);
62731 #endif
62732 #ifdef CONFIG_EXT2_FS_POSIX_ACL
62733 if (def_mount_opts & EXT2_DEFM_ACL)
62734diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
62735index 9142614..97484fa 100644
62736--- a/fs/ext2/xattr.c
62737+++ b/fs/ext2/xattr.c
62738@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
62739 struct buffer_head *bh = NULL;
62740 struct ext2_xattr_entry *entry;
62741 char *end;
62742- size_t rest = buffer_size;
62743+ size_t rest = buffer_size, total_size = 0;
62744 int error;
62745
62746 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
62747@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
62748 buffer += size;
62749 }
62750 rest -= size;
62751+ total_size += size;
62752 }
62753 }
62754- error = buffer_size - rest; /* total size */
62755+ error = total_size;
62756
62757 cleanup:
62758 brelse(bh);
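The listxattr hunk above (repeated verbatim for ext3 and ext4 below) stops computing the total name-list length as buffer_size - rest. When the caller passes a NULL buffer to probe the required size, buffer_size is 0 and rest wraps below zero on the first entry; the subtraction still comes out right modulo 2^N, but that deliberate unsigned wraparound is presumably exactly the pattern the size_overflow instrumentation elsewhere in this patch would trap, so an explicit total_size accumulator replaces it. The wrap, demonstrated:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t buffer_size = 0;                 /* NULL-buffer size probe */
        size_t rest = buffer_size, total_size = 0;
        size_t sizes[] = { 5, 7, 11 };

        for (int i = 0; i < 3; i++) {
            rest -= sizes[i];        /* wraps past zero on iteration one */
            total_size += sizes[i];  /* the patched, wrap-free accumulator */
        }
        printf("%zu %zu\n", buffer_size - rest, total_size); /* 23 23 */
        return 0;
    }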
62759diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
62760index 158b5d4..2432610 100644
62761--- a/fs/ext3/balloc.c
62762+++ b/fs/ext3/balloc.c
62763@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
62764
62765 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62766 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62767- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62768+ if (free_blocks < root_blocks + 1 &&
62769 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
62770 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62771- !in_group_p (sbi->s_resgid))) {
62772+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62773 return 0;
62774 }
62775 return 1;
62776diff --git a/fs/ext3/super.c b/fs/ext3/super.c
62777index d4dbf3c..906a6fb 100644
62778--- a/fs/ext3/super.c
62779+++ b/fs/ext3/super.c
62780@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
62781 #ifdef CONFIG_EXT3_FS_XATTR
62782 if (test_opt(sb, XATTR_USER))
62783 seq_puts(seq, ",user_xattr");
62784- if (!test_opt(sb, XATTR_USER) &&
62785- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
62786+ if (!test_opt(sb, XATTR_USER))
62787 seq_puts(seq, ",nouser_xattr");
62788- }
62789 #endif
62790 #ifdef CONFIG_EXT3_FS_POSIX_ACL
62791 if (test_opt(sb, POSIX_ACL))
62792@@ -1760,8 +1758,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
62793 if (def_mount_opts & EXT3_DEFM_UID16)
62794 set_opt(sbi->s_mount_opt, NO_UID32);
62795 #ifdef CONFIG_EXT3_FS_XATTR
62796- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
62797- set_opt(sbi->s_mount_opt, XATTR_USER);
62798+ /* always enable user xattrs */
62799+ set_opt(sbi->s_mount_opt, XATTR_USER);
62800 #endif
62801 #ifdef CONFIG_EXT3_FS_POSIX_ACL
62802 if (def_mount_opts & EXT3_DEFM_ACL)
62803diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
62804index c6874be..f8a6ae8 100644
62805--- a/fs/ext3/xattr.c
62806+++ b/fs/ext3/xattr.c
62807@@ -330,7 +330,7 @@ static int
62808 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62809 char *buffer, size_t buffer_size)
62810 {
62811- size_t rest = buffer_size;
62812+ size_t rest = buffer_size, total_size = 0;
62813
62814 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
62815 const struct xattr_handler *handler =
62816@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62817 buffer += size;
62818 }
62819 rest -= size;
62820+ total_size += size;
62821 }
62822 }
62823- return buffer_size - rest;
62824+ return total_size;
62825 }
62826
62827 static int
62828diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
62829index 83a6f49..d4e4d03 100644
62830--- a/fs/ext4/balloc.c
62831+++ b/fs/ext4/balloc.c
62832@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
62833 /* Hm, nope. Are (enough) root reserved clusters available? */
62834 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
62835 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
62836- capable(CAP_SYS_RESOURCE) ||
62837- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
62838+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
62839+ capable_nolog(CAP_SYS_RESOURCE)) {
62840
62841 if (free_clusters >= (nclusters + dirty_clusters +
62842 resv_clusters))
62843diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
62844index f63c3d5..3c1a033 100644
62845--- a/fs/ext4/ext4.h
62846+++ b/fs/ext4/ext4.h
62847@@ -1287,19 +1287,19 @@ struct ext4_sb_info {
62848 unsigned long s_mb_last_start;
62849
62850 /* stats for buddy allocator */
62851- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
62852- atomic_t s_bal_success; /* we found long enough chunks */
62853- atomic_t s_bal_allocated; /* in blocks */
62854- atomic_t s_bal_ex_scanned; /* total extents scanned */
62855- atomic_t s_bal_goals; /* goal hits */
62856- atomic_t s_bal_breaks; /* too long searches */
62857- atomic_t s_bal_2orders; /* 2^order hits */
62858+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
62859+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
62860+ atomic_unchecked_t s_bal_allocated; /* in blocks */
62861+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
62862+ atomic_unchecked_t s_bal_goals; /* goal hits */
62863+ atomic_unchecked_t s_bal_breaks; /* too long searches */
62864+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
62865 spinlock_t s_bal_lock;
62866 unsigned long s_mb_buddies_generated;
62867 unsigned long long s_mb_generation_time;
62868- atomic_t s_mb_lost_chunks;
62869- atomic_t s_mb_preallocated;
62870- atomic_t s_mb_discarded;
62871+ atomic_unchecked_t s_mb_lost_chunks;
62872+ atomic_unchecked_t s_mb_preallocated;
62873+ atomic_unchecked_t s_mb_discarded;
62874 atomic_t s_lock_busy;
62875
62876 /* locality groups */
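The struct change above is the PAX_REFCOUNT pattern that recurs through the rest of this patch: under that option, atomic_t arithmetic traps on signed overflow to defeat reference-count-overflow exploits, so pure statistics counters whose wraparound is harmless migrate to a parallel atomic_unchecked_t with _unchecked accessors (s_lock_busy stays a plain atomic_t). A hedged sketch of the convention, not the kernel's actual definitions:

    /* Sketch only; the real types live in the arch atomic headers. */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* deliberately no overflow detection: wrapping is acceptable */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
    }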
62877diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
62878index 8d1e602..abf497b 100644
62879--- a/fs/ext4/mballoc.c
62880+++ b/fs/ext4/mballoc.c
62881@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
62882 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
62883
62884 if (EXT4_SB(sb)->s_mb_stats)
62885- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
62886+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
62887
62888 break;
62889 }
62890@@ -2211,7 +2211,7 @@ repeat:
62891 ac->ac_status = AC_STATUS_CONTINUE;
62892 ac->ac_flags |= EXT4_MB_HINT_FIRST;
62893 cr = 3;
62894- atomic_inc(&sbi->s_mb_lost_chunks);
62895+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
62896 goto repeat;
62897 }
62898 }
62899@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
62900 if (sbi->s_mb_stats) {
62901 ext4_msg(sb, KERN_INFO,
62902 "mballoc: %u blocks %u reqs (%u success)",
62903- atomic_read(&sbi->s_bal_allocated),
62904- atomic_read(&sbi->s_bal_reqs),
62905- atomic_read(&sbi->s_bal_success));
62906+ atomic_read_unchecked(&sbi->s_bal_allocated),
62907+ atomic_read_unchecked(&sbi->s_bal_reqs),
62908+ atomic_read_unchecked(&sbi->s_bal_success));
62909 ext4_msg(sb, KERN_INFO,
62910 "mballoc: %u extents scanned, %u goal hits, "
62911 "%u 2^N hits, %u breaks, %u lost",
62912- atomic_read(&sbi->s_bal_ex_scanned),
62913- atomic_read(&sbi->s_bal_goals),
62914- atomic_read(&sbi->s_bal_2orders),
62915- atomic_read(&sbi->s_bal_breaks),
62916- atomic_read(&sbi->s_mb_lost_chunks));
62917+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
62918+ atomic_read_unchecked(&sbi->s_bal_goals),
62919+ atomic_read_unchecked(&sbi->s_bal_2orders),
62920+ atomic_read_unchecked(&sbi->s_bal_breaks),
62921+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
62922 ext4_msg(sb, KERN_INFO,
62923 "mballoc: %lu generated and it took %Lu",
62924 sbi->s_mb_buddies_generated,
62925 sbi->s_mb_generation_time);
62926 ext4_msg(sb, KERN_INFO,
62927 "mballoc: %u preallocated, %u discarded",
62928- atomic_read(&sbi->s_mb_preallocated),
62929- atomic_read(&sbi->s_mb_discarded));
62930+ atomic_read_unchecked(&sbi->s_mb_preallocated),
62931+ atomic_read_unchecked(&sbi->s_mb_discarded));
62932 }
62933
62934 free_percpu(sbi->s_locality_groups);
62935@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
62936 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
62937
62938 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
62939- atomic_inc(&sbi->s_bal_reqs);
62940- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62941+ atomic_inc_unchecked(&sbi->s_bal_reqs);
62942+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62943 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
62944- atomic_inc(&sbi->s_bal_success);
62945- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
62946+ atomic_inc_unchecked(&sbi->s_bal_success);
62947+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
62948 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
62949 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
62950- atomic_inc(&sbi->s_bal_goals);
62951+ atomic_inc_unchecked(&sbi->s_bal_goals);
62952 if (ac->ac_found > sbi->s_mb_max_to_scan)
62953- atomic_inc(&sbi->s_bal_breaks);
62954+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62955 }
62956
62957 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62958@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62959 trace_ext4_mb_new_inode_pa(ac, pa);
62960
62961 ext4_mb_use_inode_pa(ac, pa);
62962- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62963+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62964
62965 ei = EXT4_I(ac->ac_inode);
62966 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62967@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62968 trace_ext4_mb_new_group_pa(ac, pa);
62969
62970 ext4_mb_use_group_pa(ac, pa);
62971- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62972+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62973
62974 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62975 lg = ac->ac_lg;
62976@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62977 * from the bitmap and continue.
62978 */
62979 }
62980- atomic_add(free, &sbi->s_mb_discarded);
62981+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62982
62983 return err;
62984 }
62985@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62986 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62987 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62988 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62989- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62990+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62991 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62992
62993 return 0;
62994diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62995index 8313ca3..8a37d08 100644
62996--- a/fs/ext4/mmp.c
62997+++ b/fs/ext4/mmp.c
62998@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62999 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
63000 const char *function, unsigned int line, const char *msg)
63001 {
63002- __ext4_warning(sb, function, line, msg);
63003+ __ext4_warning(sb, function, line, "%s", msg);
63004 __ext4_warning(sb, function, line,
63005 "MMP failure info: last update time: %llu, last update "
63006 "node: %s, last update device: %s\n",
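The one-line mmp.c fix above is classic format-string hardening: __dump_mmp_msg() used to forward msg as the format argument of __ext4_warning(), so any '%' conversions inside the message would be interpreted against a nonexistent argument list. Passing it through a constant "%s" makes the message pure data. The hazard in user-space miniature:

    #include <stdio.h>

    int main(void)
    {
        const char *msg = "disk is 100%% full";  /* message containing '%' */
        /* printf(msg); */       /* unsafe: msg would be parsed as a format */
        printf("%s\n", msg);     /* safe: prints the '%' signs verbatim */
        return 0;
    }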
63007diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
63008index 8a8ec62..1b02de5 100644
63009--- a/fs/ext4/resize.c
63010+++ b/fs/ext4/resize.c
63011@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
63012
63013 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
63014 for (count2 = count; count > 0; count -= count2, block += count2) {
63015- ext4_fsblk_t start;
63016+ ext4_fsblk_t start, diff;
63017 struct buffer_head *bh;
63018 ext4_group_t group;
63019 int err;
63020@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
63021 start = ext4_group_first_block_no(sb, group);
63022 group -= flex_gd->groups[0].group;
63023
63024- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
63025- if (count2 > count)
63026- count2 = count;
63027-
63028 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
63029 BUG_ON(flex_gd->count > 1);
63030 continue;
63031@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
63032 err = ext4_journal_get_write_access(handle, bh);
63033 if (err)
63034 return err;
63035+
63036+ diff = block - start;
63037+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
63038+ if (count2 > count)
63039+ count2 = count;
63040+
63041 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
63042- block - start, count2);
63043- ext4_set_bits(bh->b_data, block - start, count2);
63044+ diff, count2);
63045+ ext4_set_bits(bh->b_data, diff, count2);
63046
63047 err = ext4_handle_dirty_metadata(handle, NULL, bh);
63048 if (unlikely(err))
63049diff --git a/fs/ext4/super.c b/fs/ext4/super.c
63050index e061e66..87bc092 100644
63051--- a/fs/ext4/super.c
63052+++ b/fs/ext4/super.c
63053@@ -1243,7 +1243,7 @@ static ext4_fsblk_t get_sb_block(void **data)
63054 }
63055
63056 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
63057-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63058+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63059 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
63060
63061 #ifdef CONFIG_QUOTA
63062@@ -2443,7 +2443,7 @@ struct ext4_attr {
63063 int offset;
63064 int deprecated_val;
63065 } u;
63066-};
63067+} __do_const;
63068
63069 static int parse_strtoull(const char *buf,
63070 unsigned long long max, unsigned long long *value)
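Two read-only-memory idioms in the super.c hunks above: deprecated_msg gains const, moving the array out of writable .data, and struct ext4_attr is tagged __do_const, which throughout this patch set appears to mark structures for grsec's constify plugin so their instances also end up read-only. The const half in isolation:

    #include <stdio.h>

    static char       msg_rw[] = "writable copy";   /* lands in .data   */
    static const char msg_ro[] = "read-only copy";  /* lands in .rodata */

    int main(void)
    {
        msg_rw[0] = 'W';        /* fine: the array is writable */
        /* msg_ro[0] = 'R'; */  /* rejected at compile time    */
        printf("%s / %s\n", msg_rw, msg_ro);
        return 0;
    }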
63071diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
63072index 1e09fc7..0400dd4 100644
63073--- a/fs/ext4/xattr.c
63074+++ b/fs/ext4/xattr.c
63075@@ -399,7 +399,7 @@ static int
63076 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63077 char *buffer, size_t buffer_size)
63078 {
63079- size_t rest = buffer_size;
63080+ size_t rest = buffer_size, total_size = 0;
63081
63082 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
63083 const struct xattr_handler *handler =
63084@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63085 buffer += size;
63086 }
63087 rest -= size;
63088+ total_size += size;
63089 }
63090 }
63091- return buffer_size - rest;
63092+ return total_size;
63093 }
63094
63095 static int
63096diff --git a/fs/fcntl.c b/fs/fcntl.c
63097index ee85cd4..9dd0d20 100644
63098--- a/fs/fcntl.c
63099+++ b/fs/fcntl.c
63100@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
63101 int force)
63102 {
63103 security_file_set_fowner(filp);
63104+ if (gr_handle_chroot_fowner(pid, type))
63105+ return;
63106+ if (gr_check_protected_task_fowner(pid, type))
63107+ return;
63108 f_modown(filp, pid, type, force);
63109 }
63110 EXPORT_SYMBOL(__f_setown);
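The __f_setown() hunk inserts two grsec vetoes ahead of f_modown(): SIGIO/SIGURG ownership is refused when the target pid crosses a chroot boundary or names a protected task. Both hooks use the same veto-before-commit shape, returning before any state changes. In miniature, with a hypothetical policy hook:

    #include <stdio.h>

    static int owner;

    static int policy_denies(int pid) { return pid < 0; }  /* hypothetical */

    static void guarded_setown(int pid)
    {
        if (policy_denies(pid))
            return;          /* vetoed: owner is left untouched */
        owner = pid;         /* committed */
    }

    int main(void)
    {
        guarded_setown(-1);
        guarded_setown(42);
        printf("owner = %d\n", owner);  /* owner = 42 */
        return 0;
    }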
63111diff --git a/fs/fhandle.c b/fs/fhandle.c
63112index 999ff5c..2281df9 100644
63113--- a/fs/fhandle.c
63114+++ b/fs/fhandle.c
63115@@ -8,6 +8,7 @@
63116 #include <linux/fs_struct.h>
63117 #include <linux/fsnotify.h>
63118 #include <linux/personality.h>
63119+#include <linux/grsecurity.h>
63120 #include <asm/uaccess.h>
63121 #include "internal.h"
63122 #include "mount.h"
63123@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
63124 } else
63125 retval = 0;
63126 /* copy the mount id */
63127- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
63128- sizeof(*mnt_id)) ||
63129+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
63130 copy_to_user(ufh, handle,
63131 sizeof(struct file_handle) + handle_bytes))
63132 retval = -EFAULT;
63133@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
63134 * the directory. Ideally we would like CAP_DAC_SEARCH.
63135 * But we don't have that
63136 */
63137- if (!capable(CAP_DAC_READ_SEARCH)) {
63138+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
63139 retval = -EPERM;
63140 goto out_err;
63141 }
63142@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
63143 goto out_err;
63144 }
63145 /* copy the full handle */
63146- if (copy_from_user(handle, ufh,
63147- sizeof(struct file_handle) +
63148+ *handle = f_handle;
63149+ if (copy_from_user(&handle->f_handle,
63150+ &ufh->f_handle,
63151 f_handle.handle_bytes)) {
63152 retval = -EFAULT;
63153 goto out_handle;
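Two fixes in the fhandle.c hunks: the mount id goes back to userspace via put_user(), the idiomatic call for a single scalar, and handle_to_path() no longer re-reads the file_handle header it already copied and validated into f_handle earlier in the function. That second change closes a double fetch: re-copying the header would let userspace swap handle_bytes between the validation read and the use read. A kernel-style sketch of the patched shape (not standalone; assumes handle was allocated with room for the payload):

    static long fetch_handle(struct file_handle __user *ufh,
                             struct file_handle *handle)
    {
        struct file_handle f_handle;

        /* fetch #1: header, copied once and bounds-checked */
        if (copy_from_user(&f_handle, ufh, sizeof(f_handle)))
            return -EFAULT;
        if (f_handle.handle_bytes > MAX_HANDLE_SZ)
            return -EINVAL;

        /* reuse the trusted header; fetch #2 covers the payload only */
        *handle = f_handle;
        if (copy_from_user(&handle->f_handle, &ufh->f_handle,
                           f_handle.handle_bytes))
            return -EFAULT;
        return 0;
    }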
63154diff --git a/fs/file.c b/fs/file.c
63155index ee738ea..f6c15629 100644
63156--- a/fs/file.c
63157+++ b/fs/file.c
63158@@ -16,6 +16,7 @@
63159 #include <linux/slab.h>
63160 #include <linux/vmalloc.h>
63161 #include <linux/file.h>
63162+#include <linux/security.h>
63163 #include <linux/fdtable.h>
63164 #include <linux/bitops.h>
63165 #include <linux/interrupt.h>
63166@@ -139,7 +140,7 @@ out:
63167 * Return <0 error code on error; 1 on successful completion.
63168 * The files->file_lock should be held on entry, and will be held on exit.
63169 */
63170-static int expand_fdtable(struct files_struct *files, int nr)
63171+static int expand_fdtable(struct files_struct *files, unsigned int nr)
63172 __releases(files->file_lock)
63173 __acquires(files->file_lock)
63174 {
63175@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
63176 * expanded and execution may have blocked.
63177 * The files->file_lock should be held on entry, and will be held on exit.
63178 */
63179-static int expand_files(struct files_struct *files, int nr)
63180+static int expand_files(struct files_struct *files, unsigned int nr)
63181 {
63182 struct fdtable *fdt;
63183
63184@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
63185 if (!file)
63186 return __close_fd(files, fd);
63187
63188+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
63189 if (fd >= rlimit(RLIMIT_NOFILE))
63190 return -EBADF;
63191
63192@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
63193 if (unlikely(oldfd == newfd))
63194 return -EINVAL;
63195
63196+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
63197 if (newfd >= rlimit(RLIMIT_NOFILE))
63198 return -EBADF;
63199
63200@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
63201 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
63202 {
63203 int err;
63204+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
63205 if (from >= rlimit(RLIMIT_NOFILE))
63206 return -EINVAL;
63207 err = alloc_fd(from, flags);
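The fs/file.c hunks do two things: expand_fdtable() and expand_files() take the descriptor count as unsigned int, so no negative value can ever compare low against a limit, and each descriptor-number entry point first reports the requested fd to gr_learn_resource(), feeding grsec's learning mode before the RLIMIT_NOFILE rejection fires. The signedness point in isolation:

    #include <stdio.h>

    static int expand_signed(int nr, unsigned int limit)
    {
        return nr >= (int)limit ? -1 : 0;   /* nr = -1 slips through */
    }

    static int expand_unsigned(unsigned int nr, unsigned int limit)
    {
        return nr >= limit ? -1 : 0;        /* nr = (unsigned)-1 is caught */
    }

    int main(void)
    {
        printf("%d %d\n", expand_signed(-1, 1024),
               expand_unsigned(-1, 1024));  /* prints "0 -1" */
        return 0;
    }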
63208diff --git a/fs/filesystems.c b/fs/filesystems.c
63209index 5797d45..7d7d79a 100644
63210--- a/fs/filesystems.c
63211+++ b/fs/filesystems.c
63212@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
63213 int len = dot ? dot - name : strlen(name);
63214
63215 fs = __get_fs_type(name, len);
63216+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63217+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
63218+#else
63219 if (!fs && (request_module("fs-%.*s", len, name) == 0))
63220+#endif
63221 fs = __get_fs_type(name, len);
63222
63223 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
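Under GRKERNSEC_MODHARDEN the filesystem autoload above detours through ___request_module() with a "grsec_modharden_fs" marker, presumably so the module loader can apply stricter policy to loads triggered from unprivileged mount attempts; the stock path is unchanged otherwise. Either way the module alias is built with "fs-%.*s", whose precision argument truncates at the subtype dot:

    #include <stdio.h>

    int main(void)
    {
        const char *name = "ext4.subtype";
        int len = 4;                     /* dot - name: bytes before '.' */
        printf("fs-%.*s\n", len, name);  /* requests "fs-ext4" */
        return 0;
    }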
63224diff --git a/fs/fs_struct.c b/fs/fs_struct.c
63225index 7dca743..1ff87ae 100644
63226--- a/fs/fs_struct.c
63227+++ b/fs/fs_struct.c
63228@@ -4,6 +4,7 @@
63229 #include <linux/path.h>
63230 #include <linux/slab.h>
63231 #include <linux/fs_struct.h>
63232+#include <linux/grsecurity.h>
63233 #include "internal.h"
63234
63235 /*
63236@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
63237 struct path old_root;
63238
63239 path_get(path);
63240+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
63241 spin_lock(&fs->lock);
63242 write_seqcount_begin(&fs->seq);
63243 old_root = fs->root;
63244 fs->root = *path;
63245+ gr_set_chroot_entries(current, path);
63246 write_seqcount_end(&fs->seq);
63247 spin_unlock(&fs->lock);
63248- if (old_root.dentry)
63249+ if (old_root.dentry) {
63250+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
63251 path_put(&old_root);
63252+ }
63253 }
63254
63255 /*
63256@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
63257 int hits = 0;
63258 spin_lock(&fs->lock);
63259 write_seqcount_begin(&fs->seq);
63260+ /* this root replacement is only done by pivot_root,
63261+ leave grsec's chroot tagging alone for this task
63262+ so that a pivoted root isn't treated as a chroot
63263+ */
63264 hits += replace_path(&fs->root, old_root, new_root);
63265 hits += replace_path(&fs->pwd, old_root, new_root);
63266 write_seqcount_end(&fs->seq);
63267@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
63268
63269 void free_fs_struct(struct fs_struct *fs)
63270 {
63271+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
63272 path_put(&fs->root);
63273 path_put(&fs->pwd);
63274 kmem_cache_free(fs_cachep, fs);
63275@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
63276 task_lock(tsk);
63277 spin_lock(&fs->lock);
63278 tsk->fs = NULL;
63279- kill = !--fs->users;
63280+ gr_clear_chroot_entries(tsk);
63281+ kill = !atomic_dec_return(&fs->users);
63282 spin_unlock(&fs->lock);
63283 task_unlock(tsk);
63284 if (kill)
63285@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63286 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
63287 /* We don't need to lock fs - think why ;-) */
63288 if (fs) {
63289- fs->users = 1;
63290+ atomic_set(&fs->users, 1);
63291 fs->in_exec = 0;
63292 spin_lock_init(&fs->lock);
63293 seqcount_init(&fs->seq);
63294@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63295 spin_lock(&old->lock);
63296 fs->root = old->root;
63297 path_get(&fs->root);
63298+ /* instead of calling gr_set_chroot_entries here,
63299+ we call it from every caller of this function
63300+ */
63301 fs->pwd = old->pwd;
63302 path_get(&fs->pwd);
63303 spin_unlock(&old->lock);
63304+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
63305 }
63306 return fs;
63307 }
63308@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
63309
63310 task_lock(current);
63311 spin_lock(&fs->lock);
63312- kill = !--fs->users;
63313+ kill = !atomic_dec_return(&fs->users);
63314 current->fs = new_fs;
63315+ gr_set_chroot_entries(current, &new_fs->root);
63316 spin_unlock(&fs->lock);
63317 task_unlock(current);
63318
63319@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63320
63321 int current_umask(void)
63322 {
63323- return current->fs->umask;
63324+ return current->fs->umask | gr_acl_umask();
63325 }
63326 EXPORT_SYMBOL(current_umask);
63327
63328 /* to be mentioned only in INIT_TASK */
63329 struct fs_struct init_fs = {
63330- .users = 1,
63331+ .users = ATOMIC_INIT(1),
63332 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63333 .seq = SEQCNT_ZERO(init_fs.seq),
63334 .umask = 0022,
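The fs_struct changes convert fs->users from a plain int to an atomic_t (with ATOMIC_INIT(1) in init_fs) and thread grsec's chroot reference bookkeeping through every place a root is set, copied, or dropped; current_umask() additionally ORs in gr_acl_umask(), so RBAC policy can only tighten the mask, never loosen it. The drop sites rely on atomic_dec_return() yielding the post-decrement value; a user-space analogue:

    #include <stdio.h>

    typedef struct { int counter; } atomic_t;   /* user-space stand-in */

    static int atomic_dec_return(atomic_t *v)
    {
        return __atomic_sub_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        atomic_t users = { 2 };
        /* kill = !atomic_dec_return(&fs->users): true only for last user */
        printf("%d\n", !atomic_dec_return(&users));  /* 0: still shared */
        printf("%d\n", !atomic_dec_return(&users));  /* 1: last user    */
        return 0;
    }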
63335diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63336index 89acec7..a575262 100644
63337--- a/fs/fscache/cookie.c
63338+++ b/fs/fscache/cookie.c
63339@@ -19,7 +19,7 @@
63340
63341 struct kmem_cache *fscache_cookie_jar;
63342
63343-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63344+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63345
63346 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63347 static int fscache_alloc_object(struct fscache_cache *cache,
63348@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63349 parent ? (char *) parent->def->name : "<no-parent>",
63350 def->name, netfs_data, enable);
63351
63352- fscache_stat(&fscache_n_acquires);
63353+ fscache_stat_unchecked(&fscache_n_acquires);
63354
63355 /* if there's no parent cookie, then we don't create one here either */
63356 if (!parent) {
63357- fscache_stat(&fscache_n_acquires_null);
63358+ fscache_stat_unchecked(&fscache_n_acquires_null);
63359 _leave(" [no parent]");
63360 return NULL;
63361 }
63362@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63363 /* allocate and initialise a cookie */
63364 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63365 if (!cookie) {
63366- fscache_stat(&fscache_n_acquires_oom);
63367+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63368 _leave(" [ENOMEM]");
63369 return NULL;
63370 }
63371@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63372
63373 switch (cookie->def->type) {
63374 case FSCACHE_COOKIE_TYPE_INDEX:
63375- fscache_stat(&fscache_n_cookie_index);
63376+ fscache_stat_unchecked(&fscache_n_cookie_index);
63377 break;
63378 case FSCACHE_COOKIE_TYPE_DATAFILE:
63379- fscache_stat(&fscache_n_cookie_data);
63380+ fscache_stat_unchecked(&fscache_n_cookie_data);
63381 break;
63382 default:
63383- fscache_stat(&fscache_n_cookie_special);
63384+ fscache_stat_unchecked(&fscache_n_cookie_special);
63385 break;
63386 }
63387
63388@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63389 } else {
63390 atomic_dec(&parent->n_children);
63391 __fscache_cookie_put(cookie);
63392- fscache_stat(&fscache_n_acquires_nobufs);
63393+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63394 _leave(" = NULL");
63395 return NULL;
63396 }
63397@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63398 }
63399 }
63400
63401- fscache_stat(&fscache_n_acquires_ok);
63402+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63403 _leave(" = %p", cookie);
63404 return cookie;
63405 }
63406@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63407 cache = fscache_select_cache_for_object(cookie->parent);
63408 if (!cache) {
63409 up_read(&fscache_addremove_sem);
63410- fscache_stat(&fscache_n_acquires_no_cache);
63411+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63412 _leave(" = -ENOMEDIUM [no cache]");
63413 return -ENOMEDIUM;
63414 }
63415@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63416 object = cache->ops->alloc_object(cache, cookie);
63417 fscache_stat_d(&fscache_n_cop_alloc_object);
63418 if (IS_ERR(object)) {
63419- fscache_stat(&fscache_n_object_no_alloc);
63420+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63421 ret = PTR_ERR(object);
63422 goto error;
63423 }
63424
63425- fscache_stat(&fscache_n_object_alloc);
63426+ fscache_stat_unchecked(&fscache_n_object_alloc);
63427
63428- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63429+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63430
63431 _debug("ALLOC OBJ%x: %s {%lx}",
63432 object->debug_id, cookie->def->name, object->events);
63433@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63434
63435 _enter("{%s}", cookie->def->name);
63436
63437- fscache_stat(&fscache_n_invalidates);
63438+ fscache_stat_unchecked(&fscache_n_invalidates);
63439
63440 /* Only permit invalidation of data files. Invalidating an index will
63441 * require the caller to release all its attachments to the tree rooted
63442@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63443 {
63444 struct fscache_object *object;
63445
63446- fscache_stat(&fscache_n_updates);
63447+ fscache_stat_unchecked(&fscache_n_updates);
63448
63449 if (!cookie) {
63450- fscache_stat(&fscache_n_updates_null);
63451+ fscache_stat_unchecked(&fscache_n_updates_null);
63452 _leave(" [no cookie]");
63453 return;
63454 }
63455@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63456 */
63457 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63458 {
63459- fscache_stat(&fscache_n_relinquishes);
63460+ fscache_stat_unchecked(&fscache_n_relinquishes);
63461 if (retire)
63462- fscache_stat(&fscache_n_relinquishes_retire);
63463+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63464
63465 if (!cookie) {
63466- fscache_stat(&fscache_n_relinquishes_null);
63467+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63468 _leave(" [no cookie]");
63469 return;
63470 }
63471@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63472 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63473 goto inconsistent;
63474
63475- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63476+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63477
63478 __fscache_use_cookie(cookie);
63479 if (fscache_submit_op(object, op) < 0)
63480diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63481index 7872a62..d91b19f 100644
63482--- a/fs/fscache/internal.h
63483+++ b/fs/fscache/internal.h
63484@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
63485 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63486 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63487 struct fscache_operation *,
63488- atomic_t *,
63489- atomic_t *,
63490+ atomic_unchecked_t *,
63491+ atomic_unchecked_t *,
63492 void (*)(struct fscache_operation *));
63493 extern void fscache_invalidate_writes(struct fscache_cookie *);
63494
63495@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
63496 * stats.c
63497 */
63498 #ifdef CONFIG_FSCACHE_STATS
63499-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63500-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63501+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63502+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63503
63504-extern atomic_t fscache_n_op_pend;
63505-extern atomic_t fscache_n_op_run;
63506-extern atomic_t fscache_n_op_enqueue;
63507-extern atomic_t fscache_n_op_deferred_release;
63508-extern atomic_t fscache_n_op_release;
63509-extern atomic_t fscache_n_op_gc;
63510-extern atomic_t fscache_n_op_cancelled;
63511-extern atomic_t fscache_n_op_rejected;
63512+extern atomic_unchecked_t fscache_n_op_pend;
63513+extern atomic_unchecked_t fscache_n_op_run;
63514+extern atomic_unchecked_t fscache_n_op_enqueue;
63515+extern atomic_unchecked_t fscache_n_op_deferred_release;
63516+extern atomic_unchecked_t fscache_n_op_release;
63517+extern atomic_unchecked_t fscache_n_op_gc;
63518+extern atomic_unchecked_t fscache_n_op_cancelled;
63519+extern atomic_unchecked_t fscache_n_op_rejected;
63520
63521-extern atomic_t fscache_n_attr_changed;
63522-extern atomic_t fscache_n_attr_changed_ok;
63523-extern atomic_t fscache_n_attr_changed_nobufs;
63524-extern atomic_t fscache_n_attr_changed_nomem;
63525-extern atomic_t fscache_n_attr_changed_calls;
63526+extern atomic_unchecked_t fscache_n_attr_changed;
63527+extern atomic_unchecked_t fscache_n_attr_changed_ok;
63528+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
63529+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
63530+extern atomic_unchecked_t fscache_n_attr_changed_calls;
63531
63532-extern atomic_t fscache_n_allocs;
63533-extern atomic_t fscache_n_allocs_ok;
63534-extern atomic_t fscache_n_allocs_wait;
63535-extern atomic_t fscache_n_allocs_nobufs;
63536-extern atomic_t fscache_n_allocs_intr;
63537-extern atomic_t fscache_n_allocs_object_dead;
63538-extern atomic_t fscache_n_alloc_ops;
63539-extern atomic_t fscache_n_alloc_op_waits;
63540+extern atomic_unchecked_t fscache_n_allocs;
63541+extern atomic_unchecked_t fscache_n_allocs_ok;
63542+extern atomic_unchecked_t fscache_n_allocs_wait;
63543+extern atomic_unchecked_t fscache_n_allocs_nobufs;
63544+extern atomic_unchecked_t fscache_n_allocs_intr;
63545+extern atomic_unchecked_t fscache_n_allocs_object_dead;
63546+extern atomic_unchecked_t fscache_n_alloc_ops;
63547+extern atomic_unchecked_t fscache_n_alloc_op_waits;
63548
63549-extern atomic_t fscache_n_retrievals;
63550-extern atomic_t fscache_n_retrievals_ok;
63551-extern atomic_t fscache_n_retrievals_wait;
63552-extern atomic_t fscache_n_retrievals_nodata;
63553-extern atomic_t fscache_n_retrievals_nobufs;
63554-extern atomic_t fscache_n_retrievals_intr;
63555-extern atomic_t fscache_n_retrievals_nomem;
63556-extern atomic_t fscache_n_retrievals_object_dead;
63557-extern atomic_t fscache_n_retrieval_ops;
63558-extern atomic_t fscache_n_retrieval_op_waits;
63559+extern atomic_unchecked_t fscache_n_retrievals;
63560+extern atomic_unchecked_t fscache_n_retrievals_ok;
63561+extern atomic_unchecked_t fscache_n_retrievals_wait;
63562+extern atomic_unchecked_t fscache_n_retrievals_nodata;
63563+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
63564+extern atomic_unchecked_t fscache_n_retrievals_intr;
63565+extern atomic_unchecked_t fscache_n_retrievals_nomem;
63566+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
63567+extern atomic_unchecked_t fscache_n_retrieval_ops;
63568+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
63569
63570-extern atomic_t fscache_n_stores;
63571-extern atomic_t fscache_n_stores_ok;
63572-extern atomic_t fscache_n_stores_again;
63573-extern atomic_t fscache_n_stores_nobufs;
63574-extern atomic_t fscache_n_stores_oom;
63575-extern atomic_t fscache_n_store_ops;
63576-extern atomic_t fscache_n_store_calls;
63577-extern atomic_t fscache_n_store_pages;
63578-extern atomic_t fscache_n_store_radix_deletes;
63579-extern atomic_t fscache_n_store_pages_over_limit;
63580+extern atomic_unchecked_t fscache_n_stores;
63581+extern atomic_unchecked_t fscache_n_stores_ok;
63582+extern atomic_unchecked_t fscache_n_stores_again;
63583+extern atomic_unchecked_t fscache_n_stores_nobufs;
63584+extern atomic_unchecked_t fscache_n_stores_oom;
63585+extern atomic_unchecked_t fscache_n_store_ops;
63586+extern atomic_unchecked_t fscache_n_store_calls;
63587+extern atomic_unchecked_t fscache_n_store_pages;
63588+extern atomic_unchecked_t fscache_n_store_radix_deletes;
63589+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
63590
63591-extern atomic_t fscache_n_store_vmscan_not_storing;
63592-extern atomic_t fscache_n_store_vmscan_gone;
63593-extern atomic_t fscache_n_store_vmscan_busy;
63594-extern atomic_t fscache_n_store_vmscan_cancelled;
63595-extern atomic_t fscache_n_store_vmscan_wait;
63596+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63597+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
63598+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
63599+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63600+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
63601
63602-extern atomic_t fscache_n_marks;
63603-extern atomic_t fscache_n_uncaches;
63604+extern atomic_unchecked_t fscache_n_marks;
63605+extern atomic_unchecked_t fscache_n_uncaches;
63606
63607-extern atomic_t fscache_n_acquires;
63608-extern atomic_t fscache_n_acquires_null;
63609-extern atomic_t fscache_n_acquires_no_cache;
63610-extern atomic_t fscache_n_acquires_ok;
63611-extern atomic_t fscache_n_acquires_nobufs;
63612-extern atomic_t fscache_n_acquires_oom;
63613+extern atomic_unchecked_t fscache_n_acquires;
63614+extern atomic_unchecked_t fscache_n_acquires_null;
63615+extern atomic_unchecked_t fscache_n_acquires_no_cache;
63616+extern atomic_unchecked_t fscache_n_acquires_ok;
63617+extern atomic_unchecked_t fscache_n_acquires_nobufs;
63618+extern atomic_unchecked_t fscache_n_acquires_oom;
63619
63620-extern atomic_t fscache_n_invalidates;
63621-extern atomic_t fscache_n_invalidates_run;
63622+extern atomic_unchecked_t fscache_n_invalidates;
63623+extern atomic_unchecked_t fscache_n_invalidates_run;
63624
63625-extern atomic_t fscache_n_updates;
63626-extern atomic_t fscache_n_updates_null;
63627-extern atomic_t fscache_n_updates_run;
63628+extern atomic_unchecked_t fscache_n_updates;
63629+extern atomic_unchecked_t fscache_n_updates_null;
63630+extern atomic_unchecked_t fscache_n_updates_run;
63631
63632-extern atomic_t fscache_n_relinquishes;
63633-extern atomic_t fscache_n_relinquishes_null;
63634-extern atomic_t fscache_n_relinquishes_waitcrt;
63635-extern atomic_t fscache_n_relinquishes_retire;
63636+extern atomic_unchecked_t fscache_n_relinquishes;
63637+extern atomic_unchecked_t fscache_n_relinquishes_null;
63638+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63639+extern atomic_unchecked_t fscache_n_relinquishes_retire;
63640
63641-extern atomic_t fscache_n_cookie_index;
63642-extern atomic_t fscache_n_cookie_data;
63643-extern atomic_t fscache_n_cookie_special;
63644+extern atomic_unchecked_t fscache_n_cookie_index;
63645+extern atomic_unchecked_t fscache_n_cookie_data;
63646+extern atomic_unchecked_t fscache_n_cookie_special;
63647
63648-extern atomic_t fscache_n_object_alloc;
63649-extern atomic_t fscache_n_object_no_alloc;
63650-extern atomic_t fscache_n_object_lookups;
63651-extern atomic_t fscache_n_object_lookups_negative;
63652-extern atomic_t fscache_n_object_lookups_positive;
63653-extern atomic_t fscache_n_object_lookups_timed_out;
63654-extern atomic_t fscache_n_object_created;
63655-extern atomic_t fscache_n_object_avail;
63656-extern atomic_t fscache_n_object_dead;
63657+extern atomic_unchecked_t fscache_n_object_alloc;
63658+extern atomic_unchecked_t fscache_n_object_no_alloc;
63659+extern atomic_unchecked_t fscache_n_object_lookups;
63660+extern atomic_unchecked_t fscache_n_object_lookups_negative;
63661+extern atomic_unchecked_t fscache_n_object_lookups_positive;
63662+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
63663+extern atomic_unchecked_t fscache_n_object_created;
63664+extern atomic_unchecked_t fscache_n_object_avail;
63665+extern atomic_unchecked_t fscache_n_object_dead;
63666
63667-extern atomic_t fscache_n_checkaux_none;
63668-extern atomic_t fscache_n_checkaux_okay;
63669-extern atomic_t fscache_n_checkaux_update;
63670-extern atomic_t fscache_n_checkaux_obsolete;
63671+extern atomic_unchecked_t fscache_n_checkaux_none;
63672+extern atomic_unchecked_t fscache_n_checkaux_okay;
63673+extern atomic_unchecked_t fscache_n_checkaux_update;
63674+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
63675
63676 extern atomic_t fscache_n_cop_alloc_object;
63677 extern atomic_t fscache_n_cop_lookup_object;
63678@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
63679 atomic_inc(stat);
63680 }
63681
63682+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
63683+{
63684+ atomic_inc_unchecked(stat);
63685+}
63686+
63687 static inline void fscache_stat_d(atomic_t *stat)
63688 {
63689 atomic_dec(stat);
63690@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
63691
63692 #define __fscache_stat(stat) (NULL)
63693 #define fscache_stat(stat) do {} while (0)
63694+#define fscache_stat_unchecked(stat) do {} while (0)
63695 #define fscache_stat_d(stat) do {} while (0)
63696 #endif
63697
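From here to the end of the fscache hunks the conversion is mechanical: every statistics counter becomes atomic_unchecked_t and a parallel fscache_stat_unchecked() mirrors fscache_stat(), including the stub above for the stats-disabled build. That stub keeps the do { } while (0) shape, the standard way to make a macro body parse as exactly one statement; the pitfall it prevents shows up once a macro expands to more than one statement:

    #include <stdio.h>

    #define count_twice_bad(a, b)  (a)++; (b)++
    #define count_twice_good(a, b) do { (a)++; (b)++; } while (0)

    int main(void)
    {
        int a = 0, b = 0, c = 0, d = 0;
        if (0)
            count_twice_bad(a, b);   /* (b)++ falls outside the if body */
        if (0)
            count_twice_good(c, d);  /* neither increment runs */
        printf("%d %d %d %d\n", a, b, c, d);  /* 0 1 0 0 */
        return 0;
    }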
63698diff --git a/fs/fscache/object.c b/fs/fscache/object.c
63699index da032da..0076ce7 100644
63700--- a/fs/fscache/object.c
63701+++ b/fs/fscache/object.c
63702@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63703 _debug("LOOKUP \"%s\" in \"%s\"",
63704 cookie->def->name, object->cache->tag->name);
63705
63706- fscache_stat(&fscache_n_object_lookups);
63707+ fscache_stat_unchecked(&fscache_n_object_lookups);
63708 fscache_stat(&fscache_n_cop_lookup_object);
63709 ret = object->cache->ops->lookup_object(object);
63710 fscache_stat_d(&fscache_n_cop_lookup_object);
63711@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63712 if (ret == -ETIMEDOUT) {
63713 /* probably stuck behind another object, so move this one to
63714 * the back of the queue */
63715- fscache_stat(&fscache_n_object_lookups_timed_out);
63716+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
63717 _leave(" [timeout]");
63718 return NO_TRANSIT;
63719 }
63720@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
63721 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
63722
63723 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63724- fscache_stat(&fscache_n_object_lookups_negative);
63725+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
63726
63727 /* Allow write requests to begin stacking up and read requests to begin
63728 * returning ENODATA.
63729@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
63730 /* if we were still looking up, then we must have a positive lookup
63731 * result, in which case there may be data available */
63732 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63733- fscache_stat(&fscache_n_object_lookups_positive);
63734+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
63735
63736 /* We do (presumably) have data */
63737 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
63738@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
63739 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
63740 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
63741 } else {
63742- fscache_stat(&fscache_n_object_created);
63743+ fscache_stat_unchecked(&fscache_n_object_created);
63744 }
63745
63746 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
63747@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
63748 fscache_stat_d(&fscache_n_cop_lookup_complete);
63749
63750 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
63751- fscache_stat(&fscache_n_object_avail);
63752+ fscache_stat_unchecked(&fscache_n_object_avail);
63753
63754 _leave("");
63755 return transit_to(JUMPSTART_DEPS);
63756@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
63757
63758 /* this just shifts the object release to the work processor */
63759 fscache_put_object(object);
63760- fscache_stat(&fscache_n_object_dead);
63761+ fscache_stat_unchecked(&fscache_n_object_dead);
63762
63763 _leave("");
63764 return transit_to(OBJECT_DEAD);
63765@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63766 enum fscache_checkaux result;
63767
63768 if (!object->cookie->def->check_aux) {
63769- fscache_stat(&fscache_n_checkaux_none);
63770+ fscache_stat_unchecked(&fscache_n_checkaux_none);
63771 return FSCACHE_CHECKAUX_OKAY;
63772 }
63773
63774@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63775 switch (result) {
63776 /* entry okay as is */
63777 case FSCACHE_CHECKAUX_OKAY:
63778- fscache_stat(&fscache_n_checkaux_okay);
63779+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
63780 break;
63781
63782 /* entry requires update */
63783 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
63784- fscache_stat(&fscache_n_checkaux_update);
63785+ fscache_stat_unchecked(&fscache_n_checkaux_update);
63786 break;
63787
63788 /* entry requires deletion */
63789 case FSCACHE_CHECKAUX_OBSOLETE:
63790- fscache_stat(&fscache_n_checkaux_obsolete);
63791+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
63792 break;
63793
63794 default:
63795@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
63796 {
63797 const struct fscache_state *s;
63798
63799- fscache_stat(&fscache_n_invalidates_run);
63800+ fscache_stat_unchecked(&fscache_n_invalidates_run);
63801 fscache_stat(&fscache_n_cop_invalidate_object);
63802 s = _fscache_invalidate_object(object, event);
63803 fscache_stat_d(&fscache_n_cop_invalidate_object);
63804@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
63805 {
63806 _enter("{OBJ%x},%d", object->debug_id, event);
63807
63808- fscache_stat(&fscache_n_updates_run);
63809+ fscache_stat_unchecked(&fscache_n_updates_run);
63810 fscache_stat(&fscache_n_cop_update_object);
63811 object->cache->ops->update_object(object);
63812 fscache_stat_d(&fscache_n_cop_update_object);
63813diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
63814index e7b87a0..a85d47a 100644
63815--- a/fs/fscache/operation.c
63816+++ b/fs/fscache/operation.c
63817@@ -17,7 +17,7 @@
63818 #include <linux/slab.h>
63819 #include "internal.h"
63820
63821-atomic_t fscache_op_debug_id;
63822+atomic_unchecked_t fscache_op_debug_id;
63823 EXPORT_SYMBOL(fscache_op_debug_id);
63824
63825 /**
63826@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
63827 ASSERTCMP(atomic_read(&op->usage), >, 0);
63828 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
63829
63830- fscache_stat(&fscache_n_op_enqueue);
63831+ fscache_stat_unchecked(&fscache_n_op_enqueue);
63832 switch (op->flags & FSCACHE_OP_TYPE) {
63833 case FSCACHE_OP_ASYNC:
63834 _debug("queue async");
63835@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
63836 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
63837 if (op->processor)
63838 fscache_enqueue_operation(op);
63839- fscache_stat(&fscache_n_op_run);
63840+ fscache_stat_unchecked(&fscache_n_op_run);
63841 }
63842
63843 /*
63844@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63845 if (object->n_in_progress > 0) {
63846 atomic_inc(&op->usage);
63847 list_add_tail(&op->pend_link, &object->pending_ops);
63848- fscache_stat(&fscache_n_op_pend);
63849+ fscache_stat_unchecked(&fscache_n_op_pend);
63850 } else if (!list_empty(&object->pending_ops)) {
63851 atomic_inc(&op->usage);
63852 list_add_tail(&op->pend_link, &object->pending_ops);
63853- fscache_stat(&fscache_n_op_pend);
63854+ fscache_stat_unchecked(&fscache_n_op_pend);
63855 fscache_start_operations(object);
63856 } else {
63857 ASSERTCMP(object->n_in_progress, ==, 0);
63858@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63859 object->n_exclusive++; /* reads and writes must wait */
63860 atomic_inc(&op->usage);
63861 list_add_tail(&op->pend_link, &object->pending_ops);
63862- fscache_stat(&fscache_n_op_pend);
63863+ fscache_stat_unchecked(&fscache_n_op_pend);
63864 ret = 0;
63865 } else {
63866 /* If we're in any other state, there must have been an I/O
63867@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
63868 if (object->n_exclusive > 0) {
63869 atomic_inc(&op->usage);
63870 list_add_tail(&op->pend_link, &object->pending_ops);
63871- fscache_stat(&fscache_n_op_pend);
63872+ fscache_stat_unchecked(&fscache_n_op_pend);
63873 } else if (!list_empty(&object->pending_ops)) {
63874 atomic_inc(&op->usage);
63875 list_add_tail(&op->pend_link, &object->pending_ops);
63876- fscache_stat(&fscache_n_op_pend);
63877+ fscache_stat_unchecked(&fscache_n_op_pend);
63878 fscache_start_operations(object);
63879 } else {
63880 ASSERTCMP(object->n_exclusive, ==, 0);
63881@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
63882 object->n_ops++;
63883 atomic_inc(&op->usage);
63884 list_add_tail(&op->pend_link, &object->pending_ops);
63885- fscache_stat(&fscache_n_op_pend);
63886+ fscache_stat_unchecked(&fscache_n_op_pend);
63887 ret = 0;
63888 } else if (fscache_object_is_dying(object)) {
63889- fscache_stat(&fscache_n_op_rejected);
63890+ fscache_stat_unchecked(&fscache_n_op_rejected);
63891 op->state = FSCACHE_OP_ST_CANCELLED;
63892 ret = -ENOBUFS;
63893 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
63894@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
63895 ret = -EBUSY;
63896 if (op->state == FSCACHE_OP_ST_PENDING) {
63897 ASSERT(!list_empty(&op->pend_link));
63898- fscache_stat(&fscache_n_op_cancelled);
63899+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63900 list_del_init(&op->pend_link);
63901 if (do_cancel)
63902 do_cancel(op);
63903@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
63904 while (!list_empty(&object->pending_ops)) {
63905 op = list_entry(object->pending_ops.next,
63906 struct fscache_operation, pend_link);
63907- fscache_stat(&fscache_n_op_cancelled);
63908+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63909 list_del_init(&op->pend_link);
63910
63911 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
63912@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
63913 op->state, ==, FSCACHE_OP_ST_CANCELLED);
63914 op->state = FSCACHE_OP_ST_DEAD;
63915
63916- fscache_stat(&fscache_n_op_release);
63917+ fscache_stat_unchecked(&fscache_n_op_release);
63918
63919 if (op->release) {
63920 op->release(op);
63921@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
63922 * lock, and defer it otherwise */
63923 if (!spin_trylock(&object->lock)) {
63924 _debug("defer put");
63925- fscache_stat(&fscache_n_op_deferred_release);
63926+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
63927
63928 cache = object->cache;
63929 spin_lock(&cache->op_gc_list_lock);
63930@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
63931
63932 _debug("GC DEFERRED REL OBJ%x OP%x",
63933 object->debug_id, op->debug_id);
63934- fscache_stat(&fscache_n_op_gc);
63935+ fscache_stat_unchecked(&fscache_n_op_gc);
63936
63937 ASSERTCMP(atomic_read(&op->usage), ==, 0);
63938 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
63939diff --git a/fs/fscache/page.c b/fs/fscache/page.c
63940index de33b3f..8be4d29 100644
63941--- a/fs/fscache/page.c
63942+++ b/fs/fscache/page.c
63943@@ -74,7 +74,7 @@ try_again:
63944 val = radix_tree_lookup(&cookie->stores, page->index);
63945 if (!val) {
63946 rcu_read_unlock();
63947- fscache_stat(&fscache_n_store_vmscan_not_storing);
63948+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
63949 __fscache_uncache_page(cookie, page);
63950 return true;
63951 }
63952@@ -104,11 +104,11 @@ try_again:
63953 spin_unlock(&cookie->stores_lock);
63954
63955 if (xpage) {
63956- fscache_stat(&fscache_n_store_vmscan_cancelled);
63957- fscache_stat(&fscache_n_store_radix_deletes);
63958+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
63959+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63960 ASSERTCMP(xpage, ==, page);
63961 } else {
63962- fscache_stat(&fscache_n_store_vmscan_gone);
63963+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63964 }
63965
63966 wake_up_bit(&cookie->flags, 0);
63967@@ -123,11 +123,11 @@ page_busy:
63968 * sleeping on memory allocation, so we may need to impose a timeout
63969 * too. */
63970 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63971- fscache_stat(&fscache_n_store_vmscan_busy);
63972+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63973 return false;
63974 }
63975
63976- fscache_stat(&fscache_n_store_vmscan_wait);
63977+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63978 if (!release_page_wait_timeout(cookie, page))
63979 _debug("fscache writeout timeout page: %p{%lx}",
63980 page, page->index);
63981@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63982 FSCACHE_COOKIE_STORING_TAG);
63983 if (!radix_tree_tag_get(&cookie->stores, page->index,
63984 FSCACHE_COOKIE_PENDING_TAG)) {
63985- fscache_stat(&fscache_n_store_radix_deletes);
63986+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63987 xpage = radix_tree_delete(&cookie->stores, page->index);
63988 }
63989 spin_unlock(&cookie->stores_lock);
63990@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63991
63992 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63993
63994- fscache_stat(&fscache_n_attr_changed_calls);
63995+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63996
63997 if (fscache_object_is_active(object)) {
63998 fscache_stat(&fscache_n_cop_attr_changed);
63999@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64000
64001 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64002
64003- fscache_stat(&fscache_n_attr_changed);
64004+ fscache_stat_unchecked(&fscache_n_attr_changed);
64005
64006 op = kzalloc(sizeof(*op), GFP_KERNEL);
64007 if (!op) {
64008- fscache_stat(&fscache_n_attr_changed_nomem);
64009+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
64010 _leave(" = -ENOMEM");
64011 return -ENOMEM;
64012 }
64013@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64014 if (fscache_submit_exclusive_op(object, op) < 0)
64015 goto nobufs_dec;
64016 spin_unlock(&cookie->lock);
64017- fscache_stat(&fscache_n_attr_changed_ok);
64018+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
64019 fscache_put_operation(op);
64020 _leave(" = 0");
64021 return 0;
64022@@ -242,7 +242,7 @@ nobufs:
64023 kfree(op);
64024 if (wake_cookie)
64025 __fscache_wake_unused_cookie(cookie);
64026- fscache_stat(&fscache_n_attr_changed_nobufs);
64027+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
64028 _leave(" = %d", -ENOBUFS);
64029 return -ENOBUFS;
64030 }
64031@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
64032 /* allocate a retrieval operation and attempt to submit it */
64033 op = kzalloc(sizeof(*op), GFP_NOIO);
64034 if (!op) {
64035- fscache_stat(&fscache_n_retrievals_nomem);
64036+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64037 return NULL;
64038 }
64039
64040@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
64041 return 0;
64042 }
64043
64044- fscache_stat(&fscache_n_retrievals_wait);
64045+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
64046
64047 jif = jiffies;
64048 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
64049 TASK_INTERRUPTIBLE) != 0) {
64050- fscache_stat(&fscache_n_retrievals_intr);
64051+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64052 _leave(" = -ERESTARTSYS");
64053 return -ERESTARTSYS;
64054 }
64055@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
64056 */
64057 int fscache_wait_for_operation_activation(struct fscache_object *object,
64058 struct fscache_operation *op,
64059- atomic_t *stat_op_waits,
64060- atomic_t *stat_object_dead,
64061+ atomic_unchecked_t *stat_op_waits,
64062+ atomic_unchecked_t *stat_object_dead,
64063 void (*do_cancel)(struct fscache_operation *))
64064 {
64065 int ret;
64066@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64067
64068 _debug(">>> WT");
64069 if (stat_op_waits)
64070- fscache_stat(stat_op_waits);
64071+ fscache_stat_unchecked(stat_op_waits);
64072 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
64073 TASK_INTERRUPTIBLE) != 0) {
64074 ret = fscache_cancel_op(op, do_cancel);
64075@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64076 check_if_dead:
64077 if (op->state == FSCACHE_OP_ST_CANCELLED) {
64078 if (stat_object_dead)
64079- fscache_stat(stat_object_dead);
64080+ fscache_stat_unchecked(stat_object_dead);
64081 _leave(" = -ENOBUFS [cancelled]");
64082 return -ENOBUFS;
64083 }
64084@@ -381,7 +381,7 @@ check_if_dead:
64085 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
64086 fscache_cancel_op(op, do_cancel);
64087 if (stat_object_dead)
64088- fscache_stat(stat_object_dead);
64089+ fscache_stat_unchecked(stat_object_dead);
64090 return -ENOBUFS;
64091 }
64092 return 0;
64093@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64094
64095 _enter("%p,%p,,,", cookie, page);
64096
64097- fscache_stat(&fscache_n_retrievals);
64098+ fscache_stat_unchecked(&fscache_n_retrievals);
64099
64100 if (hlist_empty(&cookie->backing_objects))
64101 goto nobufs;
64102@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64103 goto nobufs_unlock_dec;
64104 spin_unlock(&cookie->lock);
64105
64106- fscache_stat(&fscache_n_retrieval_ops);
64107+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64108
64109 /* pin the netfs read context in case we need to do the actual netfs
64110 * read because we've encountered a cache read failure */
64111@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64112
64113 error:
64114 if (ret == -ENOMEM)
64115- fscache_stat(&fscache_n_retrievals_nomem);
64116+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64117 else if (ret == -ERESTARTSYS)
64118- fscache_stat(&fscache_n_retrievals_intr);
64119+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64120 else if (ret == -ENODATA)
64121- fscache_stat(&fscache_n_retrievals_nodata);
64122+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64123 else if (ret < 0)
64124- fscache_stat(&fscache_n_retrievals_nobufs);
64125+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64126 else
64127- fscache_stat(&fscache_n_retrievals_ok);
64128+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64129
64130 fscache_put_retrieval(op);
64131 _leave(" = %d", ret);
64132@@ -505,7 +505,7 @@ nobufs_unlock:
64133 __fscache_wake_unused_cookie(cookie);
64134 kfree(op);
64135 nobufs:
64136- fscache_stat(&fscache_n_retrievals_nobufs);
64137+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64138 _leave(" = -ENOBUFS");
64139 return -ENOBUFS;
64140 }
64141@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64142
64143 _enter("%p,,%d,,,", cookie, *nr_pages);
64144
64145- fscache_stat(&fscache_n_retrievals);
64146+ fscache_stat_unchecked(&fscache_n_retrievals);
64147
64148 if (hlist_empty(&cookie->backing_objects))
64149 goto nobufs;
64150@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64151 goto nobufs_unlock_dec;
64152 spin_unlock(&cookie->lock);
64153
64154- fscache_stat(&fscache_n_retrieval_ops);
64155+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64156
64157 /* pin the netfs read context in case we need to do the actual netfs
64158 * read because we've encountered a cache read failure */
64159@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64160
64161 error:
64162 if (ret == -ENOMEM)
64163- fscache_stat(&fscache_n_retrievals_nomem);
64164+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64165 else if (ret == -ERESTARTSYS)
64166- fscache_stat(&fscache_n_retrievals_intr);
64167+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64168 else if (ret == -ENODATA)
64169- fscache_stat(&fscache_n_retrievals_nodata);
64170+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64171 else if (ret < 0)
64172- fscache_stat(&fscache_n_retrievals_nobufs);
64173+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64174 else
64175- fscache_stat(&fscache_n_retrievals_ok);
64176+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64177
64178 fscache_put_retrieval(op);
64179 _leave(" = %d", ret);
64180@@ -636,7 +636,7 @@ nobufs_unlock:
64181 if (wake_cookie)
64182 __fscache_wake_unused_cookie(cookie);
64183 nobufs:
64184- fscache_stat(&fscache_n_retrievals_nobufs);
64185+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64186 _leave(" = -ENOBUFS");
64187 return -ENOBUFS;
64188 }
64189@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64190
64191 _enter("%p,%p,,,", cookie, page);
64192
64193- fscache_stat(&fscache_n_allocs);
64194+ fscache_stat_unchecked(&fscache_n_allocs);
64195
64196 if (hlist_empty(&cookie->backing_objects))
64197 goto nobufs;
64198@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64199 goto nobufs_unlock_dec;
64200 spin_unlock(&cookie->lock);
64201
64202- fscache_stat(&fscache_n_alloc_ops);
64203+ fscache_stat_unchecked(&fscache_n_alloc_ops);
64204
64205 ret = fscache_wait_for_operation_activation(
64206 object, &op->op,
64207@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64208
64209 error:
64210 if (ret == -ERESTARTSYS)
64211- fscache_stat(&fscache_n_allocs_intr);
64212+ fscache_stat_unchecked(&fscache_n_allocs_intr);
64213 else if (ret < 0)
64214- fscache_stat(&fscache_n_allocs_nobufs);
64215+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64216 else
64217- fscache_stat(&fscache_n_allocs_ok);
64218+ fscache_stat_unchecked(&fscache_n_allocs_ok);
64219
64220 fscache_put_retrieval(op);
64221 _leave(" = %d", ret);
64222@@ -730,7 +730,7 @@ nobufs_unlock:
64223 if (wake_cookie)
64224 __fscache_wake_unused_cookie(cookie);
64225 nobufs:
64226- fscache_stat(&fscache_n_allocs_nobufs);
64227+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64228 _leave(" = -ENOBUFS");
64229 return -ENOBUFS;
64230 }
64231@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64232
64233 spin_lock(&cookie->stores_lock);
64234
64235- fscache_stat(&fscache_n_store_calls);
64236+ fscache_stat_unchecked(&fscache_n_store_calls);
64237
64238 /* find a page to store */
64239 page = NULL;
64240@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64241 page = results[0];
64242 _debug("gang %d [%lx]", n, page->index);
64243 if (page->index > op->store_limit) {
64244- fscache_stat(&fscache_n_store_pages_over_limit);
64245+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
64246 goto superseded;
64247 }
64248
64249@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64250 spin_unlock(&cookie->stores_lock);
64251 spin_unlock(&object->lock);
64252
64253- fscache_stat(&fscache_n_store_pages);
64254+ fscache_stat_unchecked(&fscache_n_store_pages);
64255 fscache_stat(&fscache_n_cop_write_page);
64256 ret = object->cache->ops->write_page(op, page);
64257 fscache_stat_d(&fscache_n_cop_write_page);
64258@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64259 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64260 ASSERT(PageFsCache(page));
64261
64262- fscache_stat(&fscache_n_stores);
64263+ fscache_stat_unchecked(&fscache_n_stores);
64264
64265 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
64266 _leave(" = -ENOBUFS [invalidating]");
64267@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64268 spin_unlock(&cookie->stores_lock);
64269 spin_unlock(&object->lock);
64270
64271- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
64272+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
64273 op->store_limit = object->store_limit;
64274
64275 __fscache_use_cookie(cookie);
64276@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64277
64278 spin_unlock(&cookie->lock);
64279 radix_tree_preload_end();
64280- fscache_stat(&fscache_n_store_ops);
64281- fscache_stat(&fscache_n_stores_ok);
64282+ fscache_stat_unchecked(&fscache_n_store_ops);
64283+ fscache_stat_unchecked(&fscache_n_stores_ok);
64284
64285 /* the work queue now carries its own ref on the object */
64286 fscache_put_operation(&op->op);
64287@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64288 return 0;
64289
64290 already_queued:
64291- fscache_stat(&fscache_n_stores_again);
64292+ fscache_stat_unchecked(&fscache_n_stores_again);
64293 already_pending:
64294 spin_unlock(&cookie->stores_lock);
64295 spin_unlock(&object->lock);
64296 spin_unlock(&cookie->lock);
64297 radix_tree_preload_end();
64298 kfree(op);
64299- fscache_stat(&fscache_n_stores_ok);
64300+ fscache_stat_unchecked(&fscache_n_stores_ok);
64301 _leave(" = 0");
64302 return 0;
64303
64304@@ -1039,14 +1039,14 @@ nobufs:
64305 kfree(op);
64306 if (wake_cookie)
64307 __fscache_wake_unused_cookie(cookie);
64308- fscache_stat(&fscache_n_stores_nobufs);
64309+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64310 _leave(" = -ENOBUFS");
64311 return -ENOBUFS;
64312
64313 nomem_free:
64314 kfree(op);
64315 nomem:
64316- fscache_stat(&fscache_n_stores_oom);
64317+ fscache_stat_unchecked(&fscache_n_stores_oom);
64318 _leave(" = -ENOMEM");
64319 return -ENOMEM;
64320 }
64321@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64322 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64323 ASSERTCMP(page, !=, NULL);
64324
64325- fscache_stat(&fscache_n_uncaches);
64326+ fscache_stat_unchecked(&fscache_n_uncaches);
64327
64328 /* cache withdrawal may beat us to it */
64329 if (!PageFsCache(page))
64330@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64331 struct fscache_cookie *cookie = op->op.object->cookie;
64332
64333 #ifdef CONFIG_FSCACHE_STATS
64334- atomic_inc(&fscache_n_marks);
64335+ atomic_inc_unchecked(&fscache_n_marks);
64336 #endif
64337
64338 _debug("- mark %p{%lx}", page, page->index);
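
/*
 * Annotation: every fscache_stat() call in the page.c hunks above becomes
 * fscache_stat_unchecked().  Under PaX's REFCOUNT hardening, plain atomic_t
 * operations trap on signed overflow to catch reference-count bugs; pure
 * statistics counters are allowed to wrap, so they move to an unchecked twin
 * type.  A minimal sketch of that twin type and the wrapper, assuming the
 * usual PaX fallback shape (x86 flavour shown; the real definitions live in
 * the arch atomic headers and fs/fscache/internal.h):
 */
#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	int counter;
} atomic_unchecked_t;

/* atomic_inc()/atomic_read() semantics, minus the overflow trap */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock; incl %0" : "+m" (v->counter));
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);
}
#else
typedef atomic_t atomic_unchecked_t;
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_read_unchecked(v)	atomic_read(v)
#endif

#define fscache_stat_unchecked(stat)	atomic_inc_unchecked(stat)
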
64339diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64340index 40d13c7..ddf52b9 100644
64341--- a/fs/fscache/stats.c
64342+++ b/fs/fscache/stats.c
64343@@ -18,99 +18,99 @@
64344 /*
64345 * operation counters
64346 */
64347-atomic_t fscache_n_op_pend;
64348-atomic_t fscache_n_op_run;
64349-atomic_t fscache_n_op_enqueue;
64350-atomic_t fscache_n_op_requeue;
64351-atomic_t fscache_n_op_deferred_release;
64352-atomic_t fscache_n_op_release;
64353-atomic_t fscache_n_op_gc;
64354-atomic_t fscache_n_op_cancelled;
64355-atomic_t fscache_n_op_rejected;
64356+atomic_unchecked_t fscache_n_op_pend;
64357+atomic_unchecked_t fscache_n_op_run;
64358+atomic_unchecked_t fscache_n_op_enqueue;
64359+atomic_unchecked_t fscache_n_op_requeue;
64360+atomic_unchecked_t fscache_n_op_deferred_release;
64361+atomic_unchecked_t fscache_n_op_release;
64362+atomic_unchecked_t fscache_n_op_gc;
64363+atomic_unchecked_t fscache_n_op_cancelled;
64364+atomic_unchecked_t fscache_n_op_rejected;
64365
64366-atomic_t fscache_n_attr_changed;
64367-atomic_t fscache_n_attr_changed_ok;
64368-atomic_t fscache_n_attr_changed_nobufs;
64369-atomic_t fscache_n_attr_changed_nomem;
64370-atomic_t fscache_n_attr_changed_calls;
64371+atomic_unchecked_t fscache_n_attr_changed;
64372+atomic_unchecked_t fscache_n_attr_changed_ok;
64373+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64374+atomic_unchecked_t fscache_n_attr_changed_nomem;
64375+atomic_unchecked_t fscache_n_attr_changed_calls;
64376
64377-atomic_t fscache_n_allocs;
64378-atomic_t fscache_n_allocs_ok;
64379-atomic_t fscache_n_allocs_wait;
64380-atomic_t fscache_n_allocs_nobufs;
64381-atomic_t fscache_n_allocs_intr;
64382-atomic_t fscache_n_allocs_object_dead;
64383-atomic_t fscache_n_alloc_ops;
64384-atomic_t fscache_n_alloc_op_waits;
64385+atomic_unchecked_t fscache_n_allocs;
64386+atomic_unchecked_t fscache_n_allocs_ok;
64387+atomic_unchecked_t fscache_n_allocs_wait;
64388+atomic_unchecked_t fscache_n_allocs_nobufs;
64389+atomic_unchecked_t fscache_n_allocs_intr;
64390+atomic_unchecked_t fscache_n_allocs_object_dead;
64391+atomic_unchecked_t fscache_n_alloc_ops;
64392+atomic_unchecked_t fscache_n_alloc_op_waits;
64393
64394-atomic_t fscache_n_retrievals;
64395-atomic_t fscache_n_retrievals_ok;
64396-atomic_t fscache_n_retrievals_wait;
64397-atomic_t fscache_n_retrievals_nodata;
64398-atomic_t fscache_n_retrievals_nobufs;
64399-atomic_t fscache_n_retrievals_intr;
64400-atomic_t fscache_n_retrievals_nomem;
64401-atomic_t fscache_n_retrievals_object_dead;
64402-atomic_t fscache_n_retrieval_ops;
64403-atomic_t fscache_n_retrieval_op_waits;
64404+atomic_unchecked_t fscache_n_retrievals;
64405+atomic_unchecked_t fscache_n_retrievals_ok;
64406+atomic_unchecked_t fscache_n_retrievals_wait;
64407+atomic_unchecked_t fscache_n_retrievals_nodata;
64408+atomic_unchecked_t fscache_n_retrievals_nobufs;
64409+atomic_unchecked_t fscache_n_retrievals_intr;
64410+atomic_unchecked_t fscache_n_retrievals_nomem;
64411+atomic_unchecked_t fscache_n_retrievals_object_dead;
64412+atomic_unchecked_t fscache_n_retrieval_ops;
64413+atomic_unchecked_t fscache_n_retrieval_op_waits;
64414
64415-atomic_t fscache_n_stores;
64416-atomic_t fscache_n_stores_ok;
64417-atomic_t fscache_n_stores_again;
64418-atomic_t fscache_n_stores_nobufs;
64419-atomic_t fscache_n_stores_oom;
64420-atomic_t fscache_n_store_ops;
64421-atomic_t fscache_n_store_calls;
64422-atomic_t fscache_n_store_pages;
64423-atomic_t fscache_n_store_radix_deletes;
64424-atomic_t fscache_n_store_pages_over_limit;
64425+atomic_unchecked_t fscache_n_stores;
64426+atomic_unchecked_t fscache_n_stores_ok;
64427+atomic_unchecked_t fscache_n_stores_again;
64428+atomic_unchecked_t fscache_n_stores_nobufs;
64429+atomic_unchecked_t fscache_n_stores_oom;
64430+atomic_unchecked_t fscache_n_store_ops;
64431+atomic_unchecked_t fscache_n_store_calls;
64432+atomic_unchecked_t fscache_n_store_pages;
64433+atomic_unchecked_t fscache_n_store_radix_deletes;
64434+atomic_unchecked_t fscache_n_store_pages_over_limit;
64435
64436-atomic_t fscache_n_store_vmscan_not_storing;
64437-atomic_t fscache_n_store_vmscan_gone;
64438-atomic_t fscache_n_store_vmscan_busy;
64439-atomic_t fscache_n_store_vmscan_cancelled;
64440-atomic_t fscache_n_store_vmscan_wait;
64441+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64442+atomic_unchecked_t fscache_n_store_vmscan_gone;
64443+atomic_unchecked_t fscache_n_store_vmscan_busy;
64444+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64445+atomic_unchecked_t fscache_n_store_vmscan_wait;
64446
64447-atomic_t fscache_n_marks;
64448-atomic_t fscache_n_uncaches;
64449+atomic_unchecked_t fscache_n_marks;
64450+atomic_unchecked_t fscache_n_uncaches;
64451
64452-atomic_t fscache_n_acquires;
64453-atomic_t fscache_n_acquires_null;
64454-atomic_t fscache_n_acquires_no_cache;
64455-atomic_t fscache_n_acquires_ok;
64456-atomic_t fscache_n_acquires_nobufs;
64457-atomic_t fscache_n_acquires_oom;
64458+atomic_unchecked_t fscache_n_acquires;
64459+atomic_unchecked_t fscache_n_acquires_null;
64460+atomic_unchecked_t fscache_n_acquires_no_cache;
64461+atomic_unchecked_t fscache_n_acquires_ok;
64462+atomic_unchecked_t fscache_n_acquires_nobufs;
64463+atomic_unchecked_t fscache_n_acquires_oom;
64464
64465-atomic_t fscache_n_invalidates;
64466-atomic_t fscache_n_invalidates_run;
64467+atomic_unchecked_t fscache_n_invalidates;
64468+atomic_unchecked_t fscache_n_invalidates_run;
64469
64470-atomic_t fscache_n_updates;
64471-atomic_t fscache_n_updates_null;
64472-atomic_t fscache_n_updates_run;
64473+atomic_unchecked_t fscache_n_updates;
64474+atomic_unchecked_t fscache_n_updates_null;
64475+atomic_unchecked_t fscache_n_updates_run;
64476
64477-atomic_t fscache_n_relinquishes;
64478-atomic_t fscache_n_relinquishes_null;
64479-atomic_t fscache_n_relinquishes_waitcrt;
64480-atomic_t fscache_n_relinquishes_retire;
64481+atomic_unchecked_t fscache_n_relinquishes;
64482+atomic_unchecked_t fscache_n_relinquishes_null;
64483+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64484+atomic_unchecked_t fscache_n_relinquishes_retire;
64485
64486-atomic_t fscache_n_cookie_index;
64487-atomic_t fscache_n_cookie_data;
64488-atomic_t fscache_n_cookie_special;
64489+atomic_unchecked_t fscache_n_cookie_index;
64490+atomic_unchecked_t fscache_n_cookie_data;
64491+atomic_unchecked_t fscache_n_cookie_special;
64492
64493-atomic_t fscache_n_object_alloc;
64494-atomic_t fscache_n_object_no_alloc;
64495-atomic_t fscache_n_object_lookups;
64496-atomic_t fscache_n_object_lookups_negative;
64497-atomic_t fscache_n_object_lookups_positive;
64498-atomic_t fscache_n_object_lookups_timed_out;
64499-atomic_t fscache_n_object_created;
64500-atomic_t fscache_n_object_avail;
64501-atomic_t fscache_n_object_dead;
64502+atomic_unchecked_t fscache_n_object_alloc;
64503+atomic_unchecked_t fscache_n_object_no_alloc;
64504+atomic_unchecked_t fscache_n_object_lookups;
64505+atomic_unchecked_t fscache_n_object_lookups_negative;
64506+atomic_unchecked_t fscache_n_object_lookups_positive;
64507+atomic_unchecked_t fscache_n_object_lookups_timed_out;
64508+atomic_unchecked_t fscache_n_object_created;
64509+atomic_unchecked_t fscache_n_object_avail;
64510+atomic_unchecked_t fscache_n_object_dead;
64511
64512-atomic_t fscache_n_checkaux_none;
64513-atomic_t fscache_n_checkaux_okay;
64514-atomic_t fscache_n_checkaux_update;
64515-atomic_t fscache_n_checkaux_obsolete;
64516+atomic_unchecked_t fscache_n_checkaux_none;
64517+atomic_unchecked_t fscache_n_checkaux_okay;
64518+atomic_unchecked_t fscache_n_checkaux_update;
64519+atomic_unchecked_t fscache_n_checkaux_obsolete;
64520
64521 atomic_t fscache_n_cop_alloc_object;
64522 atomic_t fscache_n_cop_lookup_object;
64523@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
64524 seq_puts(m, "FS-Cache statistics\n");
64525
64526 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
64527- atomic_read(&fscache_n_cookie_index),
64528- atomic_read(&fscache_n_cookie_data),
64529- atomic_read(&fscache_n_cookie_special));
64530+ atomic_read_unchecked(&fscache_n_cookie_index),
64531+ atomic_read_unchecked(&fscache_n_cookie_data),
64532+ atomic_read_unchecked(&fscache_n_cookie_special));
64533
64534 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
64535- atomic_read(&fscache_n_object_alloc),
64536- atomic_read(&fscache_n_object_no_alloc),
64537- atomic_read(&fscache_n_object_avail),
64538- atomic_read(&fscache_n_object_dead));
64539+ atomic_read_unchecked(&fscache_n_object_alloc),
64540+ atomic_read_unchecked(&fscache_n_object_no_alloc),
64541+ atomic_read_unchecked(&fscache_n_object_avail),
64542+ atomic_read_unchecked(&fscache_n_object_dead));
64543 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
64544- atomic_read(&fscache_n_checkaux_none),
64545- atomic_read(&fscache_n_checkaux_okay),
64546- atomic_read(&fscache_n_checkaux_update),
64547- atomic_read(&fscache_n_checkaux_obsolete));
64548+ atomic_read_unchecked(&fscache_n_checkaux_none),
64549+ atomic_read_unchecked(&fscache_n_checkaux_okay),
64550+ atomic_read_unchecked(&fscache_n_checkaux_update),
64551+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
64552
64553 seq_printf(m, "Pages : mrk=%u unc=%u\n",
64554- atomic_read(&fscache_n_marks),
64555- atomic_read(&fscache_n_uncaches));
64556+ atomic_read_unchecked(&fscache_n_marks),
64557+ atomic_read_unchecked(&fscache_n_uncaches));
64558
64559 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
64560 " oom=%u\n",
64561- atomic_read(&fscache_n_acquires),
64562- atomic_read(&fscache_n_acquires_null),
64563- atomic_read(&fscache_n_acquires_no_cache),
64564- atomic_read(&fscache_n_acquires_ok),
64565- atomic_read(&fscache_n_acquires_nobufs),
64566- atomic_read(&fscache_n_acquires_oom));
64567+ atomic_read_unchecked(&fscache_n_acquires),
64568+ atomic_read_unchecked(&fscache_n_acquires_null),
64569+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
64570+ atomic_read_unchecked(&fscache_n_acquires_ok),
64571+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
64572+ atomic_read_unchecked(&fscache_n_acquires_oom));
64573
64574 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
64575- atomic_read(&fscache_n_object_lookups),
64576- atomic_read(&fscache_n_object_lookups_negative),
64577- atomic_read(&fscache_n_object_lookups_positive),
64578- atomic_read(&fscache_n_object_created),
64579- atomic_read(&fscache_n_object_lookups_timed_out));
64580+ atomic_read_unchecked(&fscache_n_object_lookups),
64581+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
64582+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
64583+ atomic_read_unchecked(&fscache_n_object_created),
64584+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
64585
64586 seq_printf(m, "Invals : n=%u run=%u\n",
64587- atomic_read(&fscache_n_invalidates),
64588- atomic_read(&fscache_n_invalidates_run));
64589+ atomic_read_unchecked(&fscache_n_invalidates),
64590+ atomic_read_unchecked(&fscache_n_invalidates_run));
64591
64592 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
64593- atomic_read(&fscache_n_updates),
64594- atomic_read(&fscache_n_updates_null),
64595- atomic_read(&fscache_n_updates_run));
64596+ atomic_read_unchecked(&fscache_n_updates),
64597+ atomic_read_unchecked(&fscache_n_updates_null),
64598+ atomic_read_unchecked(&fscache_n_updates_run));
64599
64600 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
64601- atomic_read(&fscache_n_relinquishes),
64602- atomic_read(&fscache_n_relinquishes_null),
64603- atomic_read(&fscache_n_relinquishes_waitcrt),
64604- atomic_read(&fscache_n_relinquishes_retire));
64605+ atomic_read_unchecked(&fscache_n_relinquishes),
64606+ atomic_read_unchecked(&fscache_n_relinquishes_null),
64607+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
64608+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
64609
64610 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
64611- atomic_read(&fscache_n_attr_changed),
64612- atomic_read(&fscache_n_attr_changed_ok),
64613- atomic_read(&fscache_n_attr_changed_nobufs),
64614- atomic_read(&fscache_n_attr_changed_nomem),
64615- atomic_read(&fscache_n_attr_changed_calls));
64616+ atomic_read_unchecked(&fscache_n_attr_changed),
64617+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
64618+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
64619+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
64620+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
64621
64622 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
64623- atomic_read(&fscache_n_allocs),
64624- atomic_read(&fscache_n_allocs_ok),
64625- atomic_read(&fscache_n_allocs_wait),
64626- atomic_read(&fscache_n_allocs_nobufs),
64627- atomic_read(&fscache_n_allocs_intr));
64628+ atomic_read_unchecked(&fscache_n_allocs),
64629+ atomic_read_unchecked(&fscache_n_allocs_ok),
64630+ atomic_read_unchecked(&fscache_n_allocs_wait),
64631+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
64632+ atomic_read_unchecked(&fscache_n_allocs_intr));
64633 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
64634- atomic_read(&fscache_n_alloc_ops),
64635- atomic_read(&fscache_n_alloc_op_waits),
64636- atomic_read(&fscache_n_allocs_object_dead));
64637+ atomic_read_unchecked(&fscache_n_alloc_ops),
64638+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
64639+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
64640
64641 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
64642 " int=%u oom=%u\n",
64643- atomic_read(&fscache_n_retrievals),
64644- atomic_read(&fscache_n_retrievals_ok),
64645- atomic_read(&fscache_n_retrievals_wait),
64646- atomic_read(&fscache_n_retrievals_nodata),
64647- atomic_read(&fscache_n_retrievals_nobufs),
64648- atomic_read(&fscache_n_retrievals_intr),
64649- atomic_read(&fscache_n_retrievals_nomem));
64650+ atomic_read_unchecked(&fscache_n_retrievals),
64651+ atomic_read_unchecked(&fscache_n_retrievals_ok),
64652+ atomic_read_unchecked(&fscache_n_retrievals_wait),
64653+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
64654+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
64655+ atomic_read_unchecked(&fscache_n_retrievals_intr),
64656+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
64657 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
64658- atomic_read(&fscache_n_retrieval_ops),
64659- atomic_read(&fscache_n_retrieval_op_waits),
64660- atomic_read(&fscache_n_retrievals_object_dead));
64661+ atomic_read_unchecked(&fscache_n_retrieval_ops),
64662+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
64663+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
64664
64665 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
64666- atomic_read(&fscache_n_stores),
64667- atomic_read(&fscache_n_stores_ok),
64668- atomic_read(&fscache_n_stores_again),
64669- atomic_read(&fscache_n_stores_nobufs),
64670- atomic_read(&fscache_n_stores_oom));
64671+ atomic_read_unchecked(&fscache_n_stores),
64672+ atomic_read_unchecked(&fscache_n_stores_ok),
64673+ atomic_read_unchecked(&fscache_n_stores_again),
64674+ atomic_read_unchecked(&fscache_n_stores_nobufs),
64675+ atomic_read_unchecked(&fscache_n_stores_oom));
64676 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
64677- atomic_read(&fscache_n_store_ops),
64678- atomic_read(&fscache_n_store_calls),
64679- atomic_read(&fscache_n_store_pages),
64680- atomic_read(&fscache_n_store_radix_deletes),
64681- atomic_read(&fscache_n_store_pages_over_limit));
64682+ atomic_read_unchecked(&fscache_n_store_ops),
64683+ atomic_read_unchecked(&fscache_n_store_calls),
64684+ atomic_read_unchecked(&fscache_n_store_pages),
64685+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
64686+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
64687
64688 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
64689- atomic_read(&fscache_n_store_vmscan_not_storing),
64690- atomic_read(&fscache_n_store_vmscan_gone),
64691- atomic_read(&fscache_n_store_vmscan_busy),
64692- atomic_read(&fscache_n_store_vmscan_cancelled),
64693- atomic_read(&fscache_n_store_vmscan_wait));
64694+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
64695+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
64696+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
64697+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
64698+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
64699
64700 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
64701- atomic_read(&fscache_n_op_pend),
64702- atomic_read(&fscache_n_op_run),
64703- atomic_read(&fscache_n_op_enqueue),
64704- atomic_read(&fscache_n_op_cancelled),
64705- atomic_read(&fscache_n_op_rejected));
64706+ atomic_read_unchecked(&fscache_n_op_pend),
64707+ atomic_read_unchecked(&fscache_n_op_run),
64708+ atomic_read_unchecked(&fscache_n_op_enqueue),
64709+ atomic_read_unchecked(&fscache_n_op_cancelled),
64710+ atomic_read_unchecked(&fscache_n_op_rejected));
64711 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
64712- atomic_read(&fscache_n_op_deferred_release),
64713- atomic_read(&fscache_n_op_release),
64714- atomic_read(&fscache_n_op_gc));
64715+ atomic_read_unchecked(&fscache_n_op_deferred_release),
64716+ atomic_read_unchecked(&fscache_n_op_release),
64717+ atomic_read_unchecked(&fscache_n_op_gc));
64718
64719 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
64720 atomic_read(&fscache_n_cop_alloc_object),
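
/*
 * Annotation: the fscache_n_cop_* counters at the tail of the table above
 * deliberately stay plain atomic_t.  They are bumped before each call into
 * the cache backend and dropped again when it returns (fscache_stat() /
 * fscache_stat_d() pairs), so they track in-flight work rather than a
 * monotonically growing total, and the REFCOUNT overflow trap plausibly
 * stays meaningful for them.  The paired helpers, paraphrased from
 * fs/fscache/internal.h:
 */
static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}
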
64721diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
64722index 28d0c7a..04816b7 100644
64723--- a/fs/fuse/cuse.c
64724+++ b/fs/fuse/cuse.c
64725@@ -611,10 +611,12 @@ static int __init cuse_init(void)
64726 INIT_LIST_HEAD(&cuse_conntbl[i]);
64727
64728 /* inherit and extend fuse_dev_operations */
64729- cuse_channel_fops = fuse_dev_operations;
64730- cuse_channel_fops.owner = THIS_MODULE;
64731- cuse_channel_fops.open = cuse_channel_open;
64732- cuse_channel_fops.release = cuse_channel_release;
64733+ pax_open_kernel();
64734+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
64735+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
64736+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
64737+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
64738+ pax_close_kernel();
64739
64740 cuse_class = class_create(THIS_MODULE, "cuse");
64741 if (IS_ERR(cuse_class))
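
/*
 * Annotation: the cuse hunk can no longer assign to cuse_channel_fops
 * directly because grsecurity's CONSTIFY plugin turns ops structures such as
 * struct file_operations const, so they live in read-only memory; the
 * *(void **)&... casts launder the const for the one sanctioned write, and
 * pax_open_kernel()/pax_close_kernel() lift the write protection around it.
 * A rough sketch of the x86 CR0.WP-based variant (the in-patch versions sit
 * in the arch headers and cover more configurations):
 */
static inline unsigned long pax_open_kernel(void)
{
	preempt_disable();
	barrier();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow writes to rodata */
	return 0;
}

static inline unsigned long pax_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* re-arm write protection */
	barrier();
	preempt_enable();
	return 0;
}
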
64742diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
64743index 39706c5..a803c71 100644
64744--- a/fs/fuse/dev.c
64745+++ b/fs/fuse/dev.c
64746@@ -1405,7 +1405,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64747 ret = 0;
64748 pipe_lock(pipe);
64749
64750- if (!pipe->readers) {
64751+ if (!atomic_read(&pipe->readers)) {
64752 send_sig(SIGPIPE, current, 0);
64753 if (!ret)
64754 ret = -EPIPE;
64755@@ -1434,7 +1434,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64756 page_nr++;
64757 ret += buf->len;
64758
64759- if (pipe->files)
64760+ if (atomic_read(&pipe->files))
64761 do_wakeup = 1;
64762 }
64763
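
/*
 * Annotation: the two fuse/dev.c hunks only chase a change made elsewhere in
 * this patch: the readers/writers/files bookkeeping in struct pipe_inode_info
 * becomes atomic_t as part of the reference-counter hardening, so open-coded
 * reads must become atomic_read().  Paraphrased shape of the header change
 * (include/linux/pipe_fs_i.h), converted members only:
 */
struct pipe_inode_info {
	/* ... */
	atomic_t readers;	/* was: unsigned int readers; */
	atomic_t writers;	/* was: unsigned int writers; */
	atomic_t files;		/* was: unsigned int files; */
	/* ... */
};
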
64764diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
64765index 1545b71..7fabe47 100644
64766--- a/fs/fuse/dir.c
64767+++ b/fs/fuse/dir.c
64768@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
64769 return link;
64770 }
64771
64772-static void free_link(char *link)
64773+static void free_link(const char *link)
64774 {
64775 if (!IS_ERR(link))
64776 free_page((unsigned long) link);
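
/*
 * Annotation: free_link() taking const char * is one instance of a
 * patch-wide ripple: nd_set_link()/nd_get_link() are const-ified in the
 * fs/namei.c hunks further down, so every ->put_link() helper that receives
 * the link body through nd_get_link() must accept const char * as well
 * (hostfs, kernfs and libfs get the same one-line treatment below).  A
 * plausible motivation is letting ->follow_link() hand back read-only
 * buffers; hypothetical example:
 */
static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, "..");	/* a string literal is fine once the API is const */
	return NULL;
}
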
64777diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
64778index f42dffb..4a4c435 100644
64779--- a/fs/gfs2/glock.c
64780+++ b/fs/gfs2/glock.c
64781@@ -385,9 +385,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
64782 if (held1 != held2) {
64783 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
64784 if (held2)
64785- gl->gl_lockref.count++;
64786+ __lockref_inc(&gl->gl_lockref);
64787 else
64788- gl->gl_lockref.count--;
64789+ __lockref_dec(&gl->gl_lockref);
64790 }
64791 if (held1 && held2 && list_empty(&gl->gl_holders))
64792 clear_bit(GLF_QUEUED, &gl->gl_flags);
64793@@ -614,9 +614,9 @@ out:
64794 out_sched:
64795 clear_bit(GLF_LOCK, &gl->gl_flags);
64796 smp_mb__after_atomic();
64797- gl->gl_lockref.count++;
64798+ __lockref_inc(&gl->gl_lockref);
64799 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
64800- gl->gl_lockref.count--;
64801+ __lockref_dec(&gl->gl_lockref);
64802 return;
64803
64804 out_unlock:
64805@@ -742,7 +742,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
64806 gl->gl_sbd = sdp;
64807 gl->gl_flags = 0;
64808 gl->gl_name = name;
64809- gl->gl_lockref.count = 1;
64810+ __lockref_set(&gl->gl_lockref, 1);
64811 gl->gl_state = LM_ST_UNLOCKED;
64812 gl->gl_target = LM_ST_UNLOCKED;
64813 gl->gl_demote_state = LM_ST_EXCLUSIVE;
64814@@ -1020,9 +1020,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
64815 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
64816 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
64817 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
64818- gl->gl_lockref.count++;
64819+ __lockref_inc(&gl->gl_lockref);
64820 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
64821- gl->gl_lockref.count--;
64822+ __lockref_dec(&gl->gl_lockref);
64823 }
64824 run_queue(gl, 1);
64825 spin_unlock(&gl->gl_spin);
64826@@ -1325,7 +1325,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
64827 }
64828 }
64829
64830- gl->gl_lockref.count++;
64831+ __lockref_inc(&gl->gl_lockref);
64832 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
64833 spin_unlock(&gl->gl_spin);
64834
64835@@ -1384,12 +1384,12 @@ add_back_to_lru:
64836 goto add_back_to_lru;
64837 }
64838 clear_bit(GLF_LRU, &gl->gl_flags);
64839- gl->gl_lockref.count++;
64840+ __lockref_inc(&gl->gl_lockref);
64841 if (demote_ok(gl))
64842 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
64843 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
64844 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
64845- gl->gl_lockref.count--;
64846+ __lockref_dec(&gl->gl_lockref);
64847 spin_unlock(&gl->gl_spin);
64848 cond_resched_lock(&lru_lock);
64849 }
64850@@ -1719,7 +1719,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
64851 state2str(gl->gl_demote_state), dtime,
64852 atomic_read(&gl->gl_ail_count),
64853 atomic_read(&gl->gl_revokes),
64854- (int)gl->gl_lockref.count, gl->gl_hold_time);
64855+ __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
64856
64857 list_for_each_entry(gh, &gl->gl_holders, gh_list)
64858 dump_holder(seq, gh);
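
/*
 * Annotation: all gl_lockref.count arithmetic in the gfs2 hunks here and
 * below goes through __lockref_*() accessors that the patch adds to
 * include/linux/lockref.h, so the embedded count can become an
 * overflow-checked atomic type on hardened configs without touching every
 * caller twice.  A minimal sketch, assuming the count member has been
 * converted to an atomic type (the exact type in the patch may differ):
 */
static inline void __lockref_inc(struct lockref *lockref)
{
	atomic_inc(&lockref->count);
}

static inline void __lockref_dec(struct lockref *lockref)
{
	atomic_dec(&lockref->count);
}

static inline int __lockref_read(struct lockref *lockref)
{
	return atomic_read(&lockref->count);
}

static inline void __lockref_set(struct lockref *lockref, int count)
{
	atomic_set(&lockref->count, count);
}
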
64859diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
64860index fe91951..ce38a6e 100644
64861--- a/fs/gfs2/glops.c
64862+++ b/fs/gfs2/glops.c
64863@@ -544,9 +544,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
64864
64865 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
64866 gl->gl_state == LM_ST_SHARED && ip) {
64867- gl->gl_lockref.count++;
64868+ __lockref_inc(&gl->gl_lockref);
64869 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
64870- gl->gl_lockref.count--;
64871+ __lockref_dec(&gl->gl_lockref);
64872 }
64873 }
64874
64875diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
64876index 3aa17d4..b338075 100644
64877--- a/fs/gfs2/quota.c
64878+++ b/fs/gfs2/quota.c
64879@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
64880 if (!spin_trylock(&qd->qd_lockref.lock))
64881 return LRU_SKIP;
64882
64883- if (qd->qd_lockref.count == 0) {
64884+ if (__lockref_read(&qd->qd_lockref) == 0) {
64885 lockref_mark_dead(&qd->qd_lockref);
64886 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
64887 }
64888@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
64889 return NULL;
64890
64891 qd->qd_sbd = sdp;
64892- qd->qd_lockref.count = 1;
64893+ __lockref_set(&qd->qd_lockref, 1);
64894 spin_lock_init(&qd->qd_lockref.lock);
64895 qd->qd_id = qid;
64896 qd->qd_slot = -1;
64897@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
64898 if (lockref_put_or_lock(&qd->qd_lockref))
64899 return;
64900
64901- qd->qd_lockref.count = 0;
64902+ __lockref_set(&qd->qd_lockref, 0);
64903 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
64904 spin_unlock(&qd->qd_lockref.lock);
64905
64906diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64907index fd62cae..3494dfa 100644
64908--- a/fs/hostfs/hostfs_kern.c
64909+++ b/fs/hostfs/hostfs_kern.c
64910@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64911
64912 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64913 {
64914- char *s = nd_get_link(nd);
64915+ const char *s = nd_get_link(nd);
64916 if (!IS_ERR(s))
64917 __putname(s);
64918 }
64919diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64920index c274aca..772fa5e 100644
64921--- a/fs/hugetlbfs/inode.c
64922+++ b/fs/hugetlbfs/inode.c
64923@@ -148,6 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64924 struct mm_struct *mm = current->mm;
64925 struct vm_area_struct *vma;
64926 struct hstate *h = hstate_file(file);
64927+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64928 struct vm_unmapped_area_info info;
64929
64930 if (len & ~huge_page_mask(h))
64931@@ -161,17 +162,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64932 return addr;
64933 }
64934
64935+#ifdef CONFIG_PAX_RANDMMAP
64936+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64937+#endif
64938+
64939 if (addr) {
64940 addr = ALIGN(addr, huge_page_size(h));
64941 vma = find_vma(mm, addr);
64942- if (TASK_SIZE - len >= addr &&
64943- (!vma || addr + len <= vma->vm_start))
64944+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64945 return addr;
64946 }
64947
64948 info.flags = 0;
64949 info.length = len;
64950 info.low_limit = TASK_UNMAPPED_BASE;
64951+
64952+#ifdef CONFIG_PAX_RANDMMAP
64953+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64954+ info.low_limit += mm->delta_mmap;
64955+#endif
64956+
64957 info.high_limit = TASK_SIZE;
64958 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64959 info.align_offset = 0;
64960@@ -912,7 +922,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64961 };
64962 MODULE_ALIAS_FS("hugetlbfs");
64963
64964-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64965+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64966
64967 static int can_do_hugetlb_shm(void)
64968 {
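
/*
 * Annotation: the hugetlbfs hunk wires huge-page mappings into two mmap
 * defenses.  Under PAX_RANDMMAP the caller-supplied hint is ignored and the
 * unmapped-area search base is shifted by the per-mm delta_mmap chosen at
 * exec() time; and the bare "fits below the next vma" test becomes
 * check_heap_stack_gap(), which also demands a randomized guard distance
 * (computed by gr_rand_threadstack_offset()) from the neighbouring mapping.
 * A rough sketch of its shape for the simple case only; the in-patch version
 * in mm/mmap.c also special-cases stack guard gaps:
 */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len,
				 unsigned long offset)
{
	if (!vma)
		return true;
	/* demand the randomized offset's worth of slack, not a bare fit */
	return addr + len + offset <= vma->vm_start;
}
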
64969diff --git a/fs/inode.c b/fs/inode.c
64970index f00b16f..b653fea 100644
64971--- a/fs/inode.c
64972+++ b/fs/inode.c
64973@@ -830,16 +830,20 @@ unsigned int get_next_ino(void)
64974 unsigned int *p = &get_cpu_var(last_ino);
64975 unsigned int res = *p;
64976
64977+start:
64978+
64979 #ifdef CONFIG_SMP
64980 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64981- static atomic_t shared_last_ino;
64982- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64983+ static atomic_unchecked_t shared_last_ino;
64984+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
64985
64986 res = next - LAST_INO_BATCH;
64987 }
64988 #endif
64989
64990- *p = ++res;
64991+ if (unlikely(!++res))
64992+ goto start; /* never zero */
64993+ *p = res;
64994 put_cpu_var(last_ino);
64995 return res;
64996 }
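
/*
 * Annotation: two independent tweaks share the get_next_ino() hunk.  The SMP
 * batch counter becomes unchecked because it is expected to wrap eventually,
 * and the per-cpu counter now skips zero, so the function can no longer hand
 * out inode number 0 after 2^32 allocations; parts of userspace treat
 * st_ino == 0 as "no such entry".  Hypothetical caller, to show where the
 * guarantee lands (names illustrative only):
 */
static struct inode *demo_new_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		inode->i_ino = get_next_ino();	/* now guaranteed nonzero */
	return inode;
}
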
64997diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
64998index 4a6cf28..d3a29d3 100644
64999--- a/fs/jffs2/erase.c
65000+++ b/fs/jffs2/erase.c
65001@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
65002 struct jffs2_unknown_node marker = {
65003 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
65004 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65005- .totlen = cpu_to_je32(c->cleanmarker_size)
65006+ .totlen = cpu_to_je32(c->cleanmarker_size),
65007+ .hdr_crc = cpu_to_je32(0)
65008 };
65009
65010 jffs2_prealloc_raw_node_refs(c, jeb, 1);
65011diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
65012index 09ed551..45684f8 100644
65013--- a/fs/jffs2/wbuf.c
65014+++ b/fs/jffs2/wbuf.c
65015@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
65016 {
65017 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
65018 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65019- .totlen = constant_cpu_to_je32(8)
65020+ .totlen = constant_cpu_to_je32(8),
65021+ .hdr_crc = constant_cpu_to_je32(0)
65022 };
65023
65024 /*
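
/*
 * Annotation: both jffs2 hunks only make an implicit zero explicit.  With
 * designated initializers C already zero-fills the omitted hdr_crc member
 * (C99 6.7.8p21), so the bytes written to flash are unchanged; spelling the
 * CRC field out documents it at the initialization site and plausibly keeps
 * the patch's stricter build-time checks on partially initialized on-medium
 * structures quiet.  The two forms are identical:
 */
struct jffs2_unknown_node a = {
	.magic    = cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen   = cpu_to_je32(8),
};	/* .hdr_crc implicitly zero */

struct jffs2_unknown_node b = {
	.magic    = cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen   = cpu_to_je32(8),
	.hdr_crc  = cpu_to_je32(0),
};
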
65025diff --git a/fs/jfs/super.c b/fs/jfs/super.c
65026index 5d30c56..8c45372 100644
65027--- a/fs/jfs/super.c
65028+++ b/fs/jfs/super.c
65029@@ -901,7 +901,7 @@ static int __init init_jfs_fs(void)
65030
65031 jfs_inode_cachep =
65032 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
65033- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
65034+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
65035 init_once);
65036 if (jfs_inode_cachep == NULL)
65037 return -ENOMEM;
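
/*
 * Annotation: SLAB_USERCOPY is the PAX_USERCOPY whitelist flag: with that
 * feature on, copy_to_user()/copy_from_user() refuse slab-backed objects
 * unless their cache is explicitly marked.  jfs_ip plausibly needs the mark
 * because struct jfs_inode_info embeds the inline symlink target, which
 * readlink() copies straight to userspace.  The pattern for any cache in the
 * same position (identifiers hypothetical):
 */
static int __init example_init(void)
{
	example_cachep = kmem_cache_create("example_ip",
					   sizeof(struct example_inode), 0,
					   SLAB_RECLAIM_ACCOUNT |
					   SLAB_MEM_SPREAD |
					   SLAB_USERCOPY, /* usercopy whitelist */
					   example_init_once);
	return example_cachep ? 0 : -ENOMEM;
}
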
65038diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
65039index 6acc964..eca491f 100644
65040--- a/fs/kernfs/dir.c
65041+++ b/fs/kernfs/dir.c
65042@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
65043 *
65044 * Returns 31 bit hash of ns + name (so it fits in an off_t )
65045 */
65046-static unsigned int kernfs_name_hash(const char *name, const void *ns)
65047+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
65048 {
65049 unsigned long hash = init_name_hash();
65050 unsigned int len = strlen(name);
65051@@ -831,6 +831,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
65052 ret = scops->mkdir(parent, dentry->d_name.name, mode);
65053
65054 kernfs_put_active(parent);
65055+
65056+ if (!ret) {
65057+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
65058+ ret = PTR_ERR_OR_ZERO(dentry_ret);
65059+ }
65060+
65061 return ret;
65062 }
65063
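
/*
 * Annotation: two unrelated kernfs fixes sit above.  kernfs_name_hash()
 * takes const unsigned char * so that bytes >= 0x80 are not sign-extended on
 * their way into partial_name_hash(); fs/namei.c hashes names as unsigned
 * char throughout, and with plain char the two computations diverge for
 * non-ASCII names.  And kernfs_iop_mkdir() now looks the freshly created
 * node up at once, so a successful mkdir returns with the dentry
 * instantiated (plausibly so hooks on the mkdir path see a positive dentry)
 * and a lookup failure is propagated.  The sign-extension point in isolation:
 */
static void hash_byte_demo(void)
{
	char c = (char)0xe2;	/* first byte of a multibyte UTF-8 name */

	/* what a plain char feeds partial_name_hash(): */
	unsigned long as_signed = (unsigned long)c;
	/* what the patched prototype feeds it: */
	unsigned long as_plain = (unsigned long)(unsigned char)c;	/* 0xe2 */

	/* on a signed-char arch (e.g. x86) as_signed sign-extends to
	 * 0xff...e2, so the two prototypes hash such names differently */
	(void)as_signed;
	(void)as_plain;
}
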
65064diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
65065index 2bacb99..f745182 100644
65066--- a/fs/kernfs/file.c
65067+++ b/fs/kernfs/file.c
65068@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
65069
65070 struct kernfs_open_node {
65071 atomic_t refcnt;
65072- atomic_t event;
65073+ atomic_unchecked_t event;
65074 wait_queue_head_t poll;
65075 struct list_head files; /* goes through kernfs_open_file.list */
65076 };
65077@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
65078 {
65079 struct kernfs_open_file *of = sf->private;
65080
65081- of->event = atomic_read(&of->kn->attr.open->event);
65082+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
65083
65084 return of->kn->attr.ops->seq_show(sf, v);
65085 }
65086@@ -207,7 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
65087 goto out_free;
65088 }
65089
65090- of->event = atomic_read(&of->kn->attr.open->event);
65091+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
65092 ops = kernfs_ops(of->kn);
65093 if (ops->read)
65094 len = ops->read(of, buf, len, *ppos);
65095@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
65096 {
65097 struct kernfs_open_file *of = kernfs_of(file);
65098 const struct kernfs_ops *ops;
65099- size_t len;
65100+ ssize_t len;
65101 char *buf;
65102
65103 if (of->atomic_write_len) {
65104@@ -385,12 +385,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
65105 return ret;
65106 }
65107
65108-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65109- void *buf, int len, int write)
65110+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65111+ void *buf, size_t len, int write)
65112 {
65113 struct file *file = vma->vm_file;
65114 struct kernfs_open_file *of = kernfs_of(file);
65115- int ret;
65116+ ssize_t ret;
65117
65118 if (!of->vm_ops)
65119 return -EINVAL;
65120@@ -569,7 +569,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
65121 return -ENOMEM;
65122
65123 atomic_set(&new_on->refcnt, 0);
65124- atomic_set(&new_on->event, 1);
65125+ atomic_set_unchecked(&new_on->event, 1);
65126 init_waitqueue_head(&new_on->poll);
65127 INIT_LIST_HEAD(&new_on->files);
65128 goto retry;
65129@@ -793,7 +793,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
65130
65131 kernfs_put_active(kn);
65132
65133- if (of->event != atomic_read(&on->event))
65134+ if (of->event != atomic_read_unchecked(&on->event))
65135 goto trigger;
65136
65137 return DEFAULT_POLLMASK;
65138@@ -824,7 +824,7 @@ repeat:
65139
65140 on = kn->attr.open;
65141 if (on) {
65142- atomic_inc(&on->event);
65143+ atomic_inc_unchecked(&on->event);
65144 wake_up_interruptible(&on->poll);
65145 }
65146
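
/*
 * Annotation: besides the by-now-familiar event-counter conversion, this
 * file carries a real signedness fix: ops->write() returns ssize_t, and
 * kernfs_fop_write() stored it in a size_t, so an error such as -EFAULT
 * became a huge positive "length" and the success path ran anyway.
 * kernfs_vma_access() gets the matching int/size_t-to-ssize_t treatment.
 * Shape of the bug, paraphrased from kernfs_fop_write():
 */
static ssize_t buggy_write_path(struct kernfs_open_file *of,
				const struct kernfs_ops *ops,
				char *buf, loff_t *ppos)
{
	size_t len;			/* old type; the fix makes it ssize_t */

	len = ops->write(of, buf, PAGE_SIZE, *ppos); /* may return -EFAULT */
	if (len > 0)			/* taken even on error: (size_t)-14 > 0 */
		*ppos += len;		/* file position corrupted on failure */
	return len;	/* converts back to -14 only by two's-complement luck */
}
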
65147diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
65148index 8a19889..4c3069a 100644
65149--- a/fs/kernfs/symlink.c
65150+++ b/fs/kernfs/symlink.c
65151@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
65152 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
65153 void *cookie)
65154 {
65155- char *page = nd_get_link(nd);
65156+ const char *page = nd_get_link(nd);
65157 if (!IS_ERR(page))
65158 free_page((unsigned long)page);
65159 }
65160diff --git a/fs/libfs.c b/fs/libfs.c
65161index 0ab6512..cd9982d 100644
65162--- a/fs/libfs.c
65163+++ b/fs/libfs.c
65164@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65165
65166 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
65167 struct dentry *next = list_entry(p, struct dentry, d_child);
65168+ char d_name[sizeof(next->d_iname)];
65169+ const unsigned char *name;
65170+
65171 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
65172 if (!simple_positive(next)) {
65173 spin_unlock(&next->d_lock);
65174@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65175
65176 spin_unlock(&next->d_lock);
65177 spin_unlock(&dentry->d_lock);
65178- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
65179+ name = next->d_name.name;
65180+ if (name == next->d_iname) {
65181+ memcpy(d_name, name, next->d_name.len);
65182+ name = d_name;
65183+ }
65184+ if (!dir_emit(ctx, name, next->d_name.len,
65185 next->d_inode->i_ino, dt_type(next->d_inode)))
65186 return 0;
65187 spin_lock(&dentry->d_lock);
65188@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
65189 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
65190 void *cookie)
65191 {
65192- char *s = nd_get_link(nd);
65193+ const char *s = nd_get_link(nd);
65194 if (!IS_ERR(s))
65195 kfree(s);
65196 }
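
/*
 * Annotation: the dcache_readdir() change snapshots short names onto the
 * stack before dir_emit().  By the time dir_emit() copies the name to
 * userspace (and possibly faults or sleeps), both d_lock and the parent's
 * lock are gone; a name stored inline in d_iname can be rewritten in place
 * by a concurrent rename, whereas an out-of-line d_name.name points at a
 * separately allocated buffer that rename swaps rather than edits, so only
 * the inline case is copied.  No NUL terminator is needed because dir_emit()
 * takes an explicit length.  The snapshot idiom on its own:
 */
static const unsigned char *snapshot_name(const struct dentry *dentry,
					  unsigned char buf[DNAME_INLINE_LEN])
{
	const unsigned char *name = dentry->d_name.name;

	if (name == dentry->d_iname) {	/* short name, stored inline */
		memcpy(buf, name, dentry->d_name.len);
		name = buf;		/* immune to in-place rename */
	}
	return name;	/* pass with dentry->d_name.len; no NUL needed */
}
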
65197diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
65198index acd3947..1f896e2 100644
65199--- a/fs/lockd/clntproc.c
65200+++ b/fs/lockd/clntproc.c
65201@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
65202 /*
65203 * Cookie counter for NLM requests
65204 */
65205-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
65206+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
65207
65208 void nlmclnt_next_cookie(struct nlm_cookie *c)
65209 {
65210- u32 cookie = atomic_inc_return(&nlm_cookie);
65211+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
65212
65213 memcpy(c->data, &cookie, 4);
65214 c->len=4;
65215diff --git a/fs/mount.h b/fs/mount.h
65216index 6a61c2b..bd79179 100644
65217--- a/fs/mount.h
65218+++ b/fs/mount.h
65219@@ -13,7 +13,7 @@ struct mnt_namespace {
65220 u64 seq; /* Sequence number to prevent loops */
65221 wait_queue_head_t poll;
65222 u64 event;
65223-};
65224+} __randomize_layout;
65225
65226 struct mnt_pcp {
65227 int mnt_count;
65228@@ -65,7 +65,7 @@ struct mount {
65229 struct hlist_head mnt_pins;
65230 struct fs_pin mnt_umount;
65231 struct dentry *mnt_ex_mountpoint;
65232-};
65233+} __randomize_layout;
65234
65235 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
65236
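
/*
 * Annotation: __randomize_layout is GRKERNSEC_RANDSTRUCT.  A gcc plugin
 * shuffles the member order of tagged structures at build time, keyed by a
 * per-build seed, so an exploit's hard-coded field offsets stop working
 * across kernels; mount/namespace structures are classic targets.  Sketch of
 * the compiler glue, assuming the plugin's attribute name:
 */
#ifdef RANDSTRUCT_PLUGIN
#define __randomize_layout	__attribute__((randomize_layout))
#else
#define __randomize_layout
#endif
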
65237diff --git a/fs/namei.c b/fs/namei.c
65238index 50a8583..44c470a 100644
65239--- a/fs/namei.c
65240+++ b/fs/namei.c
65241@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
65242 if (ret != -EACCES)
65243 return ret;
65244
65245+#ifdef CONFIG_GRKERNSEC
65246+ /* we'll block if we have to log due to a denied capability use */
65247+ if (mask & MAY_NOT_BLOCK)
65248+ return -ECHILD;
65249+#endif
65250+
65251 if (S_ISDIR(inode->i_mode)) {
65252 /* DACs are overridable for directories */
65253- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65254- return 0;
65255 if (!(mask & MAY_WRITE))
65256- if (capable_wrt_inode_uidgid(inode,
65257- CAP_DAC_READ_SEARCH))
65258+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65259+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65260 return 0;
65261+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65262+ return 0;
65263 return -EACCES;
65264 }
65265 /*
65266+ * Searching includes executable on directories, else just read.
65267+ */
65268+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65269+ if (mask == MAY_READ)
65270+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65271+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65272+ return 0;
65273+
65274+ /*
65275 * Read/write DACs are always overridable.
65276 * Executable DACs are overridable when there is
65277 * at least one exec bit set.
65278@@ -356,14 +371,6 @@ int generic_permission(struct inode *inode, int mask)
65279 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65280 return 0;
65281
65282- /*
65283- * Searching includes executable on directories, else just read.
65284- */
65285- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65286- if (mask == MAY_READ)
65287- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65288- return 0;
65289-
65290 return -EACCES;
65291 }
65292 EXPORT_SYMBOL(generic_permission);
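
/*
 * Annotation: the generic_permission() surgery reorders the DAC overrides so
 * the common read-only cases are satisfied by CAP_DAC_READ_SEARCH, or by a
 * non-logging probe of CAP_DAC_OVERRIDE, before the logged CAP_DAC_OVERRIDE
 * check runs; otherwise every lookup by a capable process would be audited
 * as a use of the stronger capability.  The new MAY_NOT_BLOCK bail-out
 * exists because that logging may sleep, which RCU-walk forbids, so the
 * lookup is retried in ref-walk mode.  Sketch of the _nolog variant,
 * assuming it mirrors capable_wrt_inode_uidgid() from kernel/capability.c:
 */
bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
{
	struct user_namespace *ns = current_user_ns();

	/* as capable_wrt_inode_uidgid(), minus the grsecurity audit path */
	return ns_capable_nolog(ns, cap) &&
	       kuid_has_mapping(ns, inode->i_uid) &&
	       kgid_has_mapping(ns, inode->i_gid);
}
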
65293@@ -503,7 +510,7 @@ struct nameidata {
65294 int last_type;
65295 unsigned depth;
65296 struct file *base;
65297- char *saved_names[MAX_NESTED_LINKS + 1];
65298+ const char *saved_names[MAX_NESTED_LINKS + 1];
65299 };
65300
65301 /*
65302@@ -714,13 +721,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
65303 nd->flags |= LOOKUP_JUMPED;
65304 }
65305
65306-void nd_set_link(struct nameidata *nd, char *path)
65307+void nd_set_link(struct nameidata *nd, const char *path)
65308 {
65309 nd->saved_names[nd->depth] = path;
65310 }
65311 EXPORT_SYMBOL(nd_set_link);
65312
65313-char *nd_get_link(struct nameidata *nd)
65314+const char *nd_get_link(const struct nameidata *nd)
65315 {
65316 return nd->saved_names[nd->depth];
65317 }
65318@@ -855,7 +862,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65319 {
65320 struct dentry *dentry = link->dentry;
65321 int error;
65322- char *s;
65323+ const char *s;
65324
65325 BUG_ON(nd->flags & LOOKUP_RCU);
65326
65327@@ -876,6 +883,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65328 if (error)
65329 goto out_put_nd_path;
65330
65331+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
65332+ dentry->d_inode, dentry, nd->path.mnt)) {
65333+ error = -EACCES;
65334+ goto out_put_nd_path;
65335+ }
65336+
65337 nd->last_type = LAST_BIND;
65338 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
65339 error = PTR_ERR(*p);
65340@@ -1640,6 +1653,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
65341 if (res)
65342 break;
65343 res = walk_component(nd, path, LOOKUP_FOLLOW);
65344+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
65345+ res = -EACCES;
65346 put_link(nd, &link, cookie);
65347 } while (res > 0);
65348
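
/*
 * Annotation: two symlink hardenings thread through the namei.c hunks.
 * gr_handle_follow_link(), hooked into follow_link() above, is the classic
 * GRKERNSEC_LINK rule: refuse to follow a symlink that sits in a
 * world-writable sticky directory unless the link's owner matches the
 * directory's owner or the follower owns the link.  gr_handle_symlink_owner(),
 * checked after each walk_component() that traversed a link, is the
 * GRKERNSEC_SYMLINKOWN companion for group-writable hosting directories.
 * Sketch of the first, paraphrased from the grsecurity source added
 * elsewhere in this patch:
 */
int gr_handle_follow_link(const struct inode *parent, const struct inode *inode,
			  const struct dentry *dentry, const struct vfsmount *mnt)
{
#ifdef CONFIG_GRKERNSEC_LINK
	const struct cred *cred = current_cred();

	if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
	    (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
	    (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
		/* audit line elided */
		return -EACCES;
	}
#endif
	return 0;
}
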
65349@@ -1712,7 +1727,7 @@ EXPORT_SYMBOL(full_name_hash);
65350 static inline u64 hash_name(const char *name)
65351 {
65352 unsigned long a, b, adata, bdata, mask, hash, len;
65353- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65354+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65355
65356 hash = a = 0;
65357 len = -sizeof(unsigned long);
65358@@ -2007,6 +2022,8 @@ static int path_lookupat(int dfd, const char *name,
65359 if (err)
65360 break;
65361 err = lookup_last(nd, &path);
65362+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
65363+ err = -EACCES;
65364 put_link(nd, &link, cookie);
65365 }
65366 }
65367@@ -2014,6 +2031,13 @@ static int path_lookupat(int dfd, const char *name,
65368 if (!err)
65369 err = complete_walk(nd);
65370
65371+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
65372+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65373+ path_put(&nd->path);
65374+ err = -ENOENT;
65375+ }
65376+ }
65377+
65378 if (!err && nd->flags & LOOKUP_DIRECTORY) {
65379 if (!d_can_lookup(nd->path.dentry)) {
65380 path_put(&nd->path);
65381@@ -2035,8 +2059,15 @@ static int filename_lookup(int dfd, struct filename *name,
65382 retval = path_lookupat(dfd, name->name,
65383 flags | LOOKUP_REVAL, nd);
65384
65385- if (likely(!retval))
65386+ if (likely(!retval)) {
65387 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
65388+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
65389+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
65390+ path_put(&nd->path);
65391+ return -ENOENT;
65392+ }
65393+ }
65394+ }
65395 return retval;
65396 }
65397
65398@@ -2615,6 +2646,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
65399 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
65400 return -EPERM;
65401
65402+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
65403+ return -EPERM;
65404+ if (gr_handle_rawio(inode))
65405+ return -EPERM;
65406+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
65407+ return -EACCES;
65408+
65409 return 0;
65410 }
65411
65412@@ -2846,7 +2884,7 @@ looked_up:
65413 * cleared otherwise prior to returning.
65414 */
65415 static int lookup_open(struct nameidata *nd, struct path *path,
65416- struct file *file,
65417+ struct path *link, struct file *file,
65418 const struct open_flags *op,
65419 bool got_write, int *opened)
65420 {
65421@@ -2881,6 +2919,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65422 /* Negative dentry, just create the file */
65423 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
65424 umode_t mode = op->mode;
65425+
65426+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
65427+ error = -EACCES;
65428+ goto out_dput;
65429+ }
65430+
65431+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
65432+ error = -EACCES;
65433+ goto out_dput;
65434+ }
65435+
65436 if (!IS_POSIXACL(dir->d_inode))
65437 mode &= ~current_umask();
65438 /*
65439@@ -2902,6 +2951,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65440 nd->flags & LOOKUP_EXCL);
65441 if (error)
65442 goto out_dput;
65443+ else
65444+ gr_handle_create(dentry, nd->path.mnt);
65445 }
65446 out_no_open:
65447 path->dentry = dentry;
65448@@ -2916,7 +2967,7 @@ out_dput:
65449 /*
65450 * Handle the last step of open()
65451 */
65452-static int do_last(struct nameidata *nd, struct path *path,
65453+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65454 struct file *file, const struct open_flags *op,
65455 int *opened, struct filename *name)
65456 {
65457@@ -2966,6 +3017,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65458 if (error)
65459 return error;
65460
65461+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65462+ error = -ENOENT;
65463+ goto out;
65464+ }
65465+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65466+ error = -EACCES;
65467+ goto out;
65468+ }
65469+
65470 audit_inode(name, dir, LOOKUP_PARENT);
65471 error = -EISDIR;
65472 /* trailing slashes? */
65473@@ -2985,7 +3045,7 @@ retry_lookup:
65474 */
65475 }
65476 mutex_lock(&dir->d_inode->i_mutex);
65477- error = lookup_open(nd, path, file, op, got_write, opened);
65478+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65479 mutex_unlock(&dir->d_inode->i_mutex);
65480
65481 if (error <= 0) {
65482@@ -3009,11 +3069,28 @@ retry_lookup:
65483 goto finish_open_created;
65484 }
65485
65486+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65487+ error = -ENOENT;
65488+ goto exit_dput;
65489+ }
65490+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65491+ error = -EACCES;
65492+ goto exit_dput;
65493+ }
65494+
65495 /*
65496 * create/update audit record if it already exists.
65497 */
65498- if (d_is_positive(path->dentry))
65499+ if (d_is_positive(path->dentry)) {
65500+ /* only check if O_CREAT is specified, all other checks need to go
65501+ into may_open */
65502+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65503+ error = -EACCES;
65504+ goto exit_dput;
65505+ }
65506+
65507 audit_inode(name, path->dentry, 0);
65508+ }
65509
65510 /*
65511 * If atomic_open() acquired write access it is dropped now due to
65512@@ -3055,6 +3132,11 @@ finish_lookup:
65513 }
65514 }
65515 BUG_ON(inode != path->dentry->d_inode);
65516+ /* if we're resolving a symlink to another symlink */
65517+ if (link && gr_handle_symlink_owner(link, inode)) {
65518+ error = -EACCES;
65519+ goto out;
65520+ }
65521 return 1;
65522 }
65523
65524@@ -3074,7 +3156,18 @@ finish_open:
65525 path_put(&save_parent);
65526 return error;
65527 }
65528+
65529+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65530+ error = -ENOENT;
65531+ goto out;
65532+ }
65533+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65534+ error = -EACCES;
65535+ goto out;
65536+ }
65537+
65538 audit_inode(name, nd->path.dentry, 0);
65539+
65540 error = -EISDIR;
65541 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65542 goto out;
65543@@ -3235,7 +3328,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65544 if (unlikely(error))
65545 goto out;
65546
65547- error = do_last(nd, &path, file, op, &opened, pathname);
65548+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65549 while (unlikely(error > 0)) { /* trailing symlink */
65550 struct path link = path;
65551 void *cookie;
65552@@ -3253,7 +3346,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65553 error = follow_link(&link, nd, &cookie);
65554 if (unlikely(error))
65555 break;
65556- error = do_last(nd, &path, file, op, &opened, pathname);
65557+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65558 put_link(nd, &link, cookie);
65559 }
65560 out:
65561@@ -3356,9 +3449,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
65562 goto unlock;
65563
65564 error = -EEXIST;
65565- if (d_is_positive(dentry))
65566+ if (d_is_positive(dentry)) {
65567+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65568+ error = -ENOENT;
65569 goto fail;
65570-
65571+ }
65572 /*
65573 * Special case - lookup gave negative, but... we had foo/bar/
65574 * From the vfs_mknod() POV we just have a negative dentry -
65575@@ -3423,6 +3518,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65576 }
65577 EXPORT_SYMBOL(user_path_create);
65578
65579+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65580+{
65581+ struct filename *tmp = getname(pathname);
65582+ struct dentry *res;
65583+ if (IS_ERR(tmp))
65584+ return ERR_CAST(tmp);
65585+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65586+ if (IS_ERR(res))
65587+ putname(tmp);
65588+ else
65589+ *to = tmp;
65590+ return res;
65591+}
65592+
65593 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65594 {
65595 int error = may_create(dir, dentry);
65596@@ -3486,6 +3595,17 @@ retry:
65597
65598 if (!IS_POSIXACL(path.dentry->d_inode))
65599 mode &= ~current_umask();
65600+
65601+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65602+ error = -EPERM;
65603+ goto out;
65604+ }
65605+
65606+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65607+ error = -EACCES;
65608+ goto out;
65609+ }
65610+
65611 error = security_path_mknod(&path, dentry, mode, dev);
65612 if (error)
65613 goto out;
65614@@ -3501,6 +3621,8 @@ retry:
65615 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
65616 break;
65617 }
65618+ if (!error)
65619+ gr_handle_create(dentry, path.mnt);
65620 out:
65621 done_path_create(&path, dentry);
65622 if (retry_estale(error, lookup_flags)) {
65623@@ -3555,9 +3677,16 @@ retry:
65624
65625 if (!IS_POSIXACL(path.dentry->d_inode))
65626 mode &= ~current_umask();
65627+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65628+ error = -EACCES;
65629+ goto out;
65630+ }
65631 error = security_path_mkdir(&path, dentry, mode);
65632 if (!error)
65633 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65634+ if (!error)
65635+ gr_handle_create(dentry, path.mnt);
65636+out:
65637 done_path_create(&path, dentry);
65638 if (retry_estale(error, lookup_flags)) {
65639 lookup_flags |= LOOKUP_REVAL;
65640@@ -3590,7 +3719,7 @@ void dentry_unhash(struct dentry *dentry)
65641 {
65642 shrink_dcache_parent(dentry);
65643 spin_lock(&dentry->d_lock);
65644- if (dentry->d_lockref.count == 1)
65645+ if (__lockref_read(&dentry->d_lockref) == 1)
65646 __d_drop(dentry);
65647 spin_unlock(&dentry->d_lock);
65648 }
65649@@ -3641,6 +3770,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65650 struct filename *name;
65651 struct dentry *dentry;
65652 struct nameidata nd;
65653+ u64 saved_ino = 0;
65654+ dev_t saved_dev = 0;
65655 unsigned int lookup_flags = 0;
65656 retry:
65657 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65658@@ -3673,10 +3804,21 @@ retry:
65659 error = -ENOENT;
65660 goto exit3;
65661 }
65662+
65663+ saved_ino = gr_get_ino_from_dentry(dentry);
65664+ saved_dev = gr_get_dev_from_dentry(dentry);
65665+
65666+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
65667+ error = -EACCES;
65668+ goto exit3;
65669+ }
65670+
65671 error = security_path_rmdir(&nd.path, dentry);
65672 if (error)
65673 goto exit3;
65674 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
65675+ if (!error && (saved_dev || saved_ino))
65676+ gr_handle_delete(saved_ino, saved_dev);
65677 exit3:
65678 dput(dentry);
65679 exit2:
65680@@ -3769,6 +3911,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
65681 struct nameidata nd;
65682 struct inode *inode = NULL;
65683 struct inode *delegated_inode = NULL;
65684+ u64 saved_ino = 0;
65685+ dev_t saved_dev = 0;
65686 unsigned int lookup_flags = 0;
65687 retry:
65688 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65689@@ -3795,10 +3939,22 @@ retry_deleg:
65690 if (d_is_negative(dentry))
65691 goto slashes;
65692 ihold(inode);
65693+
65694+ if (inode->i_nlink <= 1) {
65695+ saved_ino = gr_get_ino_from_dentry(dentry);
65696+ saved_dev = gr_get_dev_from_dentry(dentry);
65697+ }
65698+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
65699+ error = -EACCES;
65700+ goto exit2;
65701+ }
65702+
65703 error = security_path_unlink(&nd.path, dentry);
65704 if (error)
65705 goto exit2;
65706 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
65707+ if (!error && (saved_ino || saved_dev))
65708+ gr_handle_delete(saved_ino, saved_dev);
65709 exit2:
65710 dput(dentry);
65711 }
65712@@ -3887,9 +4043,17 @@ retry:
65713 if (IS_ERR(dentry))
65714 goto out_putname;
65715
65716+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
65717+ error = -EACCES;
65718+ goto out;
65719+ }
65720+
65721 error = security_path_symlink(&path, dentry, from->name);
65722 if (!error)
65723 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
65724+ if (!error)
65725+ gr_handle_create(dentry, path.mnt);
65726+out:
65727 done_path_create(&path, dentry);
65728 if (retry_estale(error, lookup_flags)) {
65729 lookup_flags |= LOOKUP_REVAL;
65730@@ -3993,6 +4157,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
65731 struct dentry *new_dentry;
65732 struct path old_path, new_path;
65733 struct inode *delegated_inode = NULL;
65734+ struct filename *to = NULL;
65735 int how = 0;
65736 int error;
65737
65738@@ -4016,7 +4181,7 @@ retry:
65739 if (error)
65740 return error;
65741
65742- new_dentry = user_path_create(newdfd, newname, &new_path,
65743+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
65744 (how & LOOKUP_REVAL));
65745 error = PTR_ERR(new_dentry);
65746 if (IS_ERR(new_dentry))
65747@@ -4028,11 +4193,28 @@ retry:
65748 error = may_linkat(&old_path);
65749 if (unlikely(error))
65750 goto out_dput;
65751+
65752+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
65753+ old_path.dentry->d_inode,
65754+ old_path.dentry->d_inode->i_mode, to)) {
65755+ error = -EACCES;
65756+ goto out_dput;
65757+ }
65758+
65759+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
65760+ old_path.dentry, old_path.mnt, to)) {
65761+ error = -EACCES;
65762+ goto out_dput;
65763+ }
65764+
65765 error = security_path_link(old_path.dentry, &new_path, new_dentry);
65766 if (error)
65767 goto out_dput;
65768 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
65769+ if (!error)
65770+ gr_handle_create(new_dentry, new_path.mnt);
65771 out_dput:
65772+ putname(to);
65773 done_path_create(&new_path, new_dentry);
65774 if (delegated_inode) {
65775 error = break_deleg_wait(&delegated_inode);
65776@@ -4348,6 +4530,20 @@ retry_deleg:
65777 if (new_dentry == trap)
65778 goto exit5;
65779
65780+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
65781+ /* use EXDEV error to cause 'mv' to switch to an alternative
65782+ * method for usability
65783+ */
65784+ error = -EXDEV;
65785+ goto exit5;
65786+ }
65787+
65788+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
65789+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
65790+ to, flags);
65791+ if (error)
65792+ goto exit5;
65793+
65794 error = security_path_rename(&oldnd.path, old_dentry,
65795 &newnd.path, new_dentry, flags);
65796 if (error)
65797@@ -4355,6 +4551,9 @@ retry_deleg:
65798 error = vfs_rename(old_dir->d_inode, old_dentry,
65799 new_dir->d_inode, new_dentry,
65800 &delegated_inode, flags);
65801+ if (!error)
65802+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
65803+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
65804 exit5:
65805 dput(new_dentry);
65806 exit4:
65807@@ -4411,14 +4610,24 @@ EXPORT_SYMBOL(vfs_whiteout);
65808
65809 int readlink_copy(char __user *buffer, int buflen, const char *link)
65810 {
65811+ char tmpbuf[64];
65812+ const char *newlink;
65813 int len = PTR_ERR(link);
65814+
65815 if (IS_ERR(link))
65816 goto out;
65817
65818 len = strlen(link);
65819 if (len > (unsigned) buflen)
65820 len = buflen;
65821- if (copy_to_user(buffer, link, len))
65822+
65823+ if (len < sizeof(tmpbuf)) {
65824+ memcpy(tmpbuf, link, len);
65825+ newlink = tmpbuf;
65826+ } else
65827+ newlink = link;
65828+
65829+ if (copy_to_user(buffer, newlink, len))
65830 len = -EFAULT;
65831 out:
65832 return len;
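
The readlink_copy() hunk above copies links shorter than 64 bytes into a fixed stack buffer before handing them to copy_to_user(). A plausible reading is that this bounds what a miscomputed length could expose to the 64-byte stack buffer rather than adjacent heap memory; the patch itself does not state the rationale. A minimal userspace sketch of the bounce-buffer pattern (copy_out() and bounce_copy() are hypothetical stand-ins):

#include <stdio.h>
#include <string.h>

/* copy_out() stands in for copy_to_user(); dst is the "user" buffer */
static void copy_out(char *dst, const char *src, size_t len)
{
    memcpy(dst, src, len);
}

/* bounce short strings through a fixed stack buffer, as readlink_copy() does */
static size_t bounce_copy(char *dst, size_t buflen, const char *link)
{
    char tmpbuf[64];
    const char *src = link;
    size_t len = strlen(link);

    if (len > buflen)
        len = buflen;
    if (len < sizeof(tmpbuf)) {     /* short link: source the copy from the stack */
        memcpy(tmpbuf, link, len);
        src = tmpbuf;
    }
    copy_out(dst, src, len);
    return len;
}

int main(void)
{
    char out[128];
    size_t n = bounce_copy(out, sizeof(out) - 1, "/tmp/target");
    out[n] = '\0';
    printf("%s\n", out);
    return 0;
}
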
65833diff --git a/fs/namespace.c b/fs/namespace.c
65834index 38ed1e1..8500e56 100644
65835--- a/fs/namespace.c
65836+++ b/fs/namespace.c
65837@@ -1480,6 +1480,9 @@ static int do_umount(struct mount *mnt, int flags)
65838 if (!(sb->s_flags & MS_RDONLY))
65839 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
65840 up_write(&sb->s_umount);
65841+
65842+ gr_log_remount(mnt->mnt_devname, retval);
65843+
65844 return retval;
65845 }
65846
65847@@ -1502,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
65848 }
65849 unlock_mount_hash();
65850 namespace_unlock();
65851+
65852+ gr_log_unmount(mnt->mnt_devname, retval);
65853+
65854 return retval;
65855 }
65856
65857@@ -1559,7 +1565,7 @@ static inline bool may_mount(void)
65858 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
65859 */
65860
65861-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
65862+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
65863 {
65864 struct path path;
65865 struct mount *mnt;
65866@@ -1604,7 +1610,7 @@ out:
65867 /*
65868 * The 2.0 compatible umount. No flags.
65869 */
65870-SYSCALL_DEFINE1(oldumount, char __user *, name)
65871+SYSCALL_DEFINE1(oldumount, const char __user *, name)
65872 {
65873 return sys_umount(name, 0);
65874 }
65875@@ -2670,6 +2676,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
65876 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
65877 MS_STRICTATIME);
65878
65879+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
65880+ retval = -EPERM;
65881+ goto dput_out;
65882+ }
65883+
65884+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
65885+ retval = -EPERM;
65886+ goto dput_out;
65887+ }
65888+
65889 if (flags & MS_REMOUNT)
65890 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
65891 data_page);
65892@@ -2683,7 +2699,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
65893 retval = do_new_mount(&path, type_page, flags, mnt_flags,
65894 dev_name, data_page);
65895 dput_out:
65896+ gr_log_mount(dev_name, &path, retval);
65897+
65898 path_put(&path);
65899+
65900 return retval;
65901 }
65902
65903@@ -2701,7 +2720,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65904 * number incrementing at 10Ghz will take 12,427 years to wrap which
65905 * is effectively never, so we can ignore the possibility.
65906 */
65907-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65908+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65909
65910 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65911 {
65912@@ -2717,7 +2736,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65913 return ERR_PTR(ret);
65914 }
65915 new_ns->ns.ops = &mntns_operations;
65916- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65917+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
65918 atomic_set(&new_ns->count, 1);
65919 new_ns->root = NULL;
65920 INIT_LIST_HEAD(&new_ns->list);
65921@@ -2727,7 +2746,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65922 return new_ns;
65923 }
65924
65925-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65926+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65927 struct user_namespace *user_ns, struct fs_struct *new_fs)
65928 {
65929 struct mnt_namespace *new_ns;
65930@@ -2848,8 +2867,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65931 }
65932 EXPORT_SYMBOL(mount_subtree);
65933
65934-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65935- char __user *, type, unsigned long, flags, void __user *, data)
65936+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65937+ const char __user *, type, unsigned long, flags, void __user *, data)
65938 {
65939 int ret;
65940 char *kernel_type;
65941@@ -2955,6 +2974,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65942 if (error)
65943 goto out2;
65944
65945+ if (gr_handle_chroot_pivot()) {
65946+ error = -EPERM;
65947+ goto out2;
65948+ }
65949+
65950 get_fs_root(current->fs, &root);
65951 old_mp = lock_mount(&old);
65952 error = PTR_ERR(old_mp);
65953@@ -3235,7 +3259,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
65954 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65955 return -EPERM;
65956
65957- if (fs->users != 1)
65958+ if (atomic_read(&fs->users) != 1)
65959 return -EINVAL;
65960
65961 get_mnt_ns(mnt_ns);
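
The mnt_ns_seq conversion above illustrates a pattern used throughout this patch: under the REFCOUNT hardening, plain atomic_t/atomic64_t operations trap on overflow, so counters that are deliberately allowed to wrap are switched to the *_unchecked variants (defined elsewhere in the patch), which keep ordinary wrapping semantics. Modeled in standard C11 (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* models atomic64_unchecked_t: a counter that is permitted to wrap */
static _Atomic unsigned long long mnt_ns_seq = 1;

static unsigned long long seq_next(void)
{
    /* wraps silently on overflow, like atomic64_add_return_unchecked() */
    return atomic_fetch_add(&mnt_ns_seq, 1) + 1;
}

int main(void)
{
    unsigned long long a = seq_next();
    unsigned long long b = seq_next();
    printf("%llu %llu\n", a, b);   /* 2 3 */
    return 0;
}
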
65962diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65963index 19ca95c..b28702c 100644
65964--- a/fs/nfs/callback_xdr.c
65965+++ b/fs/nfs/callback_xdr.c
65966@@ -51,7 +51,7 @@ struct callback_op {
65967 callback_decode_arg_t decode_args;
65968 callback_encode_res_t encode_res;
65969 long res_maxsize;
65970-};
65971+} __do_const;
65972
65973 static struct callback_op callback_ops[];
65974
65975diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65976index d42dff6..ecbdf42 100644
65977--- a/fs/nfs/inode.c
65978+++ b/fs/nfs/inode.c
65979@@ -1270,16 +1270,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
65980 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
65981 }
65982
65983-static atomic_long_t nfs_attr_generation_counter;
65984+static atomic_long_unchecked_t nfs_attr_generation_counter;
65985
65986 static unsigned long nfs_read_attr_generation_counter(void)
65987 {
65988- return atomic_long_read(&nfs_attr_generation_counter);
65989+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65990 }
65991
65992 unsigned long nfs_inc_attr_generation_counter(void)
65993 {
65994- return atomic_long_inc_return(&nfs_attr_generation_counter);
65995+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
65996 }
65997 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
65998
65999diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
66000index 5416968..0942042 100644
66001--- a/fs/nfsd/nfs4proc.c
66002+++ b/fs/nfsd/nfs4proc.c
66003@@ -1496,7 +1496,7 @@ struct nfsd4_operation {
66004 nfsd4op_rsize op_rsize_bop;
66005 stateid_getter op_get_currentstateid;
66006 stateid_setter op_set_currentstateid;
66007-};
66008+} __do_const;
66009
66010 static struct nfsd4_operation nfsd4_ops[];
66011
66012diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
66013index 5b33ce1..c2a92aa 100644
66014--- a/fs/nfsd/nfs4xdr.c
66015+++ b/fs/nfsd/nfs4xdr.c
66016@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
66017
66018 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
66019
66020-static nfsd4_dec nfsd4_dec_ops[] = {
66021+static const nfsd4_dec nfsd4_dec_ops[] = {
66022 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
66023 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
66024 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
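
Constifying dispatch tables — static const nfsd4_dec nfsd4_dec_ops[] here, plus the __do_const tag on struct callback_op and struct nfsd4_operation above — lets the tables live in read-only memory, so an attacker with a kernel write primitive cannot redirect the function pointers. The plain-C shape of the pattern:

#include <stdio.h>

typedef int (*op_fn)(int);

static int op_read(int x)  { return x + 1; }
static int op_write(int x) { return x + 2; }

/* const table of function pointers: placed in .rodata, not writable at runtime */
static const op_fn ops[] = { op_read, op_write };

int main(void)
{
    printf("%d %d\n", ops[0](0), ops[1](0));
    return 0;
}
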
66025diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
66026index 46ec934..f384e41 100644
66027--- a/fs/nfsd/nfscache.c
66028+++ b/fs/nfsd/nfscache.c
66029@@ -541,7 +541,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66030 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
66031 u32 hash;
66032 struct nfsd_drc_bucket *b;
66033- int len;
66034+ long len;
66035 size_t bufsize = 0;
66036
66037 if (!rp)
66038@@ -550,11 +550,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66039 hash = nfsd_cache_hash(rp->c_xid);
66040 b = &drc_hashtbl[hash];
66041
66042- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
66043- len >>= 2;
66044+ if (statp) {
66045+ len = (char*)statp - (char*)resv->iov_base;
66046+ len = resv->iov_len - len;
66047+ len >>= 2;
66048+ }
66049
66050 /* Don't cache excessive amounts of data and XDR failures */
66051- if (!statp || len > (256 >> 2)) {
66052+ if (!statp || len > (256 >> 2) || len < 0) {
66053 nfsd_reply_cache_free(b, rp);
66054 return;
66055 }
66056@@ -562,7 +565,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66057 switch (cachetype) {
66058 case RC_REPLSTAT:
66059 if (len != 1)
66060- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
66061+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
66062 rp->c_replstat = *statp;
66063 break;
66064 case RC_REPLBUFF:
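
In the nfsd_cache_update() hunk, len was an int derived from iov_len minus a pointer offset; if statp lay past the iovec the subtraction went negative, and the old len > (256 >> 2) test alone did not reject that. The fix widens len to long, computes it only when statp is non-NULL, and adds an explicit len < 0 check. The underflow in miniature:

#include <stdio.h>

int main(void)
{
    long iov_len = 64;   /* resv->iov_len */
    long offset = 128;   /* (char *)statp - (char *)resv->iov_base, past the iovec */

    long len = iov_len - offset;  /* -64 */

    if (len < 0)                  /* the added check rejects it explicitly */
        printf("rejected: len = %ld\n", len);
    return 0;
}
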
66065diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
66066index 3685265..e77261e 100644
66067--- a/fs/nfsd/vfs.c
66068+++ b/fs/nfsd/vfs.c
66069@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
66070
66071 oldfs = get_fs();
66072 set_fs(KERNEL_DS);
66073- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
66074+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
66075 set_fs(oldfs);
66076 return nfsd_finish_read(file, count, host_err);
66077 }
66078@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
66079
66080 /* Write the data. */
66081 oldfs = get_fs(); set_fs(KERNEL_DS);
66082- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
66083+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
66084 set_fs(oldfs);
66085 if (host_err < 0)
66086 goto out_nfserr;
66087@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
66088 */
66089
66090 oldfs = get_fs(); set_fs(KERNEL_DS);
66091- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
66092+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
66093 set_fs(oldfs);
66094
66095 if (host_err < 0)
66096diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
66097index 52ccd34..7a6b202 100644
66098--- a/fs/nls/nls_base.c
66099+++ b/fs/nls/nls_base.c
66100@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
66101
66102 int __register_nls(struct nls_table *nls, struct module *owner)
66103 {
66104- struct nls_table ** tmp = &tables;
66105+ struct nls_table *tmp = tables;
66106
66107 if (nls->next)
66108 return -EBUSY;
66109
66110- nls->owner = owner;
66111+ pax_open_kernel();
66112+ *(void **)&nls->owner = owner;
66113+ pax_close_kernel();
66114 spin_lock(&nls_lock);
66115- while (*tmp) {
66116- if (nls == *tmp) {
66117+ while (tmp) {
66118+ if (nls == tmp) {
66119 spin_unlock(&nls_lock);
66120 return -EBUSY;
66121 }
66122- tmp = &(*tmp)->next;
66123+ tmp = tmp->next;
66124 }
66125- nls->next = tables;
66126+ pax_open_kernel();
66127+ *(struct nls_table **)&nls->next = tables;
66128+ pax_close_kernel();
66129 tables = nls;
66130 spin_unlock(&nls_lock);
66131 return 0;
66132@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
66133
66134 int unregister_nls(struct nls_table * nls)
66135 {
66136- struct nls_table ** tmp = &tables;
66137+ struct nls_table * const * tmp = &tables;
66138
66139 spin_lock(&nls_lock);
66140 while (*tmp) {
66141 if (nls == *tmp) {
66142- *tmp = nls->next;
66143+ pax_open_kernel();
66144+ *(struct nls_table **)tmp = nls->next;
66145+ pax_close_kernel();
66146 spin_unlock(&nls_lock);
66147 return 0;
66148 }
66149@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
66150 return -EINVAL;
66151 }
66152
66153-static struct nls_table *find_nls(char *charset)
66154+static struct nls_table *find_nls(const char *charset)
66155 {
66156 struct nls_table *nls;
66157 spin_lock(&nls_lock);
66158@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
66159 return nls;
66160 }
66161
66162-struct nls_table *load_nls(char *charset)
66163+struct nls_table *load_nls(const char *charset)
66164 {
66165 return try_then_request_module(find_nls(charset), "nls_%s", charset);
66166 }
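
Because constification makes struct nls_table instances read-only, __register_nls() and unregister_nls() can no longer assign nls->owner and nls->next directly; pax_open_kernel()/pax_close_kernel() (defined elsewhere in this patch) briefly lift write protection around the one sanctioned write. A userspace analogue of that write-window pattern, using mprotect() on an explicitly protected page:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *data = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (data == MAP_FAILED)
        return 1;

    strcpy(data, "initial");
    mprotect(data, pagesz, PROT_READ);              /* now read-only, like .rodata */

    mprotect(data, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue  */
    strcpy(data, "updated");                        /* the one sanctioned write    */
    mprotect(data, pagesz, PROT_READ);              /* pax_close_kernel() analogue */

    printf("%s\n", data);
    return 0;
}
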
66167diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
66168index 162b3f1..6076a7c 100644
66169--- a/fs/nls/nls_euc-jp.c
66170+++ b/fs/nls/nls_euc-jp.c
66171@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
66172 p_nls = load_nls("cp932");
66173
66174 if (p_nls) {
66175- table.charset2upper = p_nls->charset2upper;
66176- table.charset2lower = p_nls->charset2lower;
66177+ pax_open_kernel();
66178+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66179+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66180+ pax_close_kernel();
66181 return register_nls(&table);
66182 }
66183
66184diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
66185index a80a741..7b96e1b 100644
66186--- a/fs/nls/nls_koi8-ru.c
66187+++ b/fs/nls/nls_koi8-ru.c
66188@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
66189 p_nls = load_nls("koi8-u");
66190
66191 if (p_nls) {
66192- table.charset2upper = p_nls->charset2upper;
66193- table.charset2lower = p_nls->charset2lower;
66194+ pax_open_kernel();
66195+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66196+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66197+ pax_close_kernel();
66198 return register_nls(&table);
66199 }
66200
66201diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
66202index cf27550..6c70f29d 100644
66203--- a/fs/notify/fanotify/fanotify_user.c
66204+++ b/fs/notify/fanotify/fanotify_user.c
66205@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
66206
66207 fd = fanotify_event_metadata.fd;
66208 ret = -EFAULT;
66209- if (copy_to_user(buf, &fanotify_event_metadata,
66210- fanotify_event_metadata.event_len))
66211+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
66212+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
66213 goto out_close_fd;
66214
66215 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
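
copy_event_to_user() previously trusted the event_len field read from the event itself; the added test caps it at the size of fanotify_event_metadata so an oversized length cannot copy kernel memory beyond the structure. The general rule — validate a producer-supplied length against the object actually being copied — in miniature (the struct and error value are illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct metadata {
    uint32_t event_len;   /* length claimed by the producer */
    uint32_t fd;
};

static long copy_event(char *ubuf, const struct metadata *m)
{
    /* reject lengths larger than the object we are copying from */
    if (m->event_len > sizeof(*m))
        return -14;  /* -EFAULT */
    memcpy(ubuf, m, m->event_len);
    return m->event_len;
}

int main(void)
{
    struct metadata m = { .event_len = 4096, .fd = 3 };
    char buf[64];
    printf("%ld\n", copy_event(buf, &m));  /* -14: oversized length rejected */
    m.event_len = sizeof(m);
    printf("%ld\n", copy_event(buf, &m));  /* 8: sane length copied */
    return 0;
}
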
66216diff --git a/fs/notify/notification.c b/fs/notify/notification.c
66217index a95d8e0..a91a5fd 100644
66218--- a/fs/notify/notification.c
66219+++ b/fs/notify/notification.c
66220@@ -48,7 +48,7 @@
66221 #include <linux/fsnotify_backend.h>
66222 #include "fsnotify.h"
66223
66224-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66225+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66226
66227 /**
66228 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
66229@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66230 */
66231 u32 fsnotify_get_cookie(void)
66232 {
66233- return atomic_inc_return(&fsnotify_sync_cookie);
66234+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
66235 }
66236 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
66237
66238diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
66239index 9e38daf..5727cae 100644
66240--- a/fs/ntfs/dir.c
66241+++ b/fs/ntfs/dir.c
66242@@ -1310,7 +1310,7 @@ find_next_index_buffer:
66243 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
66244 ~(s64)(ndir->itype.index.block_size - 1)));
66245 /* Bounds checks. */
66246- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66247+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66248 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
66249 "inode 0x%lx or driver bug.", vdir->i_ino);
66250 goto err_out;
66251diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
66252index 1da9b2d..9cca092a 100644
66253--- a/fs/ntfs/file.c
66254+++ b/fs/ntfs/file.c
66255@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
66256 char *addr;
66257 size_t total = 0;
66258 unsigned len;
66259- int left;
66260+ unsigned left;
66261
66262 do {
66263 len = PAGE_CACHE_SIZE - ofs;
66264diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
66265index 9e1e112..241a52a 100644
66266--- a/fs/ntfs/super.c
66267+++ b/fs/ntfs/super.c
66268@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66269 if (!silent)
66270 ntfs_error(sb, "Primary boot sector is invalid.");
66271 } else if (!silent)
66272- ntfs_error(sb, read_err_str, "primary");
66273+ ntfs_error(sb, read_err_str, "%s", "primary");
66274 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
66275 if (bh_primary)
66276 brelse(bh_primary);
66277@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66278 goto hotfix_primary_boot_sector;
66279 brelse(bh_backup);
66280 } else if (!silent)
66281- ntfs_error(sb, read_err_str, "backup");
66282+ ntfs_error(sb, read_err_str, "%s", "backup");
66283 /* Try to read NT3.51- backup boot sector. */
66284 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
66285 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
66286@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66287 "sector.");
66288 brelse(bh_backup);
66289 } else if (!silent)
66290- ntfs_error(sb, read_err_str, "backup");
66291+ ntfs_error(sb, read_err_str, "%s", "backup");
66292 /* We failed. Cleanup and return. */
66293 if (bh_primary)
66294 brelse(bh_primary);
66295diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
66296index 0440134..d52c93a 100644
66297--- a/fs/ocfs2/localalloc.c
66298+++ b/fs/ocfs2/localalloc.c
66299@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
66300 goto bail;
66301 }
66302
66303- atomic_inc(&osb->alloc_stats.moves);
66304+ atomic_inc_unchecked(&osb->alloc_stats.moves);
66305
66306 bail:
66307 if (handle)
66308diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
66309index 460c6c3..b4ef513 100644
66310--- a/fs/ocfs2/ocfs2.h
66311+++ b/fs/ocfs2/ocfs2.h
66312@@ -247,11 +247,11 @@ enum ocfs2_vol_state
66313
66314 struct ocfs2_alloc_stats
66315 {
66316- atomic_t moves;
66317- atomic_t local_data;
66318- atomic_t bitmap_data;
66319- atomic_t bg_allocs;
66320- atomic_t bg_extends;
66321+ atomic_unchecked_t moves;
66322+ atomic_unchecked_t local_data;
66323+ atomic_unchecked_t bitmap_data;
66324+ atomic_unchecked_t bg_allocs;
66325+ atomic_unchecked_t bg_extends;
66326 };
66327
66328 enum ocfs2_local_alloc_state
66329diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
66330index ee541f9..df3a500 100644
66331--- a/fs/ocfs2/refcounttree.c
66332+++ b/fs/ocfs2/refcounttree.c
66333@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
66334 error = posix_acl_create(dir, &mode, &default_acl, &acl);
66335 if (error) {
66336 mlog_errno(error);
66337- goto out;
66338+ return error;
66339 }
66340
66341 error = ocfs2_create_inode_in_orphan(dir, mode,
66342diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
66343index 0cb889a..6a26b24 100644
66344--- a/fs/ocfs2/suballoc.c
66345+++ b/fs/ocfs2/suballoc.c
66346@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
66347 mlog_errno(status);
66348 goto bail;
66349 }
66350- atomic_inc(&osb->alloc_stats.bg_extends);
66351+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
66352
66353 /* You should never ask for this much metadata */
66354 BUG_ON(bits_wanted >
66355@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
66356 mlog_errno(status);
66357 goto bail;
66358 }
66359- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66360+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66361
66362 *suballoc_loc = res.sr_bg_blkno;
66363 *suballoc_bit_start = res.sr_bit_offset;
66364@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66365 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
66366 res->sr_bits);
66367
66368- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66369+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66370
66371 BUG_ON(res->sr_bits != 1);
66372
66373@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
66374 mlog_errno(status);
66375 goto bail;
66376 }
66377- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66378+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66379
66380 BUG_ON(res.sr_bits != 1);
66381
66382@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66383 cluster_start,
66384 num_clusters);
66385 if (!status)
66386- atomic_inc(&osb->alloc_stats.local_data);
66387+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
66388 } else {
66389 if (min_clusters > (osb->bitmap_cpg - 1)) {
66390 /* The only paths asking for contiguousness
66391@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66392 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
66393 res.sr_bg_blkno,
66394 res.sr_bit_offset);
66395- atomic_inc(&osb->alloc_stats.bitmap_data);
66396+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
66397 *num_clusters = res.sr_bits;
66398 }
66399 }
66400diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
66401index 2667518..24bcf79 100644
66402--- a/fs/ocfs2/super.c
66403+++ b/fs/ocfs2/super.c
66404@@ -308,11 +308,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
66405 "%10s => GlobalAllocs: %d LocalAllocs: %d "
66406 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
66407 "Stats",
66408- atomic_read(&osb->alloc_stats.bitmap_data),
66409- atomic_read(&osb->alloc_stats.local_data),
66410- atomic_read(&osb->alloc_stats.bg_allocs),
66411- atomic_read(&osb->alloc_stats.moves),
66412- atomic_read(&osb->alloc_stats.bg_extends));
66413+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
66414+ atomic_read_unchecked(&osb->alloc_stats.local_data),
66415+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
66416+ atomic_read_unchecked(&osb->alloc_stats.moves),
66417+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
66418
66419 out += snprintf(buf + out, len - out,
66420 "%10s => State: %u Descriptor: %llu Size: %u bits "
66421@@ -2093,11 +2093,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66422
66423 mutex_init(&osb->system_file_mutex);
66424
66425- atomic_set(&osb->alloc_stats.moves, 0);
66426- atomic_set(&osb->alloc_stats.local_data, 0);
66427- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66428- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66429- atomic_set(&osb->alloc_stats.bg_extends, 0);
66430+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66431+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66432+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66433+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66434+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66435
66436 /* Copy the blockcheck stats from the superblock probe */
66437 osb->osb_ecc_stats = *stats;
66438diff --git a/fs/open.c b/fs/open.c
66439index 44a3be1..5e97aa1 100644
66440--- a/fs/open.c
66441+++ b/fs/open.c
66442@@ -32,6 +32,8 @@
66443 #include <linux/dnotify.h>
66444 #include <linux/compat.h>
66445
66446+#define CREATE_TRACE_POINTS
66447+#include <trace/events/fs.h>
66448 #include "internal.h"
66449
66450 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66451@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66452 error = locks_verify_truncate(inode, NULL, length);
66453 if (!error)
66454 error = security_path_truncate(path);
66455+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66456+ error = -EACCES;
66457 if (!error)
66458 error = do_truncate(path->dentry, length, 0, NULL);
66459
66460@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66461 error = locks_verify_truncate(inode, f.file, length);
66462 if (!error)
66463 error = security_path_truncate(&f.file->f_path);
66464+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66465+ error = -EACCES;
66466 if (!error)
66467 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66468 sb_end_write(inode->i_sb);
66469@@ -392,6 +398,9 @@ retry:
66470 if (__mnt_is_readonly(path.mnt))
66471 res = -EROFS;
66472
66473+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66474+ res = -EACCES;
66475+
66476 out_path_release:
66477 path_put(&path);
66478 if (retry_estale(res, lookup_flags)) {
66479@@ -423,6 +432,8 @@ retry:
66480 if (error)
66481 goto dput_and_out;
66482
66483+ gr_log_chdir(path.dentry, path.mnt);
66484+
66485 set_fs_pwd(current->fs, &path);
66486
66487 dput_and_out:
66488@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66489 goto out_putf;
66490
66491 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66492+
66493+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66494+ error = -EPERM;
66495+
66496+ if (!error)
66497+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66498+
66499 if (!error)
66500 set_fs_pwd(current->fs, &f.file->f_path);
66501 out_putf:
66502@@ -481,7 +499,13 @@ retry:
66503 if (error)
66504 goto dput_and_out;
66505
66506+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66507+ goto dput_and_out;
66508+
66509 set_fs_root(current->fs, &path);
66510+
66511+ gr_handle_chroot_chdir(&path);
66512+
66513 error = 0;
66514 dput_and_out:
66515 path_put(&path);
66516@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
66517 return error;
66518 retry_deleg:
66519 mutex_lock(&inode->i_mutex);
66520+
66521+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66522+ error = -EACCES;
66523+ goto out_unlock;
66524+ }
66525+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66526+ error = -EACCES;
66527+ goto out_unlock;
66528+ }
66529+
66530 error = security_path_chmod(path, mode);
66531 if (error)
66532 goto out_unlock;
66533@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66534 uid = make_kuid(current_user_ns(), user);
66535 gid = make_kgid(current_user_ns(), group);
66536
66537+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66538+ return -EACCES;
66539+
66540 retry_deleg:
66541 newattrs.ia_valid = ATTR_CTIME;
66542 if (user != (uid_t) -1) {
66543@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66544 } else {
66545 fsnotify_open(f);
66546 fd_install(fd, f);
66547+ trace_do_sys_open(tmp->name, flags, mode);
66548 }
66549 }
66550 putname(tmp);
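
The fs/open.c hunks show the common shape of the grsecurity hooks added throughout this section: after the existing DAC/LSM checks pass, a gr_acl_handle_*() or gr_handle_*() predicate consults policy, and the operation fails with -EACCES or -EPERM when it denies. A toy model of stacking such a gate onto an existing check chain (security_check() and rbac_allows() are stand-ins, and the policy is deliberately trivial):

#include <stdio.h>

#define EACCES 13

/* stand-ins for the existing permission check and the RBAC policy lookup */
static int security_check(const char *path)        { (void)path; return 0; }
static int rbac_allows(const char *path, int mode) { (void)path; return mode == 0; }

static int do_access(const char *path, int mode)
{
    int error = security_check(path);
    if (!error && !rbac_allows(path, mode))  /* extra gate, as in the hunks above */
        error = -EACCES;
    return error;
}

int main(void)
{
    printf("%d\n", do_access("/etc/shadow", 4));  /* -13: policy denies */
    printf("%d\n", do_access("/etc/hosts", 0));   /* 0: allowed */
    return 0;
}
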
66551diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
66552index 5f0d199..13b74b9 100644
66553--- a/fs/overlayfs/super.c
66554+++ b/fs/overlayfs/super.c
66555@@ -172,7 +172,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
66556 {
66557 struct ovl_entry *oe = dentry->d_fsdata;
66558
66559- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
66560+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
66561 }
66562
66563 int ovl_want_write(struct dentry *dentry)
66564@@ -816,8 +816,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
66565
66566 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
66567 {
66568- struct path upperpath = { NULL, NULL };
66569- struct path workpath = { NULL, NULL };
66570+ struct path upperpath = { .dentry = NULL, .mnt = NULL };
66571+ struct path workpath = { .dentry = NULL, .mnt = NULL };
66572 struct dentry *root_dentry;
66573 struct ovl_entry *oe;
66574 struct ovl_fs *ufs;
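
Replacing the positional { NULL, NULL } initializers for struct path with designated initializers matters once the structure-layout randomization elsewhere in this patch can reorder members: designated initializers bind by name, so they stay correct under any layout. For example:

#include <stdio.h>

struct path {
    void *mnt;     /* member order may be randomized by the layout plugin */
    void *dentry;
};

int main(void)
{
    /* binds by name, so reordering mnt/dentry cannot swap the values */
    struct path p = { .dentry = NULL, .mnt = NULL };
    printf("%p %p\n", p.dentry, p.mnt);
    return 0;
}
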
66575diff --git a/fs/pipe.c b/fs/pipe.c
66576index 21981e5..2c0bffb 100644
66577--- a/fs/pipe.c
66578+++ b/fs/pipe.c
66579@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
66580 /*
66581 * Minimum pipe size, as required by POSIX
66582 */
66583-unsigned int pipe_min_size = PAGE_SIZE;
66584+unsigned int pipe_min_size __read_only = PAGE_SIZE;
66585
66586 /*
66587 * We use a start+len construction, which provides full use of the
66588@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66589
66590 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66591 {
66592- if (pipe->files)
66593+ if (atomic_read(&pipe->files))
66594 mutex_lock_nested(&pipe->mutex, subclass);
66595 }
66596
66597@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66598
66599 void pipe_unlock(struct pipe_inode_info *pipe)
66600 {
66601- if (pipe->files)
66602+ if (atomic_read(&pipe->files))
66603 mutex_unlock(&pipe->mutex);
66604 }
66605 EXPORT_SYMBOL(pipe_unlock);
66606@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66607 }
66608 if (bufs) /* More to do? */
66609 continue;
66610- if (!pipe->writers)
66611+ if (!atomic_read(&pipe->writers))
66612 break;
66613- if (!pipe->waiting_writers) {
66614+ if (!atomic_read(&pipe->waiting_writers)) {
66615 /* syscall merging: Usually we must not sleep
66616 * if O_NONBLOCK is set, or if we got some data.
66617 * But if a writer sleeps in kernel space, then
66618@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66619
66620 __pipe_lock(pipe);
66621
66622- if (!pipe->readers) {
66623+ if (!atomic_read(&pipe->readers)) {
66624 send_sig(SIGPIPE, current, 0);
66625 ret = -EPIPE;
66626 goto out;
66627@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66628 for (;;) {
66629 int bufs;
66630
66631- if (!pipe->readers) {
66632+ if (!atomic_read(&pipe->readers)) {
66633 send_sig(SIGPIPE, current, 0);
66634 if (!ret)
66635 ret = -EPIPE;
66636@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66637 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66638 do_wakeup = 0;
66639 }
66640- pipe->waiting_writers++;
66641+ atomic_inc(&pipe->waiting_writers);
66642 pipe_wait(pipe);
66643- pipe->waiting_writers--;
66644+ atomic_dec(&pipe->waiting_writers);
66645 }
66646 out:
66647 __pipe_unlock(pipe);
66648@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66649 mask = 0;
66650 if (filp->f_mode & FMODE_READ) {
66651 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66652- if (!pipe->writers && filp->f_version != pipe->w_counter)
66653+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66654 mask |= POLLHUP;
66655 }
66656
66657@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66658 * Most Unices do not set POLLERR for FIFOs but on Linux they
66659 * behave exactly like pipes for poll().
66660 */
66661- if (!pipe->readers)
66662+ if (!atomic_read(&pipe->readers))
66663 mask |= POLLERR;
66664 }
66665
66666@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66667 int kill = 0;
66668
66669 spin_lock(&inode->i_lock);
66670- if (!--pipe->files) {
66671+ if (atomic_dec_and_test(&pipe->files)) {
66672 inode->i_pipe = NULL;
66673 kill = 1;
66674 }
66675@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66676
66677 __pipe_lock(pipe);
66678 if (file->f_mode & FMODE_READ)
66679- pipe->readers--;
66680+ atomic_dec(&pipe->readers);
66681 if (file->f_mode & FMODE_WRITE)
66682- pipe->writers--;
66683+ atomic_dec(&pipe->writers);
66684
66685- if (pipe->readers || pipe->writers) {
66686+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66687 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66688 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66689 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66690@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66691 kfree(pipe);
66692 }
66693
66694-static struct vfsmount *pipe_mnt __read_mostly;
66695+struct vfsmount *pipe_mnt __read_mostly;
66696
66697 /*
66698 * pipefs_dname() is called from d_path().
66699@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
66700 goto fail_iput;
66701
66702 inode->i_pipe = pipe;
66703- pipe->files = 2;
66704- pipe->readers = pipe->writers = 1;
66705+ atomic_set(&pipe->files, 2);
66706+ atomic_set(&pipe->readers, 1);
66707+ atomic_set(&pipe->writers, 1);
66708 inode->i_fop = &pipefifo_fops;
66709
66710 /*
66711@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
66712 spin_lock(&inode->i_lock);
66713 if (inode->i_pipe) {
66714 pipe = inode->i_pipe;
66715- pipe->files++;
66716+ atomic_inc(&pipe->files);
66717 spin_unlock(&inode->i_lock);
66718 } else {
66719 spin_unlock(&inode->i_lock);
66720 pipe = alloc_pipe_info();
66721 if (!pipe)
66722 return -ENOMEM;
66723- pipe->files = 1;
66724+ atomic_set(&pipe->files, 1);
66725 spin_lock(&inode->i_lock);
66726 if (unlikely(inode->i_pipe)) {
66727- inode->i_pipe->files++;
66728+ atomic_inc(&inode->i_pipe->files);
66729 spin_unlock(&inode->i_lock);
66730 free_pipe_info(pipe);
66731 pipe = inode->i_pipe;
66732@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
66733 * opened, even when there is no process writing the FIFO.
66734 */
66735 pipe->r_counter++;
66736- if (pipe->readers++ == 0)
66737+ if (atomic_inc_return(&pipe->readers) == 1)
66738 wake_up_partner(pipe);
66739
66740- if (!is_pipe && !pipe->writers) {
66741+ if (!is_pipe && !atomic_read(&pipe->writers)) {
66742 if ((filp->f_flags & O_NONBLOCK)) {
66743 /* suppress POLLHUP until we have
66744 * seen a writer */
66745@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
66746 * errno=ENXIO when there is no process reading the FIFO.
66747 */
66748 ret = -ENXIO;
66749- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
66750+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
66751 goto err;
66752
66753 pipe->w_counter++;
66754- if (!pipe->writers++)
66755+ if (atomic_inc_return(&pipe->writers) == 1)
66756 wake_up_partner(pipe);
66757
66758- if (!is_pipe && !pipe->readers) {
66759+ if (!is_pipe && !atomic_read(&pipe->readers)) {
66760 if (wait_for_partner(pipe, &pipe->r_counter))
66761 goto err_wr;
66762 }
66763@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
66764 * the process can at least talk to itself.
66765 */
66766
66767- pipe->readers++;
66768- pipe->writers++;
66769+ atomic_inc(&pipe->readers);
66770+ atomic_inc(&pipe->writers);
66771 pipe->r_counter++;
66772 pipe->w_counter++;
66773- if (pipe->readers == 1 || pipe->writers == 1)
66774+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
66775 wake_up_partner(pipe);
66776 break;
66777
66778@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
66779 return 0;
66780
66781 err_rd:
66782- if (!--pipe->readers)
66783+ if (atomic_dec_and_test(&pipe->readers))
66784 wake_up_interruptible(&pipe->wait);
66785 ret = -ERESTARTSYS;
66786 goto err;
66787
66788 err_wr:
66789- if (!--pipe->writers)
66790+ if (atomic_dec_and_test(&pipe->writers))
66791 wake_up_interruptible(&pipe->wait);
66792 ret = -ERESTARTSYS;
66793 goto err;
66794@@ -1010,7 +1011,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
66795 * Currently we rely on the pipe array holding a power-of-2 number
66796 * of pages.
66797 */
66798-static inline unsigned int round_pipe_size(unsigned int size)
66799+static inline unsigned long round_pipe_size(unsigned long size)
66800 {
66801 unsigned long nr_pages;
66802
66803@@ -1058,13 +1059,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
66804
66805 switch (cmd) {
66806 case F_SETPIPE_SZ: {
66807- unsigned int size, nr_pages;
66808+ unsigned long size, nr_pages;
66809+
66810+ ret = -EINVAL;
66811+ if (arg < pipe_min_size)
66812+ goto out;
66813
66814 size = round_pipe_size(arg);
66815 nr_pages = size >> PAGE_SHIFT;
66816
66817- ret = -EINVAL;
66818- if (!nr_pages)
66819+ if (size < pipe_min_size)
66820 goto out;
66821
66822 if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
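
fs/pipe.c converts the bare int readers/writers/files counters to atomic_t, both for the REFCOUNT overflow checking and to make the idioms explicit: if (!--x) becomes atomic_dec_and_test(), and if (!x++) becomes atomic_inc_return(...) == 1. The same mapping in portable C11:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int readers;

static int open_reader(void)
{
    /* was: if (!pipe->readers++) -> first reader wakes the partner */
    return atomic_fetch_add(&readers, 1) + 1 == 1;
}

static int close_reader(void)
{
    /* was: if (!--pipe->readers) -> last reader wakes waiters */
    return atomic_fetch_sub(&readers, 1) - 1 == 0;
}

int main(void)
{
    printf("first=%d\n", open_reader());   /* first=1 */
    printf("first=%d\n", open_reader());   /* first=0 */
    close_reader();
    printf("last=%d\n", close_reader());   /* last=1 */
    return 0;
}
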
66823diff --git a/fs/posix_acl.c b/fs/posix_acl.c
66824index 3a48bb7..403067b 100644
66825--- a/fs/posix_acl.c
66826+++ b/fs/posix_acl.c
66827@@ -20,6 +20,7 @@
66828 #include <linux/xattr.h>
66829 #include <linux/export.h>
66830 #include <linux/user_namespace.h>
66831+#include <linux/grsecurity.h>
66832
66833 struct posix_acl **acl_by_type(struct inode *inode, int type)
66834 {
66835@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
66836 }
66837 }
66838 if (mode_p)
66839- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66840+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66841 return not_equiv;
66842 }
66843 EXPORT_SYMBOL(posix_acl_equiv_mode);
66844@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
66845 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
66846 }
66847
66848- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66849+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66850 return not_equiv;
66851 }
66852
66853@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
66854 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
66855 int err = -ENOMEM;
66856 if (clone) {
66857+ *mode_p &= ~gr_acl_umask();
66858+
66859 err = posix_acl_create_masq(clone, mode_p);
66860 if (err < 0) {
66861 posix_acl_release(clone);
66862@@ -663,11 +666,12 @@ struct posix_acl *
66863 posix_acl_from_xattr(struct user_namespace *user_ns,
66864 const void *value, size_t size)
66865 {
66866- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
66867- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
66868+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
66869+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
66870 int count;
66871 struct posix_acl *acl;
66872 struct posix_acl_entry *acl_e;
66873+ umode_t umask = gr_acl_umask();
66874
66875 if (!value)
66876 return NULL;
66877@@ -693,12 +697,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66878
66879 switch(acl_e->e_tag) {
66880 case ACL_USER_OBJ:
66881+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66882+ break;
66883 case ACL_GROUP_OBJ:
66884 case ACL_MASK:
66885+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66886+ break;
66887 case ACL_OTHER:
66888+ acl_e->e_perm &= ~(umask & S_IRWXO);
66889 break;
66890
66891 case ACL_USER:
66892+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66893 acl_e->e_uid =
66894 make_kuid(user_ns,
66895 le32_to_cpu(entry->e_id));
66896@@ -706,6 +716,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66897 goto fail;
66898 break;
66899 case ACL_GROUP:
66900+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66901 acl_e->e_gid =
66902 make_kgid(user_ns,
66903 le32_to_cpu(entry->e_id));
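
The posix_acl.c hunks mask ACL permission bits with a policy umask from gr_acl_umask(): owner-class entries are masked by the umask's owner triad shifted down 6 bits, group-class entries by the group triad shifted down 3, and ACL_OTHER by the world triad directly. The bit arithmetic, worked for umask 0027:

#include <stdio.h>

#define S_IRWXU 0700
#define S_IRWXG 0070
#define S_IRWXO 0007

int main(void)
{
    unsigned umask = 0027;          /* deny group-write and all of other */
    unsigned user_perm = 07, group_perm = 07, other_perm = 07;  /* rwx each */

    user_perm  &= ~((umask & S_IRWXU) >> 6);  /* 07 & ~00 = 07 */
    group_perm &= ~((umask & S_IRWXG) >> 3);  /* 07 & ~02 = 05 */
    other_perm &= ~(umask & S_IRWXO);         /* 07 & ~07 = 00 */

    printf("user=%o group=%o other=%o\n", user_perm, group_perm, other_perm);
    return 0;
}
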
66904diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
66905index 2183fcf..3c32a98 100644
66906--- a/fs/proc/Kconfig
66907+++ b/fs/proc/Kconfig
66908@@ -30,7 +30,7 @@ config PROC_FS
66909
66910 config PROC_KCORE
66911 bool "/proc/kcore support" if !ARM
66912- depends on PROC_FS && MMU
66913+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
66914 help
66915 Provides a virtual ELF core file of the live kernel. This can
66916 be read with gdb and other ELF tools. No modifications can be
66917@@ -38,8 +38,8 @@ config PROC_KCORE
66918
66919 config PROC_VMCORE
66920 bool "/proc/vmcore support"
66921- depends on PROC_FS && CRASH_DUMP
66922- default y
66923+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
66924+ default n
66925 help
66926 Exports the dump image of crashed kernel in ELF format.
66927
66928@@ -63,8 +63,8 @@ config PROC_SYSCTL
66929 limited in memory.
66930
66931 config PROC_PAGE_MONITOR
66932- default y
66933- depends on PROC_FS && MMU
66934+ default n
66935+ depends on PROC_FS && MMU && !GRKERNSEC
66936 bool "Enable /proc page monitoring" if EXPERT
66937 help
66938 Various /proc files exist to monitor process memory utilization:
66939diff --git a/fs/proc/array.c b/fs/proc/array.c
66940index 1295a00..4c91a6b 100644
66941--- a/fs/proc/array.c
66942+++ b/fs/proc/array.c
66943@@ -60,6 +60,7 @@
66944 #include <linux/tty.h>
66945 #include <linux/string.h>
66946 #include <linux/mman.h>
66947+#include <linux/grsecurity.h>
66948 #include <linux/proc_fs.h>
66949 #include <linux/ioport.h>
66950 #include <linux/uaccess.h>
66951@@ -322,6 +323,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66952 cpumask_pr_args(&task->cpus_allowed));
66953 }
66954
66955+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66956+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66957+{
66958+ if (p->mm)
66959+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66960+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66961+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66962+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66963+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66964+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66965+ else
66966+ seq_printf(m, "PaX:\t-----\n");
66967+}
66968+#endif
66969+
66970 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66971 struct pid *pid, struct task_struct *task)
66972 {
66973@@ -340,9 +356,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66974 task_cpus_allowed(m, task);
66975 cpuset_task_status_allowed(m, task);
66976 task_context_switch_counts(m, task);
66977+
66978+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66979+ task_pax(m, task);
66980+#endif
66981+
66982+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66983+ task_grsec_rbac(m, task);
66984+#endif
66985+
66986 return 0;
66987 }
66988
66989+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66990+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66991+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66992+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66993+#endif
66994+
66995 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66996 struct pid *pid, struct task_struct *task, int whole)
66997 {
66998@@ -364,6 +395,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66999 char tcomm[sizeof(task->comm)];
67000 unsigned long flags;
67001
67002+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67003+ if (current->exec_id != m->exec_id) {
67004+ gr_log_badprocpid("stat");
67005+ return 0;
67006+ }
67007+#endif
67008+
67009 state = *get_task_state(task);
67010 vsize = eip = esp = 0;
67011 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67012@@ -434,6 +472,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67013 gtime = task_gtime(task);
67014 }
67015
67016+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67017+ if (PAX_RAND_FLAGS(mm)) {
67018+ eip = 0;
67019+ esp = 0;
67020+ wchan = 0;
67021+ }
67022+#endif
67023+#ifdef CONFIG_GRKERNSEC_HIDESYM
67024+ wchan = 0;
67025+ eip = 0;
67026+ esp = 0;
67027+#endif
67028+
67029 /* scale priority and nice values from timeslices to -20..20 */
67030 /* to make it look like a "normal" Unix priority/nice value */
67031 priority = task_prio(task);
67032@@ -465,9 +516,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67033 seq_put_decimal_ull(m, ' ', vsize);
67034 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
67035 seq_put_decimal_ull(m, ' ', rsslim);
67036+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67037+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
67038+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
67039+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
67040+#else
67041 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
67042 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
67043 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
67044+#endif
67045 seq_put_decimal_ull(m, ' ', esp);
67046 seq_put_decimal_ull(m, ' ', eip);
67047 /* The signal information here is obsolete.
67048@@ -489,7 +546,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67049 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
67050 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
67051
67052- if (mm && permitted) {
67053+ if (mm && permitted
67054+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67055+ && !PAX_RAND_FLAGS(mm)
67056+#endif
67057+ ) {
67058 seq_put_decimal_ull(m, ' ', mm->start_data);
67059 seq_put_decimal_ull(m, ' ', mm->end_data);
67060 seq_put_decimal_ull(m, ' ', mm->start_brk);
67061@@ -527,8 +588,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67062 struct pid *pid, struct task_struct *task)
67063 {
67064 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
67065- struct mm_struct *mm = get_task_mm(task);
67066+ struct mm_struct *mm;
67067
67068+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67069+ if (current->exec_id != m->exec_id) {
67070+ gr_log_badprocpid("statm");
67071+ return 0;
67072+ }
67073+#endif
67074+ mm = get_task_mm(task);
67075 if (mm) {
67076 size = task_statm(mm, &shared, &text, &data, &resident);
67077 mmput(mm);
67078@@ -551,6 +619,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67079 return 0;
67080 }
67081
67082+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67083+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
67084+{
67085+ unsigned long flags;
67086+ u32 curr_ip = 0;
67087+
67088+ if (lock_task_sighand(task, &flags)) {
67089+ curr_ip = task->signal->curr_ip;
67090+ unlock_task_sighand(task, &flags);
67091+ }
67092+ return seq_printf(m, "%pI4\n", &curr_ip);
67093+}
67094+#endif
67095+
67096 #ifdef CONFIG_CHECKPOINT_RESTORE
67097 static struct pid *
67098 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
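
Two guards recur in the /proc hunks above: PAX_RAND_FLAGS(mm) suppresses address values for another process's randomized mappings, and the exec_id comparison detects a /proc file that was opened before the reading process exec'd (for instance a descriptor inherited across a privilege transition), returning empty output instead of fresh data. The exec_id check is just a per-exec generation compare; schematically (types are illustrative):

#include <stdio.h>

struct task    { unsigned long long exec_id; };
struct seqfile { unsigned long long exec_id; };   /* snapshot taken at open() */

static int stat_show(const struct task *cur, const struct seqfile *m)
{
    if (cur->exec_id != m->exec_id)  /* opener has exec'd since open(): bail */
        return 0;
    printf("...normal /proc output...\n");
    return 1;
}

int main(void)
{
    struct task cur   = { .exec_id = 7 };
    struct seqfile m  = { .exec_id = 7 };
    stat_show(&cur, &m);                  /* prints the normal output */
    cur.exec_id = 8;                      /* execve() bumped the generation */
    printf("%d\n", stat_show(&cur, &m));  /* 0: silently refused */
    return 0;
}
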
67099diff --git a/fs/proc/base.c b/fs/proc/base.c
67100index 3f3d7ae..68de109 100644
67101--- a/fs/proc/base.c
67102+++ b/fs/proc/base.c
67103@@ -113,6 +113,14 @@ struct pid_entry {
67104 union proc_op op;
67105 };
67106
67107+struct getdents_callback {
67108+ struct linux_dirent __user * current_dir;
67109+ struct linux_dirent __user * previous;
67110+ struct file * file;
67111+ int count;
67112+ int error;
67113+};
67114+
67115 #define NOD(NAME, MODE, IOP, FOP, OP) { \
67116 .name = (NAME), \
67117 .len = sizeof(NAME) - 1, \
67118@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
67119 return 0;
67120 }
67121
67122+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67123+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67124+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67125+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67126+#endif
67127+
67128 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
67129 struct pid *pid, struct task_struct *task)
67130 {
67131 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
67132 if (mm && !IS_ERR(mm)) {
67133 unsigned int nwords = 0;
67134+
67135+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67136+ /* allow if we're currently ptracing this task */
67137+ if (PAX_RAND_FLAGS(mm) &&
67138+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
67139+ mmput(mm);
67140+ return 0;
67141+ }
67142+#endif
67143+
67144 do {
67145 nwords += 2;
67146 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
67147@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
67148 }
67149
67150
67151-#ifdef CONFIG_KALLSYMS
67152+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67153 /*
67154 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
67155 * Returns the resolved symbol. If that fails, simply return the address.
67156@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
67157 mutex_unlock(&task->signal->cred_guard_mutex);
67158 }
67159
67160-#ifdef CONFIG_STACKTRACE
67161+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67162
67163 #define MAX_STACK_TRACE_DEPTH 64
67164
67165@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
67166 return 0;
67167 }
67168
67169-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67170+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67171 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
67172 struct pid *pid, struct task_struct *task)
67173 {
67174@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
67175 /************************************************************************/
67176
67177 /* permission checks */
67178-static int proc_fd_access_allowed(struct inode *inode)
67179+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
67180 {
67181 struct task_struct *task;
67182 int allowed = 0;
67183@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
67184 */
67185 task = get_proc_task(inode);
67186 if (task) {
67187- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67188+ if (log)
67189+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67190+ else
67191+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67192 put_task_struct(task);
67193 }
67194 return allowed;
67195@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
67196 struct task_struct *task,
67197 int hide_pid_min)
67198 {
67199+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67200+ return false;
67201+
67202+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67203+ rcu_read_lock();
67204+ {
67205+ const struct cred *tmpcred = current_cred();
67206+ const struct cred *cred = __task_cred(task);
67207+
67208+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
67209+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67210+ || in_group_p(grsec_proc_gid)
67211+#endif
67212+ ) {
67213+ rcu_read_unlock();
67214+ return true;
67215+ }
67216+ }
67217+ rcu_read_unlock();
67218+
67219+ if (!pid->hide_pid)
67220+ return false;
67221+#endif
67222+
67223 if (pid->hide_pid < hide_pid_min)
67224 return true;
67225 if (in_group_p(pid->pid_gid))
67226 return true;
67227+
67228 return ptrace_may_access(task, PTRACE_MODE_READ);
67229 }
67230
67231@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
67232 put_task_struct(task);
67233
67234 if (!has_perms) {
67235+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67236+ {
67237+#else
67238 if (pid->hide_pid == 2) {
67239+#endif
67240 /*
67241 * Let's make getdents(), stat(), and open()
67242 * consistent with each other. If a process
67243@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
67244
67245 if (task) {
67246 mm = mm_access(task, mode);
67247+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
67248+ mmput(mm);
67249+ mm = ERR_PTR(-EPERM);
67250+ }
67251 put_task_struct(task);
67252
67253 if (!IS_ERR_OR_NULL(mm)) {
67254@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67255 return PTR_ERR(mm);
67256
67257 file->private_data = mm;
67258+
67259+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67260+ file->f_version = current->exec_id;
67261+#endif
67262+
67263 return 0;
67264 }
67265
67266@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67267 ssize_t copied;
67268 char *page;
67269
67270+#ifdef CONFIG_GRKERNSEC
67271+ if (write)
67272+ return -EPERM;
67273+#endif
67274+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67275+ if (file->f_version != current->exec_id) {
67276+ gr_log_badprocpid("mem");
67277+ return 0;
67278+ }
67279+#endif
67280+
67281 if (!mm)
67282 return 0;
67283
67284@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67285 goto free;
67286
67287 while (count > 0) {
67288- int this_len = min_t(int, count, PAGE_SIZE);
67289+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
67290
67291 if (write && copy_from_user(page, buf, this_len)) {
67292 copied = -EFAULT;
67293@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67294 if (!mm)
67295 return 0;
67296
67297+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67298+ if (file->f_version != current->exec_id) {
67299+ gr_log_badprocpid("environ");
67300+ return 0;
67301+ }
67302+#endif
67303+
67304 page = (char *)__get_free_page(GFP_TEMPORARY);
67305 if (!page)
67306 return -ENOMEM;
67307@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67308 goto free;
67309 while (count > 0) {
67310 size_t this_len, max_len;
67311- int retval;
67312+ ssize_t retval;
67313
67314 if (src >= (mm->env_end - mm->env_start))
67315 break;
67316@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
67317 int error = -EACCES;
67318
67319 /* Are we allowed to snoop on the tasks file descriptors? */
67320- if (!proc_fd_access_allowed(inode))
67321+ if (!proc_fd_access_allowed(inode, 0))
67322 goto out;
67323
67324 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67325@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
67326 struct path path;
67327
67328 /* Are we allowed to snoop on the tasks file descriptors? */
67329- if (!proc_fd_access_allowed(inode))
67330- goto out;
67331+ /* Logging this is needed for learning on chromium to work properly,
67332+ but we don't want to flood the logs from 'ps', which does a readlink
67333+ on /proc/<pid>/fd/2 of each task in the listing, nor do we want 'ps'
67334+ to learn CAP_SYS_PTRACE, as it is not necessary for its basic
67335+ functionality */
67336+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
67337+ if (!proc_fd_access_allowed(inode, 0))
67338+ goto out;
67339+ } else {
67340+ if (!proc_fd_access_allowed(inode, 1))
67341+ goto out;
67342+ }
67343
67344 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67345 if (error)
67346@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
67347 rcu_read_lock();
67348 cred = __task_cred(task);
67349 inode->i_uid = cred->euid;
67350+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67351+ inode->i_gid = grsec_proc_gid;
67352+#else
67353 inode->i_gid = cred->egid;
67354+#endif
67355 rcu_read_unlock();
67356 }
67357 security_task_to_inode(task, inode);
67358@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
67359 return -ENOENT;
67360 }
67361 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67362+#ifdef CONFIG_GRKERNSEC_PROC_USER
67363+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67364+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67365+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67366+#endif
67367 task_dumpable(task)) {
67368 cred = __task_cred(task);
67369 stat->uid = cred->euid;
67370+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67371+ stat->gid = grsec_proc_gid;
67372+#else
67373 stat->gid = cred->egid;
67374+#endif
67375 }
67376 }
67377 rcu_read_unlock();
67378@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
67379
67380 if (task) {
67381 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67382+#ifdef CONFIG_GRKERNSEC_PROC_USER
67383+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67384+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67385+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67386+#endif
67387 task_dumpable(task)) {
67388 rcu_read_lock();
67389 cred = __task_cred(task);
67390 inode->i_uid = cred->euid;
67391+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67392+ inode->i_gid = grsec_proc_gid;
67393+#else
67394 inode->i_gid = cred->egid;
67395+#endif
67396 rcu_read_unlock();
67397 } else {
67398 inode->i_uid = GLOBAL_ROOT_UID;
67399@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
67400 if (!task)
67401 goto out_no_task;
67402
67403+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67404+ goto out;
67405+
67406 /*
67407 * Yes, it does not scale. And it should not. Don't add
67408 * new entries into /proc/<tgid>/ without very good reasons.
67409@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
67410 if (!task)
67411 return -ENOENT;
67412
67413+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67414+ goto out;
67415+
67416 if (!dir_emit_dots(file, ctx))
67417 goto out;
67418
67419@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
67420 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
67421 #endif
67422 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67423-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67424+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67425 ONE("syscall", S_IRUSR, proc_pid_syscall),
67426 #endif
67427 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
67428@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
67429 #ifdef CONFIG_SECURITY
67430 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67431 #endif
67432-#ifdef CONFIG_KALLSYMS
67433+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67434 ONE("wchan", S_IRUGO, proc_pid_wchan),
67435 #endif
67436-#ifdef CONFIG_STACKTRACE
67437+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67438 ONE("stack", S_IRUSR, proc_pid_stack),
67439 #endif
67440 #ifdef CONFIG_SCHEDSTATS
67441@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
67442 #ifdef CONFIG_HARDWALL
67443 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
67444 #endif
67445+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67446+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
67447+#endif
67448 #ifdef CONFIG_USER_NS
67449 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
67450 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
67451@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
67452 if (!inode)
67453 goto out;
67454
67455+#ifdef CONFIG_GRKERNSEC_PROC_USER
67456+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
67457+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67458+ inode->i_gid = grsec_proc_gid;
67459+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
67460+#else
67461 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
67462+#endif
67463 inode->i_op = &proc_tgid_base_inode_operations;
67464 inode->i_fop = &proc_tgid_base_operations;
67465 inode->i_flags|=S_IMMUTABLE;
67466@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
67467 if (!task)
67468 goto out;
67469
67470+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67471+ goto out_put_task;
67472+
67473 result = proc_pid_instantiate(dir, dentry, task, NULL);
67474+out_put_task:
67475 put_task_struct(task);
67476 out:
67477 return ERR_PTR(result);
67478@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
67479 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
67480 #endif
67481 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67482-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67483+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67484 ONE("syscall", S_IRUSR, proc_pid_syscall),
67485 #endif
67486 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
67487@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
67488 #ifdef CONFIG_SECURITY
67489 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67490 #endif
67491-#ifdef CONFIG_KALLSYMS
67492+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67493 ONE("wchan", S_IRUGO, proc_pid_wchan),
67494 #endif
67495-#ifdef CONFIG_STACKTRACE
67496+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67497 ONE("stack", S_IRUSR, proc_pid_stack),
67498 #endif
67499 #ifdef CONFIG_SCHEDSTATS
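The proc_fd_access_allowed() split in the fs/proc/base.c changes above exists
so that readlink() on an fd entry named "2" (which 'ps' performs for every
task it lists) takes the unaudited PTRACE_MODE_NOAUDIT path, while all other
fd links still feed the RBAC learning logs. The consumer-side view, as a
runnable userspace sketch; the target PID is a placeholder:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char path[64], target[4096];
	ssize_t n;

	/* fd 2 is the entry the patch exempts from audit logging */
	snprintf(path, sizeof(path), "/proc/%d/fd/2", 1); /* PID 1: placeholder */
	n = readlink(path, target, sizeof(target) - 1);
	if (n < 0) {
		/* EACCES here is the hardened kernel denying the snoop */
		fprintf(stderr, "readlink %s: %s\n", path, strerror(errno));
		return 1;
	}
	target[n] = '\0';
	printf("%s -> %s\n", path, target);
	return 0;
}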
67500diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67501index cbd82df..c0407d2 100644
67502--- a/fs/proc/cmdline.c
67503+++ b/fs/proc/cmdline.c
67504@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67505
67506 static int __init proc_cmdline_init(void)
67507 {
67508+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67509+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67510+#else
67511 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67512+#endif
67513 return 0;
67514 }
67515 fs_initcall(proc_cmdline_init);
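proc_create_grsec() itself is defined elsewhere in the patch and is not shown
in this excerpt. Below is a minimal sketch of the pattern it stands for
(create the entry root-only, optionally widened to the configured proc
group), built from stock procfs calls; the wrapper name and body are
assumptions here, only grsec_proc_gid comes from the patch:

#include <linux/proc_fs.h>
#include <linux/uidgid.h>

extern kgid_t grsec_proc_gid;	/* provided by the grsecurity patch */

static struct proc_dir_entry *proc_create_restricted(const char *name,
		struct proc_dir_entry *parent,
		const struct file_operations *fops)
{
	struct proc_dir_entry *pde;

#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
	/* readable by root and by members of the configured proc group */
	pde = proc_create(name, S_IRUSR | S_IRGRP, parent, fops);
	if (pde)
		proc_set_user(pde, GLOBAL_ROOT_UID, grsec_proc_gid);
#else
	pde = proc_create(name, S_IRUSR, parent, fops);
#endif
	return pde;
}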
67516diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67517index 50493ed..248166b 100644
67518--- a/fs/proc/devices.c
67519+++ b/fs/proc/devices.c
67520@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67521
67522 static int __init proc_devices_init(void)
67523 {
67524+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67525+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67526+#else
67527 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67528+#endif
67529 return 0;
67530 }
67531 fs_initcall(proc_devices_init);
67532diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67533index 8e5ad83..1f07a8c 100644
67534--- a/fs/proc/fd.c
67535+++ b/fs/proc/fd.c
67536@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67537 if (!task)
67538 return -ENOENT;
67539
67540- files = get_files_struct(task);
67541+ if (!gr_acl_handle_procpidmem(task))
67542+ files = get_files_struct(task);
67543 put_task_struct(task);
67544
67545 if (files) {
67546@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67547 */
67548 int proc_fd_permission(struct inode *inode, int mask)
67549 {
67550+ struct task_struct *task;
67551 int rv = generic_permission(inode, mask);
67552- if (rv == 0)
67553- return 0;
67554+
67555 if (task_tgid(current) == proc_pid(inode))
67556 rv = 0;
67557+
67558+ task = get_proc_task(inode);
67559+ if (task == NULL)
67560+ return rv;
67561+
67562+ if (gr_acl_handle_procpidmem(task))
67563+ rv = -EACCES;
67564+
67565+ put_task_struct(task);
67566+
67567 return rv;
67568 }
67569
67570diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67571index be65b20..2998ba8 100644
67572--- a/fs/proc/generic.c
67573+++ b/fs/proc/generic.c
67574@@ -22,6 +22,7 @@
67575 #include <linux/bitops.h>
67576 #include <linux/spinlock.h>
67577 #include <linux/completion.h>
67578+#include <linux/grsecurity.h>
67579 #include <asm/uaccess.h>
67580
67581 #include "internal.h"
67582@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67583 return proc_lookup_de(PDE(dir), dir, dentry);
67584 }
67585
67586+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67587+ unsigned int flags)
67588+{
67589+ if (gr_proc_is_restricted())
67590+ return ERR_PTR(-EACCES);
67591+
67592+ return proc_lookup_de(PDE(dir), dir, dentry);
67593+}
67594+
67595 /*
67596 * This returns non-zero if at EOF, so that the /proc
67597 * root directory can use this and check if it should
67598@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67599 return proc_readdir_de(PDE(inode), file, ctx);
67600 }
67601
67602+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67603+{
67604+ struct inode *inode = file_inode(file);
67605+
67606+ if (gr_proc_is_restricted())
67607+ return -EACCES;
67608+
67609+ return proc_readdir_de(PDE(inode), file, ctx);
67610+}
67611+
67612 /*
67613 * These are the generic /proc directory operations. They
67614 * use the in-memory "struct proc_dir_entry" tree to parse
67615@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
67616 .iterate = proc_readdir,
67617 };
67618
67619+static const struct file_operations proc_dir_restricted_operations = {
67620+ .llseek = generic_file_llseek,
67621+ .read = generic_read_dir,
67622+ .iterate = proc_readdir_restrict,
67623+};
67624+
67625 /*
67626 * proc directories can do almost nothing..
67627 */
67628@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67629 .setattr = proc_notify_change,
67630 };
67631
67632+static const struct inode_operations proc_dir_restricted_inode_operations = {
67633+ .lookup = proc_lookup_restrict,
67634+ .getattr = proc_getattr,
67635+ .setattr = proc_notify_change,
67636+};
67637+
67638 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67639 {
67640 int ret;
67641@@ -441,6 +473,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67642 }
67643 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67644
67645+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67646+ struct proc_dir_entry *parent, void *data)
67647+{
67648+ struct proc_dir_entry *ent;
67649+
67650+ if (mode == 0)
67651+ mode = S_IRUGO | S_IXUGO;
67652+
67653+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67654+ if (ent) {
67655+ ent->data = data;
67656+ ent->restricted = 1;
67657+ ent->proc_fops = &proc_dir_restricted_operations;
67658+ ent->proc_iops = &proc_dir_restricted_inode_operations;
67659+ parent->nlink++;
67660+ if (proc_register(parent, ent) < 0) {
67661+ kfree(ent);
67662+ parent->nlink--;
67663+ ent = NULL;
67664+ }
67665+ }
67666+ return ent;
67667+}
67668+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67669+
67670 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67671 struct proc_dir_entry *parent)
67672 {
67673@@ -455,6 +512,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67674 }
67675 EXPORT_SYMBOL(proc_mkdir);
67676
67677+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67678+ struct proc_dir_entry *parent)
67679+{
67680+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67681+}
67682+EXPORT_SYMBOL(proc_mkdir_restrict);
67683+
67684 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67685 struct proc_dir_entry *parent,
67686 const struct file_operations *proc_fops,
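Usage sketch for the new proc_mkdir_restrict() export: the caller creates the
directory normally, and enforcement happens per access in the restricted
lookup/iterate ops rather than by hiding the entry at creation time (the
patch converts several /proc/net users this way elsewhere). The init function
below is illustrative only:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>

static int __init example_proc_init(void)
{
	/* listed as usual, but lookup/readdir return -EACCES for tasks
	 * that gr_proc_is_restricted() flags */
	struct proc_dir_entry *dir = proc_mkdir_restrict("example", NULL);

	return dir ? 0 : -ENOMEM;
}
fs_initcall(example_proc_init);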
67687diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67688index 7697b66..8d8e541 100644
67689--- a/fs/proc/inode.c
67690+++ b/fs/proc/inode.c
67691@@ -24,11 +24,17 @@
67692 #include <linux/mount.h>
67693 #include <linux/magic.h>
67694 #include <linux/namei.h>
67695+#include <linux/grsecurity.h>
67696
67697 #include <asm/uaccess.h>
67698
67699 #include "internal.h"
67700
67701+#ifdef CONFIG_PROC_SYSCTL
67702+extern const struct inode_operations proc_sys_inode_operations;
67703+extern const struct inode_operations proc_sys_dir_operations;
67704+#endif
67705+
67706 static void proc_evict_inode(struct inode *inode)
67707 {
67708 struct proc_dir_entry *de;
67709@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
67710 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
67711 sysctl_head_put(head);
67712 }
67713+
67714+#ifdef CONFIG_PROC_SYSCTL
67715+ if (inode->i_op == &proc_sys_inode_operations ||
67716+ inode->i_op == &proc_sys_dir_operations)
67717+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
67718+#endif
67719+
67720 }
67721
67722 static struct kmem_cache * proc_inode_cachep;
67723@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
67724 if (de->mode) {
67725 inode->i_mode = de->mode;
67726 inode->i_uid = de->uid;
67727+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67728+ inode->i_gid = grsec_proc_gid;
67729+#else
67730 inode->i_gid = de->gid;
67731+#endif
67732 }
67733 if (de->size)
67734 inode->i_size = de->size;
67735diff --git a/fs/proc/internal.h b/fs/proc/internal.h
67736index c835b94..c9e01a3 100644
67737--- a/fs/proc/internal.h
67738+++ b/fs/proc/internal.h
67739@@ -47,9 +47,10 @@ struct proc_dir_entry {
67740 struct completion *pde_unload_completion;
67741 struct list_head pde_openers; /* who did ->open, but not ->release */
67742 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
67743+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
67744 u8 namelen;
67745 char name[];
67746-};
67747+} __randomize_layout;
67748
67749 union proc_op {
67750 int (*proc_get_link)(struct dentry *, struct path *);
67751@@ -67,7 +68,7 @@ struct proc_inode {
67752 struct ctl_table *sysctl_entry;
67753 const struct proc_ns_operations *ns_ops;
67754 struct inode vfs_inode;
67755-};
67756+} __randomize_layout;
67757
67758 /*
67759 * General functions
67760@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
67761 struct pid *, struct task_struct *);
67762 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
67763 struct pid *, struct task_struct *);
67764+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67765+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
67766+ struct pid *, struct task_struct *);
67767+#endif
67768
67769 /*
67770 * base.c
67771@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
67772 * generic.c
67773 */
67774 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
67775+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
67776 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
67777 struct dentry *);
67778 extern int proc_readdir(struct file *, struct dir_context *);
67779+extern int proc_readdir_restrict(struct file *, struct dir_context *);
67780 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
67781
67782 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
67783diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
67784index a352d57..cb94a5c 100644
67785--- a/fs/proc/interrupts.c
67786+++ b/fs/proc/interrupts.c
67787@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
67788
67789 static int __init proc_interrupts_init(void)
67790 {
67791+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67792+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
67793+#else
67794 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
67795+#endif
67796 return 0;
67797 }
67798 fs_initcall(proc_interrupts_init);
67799diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
67800index 91a4e64..14bf8fa 100644
67801--- a/fs/proc/kcore.c
67802+++ b/fs/proc/kcore.c
67803@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67804 * the addresses in the elf_phdr on our list.
67805 */
67806 start = kc_offset_to_vaddr(*fpos - elf_buflen);
67807- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
67808+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
67809+ if (tsz > buflen)
67810 tsz = buflen;
67811-
67812+
67813 while (buflen) {
67814 struct kcore_list *m;
67815
67816@@ -515,19 +516,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67817 } else {
67818 if (kern_addr_valid(start)) {
67819 unsigned long n;
67820+ char *elf_buf;
67821+ mm_segment_t oldfs;
67822
67823- n = copy_to_user(buffer, (char *)start, tsz);
67824- /*
67825- * We cannot distinguish between fault on source
67826- * and fault on destination. When this happens
67827- * we clear too and hope it will trigger the
67828- * EFAULT again.
67829- */
67830- if (n) {
67831- if (clear_user(buffer + tsz - n,
67832- n))
67833- return -EFAULT;
67834- }
67835+ elf_buf = kzalloc(tsz, GFP_KERNEL);
67836+ if (!elf_buf)
67837+ return -ENOMEM;
67838+ oldfs = get_fs();
67839+ set_fs(KERNEL_DS);
67840+ n = __copy_from_user(elf_buf, (const void __user *)start, tsz);
67841+ set_fs(oldfs);
67842+ n = copy_to_user(buffer, elf_buf, tsz);
67843+ kfree(elf_buf);
67844+ if (n)
67845+ return -EFAULT;
67846 } else {
67847 if (clear_user(buffer, tsz))
67848 return -EFAULT;
67849@@ -547,6 +549,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67850
67851 static int open_kcore(struct inode *inode, struct file *filp)
67852 {
67853+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
67854+ return -EPERM;
67855+#endif
67856 if (!capable(CAP_SYS_RAWIO))
67857 return -EPERM;
67858 if (kcore_need_update)
67859@@ -580,7 +585,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
67860 return NOTIFY_OK;
67861 }
67862
67863-static struct notifier_block kcore_callback_nb __meminitdata = {
67864+static struct notifier_block kcore_callback_nb __meminitconst = {
67865 .notifier_call = kcore_callback,
67866 .priority = 0,
67867 };
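The read_kcore() rewrite above swaps a direct copy_to_user() from a raw
kernel address for a bounce buffer: the read is funneled through the
user-copy machinery (which tolerates faults) and only then copied out, and a
failed copy now returns -EFAULT instead of silently zero-filling. The idiom
in isolation, as a sketch against the old set_fs() API this 4.0-era tree
still provides:

#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t copy_kernel_range_to_user(char __user *ubuf,
					 unsigned long start, size_t tsz)
{
	mm_segment_t oldfs;
	char *bounce;
	size_t unread;

	bounce = kzalloc(tsz, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* source faults are tolerated; unread bytes stay zero from kzalloc() */
	unread = __copy_from_user(bounce, (const void __user *)start, tsz);
	set_fs(oldfs);
	(void)unread;

	unread = copy_to_user(ubuf, bounce, tsz);
	kfree(bounce);
	return unread ? -EFAULT : (ssize_t)tsz;
}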
67868diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
67869index d3ebf2e..6ad42d1 100644
67870--- a/fs/proc/meminfo.c
67871+++ b/fs/proc/meminfo.c
67872@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
67873 vmi.used >> 10,
67874 vmi.largest_chunk >> 10
67875 #ifdef CONFIG_MEMORY_FAILURE
67876- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67877+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67878 #endif
67879 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
67880 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
67881diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
67882index d4a3574..b421ce9 100644
67883--- a/fs/proc/nommu.c
67884+++ b/fs/proc/nommu.c
67885@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
67886
67887 if (file) {
67888 seq_pad(m, ' ');
67889- seq_path(m, &file->f_path, "");
67890+ seq_path(m, &file->f_path, "\n\\");
67891 }
67892
67893 seq_putc(m, '\n');
67894diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
67895index 1bde894..22ac7eb 100644
67896--- a/fs/proc/proc_net.c
67897+++ b/fs/proc/proc_net.c
67898@@ -23,9 +23,27 @@
67899 #include <linux/nsproxy.h>
67900 #include <net/net_namespace.h>
67901 #include <linux/seq_file.h>
67902+#include <linux/grsecurity.h>
67903
67904 #include "internal.h"
67905
67906+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67907+static struct seq_operations *ipv6_seq_ops_addr;
67908+
67909+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
67910+{
67911+ ipv6_seq_ops_addr = addr;
67912+}
67913+
67914+void unregister_ipv6_seq_ops_addr(void)
67915+{
67916+ ipv6_seq_ops_addr = NULL;
67917+}
67918+
67919+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
67920+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
67921+#endif
67922+
67923 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
67924 {
67925 return pde->parent->data;
67926@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
67927 return maybe_get_net(PDE_NET(PDE(inode)));
67928 }
67929
67930+extern const struct seq_operations dev_seq_ops;
67931+
67932 int seq_open_net(struct inode *ino, struct file *f,
67933 const struct seq_operations *ops, int size)
67934 {
67935@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67936
67937 BUG_ON(size < sizeof(*p));
67938
67939+ /* when restricted, only permit access to /proc/net/dev (and the registered IPv6 address ops) */
67940+ if (
67941+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67942+ ops != ipv6_seq_ops_addr &&
67943+#endif
67944+ ops != &dev_seq_ops && gr_proc_is_restricted())
67945+ return -EACCES;
67946+
67947 net = get_proc_net(ino);
67948 if (net == NULL)
67949 return -ENXIO;
67950@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67951 int err;
67952 struct net *net;
67953
67954+ if (gr_proc_is_restricted())
67955+ return -EACCES;
67956+
67957 err = -ENXIO;
67958 net = get_proc_net(inode);
67959 if (net == NULL)
67960diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67961index f92d5dd..26398ac 100644
67962--- a/fs/proc/proc_sysctl.c
67963+++ b/fs/proc/proc_sysctl.c
67964@@ -11,13 +11,21 @@
67965 #include <linux/namei.h>
67966 #include <linux/mm.h>
67967 #include <linux/module.h>
67968+#include <linux/nsproxy.h>
67969+#ifdef CONFIG_GRKERNSEC
67970+#include <net/net_namespace.h>
67971+#endif
67972 #include "internal.h"
67973
67974+extern int gr_handle_chroot_sysctl(const int op);
67975+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67976+ const int op);
67977+
67978 static const struct dentry_operations proc_sys_dentry_operations;
67979 static const struct file_operations proc_sys_file_operations;
67980-static const struct inode_operations proc_sys_inode_operations;
67981+const struct inode_operations proc_sys_inode_operations;
67982 static const struct file_operations proc_sys_dir_file_operations;
67983-static const struct inode_operations proc_sys_dir_operations;
67984+const struct inode_operations proc_sys_dir_operations;
67985
67986 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67987 {
67988@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67989
67990 err = NULL;
67991 d_set_d_op(dentry, &proc_sys_dentry_operations);
67992+
67993+ gr_handle_proc_create(dentry, inode);
67994+
67995 d_add(dentry, inode);
67996
67997 out:
67998@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67999 struct inode *inode = file_inode(filp);
68000 struct ctl_table_header *head = grab_header(inode);
68001 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
68002+ int op = write ? MAY_WRITE : MAY_READ;
68003 ssize_t error;
68004 size_t res;
68005
68006@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68007 * and won't be until we finish.
68008 */
68009 error = -EPERM;
68010- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
68011+ if (sysctl_perm(head, table, op))
68012 goto out;
68013
68014 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
68015@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68016 if (!table->proc_handler)
68017 goto out;
68018
68019+#ifdef CONFIG_GRKERNSEC
68020+ error = -EPERM;
68021+ if (gr_handle_chroot_sysctl(op))
68022+ goto out;
68023+ dget(filp->f_path.dentry);
68024+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
68025+ dput(filp->f_path.dentry);
68026+ goto out;
68027+ }
68028+ dput(filp->f_path.dentry);
68029+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
68030+ goto out;
68031+ if (write) {
68032+ if (current->nsproxy->net_ns != table->extra2) {
68033+ if (!capable(CAP_SYS_ADMIN))
68034+ goto out;
68035+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
68036+ goto out;
68037+ }
68038+#endif
68039+
68040 /* careful: calling conventions are nasty here */
68041 res = count;
68042 error = table->proc_handler(table, write, buf, &res, ppos);
68043@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
68044 return false;
68045 } else {
68046 d_set_d_op(child, &proc_sys_dentry_operations);
68047+
68048+ gr_handle_proc_create(child, inode);
68049+
68050 d_add(child, inode);
68051 }
68052 } else {
68053@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
68054 if ((*pos)++ < ctx->pos)
68055 return true;
68056
68057+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
68058+ return 0;
68059+
68060 if (unlikely(S_ISLNK(table->mode)))
68061 res = proc_sys_link_fill_cache(file, ctx, head, table);
68062 else
68063@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
68064 if (IS_ERR(head))
68065 return PTR_ERR(head);
68066
68067+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
68068+ return -ENOENT;
68069+
68070 generic_fillattr(inode, stat);
68071 if (table)
68072 stat->mode = (stat->mode & S_IFMT) | table->mode;
68073@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
68074 .llseek = generic_file_llseek,
68075 };
68076
68077-static const struct inode_operations proc_sys_inode_operations = {
68078+const struct inode_operations proc_sys_inode_operations = {
68079 .permission = proc_sys_permission,
68080 .setattr = proc_sys_setattr,
68081 .getattr = proc_sys_getattr,
68082 };
68083
68084-static const struct inode_operations proc_sys_dir_operations = {
68085+const struct inode_operations proc_sys_dir_operations = {
68086 .lookup = proc_sys_lookup,
68087 .permission = proc_sys_permission,
68088 .setattr = proc_sys_setattr,
68089@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
68090 static struct ctl_dir *new_dir(struct ctl_table_set *set,
68091 const char *name, int namelen)
68092 {
68093- struct ctl_table *table;
68094+ ctl_table_no_const *table;
68095 struct ctl_dir *new;
68096 struct ctl_node *node;
68097 char *new_name;
68098@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
68099 return NULL;
68100
68101 node = (struct ctl_node *)(new + 1);
68102- table = (struct ctl_table *)(node + 1);
68103+ table = (ctl_table_no_const *)(node + 1);
68104 new_name = (char *)(table + 2);
68105 memcpy(new_name, name, namelen);
68106 new_name[namelen] = '\0';
68107@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
68108 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
68109 struct ctl_table_root *link_root)
68110 {
68111- struct ctl_table *link_table, *entry, *link;
68112+ ctl_table_no_const *link_table, *link;
68113+ struct ctl_table *entry;
68114 struct ctl_table_header *links;
68115 struct ctl_node *node;
68116 char *link_name;
68117@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
68118 return NULL;
68119
68120 node = (struct ctl_node *)(links + 1);
68121- link_table = (struct ctl_table *)(node + nr_entries);
68122+ link_table = (ctl_table_no_const *)(node + nr_entries);
68123 link_name = (char *)&link_table[nr_entries + 1];
68124
68125 for (link = link_table, entry = table; entry->procname; link++, entry++) {
68126@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68127 struct ctl_table_header ***subheader, struct ctl_table_set *set,
68128 struct ctl_table *table)
68129 {
68130- struct ctl_table *ctl_table_arg = NULL;
68131- struct ctl_table *entry, *files;
68132+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
68133+ struct ctl_table *entry;
68134 int nr_files = 0;
68135 int nr_dirs = 0;
68136 int err = -ENOMEM;
68137@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68138 nr_files++;
68139 }
68140
68141- files = table;
68142 /* If there are mixed files and directories we need a new table */
68143 if (nr_dirs && nr_files) {
68144- struct ctl_table *new;
68145+ ctl_table_no_const *new;
68146 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
68147 GFP_KERNEL);
68148 if (!files)
68149@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68150 /* Register everything except a directory full of subdirectories */
68151 if (nr_files || !nr_dirs) {
68152 struct ctl_table_header *header;
68153- header = __register_sysctl_table(set, path, files);
68154+ header = __register_sysctl_table(set, path, files ? files : table);
68155 if (!header) {
68156 kfree(ctl_table_arg);
68157 goto out;
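The final proc_sysctl.c hunk is a correctness fix that the constification
change makes necessary: 'files' no longer starts out aliasing 'table', so in
the common unmixed case it stays NULL and the registration call must fall
back to the original array explicitly. The idiom, reduced to a runnable toy:

#include <stdio.h>

static const char *table[] = { "foo", "bar", NULL };

static void register_all(const char **tab)
{
	for (; *tab; tab++)
		printf("register %s\n", *tab);
}

int main(void)
{
	const char **files = NULL;	/* only allocated in the mixed case */

	/* the 'files ? files : table' fallback from the hunk */
	register_all(files ? files : table);
	return 0;
}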
68158diff --git a/fs/proc/root.c b/fs/proc/root.c
68159index e74ac9f..35e89f4 100644
68160--- a/fs/proc/root.c
68161+++ b/fs/proc/root.c
68162@@ -188,7 +188,15 @@ void __init proc_root_init(void)
68163 proc_mkdir("openprom", NULL);
68164 #endif
68165 proc_tty_init();
68166+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68167+#ifdef CONFIG_GRKERNSEC_PROC_USER
68168+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
68169+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68170+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
68171+#endif
68172+#else
68173 proc_mkdir("bus", NULL);
68174+#endif
68175 proc_sys_init();
68176 }
68177
68178diff --git a/fs/proc/stat.c b/fs/proc/stat.c
68179index 510413eb..34d9a8c 100644
68180--- a/fs/proc/stat.c
68181+++ b/fs/proc/stat.c
68182@@ -11,6 +11,7 @@
68183 #include <linux/irqnr.h>
68184 #include <linux/cputime.h>
68185 #include <linux/tick.h>
68186+#include <linux/grsecurity.h>
68187
68188 #ifndef arch_irq_stat_cpu
68189 #define arch_irq_stat_cpu(cpu) 0
68190@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
68191 u64 sum_softirq = 0;
68192 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
68193 struct timespec boottime;
68194+ int unrestricted = 1;
68195+
68196+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68197+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68198+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
68199+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68200+ && !in_group_p(grsec_proc_gid)
68201+#endif
68202+ )
68203+ unrestricted = 0;
68204+#endif
68205+#endif
68206
68207 user = nice = system = idle = iowait =
68208 irq = softirq = steal = 0;
68209@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
68210 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68211 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68212 idle += get_idle_time(i);
68213- iowait += get_iowait_time(i);
68214- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68215- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68216- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68217- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68218- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68219- sum += kstat_cpu_irqs_sum(i);
68220- sum += arch_irq_stat_cpu(i);
68221+ if (unrestricted) {
68222+ iowait += get_iowait_time(i);
68223+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68224+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68225+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68226+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68227+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68228+ sum += kstat_cpu_irqs_sum(i);
68229+ sum += arch_irq_stat_cpu(i);
68230+ for (j = 0; j < NR_SOFTIRQS; j++) {
68231+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68232
68233- for (j = 0; j < NR_SOFTIRQS; j++) {
68234- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68235-
68236- per_softirq_sums[j] += softirq_stat;
68237- sum_softirq += softirq_stat;
68238+ per_softirq_sums[j] += softirq_stat;
68239+ sum_softirq += softirq_stat;
68240+ }
68241 }
68242 }
68243- sum += arch_irq_stat();
68244+ if (unrestricted)
68245+ sum += arch_irq_stat();
68246
68247 seq_puts(p, "cpu ");
68248 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68249@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
68250 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68251 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68252 idle = get_idle_time(i);
68253- iowait = get_iowait_time(i);
68254- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68255- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68256- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68257- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68258- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68259+ if (unrestricted) {
68260+ iowait = get_iowait_time(i);
68261+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68262+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68263+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68264+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68265+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68266+ }
68267 seq_printf(p, "cpu%d", i);
68268 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68269 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
68270@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
68271
68272 /* sum again ? it could be updated? */
68273 for_each_irq_nr(j)
68274- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
68275+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
68276
68277 seq_printf(p,
68278 "\nctxt %llu\n"
68279@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
68280 "processes %lu\n"
68281 "procs_running %lu\n"
68282 "procs_blocked %lu\n",
68283- nr_context_switches(),
68284+ unrestricted ? nr_context_switches() : 0ULL,
68285 (unsigned long)jif,
68286- total_forks,
68287- nr_running(),
68288- nr_iowait());
68289+ unrestricted ? total_forks : 0UL,
68290+ unrestricted ? nr_running() : 0UL,
68291+ unrestricted ? nr_iowait() : 0UL);
68292
68293 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
68294
68295diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
68296index 6dee68d..1b4add0 100644
68297--- a/fs/proc/task_mmu.c
68298+++ b/fs/proc/task_mmu.c
68299@@ -13,12 +13,19 @@
68300 #include <linux/swap.h>
68301 #include <linux/swapops.h>
68302 #include <linux/mmu_notifier.h>
68303+#include <linux/grsecurity.h>
68304
68305 #include <asm/elf.h>
68306 #include <asm/uaccess.h>
68307 #include <asm/tlbflush.h>
68308 #include "internal.h"
68309
68310+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68311+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
68312+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
68313+ _mm->pax_flags & MF_PAX_SEGMEXEC))
68314+#endif
68315+
68316 void task_mem(struct seq_file *m, struct mm_struct *mm)
68317 {
68318 unsigned long data, text, lib, swap, ptes, pmds;
68319@@ -57,8 +64,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68320 "VmLib:\t%8lu kB\n"
68321 "VmPTE:\t%8lu kB\n"
68322 "VmPMD:\t%8lu kB\n"
68323- "VmSwap:\t%8lu kB\n",
68324- hiwater_vm << (PAGE_SHIFT-10),
68325+ "VmSwap:\t%8lu kB\n"
68326+
68327+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68328+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
68329+#endif
68330+
68331+ ,hiwater_vm << (PAGE_SHIFT-10),
68332 total_vm << (PAGE_SHIFT-10),
68333 mm->locked_vm << (PAGE_SHIFT-10),
68334 mm->pinned_vm << (PAGE_SHIFT-10),
68335@@ -68,7 +80,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68336 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
68337 ptes >> 10,
68338 pmds >> 10,
68339- swap << (PAGE_SHIFT-10));
68340+ swap << (PAGE_SHIFT-10)
68341+
68342+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68343+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68344+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
68345+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
68346+#else
68347+ , mm->context.user_cs_base
68348+ , mm->context.user_cs_limit
68349+#endif
68350+#endif
68351+
68352+ );
68353 }
68354
68355 unsigned long task_vsize(struct mm_struct *mm)
68356@@ -285,13 +309,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68357 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
68358 }
68359
68360- /* We don't show the stack guard page in /proc/maps */
68361+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68362+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
68363+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
68364+#else
68365 start = vma->vm_start;
68366- if (stack_guard_page_start(vma, start))
68367- start += PAGE_SIZE;
68368 end = vma->vm_end;
68369- if (stack_guard_page_end(vma, end))
68370- end -= PAGE_SIZE;
68371+#endif
68372
68373 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
68374 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
68375@@ -301,7 +325,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68376 flags & VM_WRITE ? 'w' : '-',
68377 flags & VM_EXEC ? 'x' : '-',
68378 flags & VM_MAYSHARE ? 's' : 'p',
68379+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68380+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
68381+#else
68382 pgoff,
68383+#endif
68384 MAJOR(dev), MINOR(dev), ino);
68385
68386 /*
68387@@ -310,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68388 */
68389 if (file) {
68390 seq_pad(m, ' ');
68391- seq_path(m, &file->f_path, "\n");
68392+ seq_path(m, &file->f_path, "\n\\");
68393 goto done;
68394 }
68395
68396@@ -341,8 +369,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68397 * Thread stack in /proc/PID/task/TID/maps or
68398 * the main process stack.
68399 */
68400- if (!is_pid || (vma->vm_start <= mm->start_stack &&
68401- vma->vm_end >= mm->start_stack)) {
68402+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
68403+ (vma->vm_start <= mm->start_stack &&
68404+ vma->vm_end >= mm->start_stack)) {
68405 name = "[stack]";
68406 } else {
68407 /* Thread stack in /proc/PID/maps */
68408@@ -362,6 +391,12 @@ done:
68409
68410 static int show_map(struct seq_file *m, void *v, int is_pid)
68411 {
68412+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68413+ if (current->exec_id != m->exec_id) {
68414+ gr_log_badprocpid("maps");
68415+ return 0;
68416+ }
68417+#endif
68418 show_map_vma(m, v, is_pid);
68419 m_cache_vma(m, v);
68420 return 0;
68421@@ -620,9 +655,18 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68422 .private = &mss,
68423 };
68424
68425+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68426+ if (current->exec_id != m->exec_id) {
68427+ gr_log_badprocpid("smaps");
68428+ return 0;
68429+ }
68430+#endif
68431 memset(&mss, 0, sizeof mss);
68432- /* mmap_sem is held in m_start */
68433- walk_page_vma(vma, &smaps_walk);
68434+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68435+ if (!PAX_RAND_FLAGS(vma->vm_mm))
68436+#endif
68437+ /* mmap_sem is held in m_start */
68438+ walk_page_vma(vma, &smaps_walk);
68439
68440 show_map_vma(m, vma, is_pid);
68441
68442@@ -641,7 +685,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68443 "KernelPageSize: %8lu kB\n"
68444 "MMUPageSize: %8lu kB\n"
68445 "Locked: %8lu kB\n",
68446+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68447+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
68448+#else
68449 (vma->vm_end - vma->vm_start) >> 10,
68450+#endif
68451 mss.resident >> 10,
68452 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
68453 mss.shared_clean >> 10,
68454@@ -1491,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68455 char buffer[64];
68456 int nid;
68457
68458+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68459+ if (current->exec_id != m->exec_id) {
68460+ gr_log_badprocpid("numa_maps");
68461+ return 0;
68462+ }
68463+#endif
68464+
68465 if (!mm)
68466 return 0;
68467
68468@@ -1505,11 +1560,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68469 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
68470 }
68471
68472+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68473+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68474+#else
68475 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68476+#endif
68477
68478 if (file) {
68479 seq_puts(m, " file=");
68480- seq_path(m, &file->f_path, "\n\t= ");
68481+ seq_path(m, &file->f_path, "\n\t\\= ");
68482 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68483 seq_puts(m, " heap");
68484 } else {
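Note the asymmetry built into PAX_RAND_FLAGS(): it only fires when the mm
being shown is not current's, so a process always sees its own real layout;
the sanitized zero addresses appear only when inspecting other tasks. A
runnable illustration:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");	/* own mm: never sanitized */

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* first mapping, with real addresses */
	fclose(f);
	return 0;
}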
68485diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68486index 599ec2e..f1413ae 100644
68487--- a/fs/proc/task_nommu.c
68488+++ b/fs/proc/task_nommu.c
68489@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68490 else
68491 bytes += kobjsize(mm);
68492
68493- if (current->fs && current->fs->users > 1)
68494+ if (current->fs && atomic_read(&current->fs->users) > 1)
68495 sbytes += kobjsize(current->fs);
68496 else
68497 bytes += kobjsize(current->fs);
68498@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68499
68500 if (file) {
68501 seq_pad(m, ' ');
68502- seq_path(m, &file->f_path, "");
68503+ seq_path(m, &file->f_path, "\n\\");
68504 } else if (mm) {
68505 pid_t tid = pid_of_stack(priv, vma, is_pid);
68506
68507diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68508index 4e61388..1a2523d 100644
68509--- a/fs/proc/vmcore.c
68510+++ b/fs/proc/vmcore.c
68511@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68512 nr_bytes = count;
68513
68514 /* If pfn is not ram, return zeros for sparse dump files */
68515- if (pfn_is_ram(pfn) == 0)
68516- memset(buf, 0, nr_bytes);
68517- else {
68518+ if (pfn_is_ram(pfn) == 0) {
68519+ if (userbuf) {
68520+ if (clear_user((char __force_user *)buf, nr_bytes))
68521+ return -EFAULT;
68522+ } else
68523+ memset(buf, 0, nr_bytes);
68524+ } else {
68525 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68526 offset, userbuf);
68527 if (tmp < 0)
68528@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68529 static int copy_to(void *target, void *src, size_t size, int userbuf)
68530 {
68531 if (userbuf) {
68532- if (copy_to_user((char __user *) target, src, size))
68533+ if (copy_to_user((char __force_user *) target, src, size))
68534 return -EFAULT;
68535 } else {
68536 memcpy(target, src, size);
68537@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68538 if (*fpos < m->offset + m->size) {
68539 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68540 start = m->paddr + *fpos - m->offset;
68541- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68542+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68543 if (tmp < 0)
68544 return tmp;
68545 buflen -= tsz;
68546@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68547 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68548 size_t buflen, loff_t *fpos)
68549 {
68550- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68551+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68552 }
68553
68554 /*
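The first vmcore hunk is a plain bug fix as much as hardening: for sparse
(non-RAM) pages the old code ran memset() on a buffer that may be a user
pointer, and zero-filling a user destination has to go through clear_user(),
which can fault safely. The corrected shape as a sketch ('userbuf' mirrors
the helper's convention; sparse would additionally want the __force cast the
patch uses):

#include <linux/string.h>
#include <linux/uaccess.h>

static int zero_fill(char *buf, size_t nr_bytes, int userbuf)
{
	if (userbuf) {
		if (clear_user((char __user *)buf, nr_bytes))
			return -EFAULT;
	} else {
		memset(buf, 0, nr_bytes);
	}
	return 0;
}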
68555diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68556index d3fb2b6..43a8140 100644
68557--- a/fs/qnx6/qnx6.h
68558+++ b/fs/qnx6/qnx6.h
68559@@ -74,7 +74,7 @@ enum {
68560 BYTESEX_BE,
68561 };
68562
68563-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68564+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68565 {
68566 if (sbi->s_bytesex == BYTESEX_LE)
68567 return le64_to_cpu((__force __le64)n);
68568@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68569 return (__force __fs64)cpu_to_be64(n);
68570 }
68571
68572-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68573+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68574 {
68575 if (sbi->s_bytesex == BYTESEX_LE)
68576 return le32_to_cpu((__force __le32)n);
68577diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68578index bb2869f..d34ada8 100644
68579--- a/fs/quota/netlink.c
68580+++ b/fs/quota/netlink.c
68581@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
68582 void quota_send_warning(struct kqid qid, dev_t dev,
68583 const char warntype)
68584 {
68585- static atomic_t seq;
68586+ static atomic_unchecked_t seq;
68587 struct sk_buff *skb;
68588 void *msg_head;
68589 int ret;
68590@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68591 "VFS: Not enough memory to send quota warning.\n");
68592 return;
68593 }
68594- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68595+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68596 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68597 if (!msg_head) {
68598 printk(KERN_ERR
68599diff --git a/fs/read_write.c b/fs/read_write.c
68600index 8e1b687..bad2eec 100644
68601--- a/fs/read_write.c
68602+++ b/fs/read_write.c
68603@@ -553,7 +553,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68604
68605 old_fs = get_fs();
68606 set_fs(get_ds());
68607- p = (__force const char __user *)buf;
68608+ p = (const char __force_user *)buf;
68609 if (count > MAX_RW_COUNT)
68610 count = MAX_RW_COUNT;
68611 if (file->f_op->write)
68612diff --git a/fs/readdir.c b/fs/readdir.c
68613index ced6791..936687b 100644
68614--- a/fs/readdir.c
68615+++ b/fs/readdir.c
68616@@ -18,6 +18,7 @@
68617 #include <linux/security.h>
68618 #include <linux/syscalls.h>
68619 #include <linux/unistd.h>
68620+#include <linux/namei.h>
68621
68622 #include <asm/uaccess.h>
68623
68624@@ -71,6 +72,7 @@ struct old_linux_dirent {
68625 struct readdir_callback {
68626 struct dir_context ctx;
68627 struct old_linux_dirent __user * dirent;
68628+ struct file * file;
68629 int result;
68630 };
68631
68632@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
68633 buf->result = -EOVERFLOW;
68634 return -EOVERFLOW;
68635 }
68636+
68637+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68638+ return 0;
68639+
68640 buf->result++;
68641 dirent = buf->dirent;
68642 if (!access_ok(VERIFY_WRITE, dirent,
68643@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68644 if (!f.file)
68645 return -EBADF;
68646
68647+ buf.file = f.file;
68648 error = iterate_dir(f.file, &buf.ctx);
68649 if (buf.result)
68650 error = buf.result;
68651@@ -145,6 +152,7 @@ struct getdents_callback {
68652 struct dir_context ctx;
68653 struct linux_dirent __user * current_dir;
68654 struct linux_dirent __user * previous;
68655+ struct file * file;
68656 int count;
68657 int error;
68658 };
68659@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
68660 buf->error = -EOVERFLOW;
68661 return -EOVERFLOW;
68662 }
68663+
68664+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68665+ return 0;
68666+
68667 dirent = buf->previous;
68668 if (dirent) {
68669 if (__put_user(offset, &dirent->d_off))
68670@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68671 if (!f.file)
68672 return -EBADF;
68673
68674+ buf.file = f.file;
68675 error = iterate_dir(f.file, &buf.ctx);
68676 if (error >= 0)
68677 error = buf.error;
68678@@ -230,6 +243,7 @@ struct getdents_callback64 {
68679 struct dir_context ctx;
68680 struct linux_dirent64 __user * current_dir;
68681 struct linux_dirent64 __user * previous;
68682+ struct file *file;
68683 int count;
68684 int error;
68685 };
68686@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
68687 buf->error = -EINVAL; /* only used if we fail.. */
68688 if (reclen > buf->count)
68689 return -EINVAL;
68690+
68691+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68692+ return 0;
68693+
68694 dirent = buf->previous;
68695 if (dirent) {
68696 if (__put_user(offset, &dirent->d_off))
68697@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68698 if (!f.file)
68699 return -EBADF;
68700
68701+ buf.file = f.file;
68702 error = iterate_dir(f.file, &buf.ctx);
68703 if (error >= 0)
68704 error = buf.error;
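All three getdents variants in fs/readdir.c get the same two-line treatment:
the callback context gains a struct file * so the RBAC engine can veto
individual directory entries by name. The generic shape of such a filtering
filldir is sketched below; filtered_ctx and real_filldir are illustrative
stand-ins, not kernel API, while gr_acl_handle_filldir() is the patch's hook:

#include <linux/fs.h>
#include <linux/kernel.h>

struct filtered_ctx {
	struct dir_context ctx;
	struct file *file;	/* stashed by the syscall before iterate_dir() */
};

static int filldir_filtered(struct dir_context *ctx, const char *name,
			    int namlen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct filtered_ctx *buf = container_of(ctx, struct filtered_ctx, ctx);

	/* policy hides the entry: return 0 (success) so iteration simply
	 * continues without ever copying the name out */
	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
		return 0;

	return real_filldir(ctx, name, namlen, offset, ino, d_type);
}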
68705diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68706index 9c02d96..6562c10 100644
68707--- a/fs/reiserfs/do_balan.c
68708+++ b/fs/reiserfs/do_balan.c
68709@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68710 return;
68711 }
68712
68713- atomic_inc(&fs_generation(tb->tb_sb));
68714+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68715 do_balance_starts(tb);
68716
68717 /*
68718diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
68719index aca73dd..e3c558d 100644
68720--- a/fs/reiserfs/item_ops.c
68721+++ b/fs/reiserfs/item_ops.c
68722@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
68723 }
68724
68725 static struct item_operations errcatch_ops = {
68726- errcatch_bytes_number,
68727- errcatch_decrement_key,
68728- errcatch_is_left_mergeable,
68729- errcatch_print_item,
68730- errcatch_check_item,
68731+ .bytes_number = errcatch_bytes_number,
68732+ .decrement_key = errcatch_decrement_key,
68733+ .is_left_mergeable = errcatch_is_left_mergeable,
68734+ .print_item = errcatch_print_item,
68735+ .check_item = errcatch_check_item,
68736
68737- errcatch_create_vi,
68738- errcatch_check_left,
68739- errcatch_check_right,
68740- errcatch_part_size,
68741- errcatch_unit_num,
68742- errcatch_print_vi
68743+ .create_vi = errcatch_create_vi,
68744+ .check_left = errcatch_check_left,
68745+ .check_right = errcatch_check_right,
68746+ .part_size = errcatch_part_size,
68747+ .unit_num = errcatch_unit_num,
68748+ .print_vi = errcatch_print_vi
68749 };
68750
68751 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
68752diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
68753index 621b9f3..af527fd 100644
68754--- a/fs/reiserfs/procfs.c
68755+++ b/fs/reiserfs/procfs.c
68756@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
68757 "SMALL_TAILS " : "NO_TAILS ",
68758 replay_only(sb) ? "REPLAY_ONLY " : "",
68759 convert_reiserfs(sb) ? "CONV " : "",
68760- atomic_read(&r->s_generation_counter),
68761+ atomic_read_unchecked(&r->s_generation_counter),
68762 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
68763 SF(s_do_balance), SF(s_unneeded_left_neighbor),
68764 SF(s_good_search_by_key_reada), SF(s_bmaps),
68765diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
68766index bb79cdd..fcf49ef 100644
68767--- a/fs/reiserfs/reiserfs.h
68768+++ b/fs/reiserfs/reiserfs.h
68769@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
68770 /* Comment? -Hans */
68771 wait_queue_head_t s_wait;
68772 /* increased by one every time the tree gets re-balanced */
68773- atomic_t s_generation_counter;
68774+ atomic_unchecked_t s_generation_counter;
68775
68776 /* File system properties. Currently holds on-disk FS format */
68777 unsigned long s_properties;
68778@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68779 #define REISERFS_USER_MEM 1 /* user memory mode */
68780
68781 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68782-#define get_generation(s) atomic_read (&fs_generation(s))
68783+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68784 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68785 #define __fs_changed(gen,s) (gen != get_generation (s))
68786 #define fs_changed(gen,s) \
68787diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
68788index 71fbbe3..eff29ba 100644
68789--- a/fs/reiserfs/super.c
68790+++ b/fs/reiserfs/super.c
68791@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
68792 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
68793 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
68794 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
68795+#ifdef CONFIG_REISERFS_FS_XATTR
68796+ /* turn on user xattrs by default */
68797+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
68798+#endif
68799 /* no preallocation minimum, be smart in reiserfs_file_write instead */
68800 sbi->s_alloc_options.preallocmin = 0;
68801 /* Preallocate by 16 blocks (17-1) at once */
68802diff --git a/fs/select.c b/fs/select.c
68803index f684c75..4117611 100644
68804--- a/fs/select.c
68805+++ b/fs/select.c
68806@@ -20,6 +20,7 @@
68807 #include <linux/export.h>
68808 #include <linux/slab.h>
68809 #include <linux/poll.h>
68810+#include <linux/security.h>
68811 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
68812 #include <linux/file.h>
68813 #include <linux/fdtable.h>
68814@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
68815 struct poll_list *walk = head;
68816 unsigned long todo = nfds;
68817
68818+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
68819 if (nfds > rlimit(RLIMIT_NOFILE))
68820 return -EINVAL;
68821
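
For context, do_sys_poll() already rejects nfds above RLIMIT_NOFILE; the added gr_learn_resource() call is grsecurity's resource-learning hook, fired just before that check so RBAC learning mode can record the requested value. A small userspace sketch of the observable kernel behavior (assuming a finite soft limit):

    #include <errno.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
            return 1;

        /* nfds above the soft limit is rejected with EINVAL before the
         * kernel ever copies (or faults on) the pollfd array */
        errno = 0;
        int rc = poll(NULL, rl.rlim_cur + 1, 0);
        printf("poll: rc=%d errno=%s\n", rc, strerror(errno));
        return 0;
    }
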
68822diff --git a/fs/seq_file.c b/fs/seq_file.c
68823index 555f821..34684d7 100644
68824--- a/fs/seq_file.c
68825+++ b/fs/seq_file.c
68826@@ -12,6 +12,8 @@
68827 #include <linux/slab.h>
68828 #include <linux/cred.h>
68829 #include <linux/mm.h>
68830+#include <linux/sched.h>
68831+#include <linux/grsecurity.h>
68832
68833 #include <asm/uaccess.h>
68834 #include <asm/page.h>
68835@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
68836
68837 static void *seq_buf_alloc(unsigned long size)
68838 {
68839- void *buf;
68840-
68841- /*
68842- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
68843- * it's better to fall back to vmalloc() than to kill things.
68844- */
68845- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
68846- if (!buf && size > PAGE_SIZE)
68847- buf = vmalloc(size);
68848- return buf;
68849+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
68850 }
68851
68852 /**
68853@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
68854 #ifdef CONFIG_USER_NS
68855 p->user_ns = file->f_cred->user_ns;
68856 #endif
68857+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68858+ p->exec_id = current->exec_id;
68859+#endif
68860
68861 /*
68862 * Wrappers around seq_open(e.g. swaps_open) need to be
68863@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
68864 }
68865 EXPORT_SYMBOL(seq_open);
68866
68867+
68868+int seq_open_restrict(struct file *file, const struct seq_operations *op)
68869+{
68870+ if (gr_proc_is_restricted())
68871+ return -EACCES;
68872+
68873+ return seq_open(file, op);
68874+}
68875+EXPORT_SYMBOL(seq_open_restrict);
68876+
68877 static int traverse(struct seq_file *m, loff_t offset)
68878 {
68879 loff_t pos = 0, index;
68880@@ -158,7 +164,7 @@ Eoverflow:
68881 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
68882 {
68883 struct seq_file *m = file->private_data;
68884- size_t copied = 0;
68885+ ssize_t copied = 0;
68886 loff_t pos;
68887 size_t n;
68888 void *p;
68889@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
68890 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
68891 void *data)
68892 {
68893- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
68894+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
68895 int res = -ENOMEM;
68896
68897 if (op) {
68898@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
68899 }
68900 EXPORT_SYMBOL(single_open_size);
68901
68902+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
68903+ void *data)
68904+{
68905+ if (gr_proc_is_restricted())
68906+ return -EACCES;
68907+
68908+ return single_open(file, show, data);
68909+}
68910+EXPORT_SYMBOL(single_open_restrict);
68911+
68912+
68913 int single_release(struct inode *inode, struct file *file)
68914 {
68915 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
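
seq_open_restrict() and single_open_restrict() both follow the same gating pattern: consult a policy predicate, then delegate to the unrestricted open, so every restricted /proc entry shares one deny path. A self-contained sketch with stand-ins for the grsecurity hook:

    #include <errno.h>
    #include <stdbool.h>

    static bool policy_denied(void) { return false; }        /* stand-in for gr_proc_is_restricted() */
    static int do_open(void *file)  { (void)file; return 0; } /* stand-in for seq_open() */

    static int open_restricted(void *file)
    {
        if (policy_denied())
            return -EACCES;     /* refuse before any state is allocated */

        return do_open(file);
    }
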
68916diff --git a/fs/splice.c b/fs/splice.c
68917index 7968da9..4ce985b 100644
68918--- a/fs/splice.c
68919+++ b/fs/splice.c
68920@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68921 pipe_lock(pipe);
68922
68923 for (;;) {
68924- if (!pipe->readers) {
68925+ if (!atomic_read(&pipe->readers)) {
68926 send_sig(SIGPIPE, current, 0);
68927 if (!ret)
68928 ret = -EPIPE;
68929@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68930 page_nr++;
68931 ret += buf->len;
68932
68933- if (pipe->files)
68934+ if (atomic_read(&pipe->files))
68935 do_wakeup = 1;
68936
68937 if (!--spd->nr_pages)
68938@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68939 do_wakeup = 0;
68940 }
68941
68942- pipe->waiting_writers++;
68943+ atomic_inc(&pipe->waiting_writers);
68944 pipe_wait(pipe);
68945- pipe->waiting_writers--;
68946+ atomic_dec(&pipe->waiting_writers);
68947 }
68948
68949 pipe_unlock(pipe);
68950@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68951 old_fs = get_fs();
68952 set_fs(get_ds());
68953 /* The cast to a user pointer is valid due to the set_fs() */
68954- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68955+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68956 set_fs(old_fs);
68957
68958 return res;
68959@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68960 old_fs = get_fs();
68961 set_fs(get_ds());
68962 /* The cast to a user pointer is valid due to the set_fs() */
68963- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68964+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68965 set_fs(old_fs);
68966
68967 return res;
68968@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68969 goto err;
68970
68971 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68972- vec[i].iov_base = (void __user *) page_address(page);
68973+ vec[i].iov_base = (void __force_user *) page_address(page);
68974 vec[i].iov_len = this_len;
68975 spd.pages[i] = page;
68976 spd.nr_pages++;
68977@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68978 ops->release(pipe, buf);
68979 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68980 pipe->nrbufs--;
68981- if (pipe->files)
68982+ if (atomic_read(&pipe->files))
68983 sd->need_wakeup = true;
68984 }
68985
68986@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68987 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68988 {
68989 while (!pipe->nrbufs) {
68990- if (!pipe->writers)
68991+ if (!atomic_read(&pipe->writers))
68992 return 0;
68993
68994- if (!pipe->waiting_writers && sd->num_spliced)
68995+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
68996 return 0;
68997
68998 if (sd->flags & SPLICE_F_NONBLOCK)
68999@@ -1025,7 +1025,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
69000 ops->release(pipe, buf);
69001 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
69002 pipe->nrbufs--;
69003- if (pipe->files)
69004+ if (atomic_read(&pipe->files))
69005 sd.need_wakeup = true;
69006 } else {
69007 buf->offset += ret;
69008@@ -1159,7 +1159,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69009 long ret, bytes;
69010 umode_t i_mode;
69011 size_t len;
69012- int i, flags;
69013+ int i, flags, more;
69014
69015 /*
69016 * We require the input being a regular file, as we don't want to
69017@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69018 * out of the pipe right after the splice_to_pipe(). So set
69019 * PIPE_READERS appropriately.
69020 */
69021- pipe->readers = 1;
69022+ atomic_set(&pipe->readers, 1);
69023
69024 current->splice_pipe = pipe;
69025 }
69026@@ -1202,6 +1202,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69027 * Don't block on output, we have to drain the direct pipe.
69028 */
69029 sd->flags &= ~SPLICE_F_NONBLOCK;
69030+ more = sd->flags & SPLICE_F_MORE;
69031
69032 while (len) {
69033 size_t read_len;
69034@@ -1215,6 +1216,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69035 sd->total_len = read_len;
69036
69037 /*
69038+	 * If more data is pending, set SPLICE_F_MORE.
69039+	 * If this is the last data and SPLICE_F_MORE was not set
69040+	 * initially, clear it.
69041+ */
69042+ if (read_len < len)
69043+ sd->flags |= SPLICE_F_MORE;
69044+ else if (!more)
69045+ sd->flags &= ~SPLICE_F_MORE;
69046+ /*
69047 * NOTE: nonblocking mode only applies to the input. We
69048 * must not do the output in nonblocking mode as then we
69049 * could get stuck data in the internal pipe:
69050@@ -1482,6 +1492,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
69051
69052 partial[buffers].offset = off;
69053 partial[buffers].len = plen;
69054+ partial[buffers].private = 0;
69055
69056 off = 0;
69057 len -= plen;
69058@@ -1718,9 +1729,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69059 ret = -ERESTARTSYS;
69060 break;
69061 }
69062- if (!pipe->writers)
69063+ if (!atomic_read(&pipe->writers))
69064 break;
69065- if (!pipe->waiting_writers) {
69066+ if (!atomic_read(&pipe->waiting_writers)) {
69067 if (flags & SPLICE_F_NONBLOCK) {
69068 ret = -EAGAIN;
69069 break;
69070@@ -1752,7 +1763,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69071 pipe_lock(pipe);
69072
69073 while (pipe->nrbufs >= pipe->buffers) {
69074- if (!pipe->readers) {
69075+ if (!atomic_read(&pipe->readers)) {
69076 send_sig(SIGPIPE, current, 0);
69077 ret = -EPIPE;
69078 break;
69079@@ -1765,9 +1776,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69080 ret = -ERESTARTSYS;
69081 break;
69082 }
69083- pipe->waiting_writers++;
69084+ atomic_inc(&pipe->waiting_writers);
69085 pipe_wait(pipe);
69086- pipe->waiting_writers--;
69087+ atomic_dec(&pipe->waiting_writers);
69088 }
69089
69090 pipe_unlock(pipe);
69091@@ -1803,14 +1814,14 @@ retry:
69092 pipe_double_lock(ipipe, opipe);
69093
69094 do {
69095- if (!opipe->readers) {
69096+ if (!atomic_read(&opipe->readers)) {
69097 send_sig(SIGPIPE, current, 0);
69098 if (!ret)
69099 ret = -EPIPE;
69100 break;
69101 }
69102
69103- if (!ipipe->nrbufs && !ipipe->writers)
69104+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
69105 break;
69106
69107 /*
69108@@ -1907,7 +1918,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69109 pipe_double_lock(ipipe, opipe);
69110
69111 do {
69112- if (!opipe->readers) {
69113+ if (!atomic_read(&opipe->readers)) {
69114 send_sig(SIGPIPE, current, 0);
69115 if (!ret)
69116 ret = -EPIPE;
69117@@ -1952,7 +1963,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69118 * return EAGAIN if we have the potential of some data in the
69119 * future, otherwise just return 0
69120 */
69121- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
69122+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
69123 ret = -EAGAIN;
69124
69125 pipe_unlock(ipipe);
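
The SPLICE_F_MORE hunk above tracks whether the caller asked for MORE up front, then sets the flag while data is still pending and restores the caller's choice on the final chunk. A self-contained sketch of that decision (the flag value matches the uapi definition):

    #include <stddef.h>

    #define SPLICE_F_MORE 0x04   /* as in <linux/splice.h> */

    static unsigned int more_flag(unsigned int caller_flags,
                                  size_t read_len, size_t remaining)
    {
        unsigned int flags = caller_flags;

        if (read_len < remaining)
            flags |= SPLICE_F_MORE;          /* more data will follow */
        else if (!(caller_flags & SPLICE_F_MORE))
            flags &= ~SPLICE_F_MORE;         /* last chunk, caller did not ask */

        return flags;
    }
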
69126diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
69127index 92fcde7..1687329 100644
69128--- a/fs/squashfs/xattr.c
69129+++ b/fs/squashfs/xattr.c
69130@@ -46,8 +46,8 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69131 + msblk->xattr_table;
69132 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
69133 int count = squashfs_i(inode)->xattr_count;
69134- size_t rest = buffer_size;
69135- int err;
69136+ size_t used = 0;
69137+ ssize_t err;
69138
69139 /* check that the file system has xattrs */
69140 if (msblk->xattr_id_table == NULL)
69141@@ -68,11 +68,11 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69142 name_size = le16_to_cpu(entry.size);
69143 handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
69144 if (handler)
69145- prefix_size = handler->list(d, buffer, rest, NULL,
69146+ prefix_size = handler->list(d, buffer, buffer ? buffer_size - used : 0, NULL,
69147 name_size, handler->flags);
69148 if (prefix_size) {
69149 if (buffer) {
69150- if (prefix_size + name_size + 1 > rest) {
69151+ if (prefix_size + name_size + 1 > buffer_size - used) {
69152 err = -ERANGE;
69153 goto failed;
69154 }
69155@@ -86,7 +86,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69156 buffer[name_size] = '\0';
69157 buffer += name_size + 1;
69158 }
69159- rest -= prefix_size + name_size + 1;
69160+ used += prefix_size + name_size + 1;
69161 } else {
69162 			/* no handler or insufficient privileges, so skip */
69163 err = squashfs_read_metadata(sb, NULL, &start,
69164@@ -107,7 +107,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69165 if (err < 0)
69166 goto failed;
69167 }
69168- err = buffer_size - rest;
69169+ err = used;
69170
69171 failed:
69172 return err;
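
The rewrite above replaces the decrement-only "rest" counter with a "used" accumulator, which keeps the standard listxattr contract intact: with a NULL buffer, only count the bytes needed; with a real buffer, fail with -ERANGE as soon as the next name would not fit. A minimal sketch of that contract:

    #include <errno.h>
    #include <string.h>
    #include <sys/types.h>

    static ssize_t list_names(const char *const *names, size_t n,
                              char *buffer, size_t buffer_size)
    {
        size_t used = 0;

        for (size_t i = 0; i < n; i++) {
            size_t len = strlen(names[i]) + 1;      /* include the NUL */

            if (buffer) {
                if (len > buffer_size - used)
                    return -ERANGE;                 /* next name would overflow */
                memcpy(buffer + used, names[i], len);
            }
            used += len;                            /* bytes written or needed */
        }
        return used;
    }
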
69173diff --git a/fs/stat.c b/fs/stat.c
69174index ae0c3ce..9ee641c 100644
69175--- a/fs/stat.c
69176+++ b/fs/stat.c
69177@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
69178 stat->gid = inode->i_gid;
69179 stat->rdev = inode->i_rdev;
69180 stat->size = i_size_read(inode);
69181- stat->atime = inode->i_atime;
69182- stat->mtime = inode->i_mtime;
69183+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69184+ stat->atime = inode->i_ctime;
69185+ stat->mtime = inode->i_ctime;
69186+ } else {
69187+ stat->atime = inode->i_atime;
69188+ stat->mtime = inode->i_mtime;
69189+ }
69190 stat->ctime = inode->i_ctime;
69191 stat->blksize = (1 << inode->i_blkbits);
69192 stat->blocks = inode->i_blocks;
69193@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
69194 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
69195 {
69196 struct inode *inode = path->dentry->d_inode;
69197+ int retval;
69198
69199- if (inode->i_op->getattr)
69200- return inode->i_op->getattr(path->mnt, path->dentry, stat);
69201+ if (inode->i_op->getattr) {
69202+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
69203+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69204+ stat->atime = stat->ctime;
69205+ stat->mtime = stat->ctime;
69206+ }
69207+ return retval;
69208+ }
69209
69210 generic_fillattr(inode, stat);
69211 return 0;
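
Both hunks above apply the same masking rule: for a sidechannel device observed by a task without CAP_MKNOD, report the create time in place of the access/modify times, so stat() cannot reveal usage patterns (e.g. keystroke timing on /dev/ptmx). A stand-alone sketch of the rule, with a simplified struct standing in for struct kstat:

    struct times { long atime, mtime, ctime; };

    static void fill_times(struct times *out, const struct times *in,
                           int is_sidechannel, int may_observe)
    {
        if (is_sidechannel && !may_observe) {
            out->atime = in->ctime;   /* hide the real access time */
            out->mtime = in->ctime;   /* hide the real modify time */
        } else {
            out->atime = in->atime;
            out->mtime = in->mtime;
        }
        out->ctime = in->ctime;
    }
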
69212diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
69213index 0b45ff4..edf9d3a 100644
69214--- a/fs/sysfs/dir.c
69215+++ b/fs/sysfs/dir.c
69216@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
69217 kfree(buf);
69218 }
69219
69220+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69221+extern int grsec_enable_sysfs_restrict;
69222+#endif
69223+
69224 /**
69225 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
69226 * @kobj: object we're creating directory for
69227@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
69228 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69229 {
69230 struct kernfs_node *parent, *kn;
69231+ const char *name;
69232+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
69233+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69234+ const char *parent_name;
69235+#endif
69236
69237 BUG_ON(!kobj);
69238
69239+ name = kobject_name(kobj);
69240+
69241 if (kobj->parent)
69242 parent = kobj->parent->sd;
69243 else
69244@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69245 if (!parent)
69246 return -ENOENT;
69247
69248- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
69249- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
69250+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69251+ parent_name = parent->name;
69252+ mode = S_IRWXU;
69253+
69254+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
69255+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
69256+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
69257+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
69258+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
69259+ if (!grsec_enable_sysfs_restrict)
69260+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
69261+#endif
69262+
69263+ kn = kernfs_create_dir_ns(parent, name,
69264+ mode, kobj, ns);
69265 if (IS_ERR(kn)) {
69266 if (PTR_ERR(kn) == -EEXIST)
69267- sysfs_warn_dup(parent, kobject_name(kobj));
69268+ sysfs_warn_dup(parent, name);
69269 return PTR_ERR(kn);
69270 }
69271
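
The effect of the hunk above is a mode whitelist: when grsec_enable_sysfs_restrict is set, sysfs directories default to root-only, and a handful of parent/name pairs stay world-readable for compatibility. A self-contained sketch of the selection logic:

    #include <string.h>

    static unsigned int sysfs_dir_mode(const char *parent, const char *name,
                                       int restrict_enabled)
    {
        const unsigned int open_mode = 0755, closed_mode = 0700;

        if (!restrict_enabled)
            return open_mode;

        if ((!strcmp(parent, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
            (!strcmp(parent, "devices") && !strcmp(name, "system")) ||
            (!strcmp(parent, "fs") && (!strcmp(name, "selinux") ||
                                       !strcmp(name, "fuse") ||
                                       !strcmp(name, "ecryptfs"))) ||
            (!strcmp(parent, "system") && !strcmp(name, "cpu")))
            return open_mode;                /* whitelisted for non-root access */

        return closed_mode;                  /* everything else is root-only */
    }
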
69272diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
69273index 69d4889..a810bd4 100644
69274--- a/fs/sysv/sysv.h
69275+++ b/fs/sysv/sysv.h
69276@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
69277 #endif
69278 }
69279
69280-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69281+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69282 {
69283 if (sbi->s_bytesex == BYTESEX_PDP)
69284 return PDP_swab((__force __u32)n);
69285diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
69286index fb08b0c..65fcc7e 100644
69287--- a/fs/ubifs/io.c
69288+++ b/fs/ubifs/io.c
69289@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
69290 return err;
69291 }
69292
69293-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69294+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69295 {
69296 int err;
69297
69298diff --git a/fs/udf/misc.c b/fs/udf/misc.c
69299index c175b4d..8f36a16 100644
69300--- a/fs/udf/misc.c
69301+++ b/fs/udf/misc.c
69302@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
69303
69304 u8 udf_tag_checksum(const struct tag *t)
69305 {
69306- u8 *data = (u8 *)t;
69307+ const u8 *data = (const u8 *)t;
69308 u8 checksum = 0;
69309 int i;
69310 for (i = 0; i < sizeof(struct tag); ++i)
69311diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
69312index 8d974c4..b82f6ec 100644
69313--- a/fs/ufs/swab.h
69314+++ b/fs/ufs/swab.h
69315@@ -22,7 +22,7 @@ enum {
69316 BYTESEX_BE
69317 };
69318
69319-static inline u64
69320+static inline u64 __intentional_overflow(-1)
69321 fs64_to_cpu(struct super_block *sbp, __fs64 n)
69322 {
69323 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69324@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
69325 return (__force __fs64)cpu_to_be64(n);
69326 }
69327
69328-static inline u32
69329+static inline u32 __intentional_overflow(-1)
69330 fs32_to_cpu(struct super_block *sbp, __fs32 n)
69331 {
69332 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69333diff --git a/fs/utimes.c b/fs/utimes.c
69334index aa138d6..5f3a811 100644
69335--- a/fs/utimes.c
69336+++ b/fs/utimes.c
69337@@ -1,6 +1,7 @@
69338 #include <linux/compiler.h>
69339 #include <linux/file.h>
69340 #include <linux/fs.h>
69341+#include <linux/security.h>
69342 #include <linux/linkage.h>
69343 #include <linux/mount.h>
69344 #include <linux/namei.h>
69345@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
69346 }
69347 }
69348 retry_deleg:
69349+
69350+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
69351+ error = -EACCES;
69352+ goto mnt_drop_write_and_out;
69353+ }
69354+
69355 mutex_lock(&inode->i_mutex);
69356 error = notify_change(path->dentry, &newattrs, &delegated_inode);
69357 mutex_unlock(&inode->i_mutex);
69358diff --git a/fs/xattr.c b/fs/xattr.c
69359index 4ef6985..a6cd6567 100644
69360--- a/fs/xattr.c
69361+++ b/fs/xattr.c
69362@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
69363 return rc;
69364 }
69365
69366+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
69367+ssize_t
69368+pax_getxattr(struct dentry *dentry, void *value, size_t size)
69369+{
69370+ struct inode *inode = dentry->d_inode;
69371+ ssize_t error;
69372+
69373+ error = inode_permission(inode, MAY_EXEC);
69374+ if (error)
69375+ return error;
69376+
69377+ if (inode->i_op->getxattr)
69378+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
69379+ else
69380+ error = -EOPNOTSUPP;
69381+
69382+ return error;
69383+}
69384+EXPORT_SYMBOL(pax_getxattr);
69385+#endif
69386+
69387 ssize_t
69388 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
69389 {
69390@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
69391 * Extended attribute SET operations
69392 */
69393 static long
69394-setxattr(struct dentry *d, const char __user *name, const void __user *value,
69395+setxattr(struct path *path, const char __user *name, const void __user *value,
69396 size_t size, int flags)
69397 {
69398 int error;
69399@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
69400 posix_acl_fix_xattr_from_user(kvalue, size);
69401 }
69402
69403- error = vfs_setxattr(d, kname, kvalue, size, flags);
69404+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
69405+ error = -EACCES;
69406+ goto out;
69407+ }
69408+
69409+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
69410 out:
69411 if (vvalue)
69412 vfree(vvalue);
69413@@ -376,7 +402,7 @@ retry:
69414 return error;
69415 error = mnt_want_write(path.mnt);
69416 if (!error) {
69417- error = setxattr(path.dentry, name, value, size, flags);
69418+ error = setxattr(&path, name, value, size, flags);
69419 mnt_drop_write(path.mnt);
69420 }
69421 path_put(&path);
69422@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
69423 audit_file(f.file);
69424 error = mnt_want_write_file(f.file);
69425 if (!error) {
69426- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
69427+ error = setxattr(&f.file->f_path, name, value, size, flags);
69428 mnt_drop_write_file(f.file);
69429 }
69430 fdput(f);
69431@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
69432 * Extended attribute REMOVE operations
69433 */
69434 static long
69435-removexattr(struct dentry *d, const char __user *name)
69436+removexattr(struct path *path, const char __user *name)
69437 {
69438 int error;
69439 char kname[XATTR_NAME_MAX + 1];
69440@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
69441 if (error < 0)
69442 return error;
69443
69444- return vfs_removexattr(d, kname);
69445+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
69446+ return -EACCES;
69447+
69448+ return vfs_removexattr(path->dentry, kname);
69449 }
69450
69451 static int path_removexattr(const char __user *pathname,
69452@@ -623,7 +652,7 @@ retry:
69453 return error;
69454 error = mnt_want_write(path.mnt);
69455 if (!error) {
69456- error = removexattr(path.dentry, name);
69457+ error = removexattr(&path, name);
69458 mnt_drop_write(path.mnt);
69459 }
69460 path_put(&path);
69461@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
69462 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
69463 {
69464 struct fd f = fdget(fd);
69465+ struct path *path;
69466 int error = -EBADF;
69467
69468 if (!f.file)
69469 return error;
69470+ path = &f.file->f_path;
69471 audit_file(f.file);
69472 error = mnt_want_write_file(f.file);
69473 if (!error) {
69474- error = removexattr(f.file->f_path.dentry, name);
69475+ error = removexattr(path, name);
69476 mnt_drop_write_file(f.file);
69477 }
69478 fdput(f);
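
setxattr() and removexattr() are rewidened above from a bare dentry to a struct path so the grsecurity hooks can evaluate the vfsmount as well as the dentry. A simplified, self-contained sketch of the resulting call shape (stubs stand in for the kernel hooks):

    #include <errno.h>

    struct dentry;
    struct vfsmount;
    struct path { struct vfsmount *mnt; struct dentry *dentry; };

    /* stand-ins: 1 = allowed, 0 = denied */
    static int acl_allows(struct dentry *d, struct vfsmount *m)
    { (void)d; (void)m; return 1; }
    static int do_setxattr(struct dentry *d) { (void)d; return 0; }

    static int setxattr_checked(struct path *path)
    {
        if (!acl_allows(path->dentry, path->mnt))
            return -EACCES;              /* mount-aware policy denial */

        return do_setxattr(path->dentry);
    }
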
69479diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
69480index 61ec015..7c18807 100644
69481--- a/fs/xfs/libxfs/xfs_bmap.c
69482+++ b/fs/xfs/libxfs/xfs_bmap.c
69483@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
69484
69485 #else
69486 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
69487-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
69488+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
69489 #endif /* DEBUG */
69490
69491 /*
69492diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
69493index 098cd78..724d3f8 100644
69494--- a/fs/xfs/xfs_dir2_readdir.c
69495+++ b/fs/xfs/xfs_dir2_readdir.c
69496@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
69497 ino = dp->d_ops->sf_get_ino(sfp, sfep);
69498 filetype = dp->d_ops->sf_get_ftype(sfep);
69499 ctx->pos = off & 0x7fffffff;
69500- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69501+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
69502+ char name[sfep->namelen];
69503+ memcpy(name, sfep->name, sfep->namelen);
69504+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
69505+ return 0;
69506+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69507 xfs_dir3_get_dtype(dp->i_mount, filetype)))
69508 return 0;
69509 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
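
When the shortform entry name lives in the inode's inline data, the hunk above snapshots it into a stack buffer before calling dir_emit(), so the source cannot change (or be reused) underneath the copy-out to userspace. A self-contained sketch of that defensive copy, with emit() standing in for dir_emit():

    #include <stdbool.h>
    #include <string.h>

    static bool emit(const char *name, unsigned char len)
    { (void)name; (void)len; return true; }

    static bool emit_inline_name(const unsigned char *src, unsigned char namelen)
    {
        char name[256];            /* a directory entry name is at most 255 bytes */

        memcpy(name, src, namelen);
        return emit(name, namelen);
    }
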
69510diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
69511index ac4feae..386d551 100644
69512--- a/fs/xfs/xfs_ioctl.c
69513+++ b/fs/xfs/xfs_ioctl.c
69514@@ -120,7 +120,7 @@ xfs_find_handle(
69515 }
69516
69517 error = -EFAULT;
69518- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
69519+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
69520 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
69521 goto out_put;
69522
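
The added hsize check above is a standard bound on a caller-influenced copy length: never let a computed size drive copy_to_user() past the end of the fixed-size on-stack handle. A minimal sketch of the idiom:

    #include <errno.h>
    #include <string.h>

    static int copy_handle(void *dst, size_t requested,
                           const void *handle, size_t handle_size)
    {
        if (requested > handle_size)
            return -EFAULT;          /* would leak adjacent stack memory */

        memcpy(dst, handle, requested);
        return 0;
    }
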
69523diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
69524index c31d2c2..6ec8f62 100644
69525--- a/fs/xfs/xfs_linux.h
69526+++ b/fs/xfs/xfs_linux.h
69527@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
69528 * of the compiler which do not like us using do_div in the middle
69529 * of large functions.
69530 */
69531-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
69532+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
69533 {
69534 __u32 mod;
69535
69536@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
69537 return 0;
69538 }
69539 #else
69540-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
69541+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
69542 {
69543 __u32 mod;
69544
69545diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
69546new file mode 100644
69547index 0000000..31f8fe4
69548--- /dev/null
69549+++ b/grsecurity/Kconfig
69550@@ -0,0 +1,1182 @@
69551+#
69552+# grsecurity configuration
69553+#
69554+menu "Memory Protections"
69555+depends on GRKERNSEC
69556+
69557+config GRKERNSEC_KMEM
69558+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69559+ default y if GRKERNSEC_CONFIG_AUTO
69560+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69561+ help
69562+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
69563+ be written to or read from to modify or leak the contents of the running
69564+ kernel. /dev/port will also not be allowed to be opened, writing to
69565+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
69566+ If you have module support disabled, enabling this will close up several
69567+ ways that are currently used to insert malicious code into the running
69568+ kernel.
69569+
69570+ Even with this feature enabled, we still highly recommend that
69571+ you use the RBAC system, as it is still possible for an attacker to
69572+ modify the running kernel through other more obscure methods.
69573+
69574+ It is highly recommended that you say Y here if you meet all the
69575+ conditions above.
69576+
69577+config GRKERNSEC_VM86
69578+ bool "Restrict VM86 mode"
69579+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69580+ depends on X86_32
69581+
69582+ help
69583+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69584+ make use of a special execution mode on 32bit x86 processors called
69585+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69586+ video cards and will still work with this option enabled. The purpose
69587+ of the option is to prevent exploitation of emulation errors in
69588+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69589+ Nearly all users should be able to enable this option.
69590+
69591+config GRKERNSEC_IO
69592+ bool "Disable privileged I/O"
69593+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69594+ depends on X86
69595+ select RTC_CLASS
69596+ select RTC_INTF_DEV
69597+ select RTC_DRV_CMOS
69598+
69599+ help
69600+ If you say Y here, all ioperm and iopl calls will return an error.
69601+ Ioperm and iopl can be used to modify the running kernel.
69602+ Unfortunately, some programs need this access to operate properly,
69603+ the most notable of which are XFree86 and hwclock. hwclock can be
69604+ remedied by having RTC support in the kernel, so real-time
69605+ clock support is enabled if this option is enabled, to ensure
69606+ that hwclock operates correctly. If hwclock still does not work,
69607+ either update udev or symlink /dev/rtc to /dev/rtc0.
69608+
69609+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69610+ you may not be able to boot into a graphical environment with this
69611+ option enabled. In this case, you should use the RBAC system instead.
69612+
69613+config GRKERNSEC_BPF_HARDEN
69614+ bool "Harden BPF interpreter"
69615+ default y if GRKERNSEC_CONFIG_AUTO
69616+ help
69617+ Unlike previous versions of grsecurity that hardened both the BPF
69618+ interpreted code against corruption at rest as well as the JIT code
69619+ against JIT-spray attacks and attacker-controlled immediate values
69620+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
69621+ and will ensure the interpreted code is read-only at rest. This feature
69622+ may be removed at a later time when eBPF stabilizes to entirely revert
69623+ back to the more secure pre-3.16 BPF interpreter/JIT.
69624+
69625+ If you're using KERNEXEC, it's recommended that you enable this option
69626+ to supplement the hardening of the kernel.
69627+
69628+config GRKERNSEC_PERF_HARDEN
69629+ bool "Disable unprivileged PERF_EVENTS usage by default"
69630+ default y if GRKERNSEC_CONFIG_AUTO
69631+ depends on PERF_EVENTS
69632+ help
69633+ If you say Y here, the range of acceptable values for the
69634+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
69635+ default to a new value: 3. When the sysctl is set to this value, no
69636+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
69637+
69638+ Though PERF_EVENTS can be used legitimately for performance monitoring
69639+ and low-level application profiling, it is forced on regardless of
69640+ configuration, has been at fault for several vulnerabilities, and
69641+ creates new opportunities for side channels and other information leaks.
69642+
69643+ This feature puts PERF_EVENTS into a secure default state and permits
69644+ the administrator to change out of it temporarily if unprivileged
69645+ application profiling is needed.
69646+
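
As the help text notes, the hardened default simply raises /proc/sys/kernel/perf_event_paranoid to 3. A small userspace sketch for checking the effective level before attempting perf_event_open():

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (f && fscanf(f, "%d", &level) == 1)
            printf("perf_event_paranoid=%d%s\n", level,
                   level >= 3 ? " (unprivileged perf disabled)" : "");
        if (f)
            fclose(f);
        return 0;
    }
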
69647+config GRKERNSEC_RAND_THREADSTACK
69648+ bool "Insert random gaps between thread stacks"
69649+ default y if GRKERNSEC_CONFIG_AUTO
69650+ depends on PAX_RANDMMAP && !PPC
69651+ help
69652+ If you say Y here, a random-sized gap will be enforced between allocated
69653+ thread stacks. Glibc's NPTL and other threading libraries that
69654+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69655+ The implementation currently provides 8 bits of entropy for the gap.
69656+
69657+ Many distributions do not compile threaded remote services with the
69658+ -fstack-check argument to GCC, causing the variable-sized stack-based
69659+ allocator, alloca(), to not probe the stack on allocation. This
69660+ permits an unbounded alloca() to skip over any guard page and potentially
69661+ modify another thread's stack reliably. An enforced random gap
69662+ reduces the reliability of such an attack and increases the chance
69663+ that such a read/write to another thread's stack instead lands in
69664+ an unmapped area, causing a crash and triggering grsecurity's
69665+ anti-bruteforcing logic.
69666+
69667+config GRKERNSEC_PROC_MEMMAP
69668+ bool "Harden ASLR against information leaks and entropy reduction"
69669+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69670+ depends on PAX_NOEXEC || PAX_ASLR
69671+ help
69672+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69673+ give no information about the addresses of its mappings if
69674+ PaX features that rely on random addresses are enabled on the task.
69675+ In addition to sanitizing this information and disabling other
69676+	  dangerous sources of information, this option causes reads of sensitive
69677+	  /proc/<pid> entries to be denied where the file descriptor was opened in a
69678+	  different task than the one performing the read. Such attempts are logged.
69679+ This option also limits argv/env strings for suid/sgid binaries
69680+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69681+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69682+ binaries to prevent alternative mmap layouts from being abused.
69683+
69684+ If you use PaX it is essential that you say Y here as it closes up
69685+ several holes that make full ASLR useless locally.
69686+
69687+
69688+config GRKERNSEC_KSTACKOVERFLOW
69689+ bool "Prevent kernel stack overflows"
69690+ default y if GRKERNSEC_CONFIG_AUTO
69691+ depends on !IA64 && 64BIT
69692+ help
69693+ If you say Y here, the kernel's process stacks will be allocated
69694+ with vmalloc instead of the kernel's default allocator. This
69695+ introduces guard pages that in combination with the alloca checking
69696+ of the STACKLEAK feature prevents all forms of kernel process stack
69697+ overflow abuse. Note that this is different from kernel stack
69698+ buffer overflows.
69699+
69700+config GRKERNSEC_BRUTE
69701+ bool "Deter exploit bruteforcing"
69702+ default y if GRKERNSEC_CONFIG_AUTO
69703+ help
69704+ If you say Y here, attempts to bruteforce exploits against forking
69705+ daemons such as apache or sshd, as well as against suid/sgid binaries
69706+ will be deterred. When a child of a forking daemon is killed by PaX
69707+ or crashes due to an illegal instruction or other suspicious signal,
69708+ the parent process will be delayed 30 seconds upon every subsequent
69709+ fork until the administrator is able to assess the situation and
69710+ restart the daemon.
69711+ In the suid/sgid case, the attempt is logged, the user has all their
69712+ existing instances of the suid/sgid binary terminated and will
69713+ be unable to execute any suid/sgid binaries for 15 minutes.
69714+
69715+ It is recommended that you also enable signal logging in the auditing
69716+ section so that logs are generated when a process triggers a suspicious
69717+ signal.
69718+ If the sysctl option is enabled, a sysctl option with name
69719+ "deter_bruteforce" is created.
69720+
69721+config GRKERNSEC_MODHARDEN
69722+ bool "Harden module auto-loading"
69723+ default y if GRKERNSEC_CONFIG_AUTO
69724+ depends on MODULES
69725+ help
69726+ If you say Y here, module auto-loading in response to use of some
69727+ feature implemented by an unloaded module will be restricted to
69728+ root users. Enabling this option helps defend against attacks
69729+ by unprivileged users who abuse the auto-loading behavior to
69730+ cause a vulnerable module to load that is then exploited.
69731+
69732+ If this option prevents a legitimate use of auto-loading for a
69733+ non-root user, the administrator can execute modprobe manually
69734+ with the exact name of the module mentioned in the alert log.
69735+ Alternatively, the administrator can add the module to the list
69736+ of modules loaded at boot by modifying init scripts.
69737+
69738+ Modification of init scripts will most likely be needed on
69739+ Ubuntu servers with encrypted home directory support enabled,
69740+ as the first non-root user logging in will cause the ecb(aes),
69741+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
69742+
69743+config GRKERNSEC_HIDESYM
69744+ bool "Hide kernel symbols"
69745+ default y if GRKERNSEC_CONFIG_AUTO
69746+ select PAX_USERCOPY_SLABS
69747+ help
69748+ If you say Y here, getting information on loaded modules, and
69749+ displaying all kernel symbols through a syscall will be restricted
69750+ to users with CAP_SYS_MODULE. For software compatibility reasons,
69751+ /proc/kallsyms will be restricted to the root user. The RBAC
69752+ system can hide that entry even from root.
69753+
69754+ This option also prevents leaking of kernel addresses through
69755+ several /proc entries.
69756+
69757+ Note that this option is only effective provided the following
69758+ conditions are met:
69759+ 1) The kernel using grsecurity is not precompiled by some distribution
69760+ 2) You have also enabled GRKERNSEC_DMESG
69761+ 3) You are using the RBAC system and hiding other files such as your
69762+ kernel image and System.map. Alternatively, enabling this option
69763+ causes the permissions on /boot, /lib/modules, and the kernel
69764+ source directory to change at compile time to prevent
69765+ reading by non-root users.
69766+ If the above conditions are met, this option will aid in providing a
69767+ useful protection against local kernel exploitation of overflows
69768+ and arbitrary read/write vulnerabilities.
69769+
69770+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
69771+ in addition to this feature.
69772+
69773+config GRKERNSEC_RANDSTRUCT
69774+ bool "Randomize layout of sensitive kernel structures"
69775+ default y if GRKERNSEC_CONFIG_AUTO
69776+ select GRKERNSEC_HIDESYM
69777+ select MODVERSIONS if MODULES
69778+ help
69779+ If you say Y here, the layouts of a number of sensitive kernel
69780+ structures (task, fs, cred, etc) and all structures composed entirely
69781+ of function pointers (aka "ops" structs) will be randomized at compile-time.
69782+ This can introduce the requirement of an additional infoleak
69783+ vulnerability for exploits targeting these structure types.
69784+
69785+ Enabling this feature will introduce some performance impact, slightly
69786+ increase memory usage, and prevent the use of forensic tools like
69787+ Volatility against the system (unless the kernel source tree isn't
69788+ cleaned after kernel installation).
69789+
69790+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
69791+ It remains after a make clean to allow for external modules to be compiled
69792+ with the existing seed and will be removed by a make mrproper or
69793+ make distclean.
69794+
69795+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
69796+ to install the supporting headers explicitly in addition to the normal
69797+ gcc package.
69798+
69799+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
69800+ bool "Use cacheline-aware structure randomization"
69801+ depends on GRKERNSEC_RANDSTRUCT
69802+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
69803+ help
69804+ If you say Y here, the RANDSTRUCT randomization will make a best effort
69805+ at restricting randomization to cacheline-sized groups of elements. It
69806+ will further not randomize bitfields in structures. This reduces the
69807+ performance hit of RANDSTRUCT at the cost of weakened randomization.
69808+
69809+config GRKERNSEC_KERN_LOCKOUT
69810+ bool "Active kernel exploit response"
69811+ default y if GRKERNSEC_CONFIG_AUTO
69812+ depends on X86 || ARM || PPC || SPARC
69813+ help
69814+ If you say Y here, when a PaX alert is triggered due to suspicious
69815+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
69816+ or an OOPS occurs due to bad memory accesses, instead of just
69817+ terminating the offending process (and potentially allowing
69818+ a subsequent exploit from the same user), we will take one of two
69819+ actions:
69820+ If the user was root, we will panic the system
69821+ If the user was non-root, we will log the attempt, terminate
69822+ all processes owned by the user, then prevent them from creating
69823+ any new processes until the system is restarted
69824+ This deters repeated kernel exploitation/bruteforcing attempts
69825+ and is useful for later forensics.
69826+
69827+config GRKERNSEC_OLD_ARM_USERLAND
69828+ bool "Old ARM userland compatibility"
69829+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
69830+ help
69831+ If you say Y here, stubs of executable code to perform such operations
69832+ as "compare-exchange" will be placed at fixed locations in the ARM vector
69833+ table. This is unfortunately needed for old ARM userland meant to run
69834+ across a wide range of processors. Without this option enabled,
69835+ the get_tls and data memory barrier stubs will be emulated by the kernel,
69836+ which is enough for Linaro userlands or other userlands designed for v6
69837+ and newer ARM CPUs. It's recommended that you try without this option enabled
69838+ first, and only enable it if your userland does not boot (it will likely fail
69839+ at init time).
69840+
69841+endmenu
69842+menu "Role Based Access Control Options"
69843+depends on GRKERNSEC
69844+
69845+config GRKERNSEC_RBAC_DEBUG
69846+ bool
69847+
69848+config GRKERNSEC_NO_RBAC
69849+ bool "Disable RBAC system"
69850+ help
69851+ If you say Y here, the /dev/grsec device will be removed from the kernel,
69852+ preventing the RBAC system from being enabled. You should only say Y
69853+ here if you have no intention of using the RBAC system, so as to prevent
69854+ an attacker with root access from misusing the RBAC system to hide files
69855+ and processes when loadable module support and /dev/[k]mem have been
69856+ locked down.
69857+
69858+config GRKERNSEC_ACL_HIDEKERN
69859+ bool "Hide kernel processes"
69860+ help
69861+ If you say Y here, all kernel threads will be hidden to all
69862+ processes but those whose subject has the "view hidden processes"
69863+ flag.
69864+
69865+config GRKERNSEC_ACL_MAXTRIES
69866+ int "Maximum tries before password lockout"
69867+ default 3
69868+ help
69869+ This option enforces the maximum number of times a user can attempt
69870+ to authorize themselves with the grsecurity RBAC system before being
69871+ denied the ability to attempt authorization again for a specified time.
69872+ The lower the number, the harder it will be to brute-force a password.
69873+
69874+config GRKERNSEC_ACL_TIMEOUT
69875+ int "Time to wait after max password tries, in seconds"
69876+ default 30
69877+ help
69878+ This option specifies the time the user must wait after attempting to
69879+ authorize to the RBAC system with the maximum number of invalid
69880+ passwords. The higher the number, the harder it will be to brute-force
69881+ a password.
69882+
69883+endmenu
69884+menu "Filesystem Protections"
69885+depends on GRKERNSEC
69886+
69887+config GRKERNSEC_PROC
69888+ bool "Proc restrictions"
69889+ default y if GRKERNSEC_CONFIG_AUTO
69890+ help
69891+ If you say Y here, the permissions of the /proc filesystem
69892+ will be altered to enhance system security and privacy. You MUST
69893+ choose either a user only restriction or a user and group restriction.
69894+ Depending upon the option you choose, you can either restrict users to
69895+ see only the processes they themselves run, or choose a group that can
69896+ view all processes and files normally restricted to root if you choose
69897+ the "restrict to user only" option. NOTE: If you're running identd or
69898+ ntpd as a non-root user, you will have to run it as the group you
69899+ specify here.
69900+
69901+config GRKERNSEC_PROC_USER
69902+ bool "Restrict /proc to user only"
69903+ depends on GRKERNSEC_PROC
69904+ help
69905+ If you say Y here, non-root users will only be able to view their own
69906+	  processes, and will be restricted from viewing network-related
69907+	  information and kernel symbol and module information.
69908+
69909+config GRKERNSEC_PROC_USERGROUP
69910+ bool "Allow special group"
69911+ default y if GRKERNSEC_CONFIG_AUTO
69912+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
69913+ help
69914+ If you say Y here, you will be able to select a group that will be
69915+ able to view all processes and network-related information. If you've
69916+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
69917+ remain hidden. This option is useful if you want to run identd as
69918+ a non-root user. The group you select may also be chosen at boot time
69919+ via "grsec_proc_gid=" on the kernel commandline.
69920+
69921+config GRKERNSEC_PROC_GID
69922+ int "GID for special group"
69923+ depends on GRKERNSEC_PROC_USERGROUP
69924+ default 1001
69925+
69926+config GRKERNSEC_PROC_ADD
69927+ bool "Additional restrictions"
69928+ default y if GRKERNSEC_CONFIG_AUTO
69929+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
69930+ help
69931+ If you say Y here, additional restrictions will be placed on
69932+ /proc that keep normal users from viewing device information and
69933+ slabinfo information that could be useful for exploits.
69934+
69935+config GRKERNSEC_LINK
69936+ bool "Linking restrictions"
69937+ default y if GRKERNSEC_CONFIG_AUTO
69938+ help
69939+ If you say Y here, /tmp race exploits will be prevented, since users
69940+ will no longer be able to follow symlinks owned by other users in
69941+ world-writable +t directories (e.g. /tmp), unless the owner of the
69942+	  symlink is the owner of the directory. Users will also not be
69943+ able to hardlink to files they do not own. If the sysctl option is
69944+ enabled, a sysctl option with name "linking_restrictions" is created.
69945+
69946+config GRKERNSEC_SYMLINKOWN
69947+ bool "Kernel-enforced SymlinksIfOwnerMatch"
69948+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69949+ help
69950+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
69951+ that prevents it from being used as a security feature. As Apache
69952+ verifies the symlink by performing a stat() against the target of
69953+ the symlink before it is followed, an attacker can setup a symlink
69954+ to point to a same-owned file, then replace the symlink with one
69955+ that targets another user's file just after Apache "validates" the
69956+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
69957+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
69958+ will be in place for the group you specify. If the sysctl option
69959+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
69960+ created.
69961+
69962+config GRKERNSEC_SYMLINKOWN_GID
69963+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
69964+ depends on GRKERNSEC_SYMLINKOWN
69965+ default 1006
69966+ help
69967+ Setting this GID determines what group kernel-enforced
69968+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
69969+ is enabled, a sysctl option with name "symlinkown_gid" is created.
69970+
69971+config GRKERNSEC_FIFO
69972+ bool "FIFO restrictions"
69973+ default y if GRKERNSEC_CONFIG_AUTO
69974+ help
69975+ If you say Y here, users will not be able to write to FIFOs they don't
69976+ own in world-writable +t directories (e.g. /tmp), unless the owner of
69977+	  the FIFO is also the owner of the directory it's held in. If the sysctl
69978+ option is enabled, a sysctl option with name "fifo_restrictions" is
69979+ created.
69980+
69981+config GRKERNSEC_SYSFS_RESTRICT
69982+ bool "Sysfs/debugfs restriction"
69983+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69984+ depends on SYSFS
69985+ help
69986+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
69987+ any filesystem normally mounted under it (e.g. debugfs) will be
69988+ mostly accessible only by root. These filesystems generally provide access
69989+ to hardware and debug information that isn't appropriate for unprivileged
69990+ users of the system. Sysfs and debugfs have also become a large source
69991+ of new vulnerabilities, ranging from infoleaks to local compromise.
69992+ There has been very little oversight with an eye toward security involved
69993+ in adding new exporters of information to these filesystems, so their
69994+ use is discouraged.
69995+ For reasons of compatibility, a few directories have been whitelisted
69996+ for access by non-root users:
69997+ /sys/fs/selinux
69998+ /sys/fs/fuse
69999+ /sys/devices/system/cpu
70000+
70001+config GRKERNSEC_ROFS
70002+ bool "Runtime read-only mount protection"
70003+ depends on SYSCTL
70004+ help
70005+ If you say Y here, a sysctl option with name "romount_protect" will
70006+ be created. By setting this option to 1 at runtime, filesystems
70007+ will be protected in the following ways:
70008+ * No new writable mounts will be allowed
70009+ * Existing read-only mounts won't be able to be remounted read/write
70010+ * Write operations will be denied on all block devices
70011+ This option acts independently of grsec_lock: once it is set to 1,
70012+ it cannot be turned off. Therefore, please be mindful of the resulting
70013+ behavior if this option is enabled in an init script on a read-only
70014+ filesystem.
70015+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
70016+ and GRKERNSEC_IO should be enabled and module loading disabled via
70017+ config or at runtime.
70018+ This feature is mainly intended for secure embedded systems.
70019+
70020+
70021+config GRKERNSEC_DEVICE_SIDECHANNEL
70022+ bool "Eliminate stat/notify-based device sidechannels"
70023+ default y if GRKERNSEC_CONFIG_AUTO
70024+ help
70025+ If you say Y here, timing analyses on block or character
70026+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
70027+ will be thwarted for unprivileged users. If a process without
70028+ CAP_MKNOD stats such a device, the last access and last modify times
70029+ will match the device's create time. No access or modify events
70030+ will be triggered through inotify/dnotify/fanotify for such devices.
70031+ This feature will prevent attacks that may at a minimum
70032+ allow an attacker to determine the administrator's password length.
70033+
70034+config GRKERNSEC_CHROOT
70035+ bool "Chroot jail restrictions"
70036+ default y if GRKERNSEC_CONFIG_AUTO
70037+ help
70038+ If you say Y here, you will be able to choose several options that will
70039+ make breaking out of a chrooted jail much more difficult. If you
70040+ encounter no software incompatibilities with the following options, it
70041+ is recommended that you enable each one.
70042+
70043+ Note that the chroot restrictions are not intended to apply to "chroots"
70044+ to directories that are simple bind mounts of the global root filesystem.
70045+ For several other reasons, a user shouldn't expect any significant
70046+ security by performing such a chroot.
70047+
70048+config GRKERNSEC_CHROOT_MOUNT
70049+ bool "Deny mounts"
70050+ default y if GRKERNSEC_CONFIG_AUTO
70051+ depends on GRKERNSEC_CHROOT
70052+ help
70053+ If you say Y here, processes inside a chroot will not be able to
70054+ mount or remount filesystems. If the sysctl option is enabled, a
70055+ sysctl option with name "chroot_deny_mount" is created.
70056+
70057+config GRKERNSEC_CHROOT_DOUBLE
70058+ bool "Deny double-chroots"
70059+ default y if GRKERNSEC_CONFIG_AUTO
70060+ depends on GRKERNSEC_CHROOT
70061+ help
70062+ If you say Y here, processes inside a chroot will not be able to chroot
70063+ again outside the chroot. This is a widely used method of breaking
70064+ out of a chroot jail and should not be allowed. If the sysctl
70065+ option is enabled, a sysctl option with name
70066+ "chroot_deny_chroot" is created.
70067+
70068+config GRKERNSEC_CHROOT_PIVOT
70069+ bool "Deny pivot_root in chroot"
70070+ default y if GRKERNSEC_CONFIG_AUTO
70071+ depends on GRKERNSEC_CHROOT
70072+ help
70073+ If you say Y here, processes inside a chroot will not be able to use
70074+ a function called pivot_root() that was introduced in Linux 2.3.41. It
70075+	  works similarly to chroot in that it changes the root filesystem. This
70076+ function could be misused in a chrooted process to attempt to break out
70077+ of the chroot, and therefore should not be allowed. If the sysctl
70078+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
70079+ created.
70080+
70081+config GRKERNSEC_CHROOT_CHDIR
70082+ bool "Enforce chdir(\"/\") on all chroots"
70083+ default y if GRKERNSEC_CONFIG_AUTO
70084+ depends on GRKERNSEC_CHROOT
70085+ help
70086+ If you say Y here, the current working directory of all newly-chrooted
70087+	  applications will be set to the root directory of the chroot.
70088+ The man page on chroot(2) states:
70089+ Note that this call does not change the current working
70090+ directory, so that `.' can be outside the tree rooted at
70091+ `/'. In particular, the super-user can escape from a
70092+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
70093+
70094+ It is recommended that you say Y here, since it's not known to break
70095+ any software. If the sysctl option is enabled, a sysctl option with
70096+ name "chroot_enforce_chdir" is created.
70097+
70098+config GRKERNSEC_CHROOT_CHMOD
70099+ bool "Deny (f)chmod +s"
70100+ default y if GRKERNSEC_CONFIG_AUTO
70101+ depends on GRKERNSEC_CHROOT
70102+ help
70103+ If you say Y here, processes inside a chroot will not be able to chmod
70104+ or fchmod files to make them have suid or sgid bits. This protects
70105+ against another published method of breaking a chroot. If the sysctl
70106+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
70107+ created.
70108+
70109+config GRKERNSEC_CHROOT_FCHDIR
70110+ bool "Deny fchdir and fhandle out of chroot"
70111+ default y if GRKERNSEC_CONFIG_AUTO
70112+ depends on GRKERNSEC_CHROOT
70113+ help
70114+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
70115+ to a file descriptor of the chrooting process that points to a directory
70116+ outside the filesystem will be stopped. Additionally, this option prevents
70117+ use of the recently-created syscall for opening files by a guessable "file
70118+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
70119+ with name "chroot_deny_fchdir" is created.
70120+
70121+config GRKERNSEC_CHROOT_MKNOD
70122+ bool "Deny mknod"
70123+ default y if GRKERNSEC_CONFIG_AUTO
70124+ depends on GRKERNSEC_CHROOT
70125+ help
70126+ If you say Y here, processes inside a chroot will not be allowed to
70127+ mknod. The problem with using mknod inside a chroot is that it
70128+ would allow an attacker to create a device entry that is the same
70129+	  as one on the physical root of your system, which could be anything
70130+	  from the console device to a device for your hard drive (which
70131+ they could then use to wipe the drive or steal data). It is recommended
70132+ that you say Y here, unless you run into software incompatibilities.
70133+ If the sysctl option is enabled, a sysctl option with name
70134+ "chroot_deny_mknod" is created.
70135+
70136+config GRKERNSEC_CHROOT_SHMAT
70137+ bool "Deny shmat() out of chroot"
70138+ default y if GRKERNSEC_CONFIG_AUTO
70139+ depends on GRKERNSEC_CHROOT
70140+ help
70141+ If you say Y here, processes inside a chroot will not be able to attach
70142+ to shared memory segments that were created outside of the chroot jail.
70143+ It is recommended that you say Y here. If the sysctl option is enabled,
70144+ a sysctl option with name "chroot_deny_shmat" is created.
70145+
70146+config GRKERNSEC_CHROOT_UNIX
70147+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
70148+ default y if GRKERNSEC_CONFIG_AUTO
70149+ depends on GRKERNSEC_CHROOT
70150+ help
70151+ If you say Y here, processes inside a chroot will not be able to
70152+ connect to abstract (meaning not belonging to a filesystem) Unix
70153+ domain sockets that were bound outside of a chroot. It is recommended
70154+ that you say Y here. If the sysctl option is enabled, a sysctl option
70155+ with name "chroot_deny_unix" is created.
70156+
70157+config GRKERNSEC_CHROOT_FINDTASK
70158+ bool "Protect outside processes"
70159+ default y if GRKERNSEC_CONFIG_AUTO
70160+ depends on GRKERNSEC_CHROOT
70161+ help
70162+ If you say Y here, processes inside a chroot will not be able to
70163+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
70164+ getsid, or view any process outside of the chroot. If the sysctl
70165+ option is enabled, a sysctl option with name "chroot_findtask" is
70166+ created.
70167+
70168+config GRKERNSEC_CHROOT_NICE
70169+ bool "Restrict priority changes"
70170+ default y if GRKERNSEC_CONFIG_AUTO
70171+ depends on GRKERNSEC_CHROOT
70172+ help
70173+ If you say Y here, processes inside a chroot will not be able to raise
70174+ the priority of processes in the chroot, or alter the priority of
70175+ processes outside the chroot. This provides more security than simply
70176+ removing CAP_SYS_NICE from the process' capability set. If the
70177+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
70178+ is created.
70179+
70180+config GRKERNSEC_CHROOT_SYSCTL
70181+ bool "Deny sysctl writes"
70182+ default y if GRKERNSEC_CONFIG_AUTO
70183+ depends on GRKERNSEC_CHROOT
70184+ help
70185+ If you say Y here, an attacker in a chroot will not be able to
70186+ write to sysctl entries, either by sysctl(2) or through a /proc
70187+ interface. It is strongly recommended that you say Y here. If the
70188+ sysctl option is enabled, a sysctl option with name
70189+ "chroot_deny_sysctl" is created.
70190+
70191+config GRKERNSEC_CHROOT_RENAME
70192+ bool "Deny bad renames"
70193+ default y if GRKERNSEC_CONFIG_AUTO
70194+ depends on GRKERNSEC_CHROOT
70195+ help
70196+ If you say Y here, an attacker in a chroot will not be able to
70197+ abuse the ability to create double chroots to break out of the
70198+ chroot by exploiting a race condition between a rename of a directory
70199+	  within a chroot and an open of a symlink with relative path
70200+ components. This feature will likewise prevent an accomplice outside
70201+ a chroot from enabling a user inside the chroot to break out and make
70202+ use of their credentials on the global filesystem. Enabling this
70203+ feature is essential to prevent root users from breaking out of a
70204+ chroot. If the sysctl option is enabled, a sysctl option with name
70205+ "chroot_deny_bad_rename" is created.
70206+
70207+config GRKERNSEC_CHROOT_CAPS
70208+ bool "Capability restrictions"
70209+ default y if GRKERNSEC_CONFIG_AUTO
70210+ depends on GRKERNSEC_CHROOT
70211+ help
70212+ If you say Y here, the capabilities on all processes within a
70213+ chroot jail will be lowered to stop module insertion, raw i/o,
70214+ system and net admin tasks, rebooting the system, modifying immutable
70215+ files, modifying IPC owned by another, and changing the system time.
70216+	  This is left as an option because it can break some apps. Disable this
70217+ if your chrooted apps are having problems performing those kinds of
70218+ tasks. If the sysctl option is enabled, a sysctl option with
70219+ name "chroot_caps" is created.
70220+
70221+config GRKERNSEC_CHROOT_INITRD
70222+ bool "Exempt initrd tasks from restrictions"
70223+ default y if GRKERNSEC_CONFIG_AUTO
70224+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
70225+ help
70226+ If you say Y here, tasks started prior to init will be exempted from
70227+ grsecurity's chroot restrictions. This option is mainly meant to
70228+	  work around Plymouth performing privileged operations unnecessarily
70229+ in a chroot.
70230+
70231+endmenu
70232+menu "Kernel Auditing"
70233+depends on GRKERNSEC
70234+
70235+config GRKERNSEC_AUDIT_GROUP
70236+ bool "Single group for auditing"
70237+ help
70238+ If you say Y here, the exec and chdir logging features will only operate
70239+ on a group you specify. This option is recommended if you only want to
70240+ watch certain users instead of having a large amount of logs from the
70241+ entire system. If the sysctl option is enabled, a sysctl option with
70242+ name "audit_group" is created.
70243+
70244+config GRKERNSEC_AUDIT_GID
70245+ int "GID for auditing"
70246+ depends on GRKERNSEC_AUDIT_GROUP
70247+ default 1007
70248+
70249+config GRKERNSEC_EXECLOG
70250+ bool "Exec logging"
70251+ help
70252+ If you say Y here, all execve() calls will be logged (since the
70253+ other exec*() calls are frontends to execve(), all execution
70254+	  will be logged). Useful for shell servers that like to keep track
70255+ of their users. If the sysctl option is enabled, a sysctl option with
70256+ name "exec_logging" is created.
70257+ WARNING: This option when enabled will produce a LOT of logs, especially
70258+ on an active system.
70259+
70260+config GRKERNSEC_RESLOG
70261+ bool "Resource logging"
70262+ default y if GRKERNSEC_CONFIG_AUTO
70263+ help
70264+ If you say Y here, all attempts to overstep resource limits will
70265+ be logged with the resource name, the requested size, and the current
70266+ limit. It is highly recommended that you say Y here. If the sysctl
70267+ option is enabled, a sysctl option with name "resource_logging" is
70268+ created. If the RBAC system is enabled, the sysctl value is ignored.
70269+
70270+config GRKERNSEC_CHROOT_EXECLOG
70271+ bool "Log execs within chroot"
70272+ help
70273+ If you say Y here, all executions inside a chroot jail will be logged
70274+ to syslog. This can cause a large amount of logs if certain
70275+	  applications (e.g. djb's daemontools) are installed on the system, and
70276+ is therefore left as an option. If the sysctl option is enabled, a
70277+ sysctl option with name "chroot_execlog" is created.
70278+
70279+config GRKERNSEC_AUDIT_PTRACE
70280+ bool "Ptrace logging"
70281+ help
70282+ If you say Y here, all attempts to attach to a process via ptrace
70283+ will be logged. If the sysctl option is enabled, a sysctl option
70284+ with name "audit_ptrace" is created.
70285+
70286+config GRKERNSEC_AUDIT_CHDIR
70287+ bool "Chdir logging"
70288+ help
70289+ If you say Y here, all chdir() calls will be logged. If the sysctl
70290+ option is enabled, a sysctl option with name "audit_chdir" is created.
70291+
70292+config GRKERNSEC_AUDIT_MOUNT
70293+ bool "(Un)Mount logging"
70294+ help
70295+ If you say Y here, all mounts and unmounts will be logged. If the
70296+ sysctl option is enabled, a sysctl option with name "audit_mount" is
70297+ created.
70298+
70299+config GRKERNSEC_SIGNAL
70300+ bool "Signal logging"
70301+ default y if GRKERNSEC_CONFIG_AUTO
70302+ help
70303+ If you say Y here, certain important signals will be logged, such as
70304+	  SIGSEGV, which as a result will inform you when an error in a program
70305+	  has occurred, which in some cases could indicate an exploit attempt.
70306+ If the sysctl option is enabled, a sysctl option with name
70307+ "signal_logging" is created.
70308+
70309+config GRKERNSEC_FORKFAIL
70310+ bool "Fork failure logging"
70311+ help
70312+ If you say Y here, all failed fork() attempts will be logged.
70313+ This could suggest a fork bomb, or someone attempting to overstep
70314+ their process limit. If the sysctl option is enabled, a sysctl option
70315+ with name "forkfail_logging" is created.
70316+
70317+config GRKERNSEC_TIME
70318+ bool "Time change logging"
70319+ default y if GRKERNSEC_CONFIG_AUTO
70320+ help
70321+ If you say Y here, any changes of the system clock will be logged.
70322+ If the sysctl option is enabled, a sysctl option with name
70323+ "timechange_logging" is created.
70324+
70325+config GRKERNSEC_PROC_IPADDR
70326+ bool "/proc/<pid>/ipaddr support"
70327+ default y if GRKERNSEC_CONFIG_AUTO
70328+ help
70329+ If you say Y here, a new entry will be added to each /proc/<pid>
70330+	  directory that contains the IP address of the remote user associated with the task.
70331+ The IP is carried across local TCP and AF_UNIX stream sockets.
70332+ This information can be useful for IDS/IPSes to perform remote response
70333+	  to a local attack. The entry is readable only by the owner of the
70334+	  process (and by root, if it has CAP_DAC_OVERRIDE, which can be removed via
70335+	  the RBAC system), and thus does not create privacy concerns.
70336+
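As a rough userspace illustration (not part of the patch itself), the new
entry reads like any other /proc file; only the path format comes from the
help text above, and the helper below is a minimal sketch:

	#include <stdio.h>
	#include <sys/types.h>

	/* Print the last-seen IP recorded for a task we own. */
	static int print_task_ipaddr(pid_t pid)
	{
		char path[64], addr[64];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/%d/ipaddr", (int)pid);
		f = fopen(path, "r");	/* readable only by the task's owner */
		if (!f)
			return -1;	/* no such task or permission denied */
		if (fgets(addr, sizeof(addr), f))
			printf("task %d: %s\n", (int)pid, addr);
		fclose(f);
		return 0;
	}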
70337+config GRKERNSEC_RWXMAP_LOG
70338+ bool 'Denied RWX mmap/mprotect logging'
70339+ default y if GRKERNSEC_CONFIG_AUTO
70340+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
70341+ help
70342+ If you say Y here, calls to mmap() and mprotect() with explicit
70343+ usage of PROT_WRITE and PROT_EXEC together will be logged when
70344+ denied by the PAX_MPROTECT feature. This feature will also
70345+ log other problematic scenarios that can occur when PAX_MPROTECT
70346+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
70347+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
70348+ is created.
70349+
70350+endmenu
70351+
70352+menu "Executable Protections"
70353+depends on GRKERNSEC
70354+
70355+config GRKERNSEC_DMESG
70356+ bool "Dmesg(8) restriction"
70357+ default y if GRKERNSEC_CONFIG_AUTO
70358+ help
70359+ If you say Y here, non-root users will not be able to use dmesg(8)
70360+ to view the contents of the kernel's circular log buffer.
70361+ The kernel's log buffer often contains kernel addresses and other
70362+ identifying information useful to an attacker in fingerprinting a
70363+ system for a targeted exploit.
70364+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
70365+ created.
70366+
70367+config GRKERNSEC_HARDEN_PTRACE
70368+ bool "Deter ptrace-based process snooping"
70369+ default y if GRKERNSEC_CONFIG_AUTO
70370+ help
70371+ If you say Y here, TTY sniffers and other malicious monitoring
70372+ programs implemented through ptrace will be defeated. If you
70373+ have been using the RBAC system, this option has already been
70374+ enabled for several years for all users, with the ability to make
70375+ fine-grained exceptions.
70376+
70377+ This option only affects the ability of non-root users to ptrace
70378+	  processes that are not a descendant of the ptracing process.
70379+ This means that strace ./binary and gdb ./binary will still work,
70380+ but attaching to arbitrary processes will not. If the sysctl
70381+ option is enabled, a sysctl option with name "harden_ptrace" is
70382+ created.
70383+
70384+config GRKERNSEC_PTRACE_READEXEC
70385+ bool "Require read access to ptrace sensitive binaries"
70386+ default y if GRKERNSEC_CONFIG_AUTO
70387+ help
70388+ If you say Y here, unprivileged users will not be able to ptrace unreadable
70389+ binaries. This option is useful in environments that
70390+ remove the read bits (e.g. file mode 4711) from suid binaries to
70391+ prevent infoleaking of their contents. This option adds
70392+	  consistency to the use of that file mode, as the binary could otherwise
70393+	  be read out by an unprivileged user simply by ptracing it as it runs.
70394+
70395+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
70396+ is created.
70397+
70398+config GRKERNSEC_SETXID
70399+ bool "Enforce consistent multithreaded privileges"
70400+ default y if GRKERNSEC_CONFIG_AUTO
70401+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
70402+ help
70403+ If you say Y here, a change from a root uid to a non-root uid
70404+ in a multithreaded application will cause the resulting uids,
70405+ gids, supplementary groups, and capabilities in that thread
70406+ to be propagated to the other threads of the process. In most
70407+ cases this is unnecessary, as glibc will emulate this behavior
70408+ on behalf of the application. Other libcs do not act in the
70409+ same way, allowing the other threads of the process to continue
70410+ running with root privileges. If the sysctl option is enabled,
70411+ a sysctl option with name "consistent_setxid" is created.
70412+
70413+config GRKERNSEC_HARDEN_IPC
70414+ bool "Disallow access to overly-permissive IPC objects"
70415+ default y if GRKERNSEC_CONFIG_AUTO
70416+ depends on SYSVIPC
70417+ help
70418+ If you say Y here, access to overly-permissive IPC objects (shared
70419+ memory, message queues, and semaphores) will be denied for processes
70420+	  meeting either of the following criteria, beyond normal permission checks:
70421+ 1) If the IPC object is world-accessible and the euid doesn't match
70422+ that of the creator or current uid for the IPC object
70423+ 2) If the IPC object is group-accessible and the egid doesn't
70424+ match that of the creator or current gid for the IPC object
70425+ It's a common error to grant too much permission to these objects,
70426+ with impact ranging from denial of service and information leaking to
70427+ privilege escalation. This feature was developed in response to
70428+ research by Tim Brown:
70429+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
70430+ who found hundreds of such insecure usages. Processes with
70431+ CAP_IPC_OWNER are still permitted to access these IPC objects.
70432+ If the sysctl option is enabled, a sysctl option with name
70433+ "harden_ipc" is created.
70434+
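A simplified sketch of the two criteria follows; the structure and function
names are hypothetical and do not mirror the patch's internal code (note
also that CAP_IPC_OWNER exempts a process from this check):

	#include <sys/types.h>

	/* Hypothetical, flattened view of an IPC object's permissions. */
	struct ipc_perm_view {
		unsigned short mode;	/* IPC permission bits, e.g. 0666 */
		uid_t cuid, uid;	/* creator uid, current owner uid */
		gid_t cgid, gid;	/* creator gid, current owner gid */
	};

	/* Nonzero if access would be denied under the rules above. */
	static int harden_ipc_would_deny(const struct ipc_perm_view *p,
					 uid_t euid, gid_t egid)
	{
		/* 1) world-accessible, euid matches neither creator nor owner */
		if ((p->mode & 0007) && euid != p->cuid && euid != p->uid)
			return 1;
		/* 2) group-accessible, egid matches neither creator nor owner */
		if ((p->mode & 0070) && egid != p->cgid && egid != p->gid)
			return 1;
		return 0;	/* normal permission checks still apply */
	}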
70435+config GRKERNSEC_TPE
70436+ bool "Trusted Path Execution (TPE)"
70437+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
70438+ help
70439+ If you say Y here, you will be able to choose a gid to add to the
70440+ supplementary groups of users you want to mark as "untrusted."
70441+ These users will not be able to execute any files that are not in
70442+ root-owned directories writable only by root. If the sysctl option
70443+ is enabled, a sysctl option with name "tpe" is created.
70444+
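The directory test described above can be approximated from userspace as
follows; this is an illustrative sketch of the policy, not the kernel's
implementation:

	#include <sys/stat.h>

	/* A path is TPE-trusted if its containing directory is root-owned
	 * and writable only by root. */
	static int dir_is_tpe_trusted(const char *dirpath)
	{
		struct stat st;

		if (stat(dirpath, &st) != 0)
			return 0;
		return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
	}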
70445+config GRKERNSEC_TPE_ALL
70446+ bool "Partially restrict all non-root users"
70447+ depends on GRKERNSEC_TPE
70448+ help
70449+ If you say Y here, all non-root users will be covered under
70450+ a weaker TPE restriction. This is separate from, and in addition to,
70451+ the main TPE options that you have selected elsewhere. Thus, if a
70452+ "trusted" GID is chosen, this restriction applies to even that GID.
70453+ Under this restriction, all non-root users will only be allowed to
70454+ execute files in directories they own that are not group or
70455+ world-writable, or in directories owned by root and writable only by
70456+ root. If the sysctl option is enabled, a sysctl option with name
70457+ "tpe_restrict_all" is created.
70458+
70459+config GRKERNSEC_TPE_INVERT
70460+ bool "Invert GID option"
70461+ depends on GRKERNSEC_TPE
70462+ help
70463+ If you say Y here, the group you specify in the TPE configuration will
70464+ decide what group TPE restrictions will be *disabled* for. This
70465+ option is useful if you want TPE restrictions to be applied to most
70466+ users on the system. If the sysctl option is enabled, a sysctl option
70467+ with name "tpe_invert" is created. Unlike other sysctl options, this
70468+ entry will default to on for backward-compatibility.
70469+
70470+config GRKERNSEC_TPE_GID
70471+ int
70472+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
70473+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
70474+
70475+config GRKERNSEC_TPE_UNTRUSTED_GID
70476+ int "GID for TPE-untrusted users"
70477+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
70478+ default 1005
70479+ help
70480+ Setting this GID determines what group TPE restrictions will be
70481+ *enabled* for. If the sysctl option is enabled, a sysctl option
70482+ with name "tpe_gid" is created.
70483+
70484+config GRKERNSEC_TPE_TRUSTED_GID
70485+ int "GID for TPE-trusted users"
70486+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
70487+ default 1005
70488+ help
70489+ Setting this GID determines what group TPE restrictions will be
70490+ *disabled* for. If the sysctl option is enabled, a sysctl option
70491+ with name "tpe_gid" is created.
70492+
70493+endmenu
70494+menu "Network Protections"
70495+depends on GRKERNSEC
70496+
70497+config GRKERNSEC_BLACKHOLE
70498+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
70499+ default y if GRKERNSEC_CONFIG_AUTO
70500+ depends on NET
70501+ help
70502+ If you say Y here, neither TCP resets nor ICMP
70503+ destination-unreachable packets will be sent in response to packets
70504+ sent to ports for which no associated listening process exists.
70505+ It will also prevent the sending of ICMP protocol unreachable packets
70506+ in response to packets with unknown protocols.
70507+	  This feature supports both IPv4 and IPv6 and exempts the
70508+ loopback interface from blackholing. Enabling this feature
70509+ makes a host more resilient to DoS attacks and reduces network
70510+ visibility against scanners.
70511+
70512+ The blackhole feature as-implemented is equivalent to the FreeBSD
70513+ blackhole feature, as it prevents RST responses to all packets, not
70514+ just SYNs. Under most application behavior this causes no
70515+ problems, but applications (like haproxy) may not close certain
70516+ connections in a way that cleanly terminates them on the remote
70517+ end, leaving the remote host in LAST_ACK state. Because of this
70518+ side-effect and to prevent intentional LAST_ACK DoSes, this
70519+ feature also adds automatic mitigation against such attacks.
70520+ The mitigation drastically reduces the amount of time a socket
70521+ can spend in LAST_ACK state. If you're using haproxy and not
70522+ all servers it connects to have this option enabled, consider
70523+ disabling this feature on the haproxy host.
70524+
70525+ If the sysctl option is enabled, two sysctl options with names
70526+ "ip_blackhole" and "lastack_retries" will be created.
70527+ While "ip_blackhole" takes the standard zero/non-zero on/off
70528+ toggle, "lastack_retries" uses the same kinds of values as
70529+ "tcp_retries1" and "tcp_retries2". The default value of 4
70530+ prevents a socket from lasting more than 45 seconds in LAST_ACK
70531+ state.
70532+
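A minimal sketch of toggling the two sysctls named above; the
/proc/sys/kernel/grsecurity/ location follows the "Sysctl Support" help
further down, and the helper name is illustrative:

	#include <stdio.h>

	static int grsec_sysctl_write(const char *name, const char *val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/proc/sys/kernel/grsecurity/%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);	/* 0 on success */
	}

	/* e.g.: grsec_sysctl_write("ip_blackhole", "1");
	 *       grsec_sysctl_write("lastack_retries", "4"); */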
70533+config GRKERNSEC_NO_SIMULT_CONNECT
70534+ bool "Disable TCP Simultaneous Connect"
70535+ default y if GRKERNSEC_CONFIG_AUTO
70536+ depends on NET
70537+ help
70538+ If you say Y here, a feature by Willy Tarreau will be enabled that
70539+ removes a weakness in Linux's strict implementation of TCP that
70540+ allows two clients to connect to each other without either entering
70541+ a listening state. The weakness allows an attacker to easily prevent
70542+ a client from connecting to a known server provided the source port
70543+ for the connection is guessed correctly.
70544+
70545+ As the weakness could be used to prevent an antivirus or IPS from
70546+ fetching updates, or prevent an SSL gateway from fetching a CRL,
70547+ it should be eliminated by enabling this option. Though Linux is
70548+	  one of the few operating systems supporting simultaneous connect, it
70549+ has no legitimate use in practice and is rarely supported by firewalls.
70550+
70551+config GRKERNSEC_SOCKET
70552+ bool "Socket restrictions"
70553+ depends on NET
70554+ help
70555+ If you say Y here, you will be able to choose from several options.
70556+ If you assign a GID on your system and add it to the supplementary
70557+ groups of users you want to restrict socket access to, this patch
70558+	  will restrict socket access in up to three ways, based on the option(s) you choose.
70559+
70560+config GRKERNSEC_SOCKET_ALL
70561+ bool "Deny any sockets to group"
70562+ depends on GRKERNSEC_SOCKET
70563+ help
70564+	  If you say Y here, you will be able to choose a GID whose users will
70565+ be unable to connect to other hosts from your machine or run server
70566+ applications from your machine. If the sysctl option is enabled, a
70567+ sysctl option with name "socket_all" is created.
70568+
70569+config GRKERNSEC_SOCKET_ALL_GID
70570+ int "GID to deny all sockets for"
70571+ depends on GRKERNSEC_SOCKET_ALL
70572+ default 1004
70573+ help
70574+ Here you can choose the GID to disable socket access for. Remember to
70575+ add the users you want socket access disabled for to the GID
70576+ specified here. If the sysctl option is enabled, a sysctl option
70577+ with name "socket_all_gid" is created.
70578+
70579+config GRKERNSEC_SOCKET_CLIENT
70580+ bool "Deny client sockets to group"
70581+ depends on GRKERNSEC_SOCKET
70582+ help
70583+	  If you say Y here, you will be able to choose a GID whose users will
70584+ be unable to connect to other hosts from your machine, but will be
70585+ able to run servers. If this option is enabled, all users in the group
70586+ you specify will have to use passive mode when initiating ftp transfers
70587+ from the shell on your machine. If the sysctl option is enabled, a
70588+ sysctl option with name "socket_client" is created.
70589+
70590+config GRKERNSEC_SOCKET_CLIENT_GID
70591+ int "GID to deny client sockets for"
70592+ depends on GRKERNSEC_SOCKET_CLIENT
70593+ default 1003
70594+ help
70595+ Here you can choose the GID to disable client socket access for.
70596+ Remember to add the users you want client socket access disabled for to
70597+ the GID specified here. If the sysctl option is enabled, a sysctl
70598+ option with name "socket_client_gid" is created.
70599+
70600+config GRKERNSEC_SOCKET_SERVER
70601+ bool "Deny server sockets to group"
70602+ depends on GRKERNSEC_SOCKET
70603+ help
70604+	  If you say Y here, you will be able to choose a GID whose users will
70605+ be unable to run server applications from your machine. If the sysctl
70606+ option is enabled, a sysctl option with name "socket_server" is created.
70607+
70608+config GRKERNSEC_SOCKET_SERVER_GID
70609+ int "GID to deny server sockets for"
70610+ depends on GRKERNSEC_SOCKET_SERVER
70611+ default 1002
70612+ help
70613+ Here you can choose the GID to disable server socket access for.
70614+ Remember to add the users you want server socket access disabled for to
70615+ the GID specified here. If the sysctl option is enabled, a sysctl
70616+ option with name "socket_server_gid" is created.
70617+
70618+endmenu
70619+
70620+menu "Physical Protections"
70621+depends on GRKERNSEC
70622+
70623+config GRKERNSEC_DENYUSB
70624+ bool "Deny new USB connections after toggle"
70625+ default y if GRKERNSEC_CONFIG_AUTO
70626+ depends on SYSCTL && USB_SUPPORT
70627+ help
70628+ If you say Y here, a new sysctl option with name "deny_new_usb"
70629+ will be created. Setting its value to 1 will prevent any new
70630+ USB devices from being recognized by the OS. Any attempted USB
70631+ device insertion will be logged. This option is intended to be
70632+ used against custom USB devices designed to exploit vulnerabilities
70633+ in various USB device drivers.
70634+
70635+ For greatest effectiveness, this sysctl should be set after any
70636+	  relevant init scripts have run. This option is safe to enable in distros
70637+ as each user can choose whether or not to toggle the sysctl.
70638+
70639+config GRKERNSEC_DENYUSB_FORCE
70640+ bool "Reject all USB devices not connected at boot"
70641+ select USB
70642+ depends on GRKERNSEC_DENYUSB
70643+ help
70644+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70645+ that doesn't involve a sysctl entry. This option should only be
70646+ enabled if you're sure you want to deny all new USB connections
70647+ at runtime and don't want to modify init scripts. This should not
70648+ be enabled by distros. It forces the core USB code to be built
70649+ into the kernel image so that all devices connected at boot time
70650+ can be recognized and new USB device connections can be prevented
70651+ prior to init running.
70652+
70653+endmenu
70654+
70655+menu "Sysctl Support"
70656+depends on GRKERNSEC && SYSCTL
70657+
70658+config GRKERNSEC_SYSCTL
70659+ bool "Sysctl support"
70660+ default y if GRKERNSEC_CONFIG_AUTO
70661+ help
70662+ If you say Y here, you will be able to change the options that
70663+ grsecurity runs with at bootup, without having to recompile your
70664+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70665+ to enable (1) or disable (0) various features. All the sysctl entries
70666+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70667+ All features enabled in the kernel configuration are disabled at boot
70668+ if you do not say Y to the "Turn on features by default" option.
70669+ All options should be set at startup, and the grsec_lock entry should
70670+ be set to a non-zero value after all the options are set.
70671+ *THIS IS EXTREMELY IMPORTANT*
70672+
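Using a helper such as the hypothetical grsec_sysctl_write() sketched in the
blackhole example above, the startup sequence this help text prescribes
looks like the following (the option names are examples taken from
elsewhere in this file):

	/* Set every desired option first, then lock; once grsec_lock is
	 * non-zero, the entries are immutable until reboot. */
	grsec_sysctl_write("chroot_deny_mknod", "1");
	grsec_sysctl_write("audit_mount", "1");
	/* ... any remaining options ... */
	grsec_sysctl_write("grsec_lock", "1");	/* must come last */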
70673+config GRKERNSEC_SYSCTL_DISTRO
70674+ bool "Extra sysctl support for distro makers (READ HELP)"
70675+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70676+ help
70677+ If you say Y here, additional sysctl options will be created
70678+ for features that affect processes running as root. Therefore,
70679+ it is critical when using this option that the grsec_lock entry be
70680+	  enabled after boot. Only distros that ship prebuilt kernel packages
70681+	  with this option enabled and that can ensure grsec_lock is set
70682+	  after boot should use this option.
70683+ *Failure to set grsec_lock after boot makes all grsec features
70684+ this option covers useless*
70685+
70686+ Currently this option creates the following sysctl entries:
70687+ "Disable Privileged I/O": "disable_priv_io"
70688+
70689+config GRKERNSEC_SYSCTL_ON
70690+ bool "Turn on features by default"
70691+ default y if GRKERNSEC_CONFIG_AUTO
70692+ depends on GRKERNSEC_SYSCTL
70693+ help
70694+	  If you say Y here, all features enabled in the kernel configuration
70695+	  will be turned on at boot time, instead of starting out disabled.
70696+	  It is recommended you say Y here unless
70697+ there is some reason you would want all sysctl-tunable features to
70698+ be disabled by default. As mentioned elsewhere, it is important
70699+ to enable the grsec_lock entry once you have finished modifying
70700+ the sysctl entries.
70701+
70702+endmenu
70703+menu "Logging Options"
70704+depends on GRKERNSEC
70705+
70706+config GRKERNSEC_FLOODTIME
70707+ int "Seconds in between log messages (minimum)"
70708+ default 10
70709+ help
70710+	  This option allows you to enforce a minimum number of seconds between
70711+	  grsecurity log messages. The default should be suitable for most
70712+	  people; however, if you choose to change it, choose a value small enough
70713+ to allow informative logs to be produced, but large enough to
70714+ prevent flooding.
70715+
70716+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
70717+ any rate limiting on grsecurity log messages.
70718+
70719+config GRKERNSEC_FLOODBURST
70720+ int "Number of messages in a burst (maximum)"
70721+ default 6
70722+ help
70723+ This option allows you to choose the maximum number of messages allowed
70724+ within the flood time interval you chose in a separate option. The
70725+	  default should be suitable for most people; however, if you find that
70726+ many of your logs are being interpreted as flooding, you may want to
70727+ raise this value.
70728+
70729+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
70730+ any rate limiting on grsecurity log messages.
70731+
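A generic sketch of how such a time/burst pair rate-limits logging; this
illustrates the interplay of the two settings and is not the patch's
implementation:

	#include <time.h>

	static time_t window_start;
	static int emitted;

	/* With the defaults (floodtime=10, floodburst=6), at most six
	 * messages are emitted per ten-second window. */
	static int may_log(time_t now, int floodtime, int floodburst)
	{
		if (now - window_start >= floodtime) {
			window_start = now;	/* open a new window */
			emitted = 0;
		}
		if (emitted >= floodburst)
			return 0;	/* suppressed as flooding */
		emitted++;
		return 1;
	}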
70732+endmenu
70733diff --git a/grsecurity/Makefile b/grsecurity/Makefile
70734new file mode 100644
70735index 0000000..30ababb
70736--- /dev/null
70737+++ b/grsecurity/Makefile
70738@@ -0,0 +1,54 @@
70739+# grsecurity - access control and security hardening for Linux
70740+# All code in this directory and various hooks located throughout the Linux kernel are
70741+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
70742+# http://www.grsecurity.net spender@grsecurity.net
70743+#
70744+# This program is free software; you can redistribute it and/or
70745+# modify it under the terms of the GNU General Public License version 2
70746+# as published by the Free Software Foundation.
70747+#
70748+# This program is distributed in the hope that it will be useful,
70749+# but WITHOUT ANY WARRANTY; without even the implied warranty of
70750+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70751+# GNU General Public License for more details.
70752+#
70753+# You should have received a copy of the GNU General Public License
70754+# along with this program; if not, write to the Free Software
70755+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
70756+
70757+KBUILD_CFLAGS += -Werror
70758+
70759+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
70760+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
70761+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
70762+ grsec_usb.o grsec_ipc.o grsec_proc.o
70763+
70764+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
70765+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
70766+ gracl_learn.o grsec_log.o gracl_policy.o
70767+ifdef CONFIG_COMPAT
70768+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
70769+endif
70770+
70771+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
70772+
70773+ifdef CONFIG_NET
70774+obj-y += grsec_sock.o
70775+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
70776+endif
70777+
70778+ifndef CONFIG_GRKERNSEC
70779+obj-y += grsec_disabled.o
70780+endif
70781+
70782+ifdef CONFIG_GRKERNSEC_HIDESYM
70783+extra-y := grsec_hidesym.o
70784+$(obj)/grsec_hidesym.o:
70785+ @-chmod -f 500 /boot
70786+ @-chmod -f 500 /lib/modules
70787+ @-chmod -f 500 /lib64/modules
70788+ @-chmod -f 500 /lib32/modules
70789+ @-chmod -f 700 .
70790+ @-chmod -f 700 $(objtree)
70791+ @echo ' grsec: protected kernel image paths'
70792+endif
70793diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
70794new file mode 100644
70795index 0000000..f83bf15
70796--- /dev/null
70797+++ b/grsecurity/gracl.c
70798@@ -0,0 +1,2749 @@
70799+#include <linux/kernel.h>
70800+#include <linux/module.h>
70801+#include <linux/sched.h>
70802+#include <linux/mm.h>
70803+#include <linux/file.h>
70804+#include <linux/fs.h>
70805+#include <linux/namei.h>
70806+#include <linux/mount.h>
70807+#include <linux/tty.h>
70808+#include <linux/proc_fs.h>
70809+#include <linux/lglock.h>
70810+#include <linux/slab.h>
70811+#include <linux/vmalloc.h>
70812+#include <linux/types.h>
70813+#include <linux/sysctl.h>
70814+#include <linux/netdevice.h>
70815+#include <linux/ptrace.h>
70816+#include <linux/gracl.h>
70817+#include <linux/gralloc.h>
70818+#include <linux/security.h>
70819+#include <linux/grinternal.h>
70820+#include <linux/pid_namespace.h>
70821+#include <linux/stop_machine.h>
70822+#include <linux/fdtable.h>
70823+#include <linux/percpu.h>
70825+#include <linux/hugetlb.h>
70826+#include <linux/posix-timers.h>
70827+#include <linux/prefetch.h>
70828+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70829+#include <linux/magic.h>
70830+#include <linux/pagemap.h>
70831+#include "../fs/btrfs/async-thread.h"
70832+#include "../fs/btrfs/ctree.h"
70833+#include "../fs/btrfs/btrfs_inode.h"
70834+#endif
70835+#include "../fs/mount.h"
70836+
70837+#include <asm/uaccess.h>
70838+#include <asm/errno.h>
70839+#include <asm/mman.h>
70840+
70841+#define FOR_EACH_ROLE_START(role) \
70842+ role = running_polstate.role_list; \
70843+ while (role) {
70844+
70845+#define FOR_EACH_ROLE_END(role) \
70846+ role = role->prev; \
70847+ }
70848+
70849+extern struct path gr_real_root;
70850+
70851+static struct gr_policy_state running_polstate;
70852+struct gr_policy_state *polstate = &running_polstate;
70853+extern struct gr_alloc_state *current_alloc_state;
70854+
70855+extern char *gr_shared_page[4];
70856+DEFINE_RWLOCK(gr_inode_lock);
70857+
70858+static unsigned int gr_status __read_only = GR_STATUS_INIT;
70859+
70860+#ifdef CONFIG_NET
70861+extern struct vfsmount *sock_mnt;
70862+#endif
70863+
70864+extern struct vfsmount *pipe_mnt;
70865+extern struct vfsmount *shm_mnt;
70866+
70867+#ifdef CONFIG_HUGETLBFS
70868+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
70869+#endif
70870+
70871+extern u16 acl_sp_role_value;
70872+extern struct acl_object_label *fakefs_obj_rw;
70873+extern struct acl_object_label *fakefs_obj_rwx;
70874+
70875+int gr_acl_is_enabled(void)
70876+{
70877+ return (gr_status & GR_READY);
70878+}
70879+
70880+void gr_enable_rbac_system(void)
70881+{
70882+ pax_open_kernel();
70883+ gr_status |= GR_READY;
70884+ pax_close_kernel();
70885+}
70886+
70887+int gr_rbac_disable(void *unused)
70888+{
70889+ pax_open_kernel();
70890+ gr_status &= ~GR_READY;
70891+ pax_close_kernel();
70892+
70893+ return 0;
70894+}
70895+
70896+static inline dev_t __get_dev(const struct dentry *dentry)
70897+{
70898+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70899+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70900+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
70901+ else
70902+#endif
70903+ return dentry->d_sb->s_dev;
70904+}
70905+
70906+static inline u64 __get_ino(const struct dentry *dentry)
70907+{
70908+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70909+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70910+ return btrfs_ino(dentry->d_inode);
70911+ else
70912+#endif
70913+ return dentry->d_inode->i_ino;
70914+}
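/* The btrfs special-casing above exists because all subvolumes of a
   btrfs filesystem share a single superblock (one s_dev); the
   per-subvolume anonymous device and btrfs inode number keep
   (device, inode) pairs unique across subvolumes. */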
70915+
70916+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70917+{
70918+ return __get_dev(dentry);
70919+}
70920+
70921+u64 gr_get_ino_from_dentry(struct dentry *dentry)
70922+{
70923+ return __get_ino(dentry);
70924+}
70925+
70926+static char gr_task_roletype_to_char(struct task_struct *task)
70927+{
70928+ switch (task->role->roletype &
70929+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
70930+ GR_ROLE_SPECIAL)) {
70931+ case GR_ROLE_DEFAULT:
70932+ return 'D';
70933+ case GR_ROLE_USER:
70934+ return 'U';
70935+ case GR_ROLE_GROUP:
70936+ return 'G';
70937+ case GR_ROLE_SPECIAL:
70938+ return 'S';
70939+ }
70940+
70941+ return 'X';
70942+}
70943+
70944+char gr_roletype_to_char(void)
70945+{
70946+ return gr_task_roletype_to_char(current);
70947+}
70948+
70949+__inline__ int
70950+gr_acl_tpe_check(void)
70951+{
70952+ if (unlikely(!(gr_status & GR_READY)))
70953+ return 0;
70954+ if (current->role->roletype & GR_ROLE_TPE)
70955+ return 1;
70956+ else
70957+ return 0;
70958+}
70959+
70960+int
70961+gr_handle_rawio(const struct inode *inode)
70962+{
70963+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70964+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
70965+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
70966+ !capable(CAP_SYS_RAWIO))
70967+ return 1;
70968+#endif
70969+ return 0;
70970+}
70971+
70972+int
70973+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
70974+{
70975+ if (likely(lena != lenb))
70976+ return 0;
70977+
70978+ return !memcmp(a, b, lena);
70979+}
70980+
70981+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
70982+{
70983+ *buflen -= namelen;
70984+ if (*buflen < 0)
70985+ return -ENAMETOOLONG;
70986+ *buffer -= namelen;
70987+ memcpy(*buffer, str, namelen);
70988+ return 0;
70989+}
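/* prepend() fills its buffer right-to-left.  For example:
 *
 *	char buf[16];
 *	char *p = buf + sizeof(buf);
 *	int len = sizeof(buf);
 *
 *	prepend(&p, &len, "\0", 1);
 *	prepend(&p, &len, "bar", 3);
 *	prepend(&p, &len, "/", 1);
 *
 * leaves p pointing at the string "/bar" inside buf.  prepend_path()
 * and __our_d_path() below rely on this to build a path from leaf
 * to root without knowing its length in advance. */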
70990+
70991+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
70992+{
70993+ return prepend(buffer, buflen, name->name, name->len);
70994+}
70995+
70996+static int prepend_path(const struct path *path, struct path *root,
70997+ char **buffer, int *buflen)
70998+{
70999+ struct dentry *dentry = path->dentry;
71000+ struct vfsmount *vfsmnt = path->mnt;
71001+ struct mount *mnt = real_mount(vfsmnt);
71002+ bool slash = false;
71003+ int error = 0;
71004+
71005+ while (dentry != root->dentry || vfsmnt != root->mnt) {
71006+ struct dentry * parent;
71007+
71008+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
71009+ /* Global root? */
71010+ if (!mnt_has_parent(mnt)) {
71011+ goto out;
71012+ }
71013+ dentry = mnt->mnt_mountpoint;
71014+ mnt = mnt->mnt_parent;
71015+ vfsmnt = &mnt->mnt;
71016+ continue;
71017+ }
71018+ parent = dentry->d_parent;
71019+ prefetch(parent);
71020+ spin_lock(&dentry->d_lock);
71021+ error = prepend_name(buffer, buflen, &dentry->d_name);
71022+ spin_unlock(&dentry->d_lock);
71023+ if (!error)
71024+ error = prepend(buffer, buflen, "/", 1);
71025+ if (error)
71026+ break;
71027+
71028+ slash = true;
71029+ dentry = parent;
71030+ }
71031+
71032+out:
71033+ if (!error && !slash)
71034+ error = prepend(buffer, buflen, "/", 1);
71035+
71036+ return error;
71037+}
71038+
71039+/* this must be called with mount_lock and rename_lock held */
71040+
71041+static char *__our_d_path(const struct path *path, struct path *root,
71042+ char *buf, int buflen)
71043+{
71044+ char *res = buf + buflen;
71045+ int error;
71046+
71047+ prepend(&res, &buflen, "\0", 1);
71048+ error = prepend_path(path, root, &res, &buflen);
71049+ if (error)
71050+ return ERR_PTR(error);
71051+
71052+ return res;
71053+}
71054+
71055+static char *
71056+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
71057+{
71058+ char *retval;
71059+
71060+ retval = __our_d_path(path, root, buf, buflen);
71061+ if (unlikely(IS_ERR(retval)))
71062+ retval = strcpy(buf, "<path too long>");
71063+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
71064+ retval[1] = '\0';
71065+
71066+ return retval;
71067+}
71068+
71069+static char *
71070+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71071+ char *buf, int buflen)
71072+{
71073+ struct path path;
71074+ char *res;
71075+
71076+ path.dentry = (struct dentry *)dentry;
71077+ path.mnt = (struct vfsmount *)vfsmnt;
71078+
71079+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
71080+ by the RBAC system */
71081+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
71082+
71083+ return res;
71084+}
71085+
71086+static char *
71087+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71088+ char *buf, int buflen)
71089+{
71090+ char *res;
71091+ struct path path;
71092+ struct path root;
71093+ struct task_struct *reaper = init_pid_ns.child_reaper;
71094+
71095+ path.dentry = (struct dentry *)dentry;
71096+ path.mnt = (struct vfsmount *)vfsmnt;
71097+
71098+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
71099+ get_fs_root(reaper->fs, &root);
71100+
71101+ read_seqlock_excl(&mount_lock);
71102+ write_seqlock(&rename_lock);
71103+ res = gen_full_path(&path, &root, buf, buflen);
71104+ write_sequnlock(&rename_lock);
71105+ read_sequnlock_excl(&mount_lock);
71106+
71107+ path_put(&root);
71108+ return res;
71109+}
71110+
71111+char *
71112+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71113+{
71114+ char *ret;
71115+ read_seqlock_excl(&mount_lock);
71116+ write_seqlock(&rename_lock);
71117+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71118+ PAGE_SIZE);
71119+ write_sequnlock(&rename_lock);
71120+ read_sequnlock_excl(&mount_lock);
71121+ return ret;
71122+}
71123+
71124+static char *
71125+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71126+{
71127+ char *ret;
71128+ char *buf;
71129+ int buflen;
71130+
71131+ read_seqlock_excl(&mount_lock);
71132+ write_seqlock(&rename_lock);
71133+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
71134+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
71135+ buflen = (int)(ret - buf);
71136+ if (buflen >= 5)
71137+ prepend(&ret, &buflen, "/proc", 5);
71138+ else
71139+ ret = strcpy(buf, "<path too long>");
71140+ write_sequnlock(&rename_lock);
71141+ read_sequnlock_excl(&mount_lock);
71142+ return ret;
71143+}
71144+
71145+char *
71146+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
71147+{
71148+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71149+ PAGE_SIZE);
71150+}
71151+
71152+char *
71153+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
71154+{
71155+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71156+ PAGE_SIZE);
71157+}
71158+
71159+char *
71160+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
71161+{
71162+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
71163+ PAGE_SIZE);
71164+}
71165+
71166+char *
71167+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
71168+{
71169+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
71170+ PAGE_SIZE);
71171+}
71172+
71173+char *
71174+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
71175+{
71176+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
71177+ PAGE_SIZE);
71178+}
71179+
71180+__inline__ __u32
71181+to_gr_audit(const __u32 reqmode)
71182+{
71183+ /* masks off auditable permission flags, then shifts them to create
71184+ auditing flags, and adds the special case of append auditing if
71185+ we're requesting write */
71186+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
71187+}
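/* Worked example with illustrative bit positions: if a permission
 * flag occupied bit 0 and its audit counterpart bit 10, requesting
 * that permission would yield the audit flag via the << 10 shift
 * above; any request containing GR_WRITE additionally picks up
 * GR_AUDIT_APPEND, as an append is a form of write. */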
71188+
71189+struct acl_role_label *
71190+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
71191+ const gid_t gid)
71192+{
71193+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
71194+ struct acl_role_label *match;
71195+ struct role_allowed_ip *ipp;
71196+ unsigned int x;
71197+ u32 curr_ip = task->signal->saved_ip;
71198+
71199+ match = state->acl_role_set.r_hash[index];
71200+
71201+ while (match) {
71202+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
71203+ for (x = 0; x < match->domain_child_num; x++) {
71204+ if (match->domain_children[x] == uid)
71205+ goto found;
71206+ }
71207+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
71208+ break;
71209+ match = match->next;
71210+ }
71211+found:
71212+ if (match == NULL) {
71213+ try_group:
71214+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
71215+ match = state->acl_role_set.r_hash[index];
71216+
71217+ while (match) {
71218+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
71219+ for (x = 0; x < match->domain_child_num; x++) {
71220+ if (match->domain_children[x] == gid)
71221+ goto found2;
71222+ }
71223+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
71224+ break;
71225+ match = match->next;
71226+ }
71227+found2:
71228+ if (match == NULL)
71229+ match = state->default_role;
71230+ if (match->allowed_ips == NULL)
71231+ return match;
71232+ else {
71233+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71234+ if (likely
71235+ ((ntohl(curr_ip) & ipp->netmask) ==
71236+ (ntohl(ipp->addr) & ipp->netmask)))
71237+ return match;
71238+ }
71239+ match = state->default_role;
71240+ }
71241+ } else if (match->allowed_ips == NULL) {
71242+ return match;
71243+ } else {
71244+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71245+ if (likely
71246+ ((ntohl(curr_ip) & ipp->netmask) ==
71247+ (ntohl(ipp->addr) & ipp->netmask)))
71248+ return match;
71249+ }
71250+ goto try_group;
71251+ }
71252+
71253+ return match;
71254+}
71255+
71256+static struct acl_role_label *
71257+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
71258+ const gid_t gid)
71259+{
71260+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
71261+}
71262+
71263+struct acl_subject_label *
71264+lookup_acl_subj_label(const u64 ino, const dev_t dev,
71265+ const struct acl_role_label *role)
71266+{
71267+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71268+ struct acl_subject_label *match;
71269+
71270+ match = role->subj_hash[index];
71271+
71272+ while (match && (match->inode != ino || match->device != dev ||
71273+ (match->mode & GR_DELETED))) {
71274+ match = match->next;
71275+ }
71276+
71277+ if (match && !(match->mode & GR_DELETED))
71278+ return match;
71279+ else
71280+ return NULL;
71281+}
71282+
71283+struct acl_subject_label *
71284+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
71285+ const struct acl_role_label *role)
71286+{
71287+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71288+ struct acl_subject_label *match;
71289+
71290+ match = role->subj_hash[index];
71291+
71292+ while (match && (match->inode != ino || match->device != dev ||
71293+ !(match->mode & GR_DELETED))) {
71294+ match = match->next;
71295+ }
71296+
71297+ if (match && (match->mode & GR_DELETED))
71298+ return match;
71299+ else
71300+ return NULL;
71301+}
71302+
71303+static struct acl_object_label *
71304+lookup_acl_obj_label(const u64 ino, const dev_t dev,
71305+ const struct acl_subject_label *subj)
71306+{
71307+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71308+ struct acl_object_label *match;
71309+
71310+ match = subj->obj_hash[index];
71311+
71312+ while (match && (match->inode != ino || match->device != dev ||
71313+ (match->mode & GR_DELETED))) {
71314+ match = match->next;
71315+ }
71316+
71317+ if (match && !(match->mode & GR_DELETED))
71318+ return match;
71319+ else
71320+ return NULL;
71321+}
71322+
71323+static struct acl_object_label *
71324+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
71325+ const struct acl_subject_label *subj)
71326+{
71327+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71328+ struct acl_object_label *match;
71329+
71330+ match = subj->obj_hash[index];
71331+
71332+ while (match && (match->inode != ino || match->device != dev ||
71333+ !(match->mode & GR_DELETED))) {
71334+ match = match->next;
71335+ }
71336+
71337+ if (match && (match->mode & GR_DELETED))
71338+ return match;
71339+
71340+ match = subj->obj_hash[index];
71341+
71342+ while (match && (match->inode != ino || match->device != dev ||
71343+ (match->mode & GR_DELETED))) {
71344+ match = match->next;
71345+ }
71346+
71347+ if (match && !(match->mode & GR_DELETED))
71348+ return match;
71349+ else
71350+ return NULL;
71351+}
71352+
71353+struct name_entry *
71354+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
71355+{
71356+ unsigned int len = strlen(name);
71357+ unsigned int key = full_name_hash(name, len);
71358+ unsigned int index = key % state->name_set.n_size;
71359+ struct name_entry *match;
71360+
71361+ match = state->name_set.n_hash[index];
71362+
71363+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
71364+ match = match->next;
71365+
71366+ return match;
71367+}
71368+
71369+static struct name_entry *
71370+lookup_name_entry(const char *name)
71371+{
71372+ return __lookup_name_entry(&running_polstate, name);
71373+}
71374+
71375+static struct name_entry *
71376+lookup_name_entry_create(const char *name)
71377+{
71378+ unsigned int len = strlen(name);
71379+ unsigned int key = full_name_hash(name, len);
71380+ unsigned int index = key % running_polstate.name_set.n_size;
71381+ struct name_entry *match;
71382+
71383+ match = running_polstate.name_set.n_hash[index];
71384+
71385+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71386+ !match->deleted))
71387+ match = match->next;
71388+
71389+ if (match && match->deleted)
71390+ return match;
71391+
71392+ match = running_polstate.name_set.n_hash[index];
71393+
71394+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71395+ match->deleted))
71396+ match = match->next;
71397+
71398+ if (match && !match->deleted)
71399+ return match;
71400+ else
71401+ return NULL;
71402+}
71403+
71404+static struct inodev_entry *
71405+lookup_inodev_entry(const u64 ino, const dev_t dev)
71406+{
71407+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
71408+ struct inodev_entry *match;
71409+
71410+ match = running_polstate.inodev_set.i_hash[index];
71411+
71412+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
71413+ match = match->next;
71414+
71415+ return match;
71416+}
71417+
71418+void
71419+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
71420+{
71421+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
71422+ state->inodev_set.i_size);
71423+ struct inodev_entry **curr;
71424+
71425+ entry->prev = NULL;
71426+
71427+ curr = &state->inodev_set.i_hash[index];
71428+ if (*curr != NULL)
71429+ (*curr)->prev = entry;
71430+
71431+ entry->next = *curr;
71432+ *curr = entry;
71433+
71434+ return;
71435+}
71436+
71437+static void
71438+insert_inodev_entry(struct inodev_entry *entry)
71439+{
71440+ __insert_inodev_entry(&running_polstate, entry);
71441+}
71442+
71443+void
71444+insert_acl_obj_label(struct acl_object_label *obj,
71445+ struct acl_subject_label *subj)
71446+{
71447+ unsigned int index =
71448+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
71449+ struct acl_object_label **curr;
71450+
71451+ obj->prev = NULL;
71452+
71453+ curr = &subj->obj_hash[index];
71454+ if (*curr != NULL)
71455+ (*curr)->prev = obj;
71456+
71457+ obj->next = *curr;
71458+ *curr = obj;
71459+
71460+ return;
71461+}
71462+
71463+void
71464+insert_acl_subj_label(struct acl_subject_label *obj,
71465+ struct acl_role_label *role)
71466+{
71467+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
71468+ struct acl_subject_label **curr;
71469+
71470+ obj->prev = NULL;
71471+
71472+ curr = &role->subj_hash[index];
71473+ if (*curr != NULL)
71474+ (*curr)->prev = obj;
71475+
71476+ obj->next = *curr;
71477+ *curr = obj;
71478+
71479+ return;
71480+}
71481+
71482+/* derived from glibc fnmatch() 0: match, 1: no match*/
71483+
71484+static int
71485+glob_match(const char *p, const char *n)
71486+{
71487+ char c;
71488+
71489+ while ((c = *p++) != '\0') {
71490+ switch (c) {
71491+ case '?':
71492+ if (*n == '\0')
71493+ return 1;
71494+ else if (*n == '/')
71495+ return 1;
71496+ break;
71497+ case '\\':
71498+ if (*n != c)
71499+ return 1;
71500+ break;
71501+ case '*':
71502+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
71503+ if (*n == '/')
71504+ return 1;
71505+ else if (c == '?') {
71506+ if (*n == '\0')
71507+ return 1;
71508+ else
71509+ ++n;
71510+ }
71511+ }
71512+ if (c == '\0') {
71513+ return 0;
71514+ } else {
71515+ const char *endp;
71516+
71517+ if ((endp = strchr(n, '/')) == NULL)
71518+ endp = n + strlen(n);
71519+
71520+ if (c == '[') {
71521+ for (--p; n < endp; ++n)
71522+ if (!glob_match(p, n))
71523+ return 0;
71524+ } else if (c == '/') {
71525+ while (*n != '\0' && *n != '/')
71526+ ++n;
71527+ if (*n == '/' && !glob_match(p, n + 1))
71528+ return 0;
71529+ } else {
71530+ for (--p; n < endp; ++n)
71531+ if (*n == c && !glob_match(p, n))
71532+ return 0;
71533+ }
71534+
71535+ return 1;
71536+ }
71537+ case '[':
71538+ {
71539+ int not;
71540+ char cold;
71541+
71542+ if (*n == '\0' || *n == '/')
71543+ return 1;
71544+
71545+ not = (*p == '!' || *p == '^');
71546+ if (not)
71547+ ++p;
71548+
71549+ c = *p++;
71550+ for (;;) {
71551+ unsigned char fn = (unsigned char)*n;
71552+
71553+ if (c == '\0')
71554+ return 1;
71555+ else {
71556+ if (c == fn)
71557+ goto matched;
71558+ cold = c;
71559+ c = *p++;
71560+
71561+ if (c == '-' && *p != ']') {
71562+ unsigned char cend = *p++;
71563+
71564+ if (cend == '\0')
71565+ return 1;
71566+
71567+ if (cold <= fn && fn <= cend)
71568+ goto matched;
71569+
71570+ c = *p++;
71571+ }
71572+ }
71573+
71574+ if (c == ']')
71575+ break;
71576+ }
71577+ if (!not)
71578+ return 1;
71579+ break;
71580+ matched:
71581+ while (c != ']') {
71582+ if (c == '\0')
71583+ return 1;
71584+
71585+ c = *p++;
71586+ }
71587+ if (not)
71588+ return 1;
71589+ }
71590+ break;
71591+ default:
71592+ if (c != *n)
71593+ return 1;
71594+ }
71595+
71596+ ++n;
71597+ }
71598+
71599+ if (*n == '\0')
71600+ return 0;
71601+
71602+ if (*n == '/')
71603+ return 0;
71604+
71605+ return 1;
71606+}
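/* Usage sketch: as with glibc's fnmatch(), 0 means match and 1 means
 * mismatch, e.g.:
 *
 *	glob_match("*.log", "messages.log")	returns 0 (match)
 *	glob_match("a?c", "abc")		returns 0 (match)
 *	glob_match("a?c", "a/c")		returns 1 ('?' skips '/')
 *
 * '?' never matches '/', so a wildcard written for one path
 * component cannot silently span a directory boundary. */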
71607+
71608+static struct acl_object_label *
71609+chk_glob_label(struct acl_object_label *globbed,
71610+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71611+{
71612+ struct acl_object_label *tmp;
71613+
71614+ if (*path == NULL)
71615+ *path = gr_to_filename_nolock(dentry, mnt);
71616+
71617+ tmp = globbed;
71618+
71619+ while (tmp) {
71620+ if (!glob_match(tmp->filename, *path))
71621+ return tmp;
71622+ tmp = tmp->next;
71623+ }
71624+
71625+ return NULL;
71626+}
71627+
71628+static struct acl_object_label *
71629+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71630+ const u64 curr_ino, const dev_t curr_dev,
71631+ const struct acl_subject_label *subj, char **path, const int checkglob)
71632+{
71633+ struct acl_subject_label *tmpsubj;
71634+ struct acl_object_label *retval;
71635+ struct acl_object_label *retval2;
71636+
71637+ tmpsubj = (struct acl_subject_label *) subj;
71638+ read_lock(&gr_inode_lock);
71639+ do {
71640+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71641+ if (retval) {
71642+ if (checkglob && retval->globbed) {
71643+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71644+ if (retval2)
71645+ retval = retval2;
71646+ }
71647+ break;
71648+ }
71649+ } while ((tmpsubj = tmpsubj->parent_subject));
71650+ read_unlock(&gr_inode_lock);
71651+
71652+ return retval;
71653+}
71654+
71655+static __inline__ struct acl_object_label *
71656+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71657+ struct dentry *curr_dentry,
71658+ const struct acl_subject_label *subj, char **path, const int checkglob)
71659+{
71660+ int newglob = checkglob;
71661+ u64 inode;
71662+ dev_t device;
71663+
71664+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
71665+ as we don't want a / * rule to match instead of the / object
71666+ don't do this for create lookups that call this function though, since they're looking up
71667+ on the parent and thus need globbing checks on all paths
71668+ */
71669+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71670+ newglob = GR_NO_GLOB;
71671+
71672+ spin_lock(&curr_dentry->d_lock);
71673+ inode = __get_ino(curr_dentry);
71674+ device = __get_dev(curr_dentry);
71675+ spin_unlock(&curr_dentry->d_lock);
71676+
71677+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71678+}
71679+
71680+#ifdef CONFIG_HUGETLBFS
71681+static inline bool
71682+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71683+{
71684+ int i;
71685+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71686+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71687+ return true;
71688+ }
71689+
71690+ return false;
71691+}
71692+#endif
71693+
71694+static struct acl_object_label *
71695+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71696+ const struct acl_subject_label *subj, char *path, const int checkglob)
71697+{
71698+ struct dentry *dentry = (struct dentry *) l_dentry;
71699+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71700+ struct mount *real_mnt = real_mount(mnt);
71701+ struct acl_object_label *retval;
71702+ struct dentry *parent;
71703+
71704+ read_seqlock_excl(&mount_lock);
71705+ write_seqlock(&rename_lock);
71706+
71707+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71708+#ifdef CONFIG_NET
71709+ mnt == sock_mnt ||
71710+#endif
71711+#ifdef CONFIG_HUGETLBFS
71712+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71713+#endif
71714+ /* ignore Eric Biederman */
71715+ IS_PRIVATE(l_dentry->d_inode))) {
71716+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71717+ goto out;
71718+ }
71719+
71720+ for (;;) {
71721+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71722+ break;
71723+
71724+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71725+ if (!mnt_has_parent(real_mnt))
71726+ break;
71727+
71728+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71729+ if (retval != NULL)
71730+ goto out;
71731+
71732+ dentry = real_mnt->mnt_mountpoint;
71733+ real_mnt = real_mnt->mnt_parent;
71734+ mnt = &real_mnt->mnt;
71735+ continue;
71736+ }
71737+
71738+ parent = dentry->d_parent;
71739+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71740+ if (retval != NULL)
71741+ goto out;
71742+
71743+ dentry = parent;
71744+ }
71745+
71746+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71747+
71748+ /* gr_real_root is pinned so we don't have to hold a reference */
71749+ if (retval == NULL)
71750+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
71751+out:
71752+ write_sequnlock(&rename_lock);
71753+ read_sequnlock_excl(&mount_lock);
71754+
71755+ BUG_ON(retval == NULL);
71756+
71757+ return retval;
71758+}
71759+
71760+static __inline__ struct acl_object_label *
71761+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71762+ const struct acl_subject_label *subj)
71763+{
71764+ char *path = NULL;
71765+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
71766+}
71767+
71768+static __inline__ struct acl_object_label *
71769+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71770+ const struct acl_subject_label *subj)
71771+{
71772+ char *path = NULL;
71773+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
71774+}
71775+
71776+static __inline__ struct acl_object_label *
71777+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71778+ const struct acl_subject_label *subj, char *path)
71779+{
71780+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
71781+}
71782+
71783+struct acl_subject_label *
71784+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71785+ const struct acl_role_label *role)
71786+{
71787+ struct dentry *dentry = (struct dentry *) l_dentry;
71788+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71789+ struct mount *real_mnt = real_mount(mnt);
71790+ struct acl_subject_label *retval;
71791+ struct dentry *parent;
71792+
71793+ read_seqlock_excl(&mount_lock);
71794+ write_seqlock(&rename_lock);
71795+
71796+ for (;;) {
71797+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71798+ break;
71799+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71800+ if (!mnt_has_parent(real_mnt))
71801+ break;
71802+
71803+ spin_lock(&dentry->d_lock);
71804+ read_lock(&gr_inode_lock);
71805+ retval =
71806+ lookup_acl_subj_label(__get_ino(dentry),
71807+ __get_dev(dentry), role);
71808+ read_unlock(&gr_inode_lock);
71809+ spin_unlock(&dentry->d_lock);
71810+ if (retval != NULL)
71811+ goto out;
71812+
71813+ dentry = real_mnt->mnt_mountpoint;
71814+ real_mnt = real_mnt->mnt_parent;
71815+ mnt = &real_mnt->mnt;
71816+ continue;
71817+ }
71818+
71819+ spin_lock(&dentry->d_lock);
71820+ read_lock(&gr_inode_lock);
71821+ retval = lookup_acl_subj_label(__get_ino(dentry),
71822+ __get_dev(dentry), role);
71823+ read_unlock(&gr_inode_lock);
71824+ parent = dentry->d_parent;
71825+ spin_unlock(&dentry->d_lock);
71826+
71827+ if (retval != NULL)
71828+ goto out;
71829+
71830+ dentry = parent;
71831+ }
71832+
71833+ spin_lock(&dentry->d_lock);
71834+ read_lock(&gr_inode_lock);
71835+ retval = lookup_acl_subj_label(__get_ino(dentry),
71836+ __get_dev(dentry), role);
71837+ read_unlock(&gr_inode_lock);
71838+ spin_unlock(&dentry->d_lock);
71839+
71840+ if (unlikely(retval == NULL)) {
71841+ /* gr_real_root is pinned, we don't need to hold a reference */
71842+ read_lock(&gr_inode_lock);
71843+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
71844+ __get_dev(gr_real_root.dentry), role);
71845+ read_unlock(&gr_inode_lock);
71846+ }
71847+out:
71848+ write_sequnlock(&rename_lock);
71849+ read_sequnlock_excl(&mount_lock);
71850+
71851+ BUG_ON(retval == NULL);
71852+
71853+ return retval;
71854+}
71855+
71856+void
71857+assign_special_role(const char *rolename)
71858+{
71859+ struct acl_object_label *obj;
71860+ struct acl_role_label *r;
71861+ struct acl_role_label *assigned = NULL;
71862+ struct task_struct *tsk;
71863+ struct file *filp;
71864+
71865+ FOR_EACH_ROLE_START(r)
71866+ if (!strcmp(rolename, r->rolename) &&
71867+ (r->roletype & GR_ROLE_SPECIAL)) {
71868+ assigned = r;
71869+ break;
71870+ }
71871+ FOR_EACH_ROLE_END(r)
71872+
71873+ if (!assigned)
71874+ return;
71875+
71876+ read_lock(&tasklist_lock);
71877+ read_lock(&grsec_exec_file_lock);
71878+
71879+ tsk = current->real_parent;
71880+ if (tsk == NULL)
71881+ goto out_unlock;
71882+
71883+ filp = tsk->exec_file;
71884+ if (filp == NULL)
71885+ goto out_unlock;
71886+
71887+ tsk->is_writable = 0;
71888+ tsk->inherited = 0;
71889+
71890+ tsk->acl_sp_role = 1;
71891+ tsk->acl_role_id = ++acl_sp_role_value;
71892+ tsk->role = assigned;
71893+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
71894+
71895+ /* ignore additional mmap checks for processes that are writable
71896+ by the default ACL */
71897+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71898+ if (unlikely(obj->mode & GR_WRITE))
71899+ tsk->is_writable = 1;
71900+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
71901+ if (unlikely(obj->mode & GR_WRITE))
71902+ tsk->is_writable = 1;
71903+
71904+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71905+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
71906+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
71907+#endif
71908+
71909+out_unlock:
71910+ read_unlock(&grsec_exec_file_lock);
71911+ read_unlock(&tasklist_lock);
71912+ return;
71913+}
71914+
71915+
71916+static void
71917+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
71918+{
71919+ struct task_struct *task = current;
71920+ const struct cred *cred = current_cred();
71921+
71922+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
71923+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71924+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71925+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
71926+
71927+ return;
71928+}
71929+
71930+static void
71931+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
71932+{
71933+ struct task_struct *task = current;
71934+ const struct cred *cred = current_cred();
71935+
71936+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71937+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71938+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71939+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
71940+
71941+ return;
71942+}
71943+
71944+static void
71945+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
71946+{
71947+ struct task_struct *task = current;
71948+ const struct cred *cred = current_cred();
71949+
71950+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71951+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71952+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71953+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
71954+
71955+ return;
71956+}
71957+
71958+static void
71959+gr_set_proc_res(struct task_struct *task)
71960+{
71961+ struct acl_subject_label *proc;
71962+ unsigned short i;
71963+
71964+ proc = task->acl;
71965+
71966+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
71967+ return;
71968+
71969+ for (i = 0; i < RLIM_NLIMITS; i++) {
71970+ unsigned long rlim_cur, rlim_max;
71971+
71972+ if (!(proc->resmask & (1U << i)))
71973+ continue;
71974+
71975+ rlim_cur = proc->res[i].rlim_cur;
71976+ rlim_max = proc->res[i].rlim_max;
71977+
71978+ if (i == RLIMIT_NOFILE) {
71979+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
71980+ if (rlim_cur > saved_sysctl_nr_open)
71981+ rlim_cur = saved_sysctl_nr_open;
71982+ if (rlim_max > saved_sysctl_nr_open)
71983+ rlim_max = saved_sysctl_nr_open;
71984+ }
71985+
71986+ task->signal->rlim[i].rlim_cur = rlim_cur;
71987+ task->signal->rlim[i].rlim_max = rlim_max;
71988+
71989+ if (i == RLIMIT_CPU)
71990+ update_rlimit_cpu(task, rlim_cur);
71991+ }
71992+
71993+ return;
71994+}
71995+
71996+/* both of the functions below must be called with
71997+	rcu_read_lock();
71998+	read_lock(&tasklist_lock);
71999+	read_lock(&grsec_exec_file_lock);
72000+   held, except in the case of gr_set_role_label() (for its call to __gr_get_subject_for_task)
72001+*/
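+/* a sketch (not a verbatim caller) of the expected calling context:
+ *
+ *	rcu_read_lock();
+ *	read_lock(&tasklist_lock);
+ *	read_lock(&grsec_exec_file_lock);
+ *	subj = __gr_get_subject_for_task(state, task, NULL, 1);
+ *	if (subj != NULL)
+ *		__gr_apply_subject_to_task(state, task, subj);
+ *	read_unlock(&grsec_exec_file_lock);
+ *	read_unlock(&tasklist_lock);
+ *	rcu_read_unlock();
+ */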
72002+
72003+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
72004+{
72005+ char *tmpname;
72006+ struct acl_subject_label *tmpsubj;
72007+ struct file *filp;
72008+ struct name_entry *nmatch;
72009+
72010+ filp = task->exec_file;
72011+ if (filp == NULL)
72012+ return NULL;
72013+
72014+	/* the following applies the correct subject
72015+	   to binaries that were already running when
72016+	   the RBAC system was enabled and have since
72017+	   been replaced or deleted
72018+	   -----
72019+	   when the RBAC system starts, the inode/dev
72020+	   from exec_file will be one that the RBAC
72021+	   system is unaware of.  It only knows the
72022+	   inode/dev of the file currently on disk,
72023+	   or the absence of it.
72024+	*/
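+	/* in short, the lookup below resolves in this order: a name-entry hit
+	 * for the recorded path, using the deleted-subject table when the
+	 * entry is flagged deleted and the live table otherwise; failing
+	 * that, and when fallback is requested, an ino/dev lookup against
+	 * the file now on disk via chk_subj_label()
+	 */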
72025+
72026+ if (filename)
72027+ nmatch = __lookup_name_entry(state, filename);
72028+ else {
72029+ preempt_disable();
72030+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
72031+
72032+ nmatch = __lookup_name_entry(state, tmpname);
72033+ preempt_enable();
72034+ }
72035+ tmpsubj = NULL;
72036+ if (nmatch) {
72037+ if (nmatch->deleted)
72038+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
72039+ else
72040+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
72041+ }
72042+ /* this also works for the reload case -- if we don't match a potentially inherited subject
72043+ then we fall back to a normal lookup based on the binary's ino/dev
72044+ */
72045+ if (tmpsubj == NULL && fallback)
72046+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
72047+
72048+ return tmpsubj;
72049+}
72050+
72051+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
72052+{
72053+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
72054+}
72055+
72056+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
72057+{
72058+ struct acl_object_label *obj;
72059+ struct file *filp;
72060+
72061+ filp = task->exec_file;
72062+
72063+ task->acl = subj;
72064+ task->is_writable = 0;
72065+ /* ignore additional mmap checks for processes that are writable
72066+ by the default ACL */
72067+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
72068+ if (unlikely(obj->mode & GR_WRITE))
72069+ task->is_writable = 1;
72070+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72071+ if (unlikely(obj->mode & GR_WRITE))
72072+ task->is_writable = 1;
72073+
72074+ gr_set_proc_res(task);
72075+
72076+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72077+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72078+#endif
72079+}
72080+
72081+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
72082+{
72083+ __gr_apply_subject_to_task(&running_polstate, task, subj);
72084+}
72085+
72086+__u32
72087+gr_search_file(const struct dentry * dentry, const __u32 mode,
72088+ const struct vfsmount * mnt)
72089+{
72090+ __u32 retval = mode;
72091+ struct acl_subject_label *curracl;
72092+ struct acl_object_label *currobj;
72093+
72094+ if (unlikely(!(gr_status & GR_READY)))
72095+ return (mode & ~GR_AUDITS);
72096+
72097+ curracl = current->acl;
72098+
72099+ currobj = chk_obj_label(dentry, mnt, curracl);
72100+ retval = currobj->mode & mode;
72101+
72102+ /* if we're opening a specified transfer file for writing
72103+ (e.g. /dev/initctl), then transfer our role to init
72104+ */
72105+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
72106+ current->role->roletype & GR_ROLE_PERSIST)) {
72107+ struct task_struct *task = init_pid_ns.child_reaper;
72108+
72109+ if (task->role != current->role) {
72110+ struct acl_subject_label *subj;
72111+
72112+ task->acl_sp_role = 0;
72113+ task->acl_role_id = current->acl_role_id;
72114+ task->role = current->role;
72115+ rcu_read_lock();
72116+ read_lock(&grsec_exec_file_lock);
72117+ subj = gr_get_subject_for_task(task, NULL, 1);
72118+ gr_apply_subject_to_task(task, subj);
72119+ read_unlock(&grsec_exec_file_lock);
72120+ rcu_read_unlock();
72121+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
72122+ }
72123+ }
72124+
72125+ if (unlikely
72126+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
72127+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
72128+ __u32 new_mode = mode;
72129+
72130+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72131+
72132+ retval = new_mode;
72133+
72134+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
72135+ new_mode |= GR_INHERIT;
72136+
72137+ if (!(mode & GR_NOLEARN))
72138+ gr_log_learn(dentry, mnt, new_mode);
72139+ }
72140+
72141+ return retval;
72142+}
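+/* typical usage, as a sketch modeled on gr_acl_handle_mmap() further below:
+ *
+ *	__u32 mode = gr_search_file(file->f_path.dentry,
+ *				    GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
+ *				    file->f_path.mnt);
+ *	if (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))
+ *		... deny and log ...
+ */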
72143+
72144+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
72145+ const struct dentry *parent,
72146+ const struct vfsmount *mnt)
72147+{
72148+ struct name_entry *match;
72149+ struct acl_object_label *matchpo;
72150+ struct acl_subject_label *curracl;
72151+ char *path;
72152+
72153+ if (unlikely(!(gr_status & GR_READY)))
72154+ return NULL;
72155+
72156+ preempt_disable();
72157+ path = gr_to_filename_rbac(new_dentry, mnt);
72158+ match = lookup_name_entry_create(path);
72159+
72160+ curracl = current->acl;
72161+
72162+ if (match) {
72163+ read_lock(&gr_inode_lock);
72164+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
72165+ read_unlock(&gr_inode_lock);
72166+
72167+ if (matchpo) {
72168+ preempt_enable();
72169+ return matchpo;
72170+ }
72171+ }
72172+
72173+ // lookup parent
72174+
72175+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
72176+
72177+ preempt_enable();
72178+ return matchpo;
72179+}
72180+
72181+__u32
72182+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
72183+ const struct vfsmount * mnt, const __u32 mode)
72184+{
72185+ struct acl_object_label *matchpo;
72186+ __u32 retval;
72187+
72188+ if (unlikely(!(gr_status & GR_READY)))
72189+ return (mode & ~GR_AUDITS);
72190+
72191+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
72192+
72193+ retval = matchpo->mode & mode;
72194+
72195+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
72196+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72197+ __u32 new_mode = mode;
72198+
72199+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72200+
72201+ gr_log_learn(new_dentry, mnt, new_mode);
72202+ return new_mode;
72203+ }
72204+
72205+ return retval;
72206+}
72207+
72208+__u32
72209+gr_check_link(const struct dentry * new_dentry,
72210+ const struct dentry * parent_dentry,
72211+ const struct vfsmount * parent_mnt,
72212+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
72213+{
72214+ struct acl_object_label *obj;
72215+ __u32 oldmode, newmode;
72216+ __u32 needmode;
72217+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
72218+ GR_DELETE | GR_INHERIT;
72219+
72220+ if (unlikely(!(gr_status & GR_READY)))
72221+ return (GR_CREATE | GR_LINK);
72222+
72223+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
72224+ oldmode = obj->mode;
72225+
72226+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
72227+ newmode = obj->mode;
72228+
72229+ needmode = newmode & checkmodes;
72230+
72231+ // old name for hardlink must have at least the permissions of the new name
72232+ if ((oldmode & needmode) != needmode)
72233+ goto bad;
72234+
72235+ // if old name had restrictions/auditing, make sure the new name does as well
72236+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
72237+
72238+ // don't allow hardlinking of suid/sgid/fcapped files without permission
72239+ if (is_privileged_binary(old_dentry))
72240+ needmode |= GR_SETID;
72241+
72242+ if ((newmode & needmode) != needmode)
72243+ goto bad;
72244+
72245+ // enforce minimum permissions
72246+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
72247+ return newmode;
72248+bad:
72249+ needmode = oldmode;
72250+ if (is_privileged_binary(old_dentry))
72251+ needmode |= GR_SETID;
72252+
72253+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72254+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
72255+ return (GR_CREATE | GR_LINK);
72256+ } else if (newmode & GR_SUPPRESS)
72257+ return GR_SUPPRESS;
72258+ else
72259+ return 0;
72260+}
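+/* in summary, the checks above require that the link target already grants
+ * whatever the new name would grant, that restrictions/auditing (plus
+ * GR_SETID for privileged binaries) carry over to the new name, and that
+ * the subject holds GR_CREATE | GR_LINK on the new name
+ */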
72261+
72262+int
72263+gr_check_hidden_task(const struct task_struct *task)
72264+{
72265+ if (unlikely(!(gr_status & GR_READY)))
72266+ return 0;
72267+
72268+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
72269+ return 1;
72270+
72271+ return 0;
72272+}
72273+
72274+int
72275+gr_check_protected_task(const struct task_struct *task)
72276+{
72277+ if (unlikely(!(gr_status & GR_READY) || !task))
72278+ return 0;
72279+
72280+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72281+ task->acl != current->acl)
72282+ return 1;
72283+
72284+ return 0;
72285+}
72286+
72287+int
72288+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
72289+{
72290+ struct task_struct *p;
72291+ int ret = 0;
72292+
72293+ if (unlikely(!(gr_status & GR_READY) || !pid))
72294+ return ret;
72295+
72296+ read_lock(&tasklist_lock);
72297+ do_each_pid_task(pid, type, p) {
72298+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72299+ p->acl != current->acl) {
72300+ ret = 1;
72301+ goto out;
72302+ }
72303+ } while_each_pid_task(pid, type, p);
72304+out:
72305+ read_unlock(&tasklist_lock);
72306+
72307+ return ret;
72308+}
72309+
72310+void
72311+gr_copy_label(struct task_struct *tsk)
72312+{
72313+ struct task_struct *p = current;
72314+
72315+ tsk->inherited = p->inherited;
72316+ tsk->acl_sp_role = 0;
72317+ tsk->acl_role_id = p->acl_role_id;
72318+ tsk->acl = p->acl;
72319+ tsk->role = p->role;
72320+ tsk->signal->used_accept = 0;
72321+ tsk->signal->curr_ip = p->signal->curr_ip;
72322+ tsk->signal->saved_ip = p->signal->saved_ip;
72323+ if (p->exec_file)
72324+ get_file(p->exec_file);
72325+ tsk->exec_file = p->exec_file;
72326+ tsk->is_writable = p->is_writable;
72327+ if (unlikely(p->signal->used_accept)) {
72328+ p->signal->curr_ip = 0;
72329+ p->signal->saved_ip = 0;
72330+ }
72331+
72332+ return;
72333+}
72334+
72335+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
72336+
72337+int
72338+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
72339+{
72340+ unsigned int i;
72341+ __u16 num;
72342+ uid_t *uidlist;
72343+ uid_t curuid;
72344+ int realok = 0;
72345+ int effectiveok = 0;
72346+ int fsok = 0;
72347+ uid_t globalreal, globaleffective, globalfs;
72348+
72349+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
72350+ struct user_struct *user;
72351+
72352+ if (!uid_valid(real))
72353+ goto skipit;
72354+
72355+ /* find user based on global namespace */
72356+
72357+ globalreal = GR_GLOBAL_UID(real);
72358+
72359+ user = find_user(make_kuid(&init_user_ns, globalreal));
72360+ if (user == NULL)
72361+ goto skipit;
72362+
72363+ if (gr_process_kernel_setuid_ban(user)) {
72364+ /* for find_user */
72365+ free_uid(user);
72366+ return 1;
72367+ }
72368+
72369+ /* for find_user */
72370+ free_uid(user);
72371+
72372+skipit:
72373+#endif
72374+
72375+ if (unlikely(!(gr_status & GR_READY)))
72376+ return 0;
72377+
72378+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72379+ gr_log_learn_uid_change(real, effective, fs);
72380+
72381+ num = current->acl->user_trans_num;
72382+ uidlist = current->acl->user_transitions;
72383+
72384+ if (uidlist == NULL)
72385+ return 0;
72386+
72387+ if (!uid_valid(real)) {
72388+ realok = 1;
72389+ globalreal = (uid_t)-1;
72390+ } else {
72391+ globalreal = GR_GLOBAL_UID(real);
72392+ }
72393+ if (!uid_valid(effective)) {
72394+ effectiveok = 1;
72395+ globaleffective = (uid_t)-1;
72396+ } else {
72397+ globaleffective = GR_GLOBAL_UID(effective);
72398+ }
72399+ if (!uid_valid(fs)) {
72400+ fsok = 1;
72401+ globalfs = (uid_t)-1;
72402+ } else {
72403+ globalfs = GR_GLOBAL_UID(fs);
72404+ }
72405+
72406+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
72407+ for (i = 0; i < num; i++) {
72408+ curuid = uidlist[i];
72409+ if (globalreal == curuid)
72410+ realok = 1;
72411+ if (globaleffective == curuid)
72412+ effectiveok = 1;
72413+ if (globalfs == curuid)
72414+ fsok = 1;
72415+ }
72416+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
72417+ for (i = 0; i < num; i++) {
72418+ curuid = uidlist[i];
72419+ if (globalreal == curuid)
72420+ break;
72421+ if (globaleffective == curuid)
72422+ break;
72423+ if (globalfs == curuid)
72424+ break;
72425+ }
72426+ /* not in deny list */
72427+ if (i == num) {
72428+ realok = 1;
72429+ effectiveok = 1;
72430+ fsok = 1;
72431+ }
72432+ }
72433+
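+	/* on denial, gr_log_int below reports the first id in
+	 * real/effective/fs order that was not allowed */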
72434+ if (realok && effectiveok && fsok)
72435+ return 0;
72436+ else {
72437+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72438+ return 1;
72439+ }
72440+}
72441+
72442+int
72443+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
72444+{
72445+ unsigned int i;
72446+ __u16 num;
72447+ gid_t *gidlist;
72448+ gid_t curgid;
72449+ int realok = 0;
72450+ int effectiveok = 0;
72451+ int fsok = 0;
72452+ gid_t globalreal, globaleffective, globalfs;
72453+
72454+ if (unlikely(!(gr_status & GR_READY)))
72455+ return 0;
72456+
72457+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72458+ gr_log_learn_gid_change(real, effective, fs);
72459+
72460+ num = current->acl->group_trans_num;
72461+ gidlist = current->acl->group_transitions;
72462+
72463+ if (gidlist == NULL)
72464+ return 0;
72465+
72466+ if (!gid_valid(real)) {
72467+ realok = 1;
72468+ globalreal = (gid_t)-1;
72469+ } else {
72470+ globalreal = GR_GLOBAL_GID(real);
72471+ }
72472+ if (!gid_valid(effective)) {
72473+ effectiveok = 1;
72474+ globaleffective = (gid_t)-1;
72475+ } else {
72476+ globaleffective = GR_GLOBAL_GID(effective);
72477+ }
72478+ if (!gid_valid(fs)) {
72479+ fsok = 1;
72480+ globalfs = (gid_t)-1;
72481+ } else {
72482+ globalfs = GR_GLOBAL_GID(fs);
72483+ }
72484+
72485+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
72486+ for (i = 0; i < num; i++) {
72487+ curgid = gidlist[i];
72488+ if (globalreal == curgid)
72489+ realok = 1;
72490+ if (globaleffective == curgid)
72491+ effectiveok = 1;
72492+ if (globalfs == curgid)
72493+ fsok = 1;
72494+ }
72495+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
72496+ for (i = 0; i < num; i++) {
72497+ curgid = gidlist[i];
72498+ if (globalreal == curgid)
72499+ break;
72500+ if (globaleffective == curgid)
72501+ break;
72502+ if (globalfs == curgid)
72503+ break;
72504+ }
72505+ /* not in deny list */
72506+ if (i == num) {
72507+ realok = 1;
72508+ effectiveok = 1;
72509+ fsok = 1;
72510+ }
72511+ }
72512+
72513+ if (realok && effectiveok && fsok)
72514+ return 0;
72515+ else {
72516+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72517+ return 1;
72518+ }
72519+}
72520+
72521+extern int gr_acl_is_capable(const int cap);
72522+
72523+void
72524+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
72525+{
72526+ struct acl_role_label *role = task->role;
72527+ struct acl_role_label *origrole = role;
72528+ struct acl_subject_label *subj = NULL;
72529+ struct acl_object_label *obj;
72530+ struct file *filp;
72531+ uid_t uid;
72532+ gid_t gid;
72533+
72534+ if (unlikely(!(gr_status & GR_READY)))
72535+ return;
72536+
72537+ uid = GR_GLOBAL_UID(kuid);
72538+ gid = GR_GLOBAL_GID(kgid);
72539+
72540+ filp = task->exec_file;
72541+
72542+ /* kernel process, we'll give them the kernel role */
72543+ if (unlikely(!filp)) {
72544+ task->role = running_polstate.kernel_role;
72545+ task->acl = running_polstate.kernel_role->root_label;
72546+ return;
72547+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
72548+ /* save the current ip at time of role lookup so that the proper
72549+ IP will be learned for role_allowed_ip */
72550+ task->signal->saved_ip = task->signal->curr_ip;
72551+ role = lookup_acl_role_label(task, uid, gid);
72552+ }
72553+
72554+ /* don't change the role if we're not a privileged process */
72555+ if (role && task->role != role &&
72556+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
72557+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
72558+ return;
72559+
72560+ task->role = role;
72561+
72562+ if (task->inherited) {
72563+ /* if we reached our subject through inheritance, then first see
72564+ if there's a subject of the same name in the new role that has
72565+ an object that would result in the same inherited subject
72566+ */
72567+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
72568+ if (subj) {
72569+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
72570+ if (!(obj->mode & GR_INHERIT))
72571+ subj = NULL;
72572+ }
72573+
72574+ }
72575+ if (subj == NULL) {
72576+ /* otherwise:
72577+ perform subject lookup in possibly new role
72578+ we can use this result below in the case where role == task->role
72579+ */
72580+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
72581+ }
72582+
72583+	/* if we changed uid/gid but ended up in the same role
72584+	   and are using inheritance, don't lose the inherited subject:
72585+	   if the current subject differs from what a normal lookup
72586+	   would produce, we arrived at it via inheritance, so keep
72587+	   that subject
72588+	*/
72589+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
72590+ (subj == task->acl)))
72591+ task->acl = subj;
72592+
72593+ /* leave task->inherited unaffected */
72594+
72595+ task->is_writable = 0;
72596+
72597+ /* ignore additional mmap checks for processes that are writable
72598+ by the default ACL */
72599+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72600+ if (unlikely(obj->mode & GR_WRITE))
72601+ task->is_writable = 1;
72602+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72603+ if (unlikely(obj->mode & GR_WRITE))
72604+ task->is_writable = 1;
72605+
72606+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72607+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72608+#endif
72609+
72610+ gr_set_proc_res(task);
72611+
72612+ return;
72613+}
72614+
72615+int
72616+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72617+ const int unsafe_flags)
72618+{
72619+ struct task_struct *task = current;
72620+ struct acl_subject_label *newacl;
72621+ struct acl_object_label *obj;
72622+ __u32 retmode;
72623+
72624+ if (unlikely(!(gr_status & GR_READY)))
72625+ return 0;
72626+
72627+ newacl = chk_subj_label(dentry, mnt, task->role);
72628+
72629+	/* special handling for the case where we did an strace -f -p <pid> from an admin role
72630+	   and the pid then did an exec
72631+	*/
72632+ rcu_read_lock();
72633+ read_lock(&tasklist_lock);
72634+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72635+ (task->parent->acl->mode & GR_POVERRIDE))) {
72636+ read_unlock(&tasklist_lock);
72637+ rcu_read_unlock();
72638+ goto skip_check;
72639+ }
72640+ read_unlock(&tasklist_lock);
72641+ rcu_read_unlock();
72642+
72643+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72644+ !(task->role->roletype & GR_ROLE_GOD) &&
72645+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72646+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72647+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72648+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72649+ else
72650+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72651+ return -EACCES;
72652+ }
72653+
72654+skip_check:
72655+
72656+ obj = chk_obj_label(dentry, mnt, task->acl);
72657+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72658+
72659+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72660+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72661+ if (obj->nested)
72662+ task->acl = obj->nested;
72663+ else
72664+ task->acl = newacl;
72665+ task->inherited = 0;
72666+ } else {
72667+ task->inherited = 1;
72668+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72669+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72670+ }
72671+
72672+ task->is_writable = 0;
72673+
72674+ /* ignore additional mmap checks for processes that are writable
72675+ by the default ACL */
72676+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72677+ if (unlikely(obj->mode & GR_WRITE))
72678+ task->is_writable = 1;
72679+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72680+ if (unlikely(obj->mode & GR_WRITE))
72681+ task->is_writable = 1;
72682+
72683+ gr_set_proc_res(task);
72684+
72685+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72686+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72687+#endif
72688+ return 0;
72689+}
72690+
72691+/* always called with valid inodev ptr */
72692+static void
72693+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
72694+{
72695+ struct acl_object_label *matchpo;
72696+ struct acl_subject_label *matchps;
72697+ struct acl_subject_label *subj;
72698+ struct acl_role_label *role;
72699+ unsigned int x;
72700+
72701+ FOR_EACH_ROLE_START(role)
72702+ FOR_EACH_SUBJECT_START(role, subj, x)
72703+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72704+ matchpo->mode |= GR_DELETED;
72705+ FOR_EACH_SUBJECT_END(subj,x)
72706+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72707+ /* nested subjects aren't in the role's subj_hash table */
72708+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72709+ matchpo->mode |= GR_DELETED;
72710+ FOR_EACH_NESTED_SUBJECT_END(subj)
72711+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72712+ matchps->mode |= GR_DELETED;
72713+ FOR_EACH_ROLE_END(role)
72714+
72715+ inodev->nentry->deleted = 1;
72716+
72717+ return;
72718+}
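+/* entries flagged GR_DELETED above (and nentry->deleted) stay in their hash
+ * tables; if a file is later created at a path the policy knows about, the
+ * update_acl_*_label()/update_inodev_entry() helpers below clear the flag
+ * and rehash the entry under the new inode/dev
+ */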
72719+
72720+void
72721+gr_handle_delete(const u64 ino, const dev_t dev)
72722+{
72723+ struct inodev_entry *inodev;
72724+
72725+ if (unlikely(!(gr_status & GR_READY)))
72726+ return;
72727+
72728+ write_lock(&gr_inode_lock);
72729+ inodev = lookup_inodev_entry(ino, dev);
72730+ if (inodev != NULL)
72731+ do_handle_delete(inodev, ino, dev);
72732+ write_unlock(&gr_inode_lock);
72733+
72734+ return;
72735+}
72736+
72737+static void
72738+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
72739+ const u64 newinode, const dev_t newdevice,
72740+ struct acl_subject_label *subj)
72741+{
72742+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72743+ struct acl_object_label *match;
72744+
72745+ match = subj->obj_hash[index];
72746+
72747+ while (match && (match->inode != oldinode ||
72748+ match->device != olddevice ||
72749+ !(match->mode & GR_DELETED)))
72750+ match = match->next;
72751+
72752+ if (match && (match->inode == oldinode)
72753+ && (match->device == olddevice)
72754+ && (match->mode & GR_DELETED)) {
72755+ if (match->prev == NULL) {
72756+ subj->obj_hash[index] = match->next;
72757+ if (match->next != NULL)
72758+ match->next->prev = NULL;
72759+ } else {
72760+ match->prev->next = match->next;
72761+ if (match->next != NULL)
72762+ match->next->prev = match->prev;
72763+ }
72764+ match->prev = NULL;
72765+ match->next = NULL;
72766+ match->inode = newinode;
72767+ match->device = newdevice;
72768+ match->mode &= ~GR_DELETED;
72769+
72770+ insert_acl_obj_label(match, subj);
72771+ }
72772+
72773+ return;
72774+}
72775+
72776+static void
72777+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
72778+ const u64 newinode, const dev_t newdevice,
72779+ struct acl_role_label *role)
72780+{
72781+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
72782+ struct acl_subject_label *match;
72783+
72784+ match = role->subj_hash[index];
72785+
72786+ while (match && (match->inode != oldinode ||
72787+ match->device != olddevice ||
72788+ !(match->mode & GR_DELETED)))
72789+ match = match->next;
72790+
72791+ if (match && (match->inode == oldinode)
72792+ && (match->device == olddevice)
72793+ && (match->mode & GR_DELETED)) {
72794+ if (match->prev == NULL) {
72795+ role->subj_hash[index] = match->next;
72796+ if (match->next != NULL)
72797+ match->next->prev = NULL;
72798+ } else {
72799+ match->prev->next = match->next;
72800+ if (match->next != NULL)
72801+ match->next->prev = match->prev;
72802+ }
72803+ match->prev = NULL;
72804+ match->next = NULL;
72805+ match->inode = newinode;
72806+ match->device = newdevice;
72807+ match->mode &= ~GR_DELETED;
72808+
72809+ insert_acl_subj_label(match, role);
72810+ }
72811+
72812+ return;
72813+}
72814+
72815+static void
72816+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
72817+ const u64 newinode, const dev_t newdevice)
72818+{
72819+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
72820+ struct inodev_entry *match;
72821+
72822+ match = running_polstate.inodev_set.i_hash[index];
72823+
72824+ while (match && (match->nentry->inode != oldinode ||
72825+ match->nentry->device != olddevice || !match->nentry->deleted))
72826+ match = match->next;
72827+
72828+ if (match && (match->nentry->inode == oldinode)
72829+ && (match->nentry->device == olddevice) &&
72830+ match->nentry->deleted) {
72831+ if (match->prev == NULL) {
72832+ running_polstate.inodev_set.i_hash[index] = match->next;
72833+ if (match->next != NULL)
72834+ match->next->prev = NULL;
72835+ } else {
72836+ match->prev->next = match->next;
72837+ if (match->next != NULL)
72838+ match->next->prev = match->prev;
72839+ }
72840+ match->prev = NULL;
72841+ match->next = NULL;
72842+ match->nentry->inode = newinode;
72843+ match->nentry->device = newdevice;
72844+ match->nentry->deleted = 0;
72845+
72846+ insert_inodev_entry(match);
72847+ }
72848+
72849+ return;
72850+}
72851+
72852+static void
72853+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
72854+{
72855+ struct acl_subject_label *subj;
72856+ struct acl_role_label *role;
72857+ unsigned int x;
72858+
72859+ FOR_EACH_ROLE_START(role)
72860+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
72861+
72862+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72863+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
72864+ subj->inode = ino;
72865+ subj->device = dev;
72866+ }
72867+ /* nested subjects aren't in the role's subj_hash table */
72868+ update_acl_obj_label(matchn->inode, matchn->device,
72869+ ino, dev, subj);
72870+ FOR_EACH_NESTED_SUBJECT_END(subj)
72871+ FOR_EACH_SUBJECT_START(role, subj, x)
72872+ update_acl_obj_label(matchn->inode, matchn->device,
72873+ ino, dev, subj);
72874+ FOR_EACH_SUBJECT_END(subj,x)
72875+ FOR_EACH_ROLE_END(role)
72876+
72877+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
72878+
72879+ return;
72880+}
72881+
72882+static void
72883+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
72884+ const struct vfsmount *mnt)
72885+{
72886+ u64 ino = __get_ino(dentry);
72887+ dev_t dev = __get_dev(dentry);
72888+
72889+ __do_handle_create(matchn, ino, dev);
72890+
72891+ return;
72892+}
72893+
72894+void
72895+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72896+{
72897+ struct name_entry *matchn;
72898+
72899+ if (unlikely(!(gr_status & GR_READY)))
72900+ return;
72901+
72902+ preempt_disable();
72903+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
72904+
72905+ if (unlikely((unsigned long)matchn)) {
72906+ write_lock(&gr_inode_lock);
72907+ do_handle_create(matchn, dentry, mnt);
72908+ write_unlock(&gr_inode_lock);
72909+ }
72910+ preempt_enable();
72911+
72912+ return;
72913+}
72914+
72915+void
72916+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72917+{
72918+ struct name_entry *matchn;
72919+
72920+ if (unlikely(!(gr_status & GR_READY)))
72921+ return;
72922+
72923+ preempt_disable();
72924+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
72925+
72926+ if (unlikely((unsigned long)matchn)) {
72927+ write_lock(&gr_inode_lock);
72928+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
72929+ write_unlock(&gr_inode_lock);
72930+ }
72931+ preempt_enable();
72932+
72933+ return;
72934+}
72935+
72936+void
72937+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72938+ struct dentry *old_dentry,
72939+ struct dentry *new_dentry,
72940+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
72941+{
72942+ struct name_entry *matchn;
72943+ struct name_entry *matchn2 = NULL;
72944+ struct inodev_entry *inodev;
72945+ struct inode *inode = new_dentry->d_inode;
72946+ u64 old_ino = __get_ino(old_dentry);
72947+ dev_t old_dev = __get_dev(old_dentry);
72948+ unsigned int exchange = flags & RENAME_EXCHANGE;
72949+
72950+	/* vfs_rename swaps the name and parent link for old_dentry and
72951+	   new_dentry.
72952+	   At this point, old_dentry has the new name, parent link, and inode
72953+	   for the renamed file.
72954+	   If a file is being replaced by a rename, new_dentry has the inode
72955+	   and name for the replaced file.
72956+	*/
72957+
72958+ if (unlikely(!(gr_status & GR_READY)))
72959+ return;
72960+
72961+ preempt_disable();
72962+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
72963+
72964+ /* exchange cases:
72965+ a filename exists for the source, but not dest
72966+ do a recreate on source
72967+ a filename exists for the dest, but not source
72968+ do a recreate on dest
72969+ a filename exists for both source and dest
72970+ delete source and dest, then create source and dest
72971+ a filename exists for neither source nor dest
72972+ no updates needed
72973+
72974+ the name entry lookups get us the old inode/dev associated with
72975+ each name, so do the deletes first (if possible) so that when
72976+ we do the create, we pick up on the right entries
72977+ */
72978+
72979+ if (exchange)
72980+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
72981+
72982+ /* we wouldn't have to check d_inode if it weren't for
72983+ NFS silly-renaming
72984+ */
72985+
72986+ write_lock(&gr_inode_lock);
72987+ if (unlikely((replace || exchange) && inode)) {
72988+ u64 new_ino = __get_ino(new_dentry);
72989+ dev_t new_dev = __get_dev(new_dentry);
72990+
72991+ inodev = lookup_inodev_entry(new_ino, new_dev);
72992+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
72993+ do_handle_delete(inodev, new_ino, new_dev);
72994+ }
72995+
72996+ inodev = lookup_inodev_entry(old_ino, old_dev);
72997+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
72998+ do_handle_delete(inodev, old_ino, old_dev);
72999+
73000+ if (unlikely(matchn != NULL))
73001+ do_handle_create(matchn, old_dentry, mnt);
73002+
73003+ if (unlikely(matchn2 != NULL))
73004+ do_handle_create(matchn2, new_dentry, mnt);
73005+
73006+ write_unlock(&gr_inode_lock);
73007+ preempt_enable();
73008+
73009+ return;
73010+}
73011+
73012+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
73013+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
73014+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
73015+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
73016+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
73017+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
73018+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
73019+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
73020+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
73021+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
73022+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
73023+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
73024+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
73025+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
73026+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
73027+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
73028+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
73029+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
73030+};
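+/* each bump is the slack added above an observed request while learning:
+ * gr_learn_resource() below sets rlim_cur to wanted + res_learn_bumps[res],
+ * leaving headroom over the largest use seen so far
+ */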
73031+
73032+void
73033+gr_learn_resource(const struct task_struct *task,
73034+ const int res, const unsigned long wanted, const int gt)
73035+{
73036+ struct acl_subject_label *acl;
73037+ const struct cred *cred;
73038+
73039+ if (unlikely((gr_status & GR_READY) &&
73040+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
73041+ goto skip_reslog;
73042+
73043+ gr_log_resource(task, res, wanted, gt);
73044+skip_reslog:
73045+
73046+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
73047+ return;
73048+
73049+ acl = task->acl;
73050+
73051+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
73052+ !(acl->resmask & (1U << (unsigned short) res))))
73053+ return;
73054+
73055+ if (wanted >= acl->res[res].rlim_cur) {
73056+ unsigned long res_add;
73057+
73058+ res_add = wanted + res_learn_bumps[res];
73059+
73060+ acl->res[res].rlim_cur = res_add;
73061+
73062+ if (wanted > acl->res[res].rlim_max)
73063+ acl->res[res].rlim_max = res_add;
73064+
73065+ /* only log the subject filename, since resource logging is supported for
73066+ single-subject learning only */
73067+ rcu_read_lock();
73068+ cred = __task_cred(task);
73069+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73070+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
73071+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
73072+ "", (unsigned long) res, &task->signal->saved_ip);
73073+ rcu_read_unlock();
73074+ }
73075+
73076+ return;
73077+}
73078+EXPORT_SYMBOL_GPL(gr_learn_resource);
73079+#endif
73080+
73081+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
73082+void
73083+pax_set_initial_flags(struct linux_binprm *bprm)
73084+{
73085+ struct task_struct *task = current;
73086+ struct acl_subject_label *proc;
73087+ unsigned long flags;
73088+
73089+ if (unlikely(!(gr_status & GR_READY)))
73090+ return;
73091+
73092+ flags = pax_get_flags(task);
73093+
73094+ proc = task->acl;
73095+
73096+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
73097+ flags &= ~MF_PAX_PAGEEXEC;
73098+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
73099+ flags &= ~MF_PAX_SEGMEXEC;
73100+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
73101+ flags &= ~MF_PAX_RANDMMAP;
73102+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
73103+ flags &= ~MF_PAX_EMUTRAMP;
73104+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
73105+ flags &= ~MF_PAX_MPROTECT;
73106+
73107+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
73108+ flags |= MF_PAX_PAGEEXEC;
73109+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
73110+ flags |= MF_PAX_SEGMEXEC;
73111+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
73112+ flags |= MF_PAX_RANDMMAP;
73113+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
73114+ flags |= MF_PAX_EMUTRAMP;
73115+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
73116+ flags |= MF_PAX_MPROTECT;
73117+
73118+ pax_set_flags(task, flags);
73119+
73120+ return;
73121+}
73122+#endif
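+/* note on pax_set_initial_flags(): the GR_PAX_DISABLE_* bits are applied
+ * before the GR_PAX_ENABLE_* bits, so if a subject specified both for the
+ * same feature, the enable would win
+ */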
73123+
73124+int
73125+gr_handle_proc_ptrace(struct task_struct *task)
73126+{
73127+ struct file *filp;
73128+ struct task_struct *tmp = task;
73129+ struct task_struct *curtemp = current;
73130+ __u32 retmode;
73131+
73132+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73133+ if (unlikely(!(gr_status & GR_READY)))
73134+ return 0;
73135+#endif
73136+
73137+ read_lock(&tasklist_lock);
73138+ read_lock(&grsec_exec_file_lock);
73139+ filp = task->exec_file;
73140+
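+	/* walk up task's ancestry: the loop exits early if current is found,
+	 * otherwise tmp ends at a task with pid 0 */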
73141+ while (task_pid_nr(tmp) > 0) {
73142+ if (tmp == curtemp)
73143+ break;
73144+ tmp = tmp->real_parent;
73145+ }
73146+
73147+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73148+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
73149+ read_unlock(&grsec_exec_file_lock);
73150+ read_unlock(&tasklist_lock);
73151+ return 1;
73152+ }
73153+
73154+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73155+ if (!(gr_status & GR_READY)) {
73156+ read_unlock(&grsec_exec_file_lock);
73157+ read_unlock(&tasklist_lock);
73158+ return 0;
73159+ }
73160+#endif
73161+
73162+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
73163+ read_unlock(&grsec_exec_file_lock);
73164+ read_unlock(&tasklist_lock);
73165+
73166+ if (retmode & GR_NOPTRACE)
73167+ return 1;
73168+
73169+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
73170+ && (current->acl != task->acl || (current->acl != current->role->root_label
73171+ && task_pid_nr(current) != task_pid_nr(task))))
73172+ return 1;
73173+
73174+ return 0;
73175+}
73176+
73177+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
73178+{
73179+ if (unlikely(!(gr_status & GR_READY)))
73180+ return;
73181+
73182+ if (!(current->role->roletype & GR_ROLE_GOD))
73183+ return;
73184+
73185+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
73186+ p->role->rolename, gr_task_roletype_to_char(p),
73187+ p->acl->filename);
73188+}
73189+
73190+int
73191+gr_handle_ptrace(struct task_struct *task, const long request)
73192+{
73193+ struct task_struct *tmp = task;
73194+ struct task_struct *curtemp = current;
73195+ __u32 retmode;
73196+
73197+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73198+ if (unlikely(!(gr_status & GR_READY)))
73199+ return 0;
73200+#endif
73201+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
73202+ read_lock(&tasklist_lock);
73203+ while (task_pid_nr(tmp) > 0) {
73204+ if (tmp == curtemp)
73205+ break;
73206+ tmp = tmp->real_parent;
73207+ }
73208+
73209+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73210+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
73211+ read_unlock(&tasklist_lock);
73212+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73213+ return 1;
73214+ }
73215+ read_unlock(&tasklist_lock);
73216+ }
73217+
73218+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73219+ if (!(gr_status & GR_READY))
73220+ return 0;
73221+#endif
73222+
73223+ read_lock(&grsec_exec_file_lock);
73224+ if (unlikely(!task->exec_file)) {
73225+ read_unlock(&grsec_exec_file_lock);
73226+ return 0;
73227+ }
73228+
73229+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
73230+ read_unlock(&grsec_exec_file_lock);
73231+
73232+ if (retmode & GR_NOPTRACE) {
73233+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73234+ return 1;
73235+ }
73236+
73237+ if (retmode & GR_PTRACERD) {
73238+ switch (request) {
73239+ case PTRACE_SEIZE:
73240+ case PTRACE_POKETEXT:
73241+ case PTRACE_POKEDATA:
73242+ case PTRACE_POKEUSR:
73243+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
73244+ case PTRACE_SETREGS:
73245+ case PTRACE_SETFPREGS:
73246+#endif
73247+#ifdef CONFIG_X86
73248+ case PTRACE_SETFPXREGS:
73249+#endif
73250+#ifdef CONFIG_ALTIVEC
73251+ case PTRACE_SETVRREGS:
73252+#endif
73253+ return 1;
73254+ default:
73255+ return 0;
73256+ }
73257+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
73258+ !(current->role->roletype & GR_ROLE_GOD) &&
73259+ (current->acl != task->acl)) {
73260+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73261+ return 1;
73262+ }
73263+
73264+ return 0;
73265+}
73266+
73267+static int is_writable_mmap(const struct file *filp)
73268+{
73269+ struct task_struct *task = current;
73270+ struct acl_object_label *obj, *obj2;
73271+
73272+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
73273+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
73274+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
73275+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
73276+ task->role->root_label);
73277+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
73278+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
73279+ return 1;
73280+ }
73281+ }
73282+ return 0;
73283+}
73284+
73285+int
73286+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
73287+{
73288+ __u32 mode;
73289+
73290+ if (unlikely(!file || !(prot & PROT_EXEC)))
73291+ return 1;
73292+
73293+ if (is_writable_mmap(file))
73294+ return 0;
73295+
73296+ mode =
73297+ gr_search_file(file->f_path.dentry,
73298+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73299+ file->f_path.mnt);
73300+
73301+ if (!gr_tpe_allow(file))
73302+ return 0;
73303+
73304+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73305+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73306+ return 0;
73307+ } else if (unlikely(!(mode & GR_EXEC))) {
73308+ return 0;
73309+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73310+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73311+ return 1;
73312+ }
73313+
73314+ return 1;
73315+}
73316+
73317+int
73318+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
73319+{
73320+ __u32 mode;
73321+
73322+ if (unlikely(!file || !(prot & PROT_EXEC)))
73323+ return 1;
73324+
73325+ if (is_writable_mmap(file))
73326+ return 0;
73327+
73328+ mode =
73329+ gr_search_file(file->f_path.dentry,
73330+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73331+ file->f_path.mnt);
73332+
73333+ if (!gr_tpe_allow(file))
73334+ return 0;
73335+
73336+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73337+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73338+ return 0;
73339+ } else if (unlikely(!(mode & GR_EXEC))) {
73340+ return 0;
73341+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73342+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73343+ return 1;
73344+ }
73345+
73346+ return 1;
73347+}
73348+
73349+void
73350+gr_acl_handle_psacct(struct task_struct *task, const long code)
73351+{
73352+ unsigned long runtime, cputime;
73353+ cputime_t utime, stime;
73354+ unsigned int wday, cday;
73355+ __u8 whr, chr;
73356+ __u8 wmin, cmin;
73357+ __u8 wsec, csec;
73358+ struct timespec curtime, starttime;
73359+
73360+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
73361+ !(task->acl->mode & GR_PROCACCT)))
73362+ return;
73363+
73364+ curtime = ns_to_timespec(ktime_get_ns());
73365+ starttime = ns_to_timespec(task->start_time);
73366+ runtime = curtime.tv_sec - starttime.tv_sec;
73367+ wday = runtime / (60 * 60 * 24);
73368+ runtime -= wday * (60 * 60 * 24);
73369+ whr = runtime / (60 * 60);
73370+ runtime -= whr * (60 * 60);
73371+ wmin = runtime / 60;
73372+ runtime -= wmin * 60;
73373+ wsec = runtime;
73374+
73375+ task_cputime(task, &utime, &stime);
73376+ cputime = cputime_to_secs(utime + stime);
73377+ cday = cputime / (60 * 60 * 24);
73378+ cputime -= cday * (60 * 60 * 24);
73379+ chr = cputime / (60 * 60);
73380+ cputime -= chr * (60 * 60);
73381+ cmin = cputime / 60;
73382+ cputime -= cmin * 60;
73383+ csec = cputime;
73384+
73385+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
73386+
73387+ return;
73388+}
73389+
73390+#ifdef CONFIG_TASKSTATS
73391+int gr_is_taskstats_denied(int pid)
73392+{
73393+ struct task_struct *task;
73394+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73395+ const struct cred *cred;
73396+#endif
73397+ int ret = 0;
73398+
73399+	/* restrict taskstats viewing to un-chrooted root users;
73400+	   if the RBAC system is enabled, they must also have the 'view' subject flag
73401+	*/
73402+
73403+ rcu_read_lock();
73404+ read_lock(&tasklist_lock);
73405+ task = find_task_by_vpid(pid);
73406+ if (task) {
73407+#ifdef CONFIG_GRKERNSEC_CHROOT
73408+ if (proc_is_chrooted(task))
73409+ ret = -EACCES;
73410+#endif
73411+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73412+ cred = __task_cred(task);
73413+#ifdef CONFIG_GRKERNSEC_PROC_USER
73414+ if (gr_is_global_nonroot(cred->uid))
73415+ ret = -EACCES;
73416+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73417+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
73418+ ret = -EACCES;
73419+#endif
73420+#endif
73421+ if (gr_status & GR_READY) {
73422+ if (!(task->acl->mode & GR_VIEW))
73423+ ret = -EACCES;
73424+ }
73425+ } else
73426+ ret = -ENOENT;
73427+
73428+ read_unlock(&tasklist_lock);
73429+ rcu_read_unlock();
73430+
73431+ return ret;
73432+}
73433+#endif
73434+
73435+/* AUXV entries are filled via a descendant of search_binary_handler
73436+ after we've already applied the subject for the target
73437+*/
73438+int gr_acl_enable_at_secure(void)
73439+{
73440+ if (unlikely(!(gr_status & GR_READY)))
73441+ return 0;
73442+
73443+ if (current->acl->mode & GR_ATSECURE)
73444+ return 1;
73445+
73446+ return 0;
73447+}
73448+
73449+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
73450+{
73451+ struct task_struct *task = current;
73452+ struct dentry *dentry = file->f_path.dentry;
73453+ struct vfsmount *mnt = file->f_path.mnt;
73454+ struct acl_object_label *obj, *tmp;
73455+ struct acl_subject_label *subj;
73456+ unsigned int bufsize;
73457+ int is_not_root;
73458+ char *path;
73459+ dev_t dev = __get_dev(dentry);
73460+
73461+ if (unlikely(!(gr_status & GR_READY)))
73462+ return 1;
73463+
73464+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
73465+ return 1;
73466+
73467+ /* ignore Eric Biederman */
73468+ if (IS_PRIVATE(dentry->d_inode))
73469+ return 1;
73470+
73471+ subj = task->acl;
73472+ read_lock(&gr_inode_lock);
73473+ do {
73474+ obj = lookup_acl_obj_label(ino, dev, subj);
73475+ if (obj != NULL) {
73476+ read_unlock(&gr_inode_lock);
73477+ return (obj->mode & GR_FIND) ? 1 : 0;
73478+ }
73479+ } while ((subj = subj->parent_subject));
73480+ read_unlock(&gr_inode_lock);
73481+
73482+	/* this is purely an optimization, since we're looking for an object
73483+	   for the directory we're doing a readdir on.
73484+	   If it's possible for any globbed object to match the entry we're
73485+	   filling into the directory, then the object we find here will be
73486+	   an anchor point with attached globbed objects
73487+	*/
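+	/* e.g. (hypothetical policy) an anchor object for "/home" might carry
+	 * globbed children such as "/home/*/public_html"; each is matched
+	 * against the constructed path with glob_match() below
+	 */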
73488+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
73489+ if (obj->globbed == NULL)
73490+ return (obj->mode & GR_FIND) ? 1 : 0;
73491+
73492+ is_not_root = ((obj->filename[0] == '/') &&
73493+ (obj->filename[1] == '\0')) ? 0 : 1;
73494+ bufsize = PAGE_SIZE - namelen - is_not_root;
73495+
73496+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
73497+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
73498+ return 1;
73499+
73500+ preempt_disable();
73501+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
73502+ bufsize);
73503+
73504+ bufsize = strlen(path);
73505+
73506+ /* if base is "/", don't append an additional slash */
73507+ if (is_not_root)
73508+ *(path + bufsize) = '/';
73509+ memcpy(path + bufsize + is_not_root, name, namelen);
73510+ *(path + bufsize + namelen + is_not_root) = '\0';
73511+
73512+ tmp = obj->globbed;
73513+ while (tmp) {
73514+ if (!glob_match(tmp->filename, path)) {
73515+ preempt_enable();
73516+ return (tmp->mode & GR_FIND) ? 1 : 0;
73517+ }
73518+ tmp = tmp->next;
73519+ }
73520+ preempt_enable();
73521+ return (obj->mode & GR_FIND) ? 1 : 0;
73522+}
73523+
73524+void gr_put_exec_file(struct task_struct *task)
73525+{
73526+ struct file *filp;
73527+
73528+ write_lock(&grsec_exec_file_lock);
73529+ filp = task->exec_file;
73530+ task->exec_file = NULL;
73531+ write_unlock(&grsec_exec_file_lock);
73532+
73533+ if (filp)
73534+ fput(filp);
73535+
73536+ return;
73537+}
73538+
73539+
73540+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
73541+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
73542+#endif
73543+#ifdef CONFIG_SECURITY
73544+EXPORT_SYMBOL_GPL(gr_check_user_change);
73545+EXPORT_SYMBOL_GPL(gr_check_group_change);
73546+#endif
73547+
73548diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
73549new file mode 100644
73550index 0000000..18ffbbd
73551--- /dev/null
73552+++ b/grsecurity/gracl_alloc.c
73553@@ -0,0 +1,105 @@
73554+#include <linux/kernel.h>
73555+#include <linux/mm.h>
73556+#include <linux/slab.h>
73557+#include <linux/vmalloc.h>
73558+#include <linux/gracl.h>
73559+#include <linux/grsecurity.h>
73560+
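+/* a simple stack-based allocator for RBAC policy loading: every pointer
+ * handed out by acl_alloc() is pushed onto alloc_stack, so acl_free_all()
+ * can release an entire policy in one pass without per-object bookkeeping
+ */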
73561+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
73562+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
73563+
73564+static __inline__ int
73565+alloc_pop(void)
73566+{
73567+ if (current_alloc_state->alloc_stack_next == 1)
73568+ return 0;
73569+
73570+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
73571+
73572+ current_alloc_state->alloc_stack_next--;
73573+
73574+ return 1;
73575+}
73576+
73577+static __inline__ int
73578+alloc_push(void *buf)
73579+{
73580+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
73581+ return 1;
73582+
73583+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
73584+
73585+ current_alloc_state->alloc_stack_next++;
73586+
73587+ return 0;
73588+}
73589+
73590+void *
73591+acl_alloc(unsigned long len)
73592+{
73593+ void *ret = NULL;
73594+
73595+ if (!len || len > PAGE_SIZE)
73596+ goto out;
73597+
73598+ ret = kmalloc(len, GFP_KERNEL);
73599+
73600+ if (ret) {
73601+ if (alloc_push(ret)) {
73602+ kfree(ret);
73603+ ret = NULL;
73604+ }
73605+ }
73606+
73607+out:
73608+ return ret;
73609+}
73610+
73611+void *
73612+acl_alloc_num(unsigned long num, unsigned long len)
73613+{
73614+ if (!len || (num > (PAGE_SIZE / len)))
73615+ return NULL;
73616+
73617+ return acl_alloc(num * len);
73618+}
73619+
73620+void
73621+acl_free_all(void)
73622+{
73623+ if (!current_alloc_state->alloc_stack)
73624+ return;
73625+
73626+ while (alloc_pop()) ;
73627+
73628+ if (current_alloc_state->alloc_stack) {
73629+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73630+ kfree(current_alloc_state->alloc_stack);
73631+ else
73632+ vfree(current_alloc_state->alloc_stack);
73633+ }
73634+
73635+ current_alloc_state->alloc_stack = NULL;
73636+ current_alloc_state->alloc_stack_size = 1;
73637+ current_alloc_state->alloc_stack_next = 1;
73638+
73639+ return;
73640+}
73641+
73642+int
73643+acl_alloc_stack_init(unsigned long size)
73644+{
73645+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73646+ current_alloc_state->alloc_stack =
73647+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73648+ else
73649+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73650+
73651+ current_alloc_state->alloc_stack_size = size;
73652+ current_alloc_state->alloc_stack_next = 1;
73653+
73654+ if (!current_alloc_state->alloc_stack)
73655+ return 0;
73656+ else
73657+ return 1;
73658+}
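
gracl_alloc.c implements a one-shot region allocator: every successful allocation is pushed onto a stack of pointers so the whole policy can be torn down with one acl_free_all() pass, and the stack itself is kmalloc'd or vmalloc'd depending on whether it fits in a page. A simplified userspace sketch of the same push-everything/free-everything pattern (the 1-based indexing and the kmalloc/vmalloc split are omitted):

    #include <stdlib.h>

    static void **stack;
    static unsigned long stack_size, stack_next;

    static int stack_init(unsigned long size)
    {
        stack = malloc(size * sizeof(void *));
        stack_size = size;
        stack_next = 0;
        return stack != NULL;
    }

    static void *stack_alloc(size_t len)
    {
        void *p;

        if (!len || stack_next >= stack_size)
            return NULL;
        p = malloc(len);
        if (p)
            stack[stack_next++] = p;  /* remember it for the teardown pass */
        return p;
    }

    static void stack_free_all(void)
    {
        while (stack_next)
            free(stack[--stack_next]);
        free(stack);
        stack = NULL;
    }

    int main(void)
    {
        if (!stack_init(128))
            return 1;
        (void)stack_alloc(32);        /* lives until teardown */
        stack_free_all();             /* frees every allocation at once */
        return 0;
    }
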
73659diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73660new file mode 100644
73661index 0000000..1a94c11
73662--- /dev/null
73663+++ b/grsecurity/gracl_cap.c
73664@@ -0,0 +1,127 @@
73665+#include <linux/kernel.h>
73666+#include <linux/module.h>
73667+#include <linux/sched.h>
73668+#include <linux/gracl.h>
73669+#include <linux/grsecurity.h>
73670+#include <linux/grinternal.h>
73671+
73672+extern const char *captab_log[];
73673+extern int captab_log_entries;
73674+
73675+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73676+{
73677+ struct acl_subject_label *curracl;
73678+
73679+ if (!gr_acl_is_enabled())
73680+ return 1;
73681+
73682+ curracl = task->acl;
73683+
73684+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73685+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73686+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73687+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73688+ gr_to_filename(task->exec_file->f_path.dentry,
73689+ task->exec_file->f_path.mnt) : curracl->filename,
73690+ curracl->filename, 0UL,
73691+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73692+ return 1;
73693+ }
73694+
73695+ return 0;
73696+}
73697+
73698+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73699+{
73700+ struct acl_subject_label *curracl;
73701+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73702+ kernel_cap_t cap_audit = __cap_empty_set;
73703+
73704+ if (!gr_acl_is_enabled())
73705+ return 1;
73706+
73707+ curracl = task->acl;
73708+
73709+ cap_drop = curracl->cap_lower;
73710+ cap_mask = curracl->cap_mask;
73711+ cap_audit = curracl->cap_invert_audit;
73712+
73713+ while ((curracl = curracl->parent_subject)) {
73714+		/* if the cap isn't specified in the current computed mask but is
73715+		   specified in the current level subject, mark it as specified in
73716+		   the computed mask; if the current level subject also lowers it,
73717+		   add it to the set of dropped capabilities
73718+		*/
73719+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73720+ cap_raise(cap_mask, cap);
73721+ if (cap_raised(curracl->cap_lower, cap))
73722+ cap_raise(cap_drop, cap);
73723+ if (cap_raised(curracl->cap_invert_audit, cap))
73724+ cap_raise(cap_audit, cap);
73725+ }
73726+ }
73727+
73728+ if (!cap_raised(cap_drop, cap)) {
73729+ if (cap_raised(cap_audit, cap))
73730+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73731+ return 1;
73732+ }
73733+
73734+	/* in the general case, only learn the capability use if the process
73735+	   has the capability; the two uses of gr_learn_cap in sys.c are an
73736+	   exception to this rule, to ensure any role transition involves what
73737+	   the full-learned policy believes in a privileged process
73738+	*/
73739+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73740+ return 1;
73741+
73742+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73743+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73744+
73745+ return 0;
73746+}
73747+
73748+int
73749+gr_acl_is_capable(const int cap)
73750+{
73751+ return gr_task_acl_is_capable(current, current_cred(), cap);
73752+}
73753+
73754+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73755+{
73756+ struct acl_subject_label *curracl;
73757+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73758+
73759+ if (!gr_acl_is_enabled())
73760+ return 1;
73761+
73762+ curracl = task->acl;
73763+
73764+ cap_drop = curracl->cap_lower;
73765+ cap_mask = curracl->cap_mask;
73766+
73767+ while ((curracl = curracl->parent_subject)) {
73768+		/* if the cap isn't specified in the current computed mask but is
73769+		   specified in the current level subject, mark it as specified in
73770+		   the computed mask; if the current level subject also lowers it,
73771+		   add it to the set of dropped capabilities
73772+		*/
73773+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73774+ cap_raise(cap_mask, cap);
73775+ if (cap_raised(curracl->cap_lower, cap))
73776+ cap_raise(cap_drop, cap);
73777+ }
73778+ }
73779+
73780+ if (!cap_raised(cap_drop, cap))
73781+ return 1;
73782+
73783+ return 0;
73784+}
73785+
73786+int
73787+gr_acl_is_capable_nolog(const int cap)
73788+{
73789+ return gr_task_acl_is_capable_nolog(current, cap);
73790+}
73791+
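
Both capability checks above accumulate policy up the parent_subject chain: the nearest subject that specifies a capability decides whether it is lowered, and more distant ancestors cannot override that decision. The same walk with plain 64-bit masks, as a hedged sketch (bit 21 is only an illustrative capability number):

    #include <stdint.h>
    #include <stdio.h>

    struct subj {
        uint64_t cap_mask;           /* caps this subject specifies */
        uint64_t cap_lower;          /* caps it lowers (drops) */
        struct subj *parent;
    };

    static int cap_allowed(const struct subj *s, int cap)
    {
        uint64_t bit  = 1ULL << cap;
        uint64_t mask = s->cap_mask;   /* caps decided so far */
        uint64_t drop = s->cap_lower;  /* caps dropped so far */

        for (s = s->parent; s != NULL; s = s->parent) {
            if (!(mask & bit) && (s->cap_mask & bit)) {
                mask |= bit;           /* first ancestor to specify it wins */
                if (s->cap_lower & bit)
                    drop |= bit;
            }
        }
        return !(drop & bit);
    }

    int main(void)
    {
        struct subj parent = { .cap_mask = 1ULL << 21,    /* specifies the cap */
                               .cap_lower = 1ULL << 21 }; /* ...and lowers it */
        struct subj child  = { .parent = &parent };       /* silent on it */
        printf("%d\n", cap_allowed(&child, 21));          /* 0: inherited drop */
        return 0;
    }
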
73792diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
73793new file mode 100644
73794index 0000000..a43dd06
73795--- /dev/null
73796+++ b/grsecurity/gracl_compat.c
73797@@ -0,0 +1,269 @@
73798+#include <linux/kernel.h>
73799+#include <linux/gracl.h>
73800+#include <linux/compat.h>
73801+#include <linux/gracl_compat.h>
73802+
73803+#include <asm/uaccess.h>
73804+
73805+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
73806+{
73807+ struct gr_arg_wrapper_compat uwrapcompat;
73808+
73809+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
73810+ return -EFAULT;
73811+
73812+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
73813+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
73814+ return -EINVAL;
73815+
73816+ uwrap->arg = compat_ptr(uwrapcompat.arg);
73817+ uwrap->version = uwrapcompat.version;
73818+ uwrap->size = sizeof(struct gr_arg);
73819+
73820+ return 0;
73821+}
73822+
73823+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
73824+{
73825+ struct gr_arg_compat argcompat;
73826+
73827+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
73828+ return -EFAULT;
73829+
73830+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
73831+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
73832+ arg->role_db.num_roles = argcompat.role_db.num_roles;
73833+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
73834+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
73835+ arg->role_db.num_objects = argcompat.role_db.num_objects;
73836+
73837+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
73838+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
73839+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
73840+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
73841+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
73842+ arg->segv_device = argcompat.segv_device;
73843+ arg->segv_inode = argcompat.segv_inode;
73844+ arg->segv_uid = argcompat.segv_uid;
73845+ arg->num_sprole_pws = argcompat.num_sprole_pws;
73846+ arg->mode = argcompat.mode;
73847+
73848+ return 0;
73849+}
73850+
73851+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
73852+{
73853+ struct acl_object_label_compat objcompat;
73854+
73855+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
73856+ return -EFAULT;
73857+
73858+ obj->filename = compat_ptr(objcompat.filename);
73859+ obj->inode = objcompat.inode;
73860+ obj->device = objcompat.device;
73861+ obj->mode = objcompat.mode;
73862+
73863+ obj->nested = compat_ptr(objcompat.nested);
73864+ obj->globbed = compat_ptr(objcompat.globbed);
73865+
73866+ obj->prev = compat_ptr(objcompat.prev);
73867+ obj->next = compat_ptr(objcompat.next);
73868+
73869+ return 0;
73870+}
73871+
73872+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73873+{
73874+ unsigned int i;
73875+ struct acl_subject_label_compat subjcompat;
73876+
73877+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
73878+ return -EFAULT;
73879+
73880+ subj->filename = compat_ptr(subjcompat.filename);
73881+ subj->inode = subjcompat.inode;
73882+ subj->device = subjcompat.device;
73883+ subj->mode = subjcompat.mode;
73884+ subj->cap_mask = subjcompat.cap_mask;
73885+ subj->cap_lower = subjcompat.cap_lower;
73886+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
73887+
73888+ for (i = 0; i < GR_NLIMITS; i++) {
73889+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
73890+ subj->res[i].rlim_cur = RLIM_INFINITY;
73891+ else
73892+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
73893+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
73894+ subj->res[i].rlim_max = RLIM_INFINITY;
73895+ else
73896+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
73897+ }
73898+ subj->resmask = subjcompat.resmask;
73899+
73900+ subj->user_trans_type = subjcompat.user_trans_type;
73901+ subj->group_trans_type = subjcompat.group_trans_type;
73902+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
73903+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
73904+ subj->user_trans_num = subjcompat.user_trans_num;
73905+ subj->group_trans_num = subjcompat.group_trans_num;
73906+
73907+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
73908+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
73909+ subj->ip_type = subjcompat.ip_type;
73910+ subj->ips = compat_ptr(subjcompat.ips);
73911+ subj->ip_num = subjcompat.ip_num;
73912+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
73913+
73914+ subj->crashes = subjcompat.crashes;
73915+ subj->expires = subjcompat.expires;
73916+
73917+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
73918+ subj->hash = compat_ptr(subjcompat.hash);
73919+ subj->prev = compat_ptr(subjcompat.prev);
73920+ subj->next = compat_ptr(subjcompat.next);
73921+
73922+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
73923+ subj->obj_hash_size = subjcompat.obj_hash_size;
73924+ subj->pax_flags = subjcompat.pax_flags;
73925+
73926+ return 0;
73927+}
73928+
73929+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
73930+{
73931+ struct acl_role_label_compat rolecompat;
73932+
73933+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
73934+ return -EFAULT;
73935+
73936+ role->rolename = compat_ptr(rolecompat.rolename);
73937+ role->uidgid = rolecompat.uidgid;
73938+ role->roletype = rolecompat.roletype;
73939+
73940+ role->auth_attempts = rolecompat.auth_attempts;
73941+ role->expires = rolecompat.expires;
73942+
73943+ role->root_label = compat_ptr(rolecompat.root_label);
73944+ role->hash = compat_ptr(rolecompat.hash);
73945+
73946+ role->prev = compat_ptr(rolecompat.prev);
73947+ role->next = compat_ptr(rolecompat.next);
73948+
73949+ role->transitions = compat_ptr(rolecompat.transitions);
73950+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
73951+ role->domain_children = compat_ptr(rolecompat.domain_children);
73952+ role->domain_child_num = rolecompat.domain_child_num;
73953+
73954+ role->umask = rolecompat.umask;
73955+
73956+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
73957+ role->subj_hash_size = rolecompat.subj_hash_size;
73958+
73959+ return 0;
73960+}
73961+
73962+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73963+{
73964+ struct role_allowed_ip_compat roleip_compat;
73965+
73966+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
73967+ return -EFAULT;
73968+
73969+ roleip->addr = roleip_compat.addr;
73970+ roleip->netmask = roleip_compat.netmask;
73971+
73972+ roleip->prev = compat_ptr(roleip_compat.prev);
73973+ roleip->next = compat_ptr(roleip_compat.next);
73974+
73975+ return 0;
73976+}
73977+
73978+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
73979+{
73980+ struct role_transition_compat trans_compat;
73981+
73982+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
73983+ return -EFAULT;
73984+
73985+ trans->rolename = compat_ptr(trans_compat.rolename);
73986+
73987+ trans->prev = compat_ptr(trans_compat.prev);
73988+ trans->next = compat_ptr(trans_compat.next);
73989+
73990+ return 0;
73991+
73992+}
73993+
73994+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73995+{
73996+ struct gr_hash_struct_compat hash_compat;
73997+
73998+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
73999+ return -EFAULT;
74000+
74001+ hash->table = compat_ptr(hash_compat.table);
74002+ hash->nametable = compat_ptr(hash_compat.nametable);
74003+ hash->first = compat_ptr(hash_compat.first);
74004+
74005+ hash->table_size = hash_compat.table_size;
74006+ hash->used_size = hash_compat.used_size;
74007+
74008+ hash->type = hash_compat.type;
74009+
74010+ return 0;
74011+}
74012+
74013+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
74014+{
74015+ compat_uptr_t ptrcompat;
74016+
74017+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
74018+ return -EFAULT;
74019+
74020+ *(void **)ptr = compat_ptr(ptrcompat);
74021+
74022+ return 0;
74023+}
74024+
74025+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74026+{
74027+ struct acl_ip_label_compat ip_compat;
74028+
74029+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
74030+ return -EFAULT;
74031+
74032+ ip->iface = compat_ptr(ip_compat.iface);
74033+ ip->addr = ip_compat.addr;
74034+ ip->netmask = ip_compat.netmask;
74035+ ip->low = ip_compat.low;
74036+ ip->high = ip_compat.high;
74037+ ip->mode = ip_compat.mode;
74038+ ip->type = ip_compat.type;
74039+
74040+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
74041+
74042+ ip->prev = compat_ptr(ip_compat.prev);
74043+ ip->next = compat_ptr(ip_compat.next);
74044+
74045+ return 0;
74046+}
74047+
74048+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74049+{
74050+ struct sprole_pw_compat pw_compat;
74051+
74052+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
74053+ return -EFAULT;
74054+
74055+ pw->rolename = compat_ptr(pw_compat.rolename);
74056+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
74057+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
74058+
74059+ return 0;
74060+}
74061+
74062+size_t get_gr_arg_wrapper_size_compat(void)
74063+{
74064+ return sizeof(struct gr_arg_wrapper_compat);
74065+}
74066+
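
All of these helpers share one job: the 32-bit userspace image of each structure packs pointers as 32-bit integers, so the kernel copies the packed struct in and widens every pointer field through compat_ptr() by hand instead of trusting the native layout. The same idea in miniature, as a userspace sketch where compat_ptr() is modelled as a cast through uintptr_t and the node type is hypothetical:

    #include <stdint.h>

    typedef uint32_t compat_uptr_t;

    struct node_compat {             /* layout as seen by 32-bit code */
        compat_uptr_t name;
        uint32_t      mode;
        compat_uptr_t next;
    };

    struct node {                    /* native layout with real pointers */
        char        *name;
        uint32_t     mode;
        struct node *next;
    };

    static void *compat_ptr(compat_uptr_t p)
    {
        return (void *)(uintptr_t)p;
    }

    static void widen_node(struct node *n, const struct node_compat *c)
    {
        n->name = compat_ptr(c->name);   /* widen each pointer field */
        n->mode = c->mode;               /* scalars copy straight across */
        n->next = compat_ptr(c->next);
    }

    int main(void)
    {
        struct node_compat c = { .name = 0x1000, .mode = 0755, .next = 0 };
        struct node n;
        widen_node(&n, &c);
        return n.mode == 0755 ? 0 : 1;
    }
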
74067diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
74068new file mode 100644
74069index 0000000..8ee8e4f
74070--- /dev/null
74071+++ b/grsecurity/gracl_fs.c
74072@@ -0,0 +1,447 @@
74073+#include <linux/kernel.h>
74074+#include <linux/sched.h>
74075+#include <linux/types.h>
74076+#include <linux/fs.h>
74077+#include <linux/file.h>
74078+#include <linux/stat.h>
74079+#include <linux/grsecurity.h>
74080+#include <linux/grinternal.h>
74081+#include <linux/gracl.h>
74082+
74083+umode_t
74084+gr_acl_umask(void)
74085+{
74086+ if (unlikely(!gr_acl_is_enabled()))
74087+ return 0;
74088+
74089+ return current->role->umask;
74090+}
74091+
74092+__u32
74093+gr_acl_handle_hidden_file(const struct dentry * dentry,
74094+ const struct vfsmount * mnt)
74095+{
74096+ __u32 mode;
74097+
74098+ if (unlikely(d_is_negative(dentry)))
74099+ return GR_FIND;
74100+
74101+ mode =
74102+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
74103+
74104+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
74105+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74106+ return mode;
74107+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
74108+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74109+ return 0;
74110+ } else if (unlikely(!(mode & GR_FIND)))
74111+ return 0;
74112+
74113+ return GR_FIND;
74114+}
74115+
74116+__u32
74117+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
74118+ int acc_mode)
74119+{
74120+ __u32 reqmode = GR_FIND;
74121+ __u32 mode;
74122+
74123+ if (unlikely(d_is_negative(dentry)))
74124+ return reqmode;
74125+
74126+ if (acc_mode & MAY_APPEND)
74127+ reqmode |= GR_APPEND;
74128+ else if (acc_mode & MAY_WRITE)
74129+ reqmode |= GR_WRITE;
74130+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
74131+ reqmode |= GR_READ;
74132+
74133+ mode =
74134+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74135+ mnt);
74136+
74137+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74138+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74139+ reqmode & GR_READ ? " reading" : "",
74140+ reqmode & GR_WRITE ? " writing" : reqmode &
74141+ GR_APPEND ? " appending" : "");
74142+ return reqmode;
74143+	} else if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) {
74146+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74147+ reqmode & GR_READ ? " reading" : "",
74148+ reqmode & GR_WRITE ? " writing" : reqmode &
74149+ GR_APPEND ? " appending" : "");
74150+ return 0;
74151+ } else if (unlikely((mode & reqmode) != reqmode))
74152+ return 0;
74153+
74154+ return reqmode;
74155+}
74156+
74157+__u32
74158+gr_acl_handle_creat(const struct dentry * dentry,
74159+ const struct dentry * p_dentry,
74160+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
74161+ const int imode)
74162+{
74163+ __u32 reqmode = GR_WRITE | GR_CREATE;
74164+ __u32 mode;
74165+
74166+ if (acc_mode & MAY_APPEND)
74167+ reqmode |= GR_APPEND;
74168+ // if a directory was required or the directory already exists, then
74169+ // don't count this open as a read
74170+ if ((acc_mode & MAY_READ) &&
74171+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
74172+ reqmode |= GR_READ;
74173+ if ((open_flags & O_CREAT) &&
74174+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74175+ reqmode |= GR_SETID;
74176+
74177+ mode =
74178+ gr_check_create(dentry, p_dentry, p_mnt,
74179+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74180+
74181+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74182+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74183+ reqmode & GR_READ ? " reading" : "",
74184+ reqmode & GR_WRITE ? " writing" : reqmode &
74185+ GR_APPEND ? " appending" : "");
74186+ return reqmode;
74187+	} else if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) {
74190+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74191+ reqmode & GR_READ ? " reading" : "",
74192+ reqmode & GR_WRITE ? " writing" : reqmode &
74193+ GR_APPEND ? " appending" : "");
74194+ return 0;
74195+ } else if (unlikely((mode & reqmode) != reqmode))
74196+ return 0;
74197+
74198+ return reqmode;
74199+}
74200+
74201+__u32
74202+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
74203+ const int fmode)
74204+{
74205+ __u32 mode, reqmode = GR_FIND;
74206+
74207+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
74208+ reqmode |= GR_EXEC;
74209+ if (fmode & S_IWOTH)
74210+ reqmode |= GR_WRITE;
74211+ if (fmode & S_IROTH)
74212+ reqmode |= GR_READ;
74213+
74214+ mode =
74215+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74216+ mnt);
74217+
74218+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74219+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74220+ reqmode & GR_READ ? " reading" : "",
74221+ reqmode & GR_WRITE ? " writing" : "",
74222+ reqmode & GR_EXEC ? " executing" : "");
74223+ return reqmode;
74224+	} else if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) {
74227+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74228+ reqmode & GR_READ ? " reading" : "",
74229+ reqmode & GR_WRITE ? " writing" : "",
74230+ reqmode & GR_EXEC ? " executing" : "");
74231+ return 0;
74232+ } else if (unlikely((mode & reqmode) != reqmode))
74233+ return 0;
74234+
74235+ return reqmode;
74236+}
74237+
74238+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
74239+{
74240+ __u32 mode;
74241+
74242+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
74243+
74244+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74245+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
74246+ return mode;
74247+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74248+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
74249+ return 0;
74250+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74251+ return 0;
74252+
74253+ return (reqmode);
74254+}
74255+
74256+__u32
74257+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
74258+{
74259+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
74260+}
74261+
74262+__u32
74263+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
74264+{
74265+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
74266+}
74267+
74268+__u32
74269+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
74270+{
74271+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
74272+}
74273+
74274+__u32
74275+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
74276+{
74277+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
74278+}
74279+
74280+__u32
74281+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
74282+ umode_t *modeptr)
74283+{
74284+ umode_t mode;
74285+
74286+ *modeptr &= ~gr_acl_umask();
74287+ mode = *modeptr;
74288+
74289+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
74290+ return 1;
74291+
74292+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
74293+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
74294+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
74295+ GR_CHMOD_ACL_MSG);
74296+ } else {
74297+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
74298+ }
74299+}
74300+
74301+__u32
74302+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
74303+{
74304+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
74305+}
74306+
74307+__u32
74308+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
74309+{
74310+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
74311+}
74312+
74313+__u32
74314+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
74315+{
74316+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
74317+}
74318+
74319+__u32
74320+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
74321+{
74322+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
74323+}
74324+
74325+__u32
74326+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
74327+{
74328+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
74329+ GR_UNIXCONNECT_ACL_MSG);
74330+}
74331+
74332+/* hardlinks require at minimum create and link permission;
74333+   any additional privilege required is based on the
74334+   privilege of the file being linked to
74335+*/
74336+__u32
74337+gr_acl_handle_link(const struct dentry * new_dentry,
74338+ const struct dentry * parent_dentry,
74339+ const struct vfsmount * parent_mnt,
74340+ const struct dentry * old_dentry,
74341+ const struct vfsmount * old_mnt, const struct filename *to)
74342+{
74343+ __u32 mode;
74344+ __u32 needmode = GR_CREATE | GR_LINK;
74345+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
74346+
74347+ mode =
74348+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
74349+ old_mnt);
74350+
74351+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
74352+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74353+ return mode;
74354+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74355+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74356+ return 0;
74357+ } else if (unlikely((mode & needmode) != needmode))
74358+ return 0;
74359+
74360+ return 1;
74361+}
74362+
74363+__u32
74364+gr_acl_handle_symlink(const struct dentry * new_dentry,
74365+ const struct dentry * parent_dentry,
74366+ const struct vfsmount * parent_mnt, const struct filename *from)
74367+{
74368+ __u32 needmode = GR_WRITE | GR_CREATE;
74369+ __u32 mode;
74370+
74371+ mode =
74372+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
74373+ GR_CREATE | GR_AUDIT_CREATE |
74374+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
74375+
74376+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
74377+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74378+ return mode;
74379+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74380+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74381+ return 0;
74382+ } else if (unlikely((mode & needmode) != needmode))
74383+ return 0;
74384+
74385+ return (GR_WRITE | GR_CREATE);
74386+}
74387+
74388+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
74389+{
74390+ __u32 mode;
74391+
74392+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74393+
74394+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74395+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
74396+ return mode;
74397+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74398+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
74399+ return 0;
74400+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74401+ return 0;
74402+
74403+ return (reqmode);
74404+}
74405+
74406+__u32
74407+gr_acl_handle_mknod(const struct dentry * new_dentry,
74408+ const struct dentry * parent_dentry,
74409+ const struct vfsmount * parent_mnt,
74410+ const int mode)
74411+{
74412+ __u32 reqmode = GR_WRITE | GR_CREATE;
74413+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74414+ reqmode |= GR_SETID;
74415+
74416+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74417+ reqmode, GR_MKNOD_ACL_MSG);
74418+}
74419+
74420+__u32
74421+gr_acl_handle_mkdir(const struct dentry *new_dentry,
74422+ const struct dentry *parent_dentry,
74423+ const struct vfsmount *parent_mnt)
74424+{
74425+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74426+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
74427+}
74428+
74429+#define RENAME_CHECK_SUCCESS(old, new) \
74430+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
74431+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
74432+
74433+int
74434+gr_acl_handle_rename(struct dentry *new_dentry,
74435+ struct dentry *parent_dentry,
74436+ const struct vfsmount *parent_mnt,
74437+ struct dentry *old_dentry,
74438+ struct inode *old_parent_inode,
74439+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
74440+{
74441+ __u32 comp1, comp2;
74442+ int error = 0;
74443+
74444+ if (unlikely(!gr_acl_is_enabled()))
74445+ return 0;
74446+
74447+ if (flags & RENAME_EXCHANGE) {
74448+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74449+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74450+ GR_SUPPRESS, parent_mnt);
74451+ comp2 =
74452+ gr_search_file(old_dentry,
74453+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74454+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74455+ } else if (d_is_negative(new_dentry)) {
74456+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
74457+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
74458+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
74459+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
74460+ GR_DELETE | GR_AUDIT_DELETE |
74461+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74462+ GR_SUPPRESS, old_mnt);
74463+ } else {
74464+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74465+ GR_CREATE | GR_DELETE |
74466+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
74467+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74468+ GR_SUPPRESS, parent_mnt);
74469+ comp2 =
74470+ gr_search_file(old_dentry,
74471+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74472+ GR_DELETE | GR_AUDIT_DELETE |
74473+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74474+ }
74475+
74476+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
74477+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
74478+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74479+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
74480+ && !(comp2 & GR_SUPPRESS)) {
74481+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74482+ error = -EACCES;
74483+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
74484+ error = -EACCES;
74485+
74486+ return error;
74487+}
74488+
74489+void
74490+gr_acl_handle_exit(void)
74491+{
74492+ u16 id;
74493+ char *rolename;
74494+
74495+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
74496+ !(current->role->roletype & GR_ROLE_PERSIST))) {
74497+ id = current->acl_role_id;
74498+ rolename = current->role->rolename;
74499+ gr_set_acls(1);
74500+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
74501+ }
74502+
74503+ gr_put_exec_file(current);
74504+ return;
74505+}
74506+
74507+int
74508+gr_acl_handle_procpidmem(const struct task_struct *task)
74509+{
74510+ if (unlikely(!gr_acl_is_enabled()))
74511+ return 0;
74512+
74513+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
74514+ !(current->acl->mode & GR_POVERRIDE) &&
74515+ !(current->role->roletype & GR_ROLE_GOD))
74516+ return -EACCES;
74517+
74518+ return 0;
74519+}
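
Nearly every handler in this file reduces to the same three-way decision: build a required-access mask, ask the policy engine for the granted bits plus audit/suppress flags, then allow (optionally auditing), deny with a log, or deny silently. A skeleton of that pattern, with made-up flag values standing in for the GR_* bits:

    #include <stdio.h>

    #define Q_READ     0x01
    #define Q_WRITE    0x02
    #define Q_AUDIT    0x10   /* audit the grant */
    #define Q_SUPPRESS 0x20   /* suppress the denial log */

    static unsigned int decide(unsigned int granted, unsigned int reqmode)
    {
        if ((granted & reqmode) == reqmode) {
            if (granted & Q_AUDIT)
                printf("audit: allowed\n");
            return reqmode;                 /* allowed */
        }
        if (!(granted & Q_SUPPRESS))
            printf("log: denied\n");        /* denied, and logged */
        return 0;                           /* denied silently otherwise */
    }

    int main(void)
    {
        decide(Q_READ | Q_WRITE | Q_AUDIT, Q_READ | Q_WRITE);  /* audited allow */
        decide(Q_READ, Q_READ | Q_WRITE);                      /* logged denial */
        return 0;
    }
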
74520diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
74521new file mode 100644
74522index 0000000..f056b81
74523--- /dev/null
74524+++ b/grsecurity/gracl_ip.c
74525@@ -0,0 +1,386 @@
74526+#include <linux/kernel.h>
74527+#include <asm/uaccess.h>
74528+#include <asm/errno.h>
74529+#include <net/sock.h>
74530+#include <linux/file.h>
74531+#include <linux/fs.h>
74532+#include <linux/net.h>
74533+#include <linux/in.h>
74534+#include <linux/skbuff.h>
74535+#include <linux/ip.h>
74536+#include <linux/udp.h>
74537+#include <linux/types.h>
74538+#include <linux/sched.h>
74539+#include <linux/netdevice.h>
74540+#include <linux/inetdevice.h>
74541+#include <linux/gracl.h>
74542+#include <linux/grsecurity.h>
74543+#include <linux/grinternal.h>
74544+
74545+#define GR_BIND 0x01
74546+#define GR_CONNECT 0x02
74547+#define GR_INVERT 0x04
74548+#define GR_BINDOVERRIDE 0x08
74549+#define GR_CONNECTOVERRIDE 0x10
74550+#define GR_SOCK_FAMILY 0x20
74551+
74552+static const char * gr_protocols[IPPROTO_MAX] = {
74553+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
74554+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
74555+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
74556+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
74557+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
74558+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
74559+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
74560+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
74561+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
74562+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
74563+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
74564+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
74565+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
74566+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
74567+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
74568+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
74569+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
74570+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
74571+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
74572+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
74573+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
74574+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
74575+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
74576+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
74577+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
74578+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
74579+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
74580+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
74581+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
74582+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
74583+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
74584+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
74585+ };
74586+
74587+static const char * gr_socktypes[SOCK_MAX] = {
74588+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
74589+ "unknown:7", "unknown:8", "unknown:9", "packet"
74590+ };
74591+
74592+static const char * gr_sockfamilies[AF_MAX+1] = {
74593+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
74594+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
74595+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
74596+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
74597+ };
74598+
74599+const char *
74600+gr_proto_to_name(unsigned char proto)
74601+{
74602+ return gr_protocols[proto];
74603+}
74604+
74605+const char *
74606+gr_socktype_to_name(unsigned char type)
74607+{
74608+ return gr_socktypes[type];
74609+}
74610+
74611+const char *
74612+gr_sockfamily_to_name(unsigned char family)
74613+{
74614+ return gr_sockfamilies[family];
74615+}
74616+
74617+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
74618+
74619+int
74620+gr_search_socket(const int domain, const int type, const int protocol)
74621+{
74622+ struct acl_subject_label *curr;
74623+ const struct cred *cred = current_cred();
74624+
74625+ if (unlikely(!gr_acl_is_enabled()))
74626+ goto exit;
74627+
74628+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74629+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74630+ goto exit; // let the kernel handle it
74631+
74632+ curr = current->acl;
74633+
74634+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74635+		/* the family is allowed; if this is PF_INET, allow it only if
74636+		   the extra sock type/protocol checks pass */
74637+ if (domain == PF_INET)
74638+ goto inet_check;
74639+ goto exit;
74640+ } else {
74641+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74642+ __u32 fakeip = 0;
74643+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74644+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74645+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74646+ gr_to_filename(current->exec_file->f_path.dentry,
74647+ current->exec_file->f_path.mnt) :
74648+ curr->filename, curr->filename,
74649+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74650+ &current->signal->saved_ip);
74651+ goto exit;
74652+ }
74653+ goto exit_fail;
74654+ }
74655+
74656+inet_check:
74657+ /* the rest of this checking is for IPv4 only */
74658+ if (!curr->ips)
74659+ goto exit;
74660+
74661+ if ((curr->ip_type & (1U << type)) &&
74662+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74663+ goto exit;
74664+
74665+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74666+		/* we don't place acls on raw sockets, and sometimes
74667+		   dgram/ip sockets are opened for ioctl and not
74668+		   bind/connect, so we'll fake a bind learn log */
74669+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74670+ __u32 fakeip = 0;
74671+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74672+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74673+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74674+ gr_to_filename(current->exec_file->f_path.dentry,
74675+ current->exec_file->f_path.mnt) :
74676+ curr->filename, curr->filename,
74677+ &fakeip, 0, type,
74678+ protocol, GR_CONNECT, &current->signal->saved_ip);
74679+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74680+ __u32 fakeip = 0;
74681+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74682+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74683+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74684+ gr_to_filename(current->exec_file->f_path.dentry,
74685+ current->exec_file->f_path.mnt) :
74686+ curr->filename, curr->filename,
74687+ &fakeip, 0, type,
74688+ protocol, GR_BIND, &current->signal->saved_ip);
74689+ }
74690+ /* we'll log when they use connect or bind */
74691+ goto exit;
74692+ }
74693+
74694+exit_fail:
74695+ if (domain == PF_INET)
74696+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74697+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74698+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74699+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74700+ gr_socktype_to_name(type), protocol);
74701+
74702+ return 0;
74703+exit:
74704+ return 1;
74705+}
74706+
74707+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74708+{
74709+ if ((ip->mode & mode) &&
74710+ (ip_port >= ip->low) &&
74711+ (ip_port <= ip->high) &&
74712+ ((ntohl(ip_addr) & our_netmask) ==
74713+ (ntohl(our_addr) & our_netmask))
74714+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74715+ && (ip->type & (1U << type))) {
74716+ if (ip->mode & GR_INVERT)
74717+ return 2; // specifically denied
74718+ else
74719+ return 1; // allowed
74720+ }
74721+
74722+ return 0; // not specifically allowed, may continue parsing
74723+}
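
A concrete instance of the check_ip_policy() test above: the packet address and the rule address are both masked with the rule's netmask before comparison, alongside the port-range and proto/type bitmap checks (and GR_INVERT flips a match into an explicit deny). A runnable userspace sketch of the address and port part, using inet_addr() purely for readability:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint32_t rule_addr = ntohl(inet_addr("192.168.1.0"));
        uint32_t rule_mask = ntohl(inet_addr("255.255.255.0"));
        uint32_t pkt_addr  = ntohl(inet_addr("192.168.1.42"));
        unsigned low = 1024, high = 65535, port = 8080;

        /* same shape as the kernel test: mask both sides, range the port */
        int match = (pkt_addr & rule_mask) == (rule_addr & rule_mask) &&
                    port >= low && port <= high;
        printf("match=%d\n", match);        /* prints match=1 */
        return 0;
    }
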
74724+
74725+static int
74726+gr_search_connectbind(const int full_mode, struct sock *sk,
74727+ struct sockaddr_in *addr, const int type)
74728+{
74729+ char iface[IFNAMSIZ] = {0};
74730+ struct acl_subject_label *curr;
74731+ struct acl_ip_label *ip;
74732+ struct inet_sock *isk;
74733+ struct net_device *dev;
74734+ struct in_device *idev;
74735+ unsigned long i;
74736+ int ret;
74737+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74738+ __u32 ip_addr = 0;
74739+ __u32 our_addr;
74740+ __u32 our_netmask;
74741+ char *p;
74742+ __u16 ip_port = 0;
74743+ const struct cred *cred = current_cred();
74744+
74745+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74746+ return 0;
74747+
74748+ curr = current->acl;
74749+ isk = inet_sk(sk);
74750+
74751+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
74752+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74753+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74754+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74755+ struct sockaddr_in saddr;
74756+ int err;
74757+
74758+ saddr.sin_family = AF_INET;
74759+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
74760+ saddr.sin_port = isk->inet_sport;
74761+
74762+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74763+ if (err)
74764+ return err;
74765+
74766+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74767+ if (err)
74768+ return err;
74769+ }
74770+
74771+ if (!curr->ips)
74772+ return 0;
74773+
74774+ ip_addr = addr->sin_addr.s_addr;
74775+ ip_port = ntohs(addr->sin_port);
74776+
74777+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74778+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74779+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74780+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74781+ gr_to_filename(current->exec_file->f_path.dentry,
74782+ current->exec_file->f_path.mnt) :
74783+ curr->filename, curr->filename,
74784+ &ip_addr, ip_port, type,
74785+ sk->sk_protocol, mode, &current->signal->saved_ip);
74786+ return 0;
74787+ }
74788+
74789+ for (i = 0; i < curr->ip_num; i++) {
74790+ ip = *(curr->ips + i);
74791+ if (ip->iface != NULL) {
74792+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
74793+ p = strchr(iface, ':');
74794+ if (p != NULL)
74795+ *p = '\0';
74796+ dev = dev_get_by_name(sock_net(sk), iface);
74797+ if (dev == NULL)
74798+ continue;
74799+ idev = in_dev_get(dev);
74800+ if (idev == NULL) {
74801+ dev_put(dev);
74802+ continue;
74803+ }
74804+ rcu_read_lock();
74805+ for_ifa(idev) {
74806+ if (!strcmp(ip->iface, ifa->ifa_label)) {
74807+ our_addr = ifa->ifa_address;
74808+ our_netmask = 0xffffffff;
74809+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74810+ if (ret == 1) {
74811+ rcu_read_unlock();
74812+ in_dev_put(idev);
74813+ dev_put(dev);
74814+ return 0;
74815+ } else if (ret == 2) {
74816+ rcu_read_unlock();
74817+ in_dev_put(idev);
74818+ dev_put(dev);
74819+ goto denied;
74820+ }
74821+ }
74822+ } endfor_ifa(idev);
74823+ rcu_read_unlock();
74824+ in_dev_put(idev);
74825+ dev_put(dev);
74826+ } else {
74827+ our_addr = ip->addr;
74828+ our_netmask = ip->netmask;
74829+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74830+ if (ret == 1)
74831+ return 0;
74832+ else if (ret == 2)
74833+ goto denied;
74834+ }
74835+ }
74836+
74837+denied:
74838+ if (mode == GR_BIND)
74839+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74840+ else if (mode == GR_CONNECT)
74841+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74842+
74843+ return -EACCES;
74844+}
74845+
74846+int
74847+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
74848+{
74849+ /* always allow disconnection of dgram sockets with connect */
74850+ if (addr->sin_family == AF_UNSPEC)
74851+ return 0;
74852+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
74853+}
74854+
74855+int
74856+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
74857+{
74858+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
74859+}
74860+
74861+int gr_search_listen(struct socket *sock)
74862+{
74863+ struct sock *sk = sock->sk;
74864+ struct sockaddr_in addr;
74865+
74866+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74867+ addr.sin_port = inet_sk(sk)->inet_sport;
74868+
74869+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74870+}
74871+
74872+int gr_search_accept(struct socket *sock)
74873+{
74874+ struct sock *sk = sock->sk;
74875+ struct sockaddr_in addr;
74876+
74877+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74878+ addr.sin_port = inet_sk(sk)->inet_sport;
74879+
74880+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74881+}
74882+
74883+int
74884+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
74885+{
74886+ if (addr)
74887+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
74888+ else {
74889+ struct sockaddr_in sin;
74890+ const struct inet_sock *inet = inet_sk(sk);
74891+
74892+ sin.sin_addr.s_addr = inet->inet_daddr;
74893+ sin.sin_port = inet->inet_dport;
74894+
74895+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74896+ }
74897+}
74898+
74899+int
74900+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
74901+{
74902+ struct sockaddr_in sin;
74903+
74904+ if (unlikely(skb->len < sizeof (struct udphdr)))
74905+ return 0; // skip this packet
74906+
74907+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
74908+ sin.sin_port = udp_hdr(skb)->source;
74909+
74910+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74911+}
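
The subject's socket policy consulted throughout this file is stored as fixed-size bitmaps, indexed everywhere with the same arr[n / 32] & (1U << (n % 32)) pattern for families, types, and protocols. That indexing in isolation, with illustrative protocol numbers:

    #include <stdio.h>
    #include <stdint.h>

    #define MAXPROTO 256

    static uint32_t proto_bits[MAXPROTO / 32];   /* one bit per protocol */

    static void allow(unsigned n)
    {
        proto_bits[n / 32] |= 1U << (n % 32);
    }

    static int allowed(unsigned n)
    {
        return !!(proto_bits[n / 32] & (1U << (n % 32)));
    }

    int main(void)
    {
        allow(6);                                    /* tcp */
        allow(17);                                   /* udp */
        printf("%d %d\n", allowed(6), allowed(1));   /* prints: 1 0 */
        return 0;
    }
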
74912diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
74913new file mode 100644
74914index 0000000..25f54ef
74915--- /dev/null
74916+++ b/grsecurity/gracl_learn.c
74917@@ -0,0 +1,207 @@
74918+#include <linux/kernel.h>
74919+#include <linux/mm.h>
74920+#include <linux/sched.h>
74921+#include <linux/poll.h>
74922+#include <linux/string.h>
74923+#include <linux/file.h>
74924+#include <linux/types.h>
74925+#include <linux/vmalloc.h>
74926+#include <linux/grinternal.h>
74927+
74928+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
74929+ size_t count, loff_t *ppos);
74930+extern int gr_acl_is_enabled(void);
74931+
74932+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
74933+static int gr_learn_attached;
74934+
74935+/* use a 512k buffer */
74936+#define LEARN_BUFFER_SIZE (512 * 1024)
74937+
74938+static DEFINE_SPINLOCK(gr_learn_lock);
74939+static DEFINE_MUTEX(gr_learn_user_mutex);
74940+
74941+/* we need to maintain two buffers, so that the kernel context of grlearn
74942+   uses a mutex around the userspace copying, while the other kernel contexts
74943+   use a spinlock when copying into the buffer, since they cannot sleep
74944+*/
74945+static char *learn_buffer;
74946+static char *learn_buffer_user;
74947+static int learn_buffer_len;
74948+static int learn_buffer_user_len;
74949+
74950+static ssize_t
74951+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
74952+{
74953+ DECLARE_WAITQUEUE(wait, current);
74954+ ssize_t retval = 0;
74955+
74956+ add_wait_queue(&learn_wait, &wait);
74957+ set_current_state(TASK_INTERRUPTIBLE);
74958+ do {
74959+ mutex_lock(&gr_learn_user_mutex);
74960+ spin_lock(&gr_learn_lock);
74961+ if (learn_buffer_len)
74962+ break;
74963+ spin_unlock(&gr_learn_lock);
74964+ mutex_unlock(&gr_learn_user_mutex);
74965+ if (file->f_flags & O_NONBLOCK) {
74966+ retval = -EAGAIN;
74967+ goto out;
74968+ }
74969+ if (signal_pending(current)) {
74970+ retval = -ERESTARTSYS;
74971+ goto out;
74972+ }
74973+
74974+ schedule();
74975+ } while (1);
74976+
74977+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
74978+ learn_buffer_user_len = learn_buffer_len;
74979+ retval = learn_buffer_len;
74980+ learn_buffer_len = 0;
74981+
74982+ spin_unlock(&gr_learn_lock);
74983+
74984+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
74985+ retval = -EFAULT;
74986+
74987+ mutex_unlock(&gr_learn_user_mutex);
74988+out:
74989+ set_current_state(TASK_RUNNING);
74990+ remove_wait_queue(&learn_wait, &wait);
74991+ return retval;
74992+}
74993+
74994+static unsigned int
74995+poll_learn(struct file * file, poll_table * wait)
74996+{
74997+ poll_wait(file, &learn_wait, wait);
74998+
74999+ if (learn_buffer_len)
75000+ return (POLLIN | POLLRDNORM);
75001+
75002+ return 0;
75003+}
75004+
75005+void
75006+gr_clear_learn_entries(void)
75007+{
75008+ char *tmp;
75009+
75010+ mutex_lock(&gr_learn_user_mutex);
75011+ spin_lock(&gr_learn_lock);
75012+ tmp = learn_buffer;
75013+ learn_buffer = NULL;
75014+ spin_unlock(&gr_learn_lock);
75015+ if (tmp)
75016+ vfree(tmp);
75017+ if (learn_buffer_user != NULL) {
75018+ vfree(learn_buffer_user);
75019+ learn_buffer_user = NULL;
75020+ }
75021+ learn_buffer_len = 0;
75022+ mutex_unlock(&gr_learn_user_mutex);
75023+
75024+ return;
75025+}
75026+
75027+void
75028+gr_add_learn_entry(const char *fmt, ...)
75029+{
75030+ va_list args;
75031+ unsigned int len;
75032+
75033+ if (!gr_learn_attached)
75034+ return;
75035+
75036+ spin_lock(&gr_learn_lock);
75037+
75038+ /* leave a gap at the end so we know when it's "full" but don't have to
75039+ compute the exact length of the string we're trying to append
75040+ */
75041+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
75042+ spin_unlock(&gr_learn_lock);
75043+ wake_up_interruptible(&learn_wait);
75044+ return;
75045+ }
75046+ if (learn_buffer == NULL) {
75047+ spin_unlock(&gr_learn_lock);
75048+ return;
75049+ }
75050+
75051+ va_start(args, fmt);
75052+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
75053+ va_end(args);
75054+
75055+ learn_buffer_len += len + 1;
75056+
75057+ spin_unlock(&gr_learn_lock);
75058+ wake_up_interruptible(&learn_wait);
75059+
75060+ return;
75061+}
75062+
75063+static int
75064+open_learn(struct inode *inode, struct file *file)
75065+{
75066+ if (file->f_mode & FMODE_READ && gr_learn_attached)
75067+ return -EBUSY;
75068+ if (file->f_mode & FMODE_READ) {
75069+ int retval = 0;
75070+ mutex_lock(&gr_learn_user_mutex);
75071+ if (learn_buffer == NULL)
75072+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
75073+ if (learn_buffer_user == NULL)
75074+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
75075+ if (learn_buffer == NULL) {
75076+ retval = -ENOMEM;
75077+ goto out_error;
75078+ }
75079+ if (learn_buffer_user == NULL) {
75080+ retval = -ENOMEM;
75081+ goto out_error;
75082+ }
75083+ learn_buffer_len = 0;
75084+ learn_buffer_user_len = 0;
75085+ gr_learn_attached = 1;
75086+out_error:
75087+ mutex_unlock(&gr_learn_user_mutex);
75088+ return retval;
75089+ }
75090+ return 0;
75091+}
75092+
75093+static int
75094+close_learn(struct inode *inode, struct file *file)
75095+{
75096+ if (file->f_mode & FMODE_READ) {
75097+ char *tmp = NULL;
75098+ mutex_lock(&gr_learn_user_mutex);
75099+ spin_lock(&gr_learn_lock);
75100+ tmp = learn_buffer;
75101+ learn_buffer = NULL;
75102+ spin_unlock(&gr_learn_lock);
75103+ if (tmp)
75104+ vfree(tmp);
75105+ if (learn_buffer_user != NULL) {
75106+ vfree(learn_buffer_user);
75107+ learn_buffer_user = NULL;
75108+ }
75109+ learn_buffer_len = 0;
75110+ learn_buffer_user_len = 0;
75111+ gr_learn_attached = 0;
75112+ mutex_unlock(&gr_learn_user_mutex);
75113+ }
75114+
75115+ return 0;
75116+}
75117+
75118+const struct file_operations grsec_fops = {
75119+ .read = read_learn,
75120+ .write = write_grsec_handler,
75121+ .open = open_learn,
75122+ .release = close_learn,
75123+ .poll = poll_learn,
75124+};
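
The file above implements the two-buffer handoff described in its opening comment: producers append under a spinlock they may take from atomic context, while the single reader swaps the filled region out under a sleeping mutex before copying it to userspace. A hedged pthread re-creation of that shape (wakeups, the slack gap, and O_NONBLOCK handling omitted):

    #include <pthread.h>
    #include <string.h>

    #define BUFSZ (512 * 1024)

    static pthread_spinlock_t prod_lock;     /* analogue of gr_learn_lock */
    static pthread_mutex_t cons_lock = PTHREAD_MUTEX_INITIALIZER;
    static char buf[BUFSZ], user_copy[BUFSZ];
    static size_t len;

    static void append(const char *s)        /* producer: cannot sleep */
    {
        size_t n = strlen(s);

        pthread_spin_lock(&prod_lock);
        if (len + n < sizeof(buf)) {
            memcpy(buf + len, s, n);
            len += n;
        }
        pthread_spin_unlock(&prod_lock);
    }

    static size_t drain(void)                /* single consumer: may sleep */
    {
        size_t n;

        pthread_mutex_lock(&cons_lock);
        pthread_spin_lock(&prod_lock);
        n = len;                             /* grab the filled region... */
        memcpy(user_copy, buf, n);
        len = 0;
        pthread_spin_unlock(&prod_lock);
        /* ...then push user_copy to the slow consumer outside the spinlock */
        pthread_mutex_unlock(&cons_lock);
        return n;
    }

    int main(void)
    {
        pthread_spin_init(&prod_lock, PTHREAD_PROCESS_PRIVATE);
        append("subject /bin/foo rx\n");
        return drain() ? 0 : 1;
    }
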
75125diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
75126new file mode 100644
75127index 0000000..fd26052
75128--- /dev/null
75129+++ b/grsecurity/gracl_policy.c
75130@@ -0,0 +1,1781 @@
75131+#include <linux/kernel.h>
75132+#include <linux/module.h>
75133+#include <linux/sched.h>
75134+#include <linux/mm.h>
75135+#include <linux/file.h>
75136+#include <linux/fs.h>
75137+#include <linux/namei.h>
75138+#include <linux/mount.h>
75139+#include <linux/tty.h>
75140+#include <linux/proc_fs.h>
75141+#include <linux/lglock.h>
75142+#include <linux/slab.h>
75143+#include <linux/vmalloc.h>
75144+#include <linux/types.h>
75145+#include <linux/sysctl.h>
75146+#include <linux/netdevice.h>
75147+#include <linux/ptrace.h>
75148+#include <linux/gracl.h>
75149+#include <linux/gralloc.h>
75150+#include <linux/security.h>
75151+#include <linux/grinternal.h>
75152+#include <linux/pid_namespace.h>
75153+#include <linux/stop_machine.h>
75154+#include <linux/fdtable.h>
75155+#include <linux/percpu.h>
75156+#include <linux/lglock.h>
75157+#include <linux/hugetlb.h>
75158+#include <linux/posix-timers.h>
75159+#include "../fs/mount.h"
75160+
75161+#include <asm/uaccess.h>
75162+#include <asm/errno.h>
75163+#include <asm/mman.h>
75164+
75165+extern struct gr_policy_state *polstate;
75166+
75167+#define FOR_EACH_ROLE_START(role) \
75168+ role = polstate->role_list; \
75169+ while (role) {
75170+
75171+#define FOR_EACH_ROLE_END(role) \
75172+ role = role->prev; \
75173+ }
75174+
75175+struct path gr_real_root;
75176+
75177+extern struct gr_alloc_state *current_alloc_state;
75178+
75179+u16 acl_sp_role_value;
75180+
75181+static DEFINE_MUTEX(gr_dev_mutex);
75182+
75183+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
75184+extern void gr_clear_learn_entries(void);
75185+
75186+struct gr_arg *gr_usermode __read_only;
75187+unsigned char *gr_system_salt __read_only;
75188+unsigned char *gr_system_sum __read_only;
75189+
75190+static unsigned int gr_auth_attempts = 0;
75191+static unsigned long gr_auth_expires = 0UL;
75192+
75193+struct acl_object_label *fakefs_obj_rw;
75194+struct acl_object_label *fakefs_obj_rwx;
75195+
75196+extern int gr_init_uidset(void);
75197+extern void gr_free_uidset(void);
75198+extern void gr_remove_uid(uid_t uid);
75199+extern int gr_find_uid(uid_t uid);
75200+
75201+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
75202+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
75203+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
75204+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
75205+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
75206+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
75207+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
75208+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
75209+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
75210+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
75211+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
75212+extern void assign_special_role(const char *rolename);
75213+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
75214+extern int gr_rbac_disable(void *unused);
75215+extern void gr_enable_rbac_system(void);
75216+
75217+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
75218+{
75219+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
75220+ return -EFAULT;
75221+
75222+ return 0;
75223+}
75224+
75225+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
75226+{
75227+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
75228+ return -EFAULT;
75229+
75230+ return 0;
75231+}
75232+
75233+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
75234+{
75235+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
75236+ return -EFAULT;
75237+
75238+ return 0;
75239+}
75240+
75241+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
75242+{
75243+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
75244+ return -EFAULT;
75245+
75246+ return 0;
75247+}
75248+
75249+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
75250+{
75251+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
75252+ return -EFAULT;
75253+
75254+ return 0;
75255+}
75256+
75257+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
75258+{
75259+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
75260+ return -EFAULT;
75261+
75262+ return 0;
75263+}
75264+
75265+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
75266+{
75267+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
75268+ return -EFAULT;
75269+
75270+ return 0;
75271+}
75272+
75273+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
75274+{
75275+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
75276+ return -EFAULT;
75277+
75278+ return 0;
75279+}
75280+
75281+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
75282+{
75283+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
75284+ return -EFAULT;
75285+
75286+ return 0;
75287+}
75288+
75289+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
75290+{
75291+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
75292+ return -EFAULT;
75293+
75294+ if ((uwrap->version != GRSECURITY_VERSION) ||
75295+ (uwrap->size != sizeof(struct gr_arg)))
75296+ return -EINVAL;
75297+
75298+ return 0;
75299+}
75300+
75301+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
75302+{
75303+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
75304+ return -EFAULT;
75305+
75306+ return 0;
75307+}
75308+
75309+static size_t get_gr_arg_wrapper_size_normal(void)
75310+{
75311+ return sizeof(struct gr_arg_wrapper);
75312+}
75313+
75314+#ifdef CONFIG_COMPAT
75315+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
75316+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
75317+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
75318+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
75319+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
75320+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
75321+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
75322+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
75323+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
75324+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
75325+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
75326+extern size_t get_gr_arg_wrapper_size_compat(void);
75327+
75328+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
75329+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
75330+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
75331+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
75332+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
75333+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
75334+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
75335+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
75336+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
75337+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
75338+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
75339+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
75340+
75341+#else
75342+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
75343+#define copy_gr_arg copy_gr_arg_normal
75344+#define copy_gr_hash_struct copy_gr_hash_struct_normal
75345+#define copy_acl_object_label copy_acl_object_label_normal
75346+#define copy_acl_subject_label copy_acl_subject_label_normal
75347+#define copy_acl_role_label copy_acl_role_label_normal
75348+#define copy_acl_ip_label copy_acl_ip_label_normal
75349+#define copy_pointer_from_array copy_pointer_from_array_normal
75350+#define copy_sprole_pw copy_sprole_pw_normal
75351+#define copy_role_transition copy_role_transition_normal
75352+#define copy_role_allowed_ip copy_role_allowed_ip_normal
75353+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
75354+#endif
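
The indirection above is how one policy-loading path serves both native and 32-bit compat userspace: with CONFIG_COMPAT the copy_* names are function pointers, written only under pax_open_kernel() in write_grsec_handler() below and kept read-only the rest of the time; without it they collapse to compile-time aliases of the *_normal helpers. A minimal userspace sketch of the same dispatch shape, where copy_native/copy_compat and the short-vs-int layouts are purely illustrative stand-ins:

    #include <stdio.h>
    #include <string.h>

    struct arg { int version; };

    /* stand-ins for the *_normal and *_compat copy helpers */
    static int copy_native(struct arg *dst, const void *src)
    {
        memcpy(dst, src, sizeof(*dst));
        return 0;
    }

    static int copy_compat(struct arg *dst, const void *src)
    {
        dst->version = *(const short *)src;    /* narrower compat layout */
        return 0;
    }

    /* one pointer per helper, selected once per request */
    static int (*copy_arg)(struct arg *, const void *);

    int main(void)
    {
        int is_compat = 0;                     /* pretend: native caller */
        short s = 7;
        int n = 7;
        struct arg a;

        copy_arg = is_compat ? copy_compat : copy_native;
        copy_arg(&a, is_compat ? (const void *)&s : (const void *)&n);
        printf("version=%d\n", a.version);     /* prints: version=7 */
        return 0;
    }
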
75355+
75356+static struct acl_subject_label *
75357+lookup_subject_map(const struct acl_subject_label *userp)
75358+{
75359+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
75360+ struct subject_map *match;
75361+
75362+ match = polstate->subj_map_set.s_hash[index];
75363+
75364+ while (match && match->user != userp)
75365+ match = match->next;
75366+
75367+ if (match != NULL)
75368+ return match->kernel;
75369+ else
75370+ return NULL;
75371+}
75372+
75373+static void
75374+insert_subj_map_entry(struct subject_map *subjmap)
75375+{
75376+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
75377+ struct subject_map **curr;
75378+
75379+ subjmap->prev = NULL;
75380+
75381+ curr = &polstate->subj_map_set.s_hash[index];
75382+ if (*curr != NULL)
75383+ (*curr)->prev = subjmap;
75384+
75385+ subjmap->next = *curr;
75386+ *curr = subjmap;
75387+
75388+ return;
75389+}
75390+
75391+static void
75392+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75393+{
75394+ unsigned int index =
75395+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
75396+ struct acl_role_label **curr;
75397+ struct acl_role_label *tmp, *tmp2;
75398+
75399+ curr = &polstate->acl_role_set.r_hash[index];
75400+
75401+ /* simple case, slot is empty, just set it to our role */
75402+ if (*curr == NULL) {
75403+ *curr = role;
75404+ } else {
75405+ /* example:
75406+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
75407+ 2 -> 3
75408+ */
75409+ /* first check to see if we can already be reached via this slot */
75410+ tmp = *curr;
75411+ while (tmp && tmp != role)
75412+ tmp = tmp->next;
75413+ if (tmp == role) {
75414+ /* we don't need to add ourselves to this slot's chain */
75415+ return;
75416+ }
75417+ /* we need to add ourselves to this chain, two cases */
75418+ if (role->next == NULL) {
75419+ /* simple case, append the current chain to our role */
75420+ role->next = *curr;
75421+ *curr = role;
75422+ } else {
75423+ /* 1 -> 2 -> 3 -> 4
75424+ 2 -> 3 -> 4
75425+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
75426+ */
75427+ /* trickier case: walk our role's chain until we find
75428+ the role for the start of the current slot's chain */
75429+ tmp = role;
75430+ tmp2 = *curr;
75431+ while (tmp->next && tmp->next != tmp2)
75432+ tmp = tmp->next;
75433+ if (tmp->next == tmp2) {
75434+ /* from example above, we found 3, so just
75435+ replace this slot's chain with ours */
75436+ *curr = role;
75437+ } else {
75438+ /* we didn't find a subset of our role's chain
75439+ in the current slot's chain, so append their
75440+ chain to ours, and set us as the first role in
75441+ the slot's chain
75442+
75443+			   we could fold this case into the case above,
75444+			   but we keep it explicit for clarity
75445+ */
75446+ tmp->next = tmp2;
75447+ *curr = role;
75448+ }
75449+ }
75450+ }
75451+
75452+ return;
75453+}
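
The chain logic above has three non-trivial outcomes per bucket: the role is already reachable from the bucket, the role is prepended with the bucket's chain appended to it, or (for a role that already carries a chain) the bucket's chain turns out to be a suffix of the role's chain and the bucket head is simply replaced, with plain concatenation as the fallback. A compact userspace re-creation of that slot insertion, using a bare next-linked node in place of struct acl_role_label:

    #include <stdio.h>
    #include <stddef.h>

    struct node { const char *name; struct node *next; };

    /* mirror of the slot-insertion logic above, on plain nodes */
    static void slot_insert(struct node **slot, struct node *role)
    {
        struct node *tmp, *tmp2;

        if (*slot == NULL) {                  /* empty bucket */
            *slot = role;
            return;
        }
        for (tmp = *slot; tmp && tmp != role; tmp = tmp->next)
            ;
        if (tmp == role)                      /* already reachable */
            return;
        if (role->next == NULL) {             /* prepend, append bucket chain */
            role->next = *slot;
            *slot = role;
            return;
        }
        tmp = role;                           /* suffix fold or concatenation */
        tmp2 = *slot;
        while (tmp->next && tmp->next != tmp2)
            tmp = tmp->next;
        if (tmp->next != tmp2)
            tmp->next = tmp2;
        *slot = role;
    }

    int main(void)
    {
        struct node n3 = { "r3", NULL }, n2 = { "r2", &n3 }, n1 = { "r1", &n2 };
        struct node *slot = &n2;              /* bucket currently: r2 -> r3 */

        slot_insert(&slot, &n1);              /* r1's chain ends in r2 -> r3 */
        for (struct node *p = slot; p; p = p->next)
            printf("%s ", p->name);           /* prints: r1 r2 r3 */
        printf("\n");
        return 0;
    }
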
75454+
75455+static void
75456+insert_acl_role_label(struct acl_role_label *role)
75457+{
75458+ int i;
75459+
75460+ if (polstate->role_list == NULL) {
75461+ polstate->role_list = role;
75462+ role->prev = NULL;
75463+ } else {
75464+ role->prev = polstate->role_list;
75465+ polstate->role_list = role;
75466+ }
75467+
75468+ /* used for hash chains */
75469+ role->next = NULL;
75470+
75471+ if (role->roletype & GR_ROLE_DOMAIN) {
75472+ for (i = 0; i < role->domain_child_num; i++)
75473+ __insert_acl_role_label(role, role->domain_children[i]);
75474+ } else
75475+ __insert_acl_role_label(role, role->uidgid);
75476+}
75477+
75478+static int
75479+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
75480+{
75481+ struct name_entry **curr, *nentry;
75482+ struct inodev_entry *ientry;
75483+ unsigned int len = strlen(name);
75484+ unsigned int key = full_name_hash(name, len);
75485+ unsigned int index = key % polstate->name_set.n_size;
75486+
75487+ curr = &polstate->name_set.n_hash[index];
75488+
75489+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75490+ curr = &((*curr)->next);
75491+
75492+ if (*curr != NULL)
75493+ return 1;
75494+
75495+ nentry = acl_alloc(sizeof (struct name_entry));
75496+ if (nentry == NULL)
75497+ return 0;
75498+ ientry = acl_alloc(sizeof (struct inodev_entry));
75499+ if (ientry == NULL)
75500+ return 0;
75501+ ientry->nentry = nentry;
75502+
75503+ nentry->key = key;
75504+ nentry->name = name;
75505+ nentry->inode = inode;
75506+ nentry->device = device;
75507+ nentry->len = len;
75508+ nentry->deleted = deleted;
75509+
75510+ nentry->prev = NULL;
75511+ curr = &polstate->name_set.n_hash[index];
75512+ if (*curr != NULL)
75513+ (*curr)->prev = nentry;
75514+ nentry->next = *curr;
75515+ *curr = nentry;
75516+
75517+ /* insert us into the table searchable by inode/dev */
75518+ __insert_inodev_entry(polstate, ientry);
75519+
75520+ return 1;
75521+}
75522+
75523+/* we allocate chained hash tables, so the optimal size is where the load factor lambda ~ 1 */
75524+
75525+static void *
75526+create_table(__u32 * len, int elementsize)
75527+{
75528+ unsigned int table_sizes[] = {
75529+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75530+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75531+ 4194301, 8388593, 16777213, 33554393, 67108859
75532+ };
75533+ void *newtable = NULL;
75534+ unsigned int pwr = 0;
75535+
75536+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75537+ table_sizes[pwr] <= *len)
75538+ pwr++;
75539+
75540+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75541+ return newtable;
75542+
75543+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75544+ newtable =
75545+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75546+ else
75547+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75548+
75549+ *len = table_sizes[pwr];
75550+
75551+ return newtable;
75552+}
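
create_table() sizes every chained hash table to the first prime strictly larger than the requested element count, which keeps the expected load factor (the lambda ~ 1 in the comment above) at or below one entry per bucket, and it falls back from kmalloc() to vmalloc() once the table outgrows a page. A quick illustration of the size selection on a shortened copy of the prime table:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sizes[] = { 7, 13, 31, 61, 127, 251, 509, 1021 };
        unsigned int want = 300, pwr = 0;

        /* same scan as create_table: first prime strictly above want */
        while (pwr < sizeof(sizes) / sizeof(sizes[0]) - 1 && sizes[pwr] <= want)
            pwr++;
        printf("%u objects -> %u buckets (load ~ %.2f)\n",
               want, sizes[pwr], (double)want / sizes[pwr]);
        /* prints: 300 objects -> 509 buckets (load ~ 0.59) */
        return 0;
    }
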
75553+
75554+static int
75555+init_variables(const struct gr_arg *arg, bool reload)
75556+{
75557+ struct task_struct *reaper = init_pid_ns.child_reaper;
75558+ unsigned int stacksize;
75559+
75560+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
75561+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75562+ polstate->name_set.n_size = arg->role_db.num_objects;
75563+ polstate->inodev_set.i_size = arg->role_db.num_objects;
75564+
75565+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
75566+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
75567+ return 1;
75568+
75569+ if (!reload) {
75570+ if (!gr_init_uidset())
75571+ return 1;
75572+ }
75573+
75574+ /* set up the stack that holds allocation info */
75575+
75576+ stacksize = arg->role_db.num_pointers + 5;
75577+
75578+ if (!acl_alloc_stack_init(stacksize))
75579+ return 1;
75580+
75581+ if (!reload) {
75582+ /* grab reference for the real root dentry and vfsmount */
75583+ get_fs_root(reaper->fs, &gr_real_root);
75584+
75585+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75586+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
75587+#endif
75588+
75589+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75590+ if (fakefs_obj_rw == NULL)
75591+ return 1;
75592+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75593+
75594+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75595+ if (fakefs_obj_rwx == NULL)
75596+ return 1;
75597+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75598+ }
75599+
75600+ polstate->subj_map_set.s_hash =
75601+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
75602+ polstate->acl_role_set.r_hash =
75603+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
75604+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
75605+ polstate->inodev_set.i_hash =
75606+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
75607+
75608+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
75609+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
75610+ return 1;
75611+
75612+ memset(polstate->subj_map_set.s_hash, 0,
75613+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
75614+ memset(polstate->acl_role_set.r_hash, 0,
75615+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
75616+ memset(polstate->name_set.n_hash, 0,
75617+ sizeof (struct name_entry *) * polstate->name_set.n_size);
75618+ memset(polstate->inodev_set.i_hash, 0,
75619+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75620+
75621+ return 0;
75622+}
75623+
75624+/* free information not needed after startup;
75625+   currently this contains the user->kernel pointer mappings for subjects
75626+*/
75627+
75628+static void
75629+free_init_variables(void)
75630+{
75631+ __u32 i;
75632+
75633+ if (polstate->subj_map_set.s_hash) {
75634+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75635+ if (polstate->subj_map_set.s_hash[i]) {
75636+ kfree(polstate->subj_map_set.s_hash[i]);
75637+ polstate->subj_map_set.s_hash[i] = NULL;
75638+ }
75639+ }
75640+
75641+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75642+ PAGE_SIZE)
75643+ kfree(polstate->subj_map_set.s_hash);
75644+ else
75645+ vfree(polstate->subj_map_set.s_hash);
75646+ }
75647+
75648+ return;
75649+}
75650+
75651+static void
75652+free_variables(bool reload)
75653+{
75654+ struct acl_subject_label *s;
75655+ struct acl_role_label *r;
75656+ struct task_struct *task, *task2;
75657+ unsigned int x;
75658+
75659+ if (!reload) {
75660+ gr_clear_learn_entries();
75661+
75662+ read_lock(&tasklist_lock);
75663+ do_each_thread(task2, task) {
75664+ task->acl_sp_role = 0;
75665+ task->acl_role_id = 0;
75666+ task->inherited = 0;
75667+ task->acl = NULL;
75668+ task->role = NULL;
75669+ } while_each_thread(task2, task);
75670+ read_unlock(&tasklist_lock);
75671+
75672+ kfree(fakefs_obj_rw);
75673+ fakefs_obj_rw = NULL;
75674+ kfree(fakefs_obj_rwx);
75675+ fakefs_obj_rwx = NULL;
75676+
75677+ /* release the reference to the real root dentry and vfsmount */
75678+ path_put(&gr_real_root);
75679+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75680+ }
75681+
75682+ /* free all object hash tables */
75683+
75684+ FOR_EACH_ROLE_START(r)
75685+ if (r->subj_hash == NULL)
75686+ goto next_role;
75687+ FOR_EACH_SUBJECT_START(r, s, x)
75688+ if (s->obj_hash == NULL)
75689+ break;
75690+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75691+ kfree(s->obj_hash);
75692+ else
75693+ vfree(s->obj_hash);
75694+ FOR_EACH_SUBJECT_END(s, x)
75695+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75696+ if (s->obj_hash == NULL)
75697+ break;
75698+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75699+ kfree(s->obj_hash);
75700+ else
75701+ vfree(s->obj_hash);
75702+ FOR_EACH_NESTED_SUBJECT_END(s)
75703+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75704+ kfree(r->subj_hash);
75705+ else
75706+ vfree(r->subj_hash);
75707+ r->subj_hash = NULL;
75708+next_role:
75709+ FOR_EACH_ROLE_END(r)
75710+
75711+ acl_free_all();
75712+
75713+ if (polstate->acl_role_set.r_hash) {
75714+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75715+ PAGE_SIZE)
75716+ kfree(polstate->acl_role_set.r_hash);
75717+ else
75718+ vfree(polstate->acl_role_set.r_hash);
75719+ }
75720+ if (polstate->name_set.n_hash) {
75721+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75722+ PAGE_SIZE)
75723+ kfree(polstate->name_set.n_hash);
75724+ else
75725+ vfree(polstate->name_set.n_hash);
75726+ }
75727+
75728+ if (polstate->inodev_set.i_hash) {
75729+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75730+ PAGE_SIZE)
75731+ kfree(polstate->inodev_set.i_hash);
75732+ else
75733+ vfree(polstate->inodev_set.i_hash);
75734+ }
75735+
75736+ if (!reload)
75737+ gr_free_uidset();
75738+
75739+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75740+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75741+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75742+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75743+
75744+ polstate->default_role = NULL;
75745+ polstate->kernel_role = NULL;
75746+ polstate->role_list = NULL;
75747+
75748+ return;
75749+}
75750+
75751+static struct acl_subject_label *
75752+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75753+
75754+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75755+{
75756+ unsigned int len = strnlen_user(*name, maxlen);
75757+ char *tmp;
75758+
75759+ if (!len || len >= maxlen)
75760+ return -EINVAL;
75761+
75762+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75763+ return -ENOMEM;
75764+
75765+ if (copy_from_user(tmp, *name, len))
75766+ return -EFAULT;
75767+
75768+ tmp[len-1] = '\0';
75769+ *name = tmp;
75770+
75771+ return 0;
75772+}
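
alloc_and_copy_string() leans on strnlen_user(), which (as I understand the interface) returns the string length including the terminating NUL and 0 on a faulting pointer, so !len rejects bad pointers and len >= maxlen rejects unterminated or over-long strings; the final tmp[len-1] = '\0' guarantees termination regardless. A userspace analogue under those assumptions, with strnlen standing in for strnlen_user:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* userspace analogue of alloc_and_copy_string(); strnlen_user also
       returns 0 on a faulting pointer, a case with no equivalent here */
    static char *dup_bounded(const char *src, size_t maxlen)
    {
        size_t len = strnlen(src, maxlen) + 1;   /* length including NUL */
        char *tmp;

        if (len >= maxlen)                       /* unterminated or too long */
            return NULL;
        tmp = malloc(len);
        if (tmp == NULL)
            return NULL;
        memcpy(tmp, src, len);                   /* the copy_from_user step */
        tmp[len - 1] = '\0';                     /* force termination anyway */
        return tmp;
    }

    int main(void)
    {
        char *s = dup_bounded("/bin/sh", 4096);
        printf("%s\n", s ? s : "(rejected)");    /* prints: /bin/sh */
        free(s);
        return 0;
    }
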
75773+
75774+static int
75775+copy_user_glob(struct acl_object_label *obj)
75776+{
75777+ struct acl_object_label *g_tmp, **guser;
75778+ int error;
75779+
75780+ if (obj->globbed == NULL)
75781+ return 0;
75782+
75783+ guser = &obj->globbed;
75784+ while (*guser) {
75785+ g_tmp = (struct acl_object_label *)
75786+ acl_alloc(sizeof (struct acl_object_label));
75787+ if (g_tmp == NULL)
75788+ return -ENOMEM;
75789+
75790+ if (copy_acl_object_label(g_tmp, *guser))
75791+ return -EFAULT;
75792+
75793+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
75794+ if (error)
75795+ return error;
75796+
75797+ *guser = g_tmp;
75798+ guser = &(g_tmp->next);
75799+ }
75800+
75801+ return 0;
75802+}
75803+
75804+static int
75805+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75806+ struct acl_role_label *role)
75807+{
75808+ struct acl_object_label *o_tmp;
75809+ int ret;
75810+
75811+ while (userp) {
75812+ if ((o_tmp = (struct acl_object_label *)
75813+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75814+ return -ENOMEM;
75815+
75816+ if (copy_acl_object_label(o_tmp, userp))
75817+ return -EFAULT;
75818+
75819+ userp = o_tmp->prev;
75820+
75821+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
75822+ if (ret)
75823+ return ret;
75824+
75825+ insert_acl_obj_label(o_tmp, subj);
75826+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75827+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75828+ return -ENOMEM;
75829+
75830+ ret = copy_user_glob(o_tmp);
75831+ if (ret)
75832+ return ret;
75833+
75834+ if (o_tmp->nested) {
75835+ int already_copied;
75836+
75837+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
75838+ if (IS_ERR(o_tmp->nested))
75839+ return PTR_ERR(o_tmp->nested);
75840+
75841+ /* insert into nested subject list if we haven't copied this one yet
75842+ to prevent duplicate entries */
75843+ if (!already_copied) {
75844+ o_tmp->nested->next = role->hash->first;
75845+ role->hash->first = o_tmp->nested;
75846+ }
75847+ }
75848+ }
75849+
75850+ return 0;
75851+}
75852+
75853+static __u32
75854+count_user_subjs(struct acl_subject_label *userp)
75855+{
75856+ struct acl_subject_label s_tmp;
75857+ __u32 num = 0;
75858+
75859+ while (userp) {
75860+ if (copy_acl_subject_label(&s_tmp, userp))
75861+ break;
75862+
75863+ userp = s_tmp.prev;
75864+ }
75865+
75866+ return num;
75867+}
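
A quirk worth noting: count_user_subjs() walks the user-space subject list exactly like count_user_objs() below, but as written it never increments num, so it always returns 0. Whether or not that is deliberate, the downstream effect is benign for correctness: create_table() treats a requested size of 0 as smaller than its smallest prime and still hands back a 7-bucket table, so each role's subject hash simply starts at the minimum size and relies on chaining.
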
75868+
75869+static int
75870+copy_user_allowedips(struct acl_role_label *rolep)
75871+{
75872+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75873+
75874+ ruserip = rolep->allowed_ips;
75875+
75876+ while (ruserip) {
75877+ rlast = rtmp;
75878+
75879+ if ((rtmp = (struct role_allowed_ip *)
75880+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75881+ return -ENOMEM;
75882+
75883+ if (copy_role_allowed_ip(rtmp, ruserip))
75884+ return -EFAULT;
75885+
75886+ ruserip = rtmp->prev;
75887+
75888+ if (!rlast) {
75889+ rtmp->prev = NULL;
75890+ rolep->allowed_ips = rtmp;
75891+ } else {
75892+ rlast->next = rtmp;
75893+ rtmp->prev = rlast;
75894+ }
75895+
75896+ if (!ruserip)
75897+ rtmp->next = NULL;
75898+ }
75899+
75900+ return 0;
75901+}
75902+
75903+static int
75904+copy_user_transitions(struct acl_role_label *rolep)
75905+{
75906+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75907+ int error;
75908+
75909+ rusertp = rolep->transitions;
75910+
75911+ while (rusertp) {
75912+ rlast = rtmp;
75913+
75914+ if ((rtmp = (struct role_transition *)
75915+ acl_alloc(sizeof (struct role_transition))) == NULL)
75916+ return -ENOMEM;
75917+
75918+ if (copy_role_transition(rtmp, rusertp))
75919+ return -EFAULT;
75920+
75921+ rusertp = rtmp->prev;
75922+
75923+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
75924+ if (error)
75925+ return error;
75926+
75927+ if (!rlast) {
75928+ rtmp->prev = NULL;
75929+ rolep->transitions = rtmp;
75930+ } else {
75931+ rlast->next = rtmp;
75932+ rtmp->prev = rlast;
75933+ }
75934+
75935+ if (!rusertp)
75936+ rtmp->next = NULL;
75937+ }
75938+
75939+ return 0;
75940+}
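
copy_user_allowedips() and copy_user_transitions() share one rebuild pattern: the user-space list is threaded backwards through prev pointers, and each loop iteration copies one element, follows the user's prev link, and splices the copy onto the tail of a forward-linked kernel list. A minimal sketch of that pattern, with a simplified element type standing in for role_transition / role_allowed_ip:

    #include <stdio.h>
    #include <stdlib.h>

    struct elem { int v; struct elem *prev, *next; };

    /* walk a prev-linked source chain, producing a forward copy */
    static struct elem *rebuild(const struct elem *user_head)
    {
        struct elem *tmp = NULL, *last = NULL, *head = NULL;

        while (user_head) {
            last = tmp;
            tmp = malloc(sizeof(*tmp));
            if (tmp == NULL)
                return NULL;                 /* real code returns -ENOMEM */
            tmp->v = user_head->v;           /* the copy_from_user step */
            user_head = user_head->prev;     /* follow the user's prev links */
            if (last == NULL) {              /* first copied element */
                tmp->prev = NULL;
                head = tmp;
            } else {
                last->next = tmp;
                tmp->prev = last;
            }
            if (user_head == NULL)
                tmp->next = NULL;            /* terminate on last element */
        }
        return head;
    }

    int main(void)
    {
        struct elem c = { 3, NULL, NULL }, b = { 2, &c, NULL }, a = { 1, &b, NULL };
        for (struct elem *p = rebuild(&a); p; p = p->next)
            printf("%d ", p->v);             /* prints: 1 2 3 */
        printf("\n");
        return 0;
    }
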
75941+
75942+static __u32 count_user_objs(const struct acl_object_label __user *userp)
75943+{
75944+ struct acl_object_label o_tmp;
75945+ __u32 num = 0;
75946+
75947+ while (userp) {
75948+ if (copy_acl_object_label(&o_tmp, userp))
75949+ break;
75950+
75951+ userp = o_tmp.prev;
75952+ num++;
75953+ }
75954+
75955+ return num;
75956+}
75957+
75958+static struct acl_subject_label *
75959+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
75960+{
75961+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
75962+ __u32 num_objs;
75963+ struct acl_ip_label **i_tmp, *i_utmp2;
75964+ struct gr_hash_struct ghash;
75965+ struct subject_map *subjmap;
75966+ unsigned int i_num;
75967+ int err;
75968+
75969+ if (already_copied != NULL)
75970+ *already_copied = 0;
75971+
75972+ s_tmp = lookup_subject_map(userp);
75973+
75974+ /* we've already copied this subject into the kernel, just return
75975+ the reference to it, and don't copy it over again
75976+ */
75977+ if (s_tmp) {
75978+ if (already_copied != NULL)
75979+ *already_copied = 1;
75980+		return s_tmp;
75981+ }
75982+
75983+ if ((s_tmp = (struct acl_subject_label *)
75984+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
75985+ return ERR_PTR(-ENOMEM);
75986+
75987+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
75988+ if (subjmap == NULL)
75989+ return ERR_PTR(-ENOMEM);
75990+
75991+ subjmap->user = userp;
75992+ subjmap->kernel = s_tmp;
75993+ insert_subj_map_entry(subjmap);
75994+
75995+ if (copy_acl_subject_label(s_tmp, userp))
75996+ return ERR_PTR(-EFAULT);
75997+
75998+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
75999+ if (err)
76000+ return ERR_PTR(err);
76001+
76002+ if (!strcmp(s_tmp->filename, "/"))
76003+ role->root_label = s_tmp;
76004+
76005+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
76006+ return ERR_PTR(-EFAULT);
76007+
76008+ /* copy user and group transition tables */
76009+
76010+ if (s_tmp->user_trans_num) {
76011+ uid_t *uidlist;
76012+
76013+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76014+ if (uidlist == NULL)
76015+ return ERR_PTR(-ENOMEM);
76016+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76017+ return ERR_PTR(-EFAULT);
76018+
76019+ s_tmp->user_transitions = uidlist;
76020+ }
76021+
76022+ if (s_tmp->group_trans_num) {
76023+ gid_t *gidlist;
76024+
76025+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76026+ if (gidlist == NULL)
76027+ return ERR_PTR(-ENOMEM);
76028+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76029+ return ERR_PTR(-EFAULT);
76030+
76031+ s_tmp->group_transitions = gidlist;
76032+ }
76033+
76034+ /* set up object hash table */
76035+ num_objs = count_user_objs(ghash.first);
76036+
76037+ s_tmp->obj_hash_size = num_objs;
76038+ s_tmp->obj_hash =
76039+ (struct acl_object_label **)
76040+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76041+
76042+ if (!s_tmp->obj_hash)
76043+ return ERR_PTR(-ENOMEM);
76044+
76045+ memset(s_tmp->obj_hash, 0,
76046+ s_tmp->obj_hash_size *
76047+ sizeof (struct acl_object_label *));
76048+
76049+ /* add in objects */
76050+ err = copy_user_objs(ghash.first, s_tmp, role);
76051+
76052+ if (err)
76053+ return ERR_PTR(err);
76054+
76055+ /* set pointer for parent subject */
76056+ if (s_tmp->parent_subject) {
76057+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
76058+
76059+ if (IS_ERR(s_tmp2))
76060+ return s_tmp2;
76061+
76062+ s_tmp->parent_subject = s_tmp2;
76063+ }
76064+
76065+ /* add in ip acls */
76066+
76067+ if (!s_tmp->ip_num) {
76068+ s_tmp->ips = NULL;
76069+ goto insert;
76070+ }
76071+
76072+ i_tmp =
76073+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76074+ sizeof (struct acl_ip_label *));
76075+
76076+ if (!i_tmp)
76077+ return ERR_PTR(-ENOMEM);
76078+
76079+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76080+ *(i_tmp + i_num) =
76081+ (struct acl_ip_label *)
76082+ acl_alloc(sizeof (struct acl_ip_label));
76083+ if (!*(i_tmp + i_num))
76084+ return ERR_PTR(-ENOMEM);
76085+
76086+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
76087+ return ERR_PTR(-EFAULT);
76088+
76089+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
76090+ return ERR_PTR(-EFAULT);
76091+
76092+ if ((*(i_tmp + i_num))->iface == NULL)
76093+ continue;
76094+
76095+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
76096+ if (err)
76097+ return ERR_PTR(err);
76098+ }
76099+
76100+ s_tmp->ips = i_tmp;
76101+
76102+insert:
76103+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76104+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76105+ return ERR_PTR(-ENOMEM);
76106+
76107+ return s_tmp;
76108+}
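
do_copy_user_subj() can be reached several times for one user-space subject (via nested objects and parent_subject links), so it memoizes: lookup_subject_map() consults a hash keyed on the user pointer, and insert_subj_map_entry() records the user->kernel mapping before the recursive copies run, which both deduplicates subjects and breaks reference cycles. A stripped-down model of that memo table (hash function and bucket count here are arbitrary):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define NBUCKETS 7

    struct mapent { const void *user; void *kernel; struct mapent *next; };
    static struct mapent *buckets[NBUCKETS];

    static unsigned int hash_ptr(const void *p)
    {
        return (unsigned int)((uintptr_t)p / sizeof(void *)) % NBUCKETS;
    }

    static void *memo_lookup(const void *user)
    {
        for (struct mapent *m = buckets[hash_ptr(user)]; m; m = m->next)
            if (m->user == user)
                return m->kernel;
        return NULL;
    }

    static void memo_insert(const void *user, void *kernel)
    {
        struct mapent *m = malloc(sizeof(*m));
        if (m == NULL)
            return;
        m->user = user;
        m->kernel = kernel;
        m->next = buckets[hash_ptr(user)];
        buckets[hash_ptr(user)] = m;
    }

    int main(void)
    {
        int usubj = 42;                  /* stands in for a user-space subject */
        if (!memo_lookup(&usubj)) {      /* first visit: copy and remember */
            memo_insert(&usubj, malloc(16));
            puts("copied");
        }
        if (memo_lookup(&usubj))         /* later visits reuse the copy */
            puts("reused");
        return 0;
    }
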
76109+
76110+static int
76111+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76112+{
76113+ struct acl_subject_label s_pre;
76114+ struct acl_subject_label * ret;
76115+ int err;
76116+
76117+ while (userp) {
76118+ if (copy_acl_subject_label(&s_pre, userp))
76119+ return -EFAULT;
76120+
76121+ ret = do_copy_user_subj(userp, role, NULL);
76122+
76123+ err = PTR_ERR(ret);
76124+ if (IS_ERR(ret))
76125+ return err;
76126+
76127+ insert_acl_subj_label(ret, role);
76128+
76129+ userp = s_pre.prev;
76130+ }
76131+
76132+ return 0;
76133+}
76134+
76135+static int
76136+copy_user_acl(struct gr_arg *arg)
76137+{
76138+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76139+ struct acl_subject_label *subj_list;
76140+ struct sprole_pw *sptmp;
76141+ struct gr_hash_struct *ghash;
76142+ uid_t *domainlist;
76143+ unsigned int r_num;
76144+ int err = 0;
76145+ __u16 i;
76146+ __u32 num_subjs;
76147+
76148+ /* we need a default and kernel role */
76149+ if (arg->role_db.num_roles < 2)
76150+ return -EINVAL;
76151+
76152+ /* copy special role authentication info from userspace */
76153+
76154+ polstate->num_sprole_pws = arg->num_sprole_pws;
76155+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
76156+
76157+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
76158+ return -ENOMEM;
76159+
76160+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76161+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76162+ if (!sptmp)
76163+ return -ENOMEM;
76164+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
76165+ return -EFAULT;
76166+
76167+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
76168+ if (err)
76169+ return err;
76170+
76171+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76172+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
76173+#endif
76174+
76175+ polstate->acl_special_roles[i] = sptmp;
76176+ }
76177+
76178+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76179+
76180+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76181+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
76182+
76183+ if (!r_tmp)
76184+ return -ENOMEM;
76185+
76186+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
76187+ return -EFAULT;
76188+
76189+ if (copy_acl_role_label(r_tmp, r_utmp2))
76190+ return -EFAULT;
76191+
76192+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
76193+ if (err)
76194+ return err;
76195+
76196+ if (!strcmp(r_tmp->rolename, "default")
76197+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76198+ polstate->default_role = r_tmp;
76199+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76200+ polstate->kernel_role = r_tmp;
76201+ }
76202+
76203+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
76204+ return -ENOMEM;
76205+
76206+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
76207+ return -EFAULT;
76208+
76209+ r_tmp->hash = ghash;
76210+
76211+ num_subjs = count_user_subjs(r_tmp->hash->first);
76212+
76213+ r_tmp->subj_hash_size = num_subjs;
76214+ r_tmp->subj_hash =
76215+ (struct acl_subject_label **)
76216+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76217+
76218+ if (!r_tmp->subj_hash)
76219+ return -ENOMEM;
76220+
76221+ err = copy_user_allowedips(r_tmp);
76222+ if (err)
76223+ return err;
76224+
76225+ /* copy domain info */
76226+ if (r_tmp->domain_children != NULL) {
76227+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76228+ if (domainlist == NULL)
76229+ return -ENOMEM;
76230+
76231+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
76232+ return -EFAULT;
76233+
76234+ r_tmp->domain_children = domainlist;
76235+ }
76236+
76237+ err = copy_user_transitions(r_tmp);
76238+ if (err)
76239+ return err;
76240+
76241+ memset(r_tmp->subj_hash, 0,
76242+ r_tmp->subj_hash_size *
76243+ sizeof (struct acl_subject_label *));
76244+
76245+ /* acquire the list of subjects, then NULL out
76246+ the list prior to parsing the subjects for this role,
76247+ as during this parsing the list is replaced with a list
76248+ of *nested* subjects for the role
76249+ */
76250+ subj_list = r_tmp->hash->first;
76251+
76252+ /* set nested subject list to null */
76253+ r_tmp->hash->first = NULL;
76254+
76255+ err = copy_user_subjs(subj_list, r_tmp);
76256+
76257+ if (err)
76258+ return err;
76259+
76260+ insert_acl_role_label(r_tmp);
76261+ }
76262+
76263+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
76264+ return -EINVAL;
76265+
76266+ return err;
76267+}
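
A subtlety in the loop above: on entry each role's hash->first points at the user-space list of that role's subjects. copy_user_acl() saves that pointer into subj_list, clears hash->first, and only then runs copy_user_subjs(); during that pass copy_user_objs() repopulates hash->first with kernel copies of the *nested* subjects it encounters, so by the time insert_acl_role_label() runs, first has been repurposed as the nested-subject list the comment describes. The final check enforces the minimum viable policy: parsing fails unless both the "default" role and the ":::kernel:::" role were supplied.
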
76268+
76269+static int gracl_reload_apply_policies(void *reload)
76270+{
76271+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
76272+ struct task_struct *task, *task2;
76273+ struct acl_role_label *role, *rtmp;
76274+ struct acl_subject_label *subj;
76275+ const struct cred *cred;
76276+ int role_applied;
76277+ int ret = 0;
76278+
76279+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
76280+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
76281+
76282+ /* first make sure we'll be able to apply the new policy cleanly */
76283+ do_each_thread(task2, task) {
76284+ if (task->exec_file == NULL)
76285+ continue;
76286+ role_applied = 0;
76287+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76288+ /* preserve special roles */
76289+ FOR_EACH_ROLE_START(role)
76290+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76291+ rtmp = task->role;
76292+ task->role = role;
76293+ role_applied = 1;
76294+ break;
76295+ }
76296+ FOR_EACH_ROLE_END(role)
76297+ }
76298+ if (!role_applied) {
76299+ cred = __task_cred(task);
76300+ rtmp = task->role;
76301+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76302+ }
76303+		/* this handles non-nested inherited subjects; nested subjects will
76304+		   currently still be dropped */
76305+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
76306+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
76307+ /* change the role back so that we've made no modifications to the policy */
76308+ task->role = rtmp;
76309+
76310+ if (subj == NULL || task->tmpacl == NULL) {
76311+ ret = -EINVAL;
76312+ goto out;
76313+ }
76314+ } while_each_thread(task2, task);
76315+
76316+ /* now actually apply the policy */
76317+
76318+ do_each_thread(task2, task) {
76319+ if (task->exec_file) {
76320+ role_applied = 0;
76321+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76322+ /* preserve special roles */
76323+ FOR_EACH_ROLE_START(role)
76324+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76325+ task->role = role;
76326+ role_applied = 1;
76327+ break;
76328+ }
76329+ FOR_EACH_ROLE_END(role)
76330+ }
76331+ if (!role_applied) {
76332+ cred = __task_cred(task);
76333+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76334+ }
76335+			/* this handles non-nested inherited subjects; nested subjects will
76336+			   currently still be dropped */
76337+ if (!reload_state->oldmode && task->inherited)
76338+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
76339+ else {
76340+ /* looked up and tagged to the task previously */
76341+ subj = task->tmpacl;
76342+ }
76343+ /* subj will be non-null */
76344+ __gr_apply_subject_to_task(polstate, task, subj);
76345+ if (reload_state->oldmode) {
76346+ task->acl_role_id = 0;
76347+ task->acl_sp_role = 0;
76348+ task->inherited = 0;
76349+ }
76350+ } else {
76351+ // it's a kernel process
76352+ task->role = polstate->kernel_role;
76353+ task->acl = polstate->kernel_role->root_label;
76354+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76355+ task->acl->mode &= ~GR_PROCFIND;
76356+#endif
76357+ }
76358+ } while_each_thread(task2, task);
76359+
76360+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
76361+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
76362+
76363+out:
76364+
76365+ return ret;
76366+}
76367+
76368+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
76369+{
76370+ struct gr_reload_state new_reload_state = { };
76371+ int err;
76372+
76373+ new_reload_state.oldpolicy_ptr = polstate;
76374+ new_reload_state.oldalloc_ptr = current_alloc_state;
76375+ new_reload_state.oldmode = oldmode;
76376+
76377+ current_alloc_state = &new_reload_state.newalloc;
76378+ polstate = &new_reload_state.newpolicy;
76379+
76380+ /* everything relevant is now saved off, copy in the new policy */
76381+ if (init_variables(args, true)) {
76382+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76383+ err = -ENOMEM;
76384+ goto error;
76385+ }
76386+
76387+ err = copy_user_acl(args);
76388+ free_init_variables();
76389+ if (err)
76390+ goto error;
76391+	/* the new policy is copied in, with the old policy available via saved_state.
76392+	   first go through applying roles, making sure to preserve special roles;
76393+	   then apply new subjects, making sure to preserve inherited and nested subjects,
76394+	   though currently only inherited subjects will be preserved
76395+	*/
76396+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
76397+ if (err)
76398+ goto error;
76399+
76400+ /* we've now applied the new policy, so restore the old policy state to free it */
76401+ polstate = &new_reload_state.oldpolicy;
76402+ current_alloc_state = &new_reload_state.oldalloc;
76403+ free_variables(true);
76404+
76405+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
76406+ to running_polstate/current_alloc_state inside stop_machine
76407+ */
76408+ err = 0;
76409+ goto out;
76410+error:
76411+ /* on error of loading the new policy, we'll just keep the previous
76412+ policy set around
76413+ */
76414+ free_variables(true);
76415+
76416+ /* doesn't affect runtime, but maintains consistent state */
76417+out:
76418+ polstate = new_reload_state.oldpolicy_ptr;
76419+ current_alloc_state = new_reload_state.oldalloc_ptr;
76420+
76421+ return err;
76422+}
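
gracl_reload() is a staged swap: polstate/current_alloc_state are pointed at fresh state while the old pointers are saved, the new policy is parsed, and gracl_reload_apply_policies() runs under stop_machine() so no other CPU executes while tasks are relabeled. Its first pass only validates that every task can be mapped to a role and subject in the new policy (restoring task->role afterwards), and only the second pass commits; on any failure the old pointers are restored and only the new allocations are freed. A toy model of that validate-then-commit shape, with hypothetical state/validate helpers:

    #include <stdio.h>

    struct state { int version; };
    static struct state live = { 1 };      /* stands in for *polstate */

    static int validate(const struct state *ns)
    {
        return ns->version > 0;            /* phase one: dry-run checks */
    }

    /* two-phase reload: validate a staged copy, then commit it */
    static int reload(struct state staged)
    {
        struct state old = live;           /* save the old state */
        if (!validate(&staged))
            return -1;                     /* rollback: keep previous policy */
        live = staged;                     /* commit (under stop_machine) */
        (void)old;                         /* old state would be freed here */
        return 0;
    }

    int main(void)
    {
        printf("reload -> %d, version now %d\n",
               reload((struct state){ 2 }), live.version);
        return 0;
    }
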
76423+
76424+static int
76425+gracl_init(struct gr_arg *args)
76426+{
76427+ int error = 0;
76428+
76429+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76430+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76431+
76432+ if (init_variables(args, false)) {
76433+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76434+ error = -ENOMEM;
76435+ goto out;
76436+ }
76437+
76438+ error = copy_user_acl(args);
76439+ free_init_variables();
76440+ if (error)
76441+ goto out;
76442+
76443+ error = gr_set_acls(0);
76444+ if (error)
76445+ goto out;
76446+
76447+ gr_enable_rbac_system();
76448+
76449+ return 0;
76450+
76451+out:
76452+ free_variables(false);
76453+ return error;
76454+}
76455+
76456+static int
76457+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
76458+ unsigned char **sum)
76459+{
76460+ struct acl_role_label *r;
76461+ struct role_allowed_ip *ipp;
76462+ struct role_transition *trans;
76463+ unsigned int i;
76464+ int found = 0;
76465+ u32 curr_ip = current->signal->curr_ip;
76466+
76467+ current->signal->saved_ip = curr_ip;
76468+
76469+ /* check transition table */
76470+
76471+ for (trans = current->role->transitions; trans; trans = trans->next) {
76472+ if (!strcmp(rolename, trans->rolename)) {
76473+ found = 1;
76474+ break;
76475+ }
76476+ }
76477+
76478+ if (!found)
76479+ return 0;
76480+
76481+ /* handle special roles that do not require authentication
76482+ and check ip */
76483+
76484+ FOR_EACH_ROLE_START(r)
76485+ if (!strcmp(rolename, r->rolename) &&
76486+ (r->roletype & GR_ROLE_SPECIAL)) {
76487+ found = 0;
76488+ if (r->allowed_ips != NULL) {
76489+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
76490+ if ((ntohl(curr_ip) & ipp->netmask) ==
76491+ (ntohl(ipp->addr) & ipp->netmask))
76492+ found = 1;
76493+ }
76494+ } else
76495+ found = 2;
76496+ if (!found)
76497+ return 0;
76498+
76499+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
76500+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
76501+ *salt = NULL;
76502+ *sum = NULL;
76503+ return 1;
76504+ }
76505+ }
76506+ FOR_EACH_ROLE_END(r)
76507+
76508+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76509+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
76510+ *salt = polstate->acl_special_roles[i]->salt;
76511+ *sum = polstate->acl_special_roles[i]->sum;
76512+ return 1;
76513+ }
76514+ }
76515+
76516+ return 0;
76517+}
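
The allowed-IP test above masks both the caller's address and each policy entry before comparing; curr_ip and ipp->addr are converted to host order first, which implies the stored netmask is already in host order (an assumption about the userspace policy format, not something visible in this file). A worked /24 example of the comparison:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* policy entry 192.168.1.0/24: addr in network order,
           mask assumed host order, per the code above */
        uint32_t addr = htonl(0xC0A80100);     /* 192.168.1.0 */
        uint32_t mask = 0xFFFFFF00;            /* /24 */
        uint32_t curr = htonl(0xC0A80137);     /* client 192.168.1.55 */

        if ((ntohl(curr) & mask) == (ntohl(addr) & mask))
            puts("allowed");                   /* prints: allowed */
        return 0;
    }
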
76518+
76519+int gr_check_secure_terminal(struct task_struct *task)
76520+{
76521+ struct task_struct *p, *p2, *p3;
76522+ struct files_struct *files;
76523+ struct fdtable *fdt;
76524+ struct file *our_file = NULL, *file;
76525+ int i;
76526+
76527+ if (task->signal->tty == NULL)
76528+ return 1;
76529+
76530+ files = get_files_struct(task);
76531+ if (files != NULL) {
76532+ rcu_read_lock();
76533+ fdt = files_fdtable(files);
76534+ for (i=0; i < fdt->max_fds; i++) {
76535+ file = fcheck_files(files, i);
76536+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
76537+ get_file(file);
76538+ our_file = file;
76539+ }
76540+ }
76541+ rcu_read_unlock();
76542+ put_files_struct(files);
76543+ }
76544+
76545+ if (our_file == NULL)
76546+ return 1;
76547+
76548+ read_lock(&tasklist_lock);
76549+ do_each_thread(p2, p) {
76550+ files = get_files_struct(p);
76551+ if (files == NULL ||
76552+ (p->signal && p->signal->tty == task->signal->tty)) {
76553+ if (files != NULL)
76554+ put_files_struct(files);
76555+ continue;
76556+ }
76557+ rcu_read_lock();
76558+ fdt = files_fdtable(files);
76559+ for (i=0; i < fdt->max_fds; i++) {
76560+ file = fcheck_files(files, i);
76561+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
76562+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
76563+ p3 = task;
76564+ while (task_pid_nr(p3) > 0) {
76565+ if (p3 == p)
76566+ break;
76567+ p3 = p3->real_parent;
76568+ }
76569+ if (p3 == p)
76570+ break;
76571+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
76572+ gr_handle_alertkill(p);
76573+ rcu_read_unlock();
76574+ put_files_struct(files);
76575+ read_unlock(&tasklist_lock);
76576+ fput(our_file);
76577+ return 0;
76578+ }
76579+ }
76580+ rcu_read_unlock();
76581+ put_files_struct(files);
76582+ } while_each_thread(p2, p);
76583+ read_unlock(&tasklist_lock);
76584+
76585+ fput(our_file);
76586+ return 1;
76587+}
76588+
76589+ssize_t
76590+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
76591+{
76592+ struct gr_arg_wrapper uwrap;
76593+ unsigned char *sprole_salt = NULL;
76594+ unsigned char *sprole_sum = NULL;
76595+ int error = 0;
76596+ int error2 = 0;
76597+ size_t req_count = 0;
76598+ unsigned char oldmode = 0;
76599+
76600+ mutex_lock(&gr_dev_mutex);
76601+
76602+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
76603+ error = -EPERM;
76604+ goto out;
76605+ }
76606+
76607+#ifdef CONFIG_COMPAT
76608+ pax_open_kernel();
76609+ if (is_compat_task()) {
76610+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
76611+ copy_gr_arg = &copy_gr_arg_compat;
76612+ copy_acl_object_label = &copy_acl_object_label_compat;
76613+ copy_acl_subject_label = &copy_acl_subject_label_compat;
76614+ copy_acl_role_label = &copy_acl_role_label_compat;
76615+ copy_acl_ip_label = &copy_acl_ip_label_compat;
76616+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
76617+ copy_role_transition = &copy_role_transition_compat;
76618+ copy_sprole_pw = &copy_sprole_pw_compat;
76619+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76620+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76621+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76622+ } else {
76623+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76624+ copy_gr_arg = &copy_gr_arg_normal;
76625+ copy_acl_object_label = &copy_acl_object_label_normal;
76626+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76627+ copy_acl_role_label = &copy_acl_role_label_normal;
76628+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76629+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76630+ copy_role_transition = &copy_role_transition_normal;
76631+ copy_sprole_pw = &copy_sprole_pw_normal;
76632+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76633+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76634+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76635+ }
76636+ pax_close_kernel();
76637+#endif
76638+
76639+ req_count = get_gr_arg_wrapper_size();
76640+
76641+ if (count != req_count) {
76642+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76643+ error = -EINVAL;
76644+ goto out;
76645+ }
76646+
76648+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76649+ gr_auth_expires = 0;
76650+ gr_auth_attempts = 0;
76651+ }
76652+
76653+ error = copy_gr_arg_wrapper(buf, &uwrap);
76654+ if (error)
76655+ goto out;
76656+
76657+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76658+ if (error)
76659+ goto out;
76660+
76661+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76662+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76663+ time_after(gr_auth_expires, get_seconds())) {
76664+ error = -EBUSY;
76665+ goto out;
76666+ }
76667+
76668+	/* if a non-root user is trying to do anything other than use a
76669+	   special role, do not attempt authentication and do not count
76670+	   the attempt towards authentication lockout
76671+	*/
76672+
76673+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76674+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76675+ gr_is_global_nonroot(current_uid())) {
76676+ error = -EPERM;
76677+ goto out;
76678+ }
76679+
76680+ /* ensure pw and special role name are null terminated */
76681+
76682+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76683+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76684+
76685+	/* Okay.
76686+	 * We now have enough of the argument structure (we have yet
76687+	 * to copy_from_user the tables themselves). Copy the tables
76688+	 * only if we need them, i.e. for loading operations. */
76689+
76690+ switch (gr_usermode->mode) {
76691+ case GR_STATUS:
76692+ if (gr_acl_is_enabled()) {
76693+ error = 1;
76694+ if (!gr_check_secure_terminal(current))
76695+ error = 3;
76696+ } else
76697+ error = 2;
76698+ goto out;
76699+ case GR_SHUTDOWN:
76700+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76701+ stop_machine(gr_rbac_disable, NULL, NULL);
76702+ free_variables(false);
76703+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76704+ memset(gr_system_salt, 0, GR_SALT_LEN);
76705+ memset(gr_system_sum, 0, GR_SHA_LEN);
76706+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76707+ } else if (gr_acl_is_enabled()) {
76708+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76709+ error = -EPERM;
76710+ } else {
76711+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76712+ error = -EAGAIN;
76713+ }
76714+ break;
76715+ case GR_ENABLE:
76716+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76717+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76718+ else {
76719+ if (gr_acl_is_enabled())
76720+ error = -EAGAIN;
76721+ else
76722+ error = error2;
76723+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76724+ }
76725+ break;
76726+ case GR_OLDRELOAD:
76727+		oldmode = 1;	/* fall through to GR_RELOAD */
76728+ case GR_RELOAD:
76729+ if (!gr_acl_is_enabled()) {
76730+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76731+ error = -EAGAIN;
76732+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76733+ error2 = gracl_reload(gr_usermode, oldmode);
76734+ if (!error2)
76735+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76736+ else {
76737+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76738+ error = error2;
76739+ }
76740+ } else {
76741+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76742+ error = -EPERM;
76743+ }
76744+ break;
76745+ case GR_SEGVMOD:
76746+ if (unlikely(!gr_acl_is_enabled())) {
76747+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76748+ error = -EAGAIN;
76749+ break;
76750+ }
76751+
76752+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76753+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76754+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76755+ struct acl_subject_label *segvacl;
76756+ segvacl =
76757+ lookup_acl_subj_label(gr_usermode->segv_inode,
76758+ gr_usermode->segv_device,
76759+ current->role);
76760+ if (segvacl) {
76761+ segvacl->crashes = 0;
76762+ segvacl->expires = 0;
76763+ }
76764+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
76765+ gr_remove_uid(gr_usermode->segv_uid);
76766+ }
76767+ } else {
76768+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
76769+ error = -EPERM;
76770+ }
76771+ break;
76772+ case GR_SPROLE:
76773+ case GR_SPROLEPAM:
76774+ if (unlikely(!gr_acl_is_enabled())) {
76775+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
76776+ error = -EAGAIN;
76777+ break;
76778+ }
76779+
76780+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
76781+ current->role->expires = 0;
76782+ current->role->auth_attempts = 0;
76783+ }
76784+
76785+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76786+ time_after(current->role->expires, get_seconds())) {
76787+ error = -EBUSY;
76788+ goto out;
76789+ }
76790+
76791+ if (lookup_special_role_auth
76792+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
76793+ && ((!sprole_salt && !sprole_sum)
76794+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
76795+ char *p = "";
76796+ assign_special_role(gr_usermode->sp_role);
76797+ read_lock(&tasklist_lock);
76798+ if (current->real_parent)
76799+ p = current->real_parent->role->rolename;
76800+ read_unlock(&tasklist_lock);
76801+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
76802+ p, acl_sp_role_value);
76803+ } else {
76804+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
76805+ error = -EPERM;
76806+			if (!(current->role->auth_attempts++))
76807+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76808+
76809+ goto out;
76810+ }
76811+ break;
76812+ case GR_UNSPROLE:
76813+ if (unlikely(!gr_acl_is_enabled())) {
76814+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
76815+ error = -EAGAIN;
76816+ break;
76817+ }
76818+
76819+ if (current->role->roletype & GR_ROLE_SPECIAL) {
76820+ char *p = "";
76821+ int i = 0;
76822+
76823+ read_lock(&tasklist_lock);
76824+ if (current->real_parent) {
76825+ p = current->real_parent->role->rolename;
76826+ i = current->real_parent->acl_role_id;
76827+ }
76828+ read_unlock(&tasklist_lock);
76829+
76830+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
76831+ gr_set_acls(1);
76832+ } else {
76833+ error = -EPERM;
76834+ goto out;
76835+ }
76836+ break;
76837+ default:
76838+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
76839+ error = -EINVAL;
76840+ break;
76841+ }
76842+
76843+ if (error != -EPERM)
76844+ goto out;
76845+
76846+	if (!(gr_auth_attempts++))
76847+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76848+
76849+ out:
76850+ mutex_unlock(&gr_dev_mutex);
76851+
76852+ if (!error)
76853+ error = req_count;
76854+
76855+ return error;
76856+}
76857+
76858+int
76859+gr_set_acls(const int type)
76860+{
76861+ struct task_struct *task, *task2;
76862+ struct acl_role_label *role = current->role;
76863+ struct acl_subject_label *subj;
76864+ __u16 acl_role_id = current->acl_role_id;
76865+ const struct cred *cred;
76866+ int ret;
76867+
76868+ rcu_read_lock();
76869+ read_lock(&tasklist_lock);
76870+ read_lock(&grsec_exec_file_lock);
76871+ do_each_thread(task2, task) {
76872+		/* check to see if we're called from the exit handler;
76873+		   if so, only replace ACLs that have inherited the admin
76874+		   ACL */
76875+
76876+ if (type && (task->role != role ||
76877+ task->acl_role_id != acl_role_id))
76878+ continue;
76879+
76880+ task->acl_role_id = 0;
76881+ task->acl_sp_role = 0;
76882+ task->inherited = 0;
76883+
76884+ if (task->exec_file) {
76885+ cred = __task_cred(task);
76886+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76887+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
76888+ if (subj == NULL) {
76889+ ret = -EINVAL;
76890+ read_unlock(&grsec_exec_file_lock);
76891+ read_unlock(&tasklist_lock);
76892+ rcu_read_unlock();
76893+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
76894+ return ret;
76895+ }
76896+ __gr_apply_subject_to_task(polstate, task, subj);
76897+ } else {
76898+ // it's a kernel process
76899+ task->role = polstate->kernel_role;
76900+ task->acl = polstate->kernel_role->root_label;
76901+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76902+ task->acl->mode &= ~GR_PROCFIND;
76903+#endif
76904+ }
76905+ } while_each_thread(task2, task);
76906+ read_unlock(&grsec_exec_file_lock);
76907+ read_unlock(&tasklist_lock);
76908+ rcu_read_unlock();
76909+
76910+ return 0;
76911+}
76912diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
76913new file mode 100644
76914index 0000000..39645c9
76915--- /dev/null
76916+++ b/grsecurity/gracl_res.c
76917@@ -0,0 +1,68 @@
76918+#include <linux/kernel.h>
76919+#include <linux/sched.h>
76920+#include <linux/gracl.h>
76921+#include <linux/grinternal.h>
76922+
76923+static const char *restab_log[] = {
76924+ [RLIMIT_CPU] = "RLIMIT_CPU",
76925+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
76926+ [RLIMIT_DATA] = "RLIMIT_DATA",
76927+ [RLIMIT_STACK] = "RLIMIT_STACK",
76928+ [RLIMIT_CORE] = "RLIMIT_CORE",
76929+ [RLIMIT_RSS] = "RLIMIT_RSS",
76930+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
76931+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
76932+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
76933+ [RLIMIT_AS] = "RLIMIT_AS",
76934+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
76935+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
76936+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
76937+ [RLIMIT_NICE] = "RLIMIT_NICE",
76938+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
76939+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
76940+ [GR_CRASH_RES] = "RLIMIT_CRASH"
76941+};
76942+
76943+void
76944+gr_log_resource(const struct task_struct *task,
76945+ const int res, const unsigned long wanted, const int gt)
76946+{
76947+ const struct cred *cred;
76948+ unsigned long rlim;
76949+
76950+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
76951+ return;
76952+
76953+	// resource not yet supported
76954+ if (unlikely(!restab_log[res]))
76955+ return;
76956+
76957+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
76958+ rlim = task_rlimit_max(task, res);
76959+ else
76960+ rlim = task_rlimit(task, res);
76961+
76962+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
76963+ return;
76964+
76965+ rcu_read_lock();
76966+ cred = __task_cred(task);
76967+
76968+ if (res == RLIMIT_NPROC &&
76969+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
76970+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
76971+ goto out_rcu_unlock;
76972+ else if (res == RLIMIT_MEMLOCK &&
76973+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
76974+ goto out_rcu_unlock;
76975+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
76976+ goto out_rcu_unlock;
76977+ rcu_read_unlock();
76978+
76979+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
76980+
76981+ return;
76982+out_rcu_unlock:
76983+ rcu_read_unlock();
76984+ return;
76985+}
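
gr_log_resource() logs only genuine overruns: the early return above encodes, in the negative, "log when wanted > rlim" for gt callers and "log when wanted >= rlim" otherwise, and RLIMIT_CPU/RLIMIT_RTTIME are compared against the hard limit (task_rlimit_max) where everything else uses the soft limit. A small check of that condition, with RLIM_INFINITY replaced by a stand-in value:

    #include <stdio.h>

    /* mirror of the early-return test: returns 1 when nothing is logged */
    static int within_limit(unsigned long rlim, unsigned long wanted, int gt)
    {
        const unsigned long RLIM_INFINITY_ = ~0UL;   /* stand-in value */
        return rlim == RLIM_INFINITY_ ||
               (gt && wanted <= rlim) || (!gt && wanted < rlim);
    }

    int main(void)
    {
        /* soft limit of 1024 open files, request of exactly 1024 */
        printf("gt=1: log=%d\n", !within_limit(1024, 1024, 1));  /* log=0 */
        printf("gt=0: log=%d\n", !within_limit(1024, 1024, 0));  /* log=1 */
        return 0;
    }
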
76986diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
76987new file mode 100644
76988index 0000000..218b66b
76989--- /dev/null
76990+++ b/grsecurity/gracl_segv.c
76991@@ -0,0 +1,324 @@
76992+#include <linux/kernel.h>
76993+#include <linux/mm.h>
76994+#include <asm/uaccess.h>
76995+#include <asm/errno.h>
76996+#include <asm/mman.h>
76997+#include <net/sock.h>
76998+#include <linux/file.h>
76999+#include <linux/fs.h>
77000+#include <linux/net.h>
77001+#include <linux/in.h>
77002+#include <linux/slab.h>
77003+#include <linux/types.h>
77004+#include <linux/sched.h>
77005+#include <linux/timer.h>
77006+#include <linux/gracl.h>
77007+#include <linux/grsecurity.h>
77008+#include <linux/grinternal.h>
77009+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77010+#include <linux/magic.h>
77011+#include <linux/pagemap.h>
77012+#include "../fs/btrfs/async-thread.h"
77013+#include "../fs/btrfs/ctree.h"
77014+#include "../fs/btrfs/btrfs_inode.h"
77015+#endif
77016+
77017+static struct crash_uid *uid_set;
77018+static unsigned short uid_used;
77019+static DEFINE_SPINLOCK(gr_uid_lock);
77020+extern rwlock_t gr_inode_lock;
77021+extern struct acl_subject_label *
77022+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
77023+ struct acl_role_label *role);
77024+
77025+static inline dev_t __get_dev(const struct dentry *dentry)
77026+{
77027+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77028+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77029+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
77030+ else
77031+#endif
77032+ return dentry->d_sb->s_dev;
77033+}
77034+
77035+static inline u64 __get_ino(const struct dentry *dentry)
77036+{
77037+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77038+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77039+ return btrfs_ino(dentry->d_inode);
77040+ else
77041+#endif
77042+ return dentry->d_inode->i_ino;
77043+}
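+
+/*
+ * btrfs subvolumes share a single superblock, so s_dev/i_ino alone would
+ * not uniquely identify an object; the two helpers above fall back to the
+ * subvolume's anonymous device (root->anon_dev) and btrfs_ino() so that
+ * inode/device based subject lookups keep working across subvolumes.
+ */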
77044+
77045+int
77046+gr_init_uidset(void)
77047+{
77048+ uid_set =
77049+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
77050+ uid_used = 0;
77051+
77052+ return uid_set ? 1 : 0;
77053+}
77054+
77055+void
77056+gr_free_uidset(void)
77057+{
77058+ if (uid_set) {
77059+ struct crash_uid *tmpset;
77060+ spin_lock(&gr_uid_lock);
77061+ tmpset = uid_set;
77062+ uid_set = NULL;
77063+ uid_used = 0;
77064+ spin_unlock(&gr_uid_lock);
77065+ if (tmpset)
77066+ kfree(tmpset);
77067+ }
77068+
77069+ return;
77070+}
77071+
77072+int
77073+gr_find_uid(const uid_t uid)
77074+{
77075+ struct crash_uid *tmp = uid_set;
77076+ uid_t buid;
77077+ int low = 0, high = uid_used - 1, mid;
77078+
77079+ while (high >= low) {
77080+ mid = (low + high) >> 1;
77081+ buid = tmp[mid].uid;
77082+ if (buid == uid)
77083+ return mid;
77084+ if (buid > uid)
77085+ high = mid - 1;
77086+ if (buid < uid)
77087+ low = mid + 1;
77088+ }
77089+
77090+ return -1;
77091+}
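+
+/*
+ * uid_set is kept sorted by uid (see gr_insertsort() below), so the
+ * lookup above is a plain binary search returning the slot index or -1.
+ * Example: with uids {100, 500, 1000}, gr_find_uid(500) probes mid == 1
+ * and returns 1; gr_find_uid(250) narrows the range to empty and
+ * returns -1.
+ */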
77092+
77093+static __inline__ void
77094+gr_insertsort(void)
77095+{
77096+ unsigned short i, j;
77097+ struct crash_uid index;
77098+
77099+ for (i = 1; i < uid_used; i++) {
77100+ index = uid_set[i];
77101+ j = i;
77102+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
77103+ uid_set[j] = uid_set[j - 1];
77104+ j--;
77105+ }
77106+ uid_set[j] = index;
77107+ }
77108+
77109+ return;
77110+}
77111+
77112+static __inline__ void
77113+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
77114+{
77115+ int loc;
77116+ uid_t uid = GR_GLOBAL_UID(kuid);
77117+
77118+ if (uid_used == GR_UIDTABLE_MAX)
77119+ return;
77120+
77121+ loc = gr_find_uid(uid);
77122+
77123+ if (loc >= 0) {
77124+ uid_set[loc].expires = expires;
77125+ return;
77126+ }
77127+
77128+ uid_set[uid_used].uid = uid;
77129+ uid_set[uid_used].expires = expires;
77130+ uid_used++;
77131+
77132+ gr_insertsort();
77133+
77134+ return;
77135+}
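+
+/*
+ * Insertion path: an already-banned uid just has its expiry refreshed;
+ * otherwise the new entry is appended and gr_insertsort() restores the
+ * order - an O(n) step that is cheap for this small table, which is
+ * bounded by GR_UIDTABLE_MAX and silently drops new entries once full.
+ */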
77136+
77137+void
77138+gr_remove_uid(const unsigned short loc)
77139+{
77140+ unsigned short i;
77141+
77142+ for (i = loc + 1; i < uid_used; i++)
77143+ uid_set[i - 1] = uid_set[i];
77144+
77145+ uid_used--;
77146+
77147+ return;
77148+}
77149+
77150+int
77151+gr_check_crash_uid(const kuid_t kuid)
77152+{
77153+ int loc;
77154+ int ret = 0;
77155+ uid_t uid;
77156+
77157+ if (unlikely(!gr_acl_is_enabled()))
77158+ return 0;
77159+
77160+ uid = GR_GLOBAL_UID(kuid);
77161+
77162+ spin_lock(&gr_uid_lock);
77163+ loc = gr_find_uid(uid);
77164+
77165+ if (loc < 0)
77166+ goto out_unlock;
77167+
77168+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
77169+ gr_remove_uid(loc);
77170+ else
77171+ ret = 1;
77172+
77173+out_unlock:
77174+ spin_unlock(&gr_uid_lock);
77175+ return ret;
77176+}
77177+
77178+static __inline__ int
77179+proc_is_setxid(const struct cred *cred)
77180+{
77181+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
77182+ !uid_eq(cred->uid, cred->fsuid))
77183+ return 1;
77184+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
77185+ !gid_eq(cred->gid, cred->fsgid))
77186+ return 1;
77187+
77188+ return 0;
77189+}
77190+
77191+extern int gr_fake_force_sig(int sig, struct task_struct *t);
77192+
77193+void
77194+gr_handle_crash(struct task_struct *task, const int sig)
77195+{
77196+ struct acl_subject_label *curr;
77197+ struct task_struct *tsk, *tsk2;
77198+ const struct cred *cred;
77199+ const struct cred *cred2;
77200+
77201+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
77202+ return;
77203+
77204+ if (unlikely(!gr_acl_is_enabled()))
77205+ return;
77206+
77207+ curr = task->acl;
77208+
77209+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
77210+ return;
77211+
77212+ if (time_before_eq(curr->expires, get_seconds())) {
77213+ curr->expires = 0;
77214+ curr->crashes = 0;
77215+ }
77216+
77217+ curr->crashes++;
77218+
77219+ if (!curr->expires)
77220+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
77221+
77222+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77223+ time_after(curr->expires, get_seconds())) {
77224+ rcu_read_lock();
77225+ cred = __task_cred(task);
77226+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
77227+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77228+ spin_lock(&gr_uid_lock);
77229+ gr_insert_uid(cred->uid, curr->expires);
77230+ spin_unlock(&gr_uid_lock);
77231+ curr->expires = 0;
77232+ curr->crashes = 0;
77233+ read_lock(&tasklist_lock);
77234+ do_each_thread(tsk2, tsk) {
77235+ cred2 = __task_cred(tsk);
77236+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
77237+ gr_fake_force_sig(SIGKILL, tsk);
77238+ } while_each_thread(tsk2, tsk);
77239+ read_unlock(&tasklist_lock);
77240+ } else {
77241+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77242+ read_lock(&tasklist_lock);
77243+ read_lock(&grsec_exec_file_lock);
77244+ do_each_thread(tsk2, tsk) {
77245+ if (likely(tsk != task)) {
77246+ // if this thread has the same subject as the one that triggered
77247+ // RES_CRASH and it's the same binary, kill it
77248+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
77249+ gr_fake_force_sig(SIGKILL, tsk);
77250+ }
77251+ } while_each_thread(tsk2, tsk);
77252+ read_unlock(&grsec_exec_file_lock);
77253+ read_unlock(&tasklist_lock);
77254+ }
77255+ rcu_read_unlock();
77256+ }
77257+
77258+ return;
77259+}
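+
+/*
+ * Summary of the RES_CRASH policy implemented above: crashes are counted
+ * per subject inside a window of res[GR_CRASH_RES].rlim_max seconds.
+ * Once rlim_cur crashes accumulate within the window, either (a) the
+ * offending non-root setxid user is banned for the remainder of the
+ * window (recorded in uid_set) and all tasks of that uid are killed, or
+ * (b) for non-setxid processes, every other task running the same binary
+ * under the same subject is killed. A policy line on the order of
+ * "RES_CRASH 3 30m" would correspond to rlim_cur == 3 crashes per
+ * rlim_max == 1800 seconds.
+ */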
77260+
77261+int
77262+gr_check_crash_exec(const struct file *filp)
77263+{
77264+ struct acl_subject_label *curr;
77265+ struct dentry *dentry;
77266+
77267+ if (unlikely(!gr_acl_is_enabled()))
77268+ return 0;
77269+
77270+ read_lock(&gr_inode_lock);
77271+ dentry = filp->f_path.dentry;
77272+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
77273+ current->role);
77274+ read_unlock(&gr_inode_lock);
77275+
77276+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
77277+ (!curr->crashes && !curr->expires))
77278+ return 0;
77279+
77280+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77281+ time_after(curr->expires, get_seconds()))
77282+ return 1;
77283+ else if (time_before_eq(curr->expires, get_seconds())) {
77284+ curr->crashes = 0;
77285+ curr->expires = 0;
77286+ }
77287+
77288+ return 0;
77289+}
77290+
77291+void
77292+gr_handle_alertkill(struct task_struct *task)
77293+{
77294+ struct acl_subject_label *curracl;
77295+ __u32 curr_ip;
77296+ struct task_struct *p, *p2;
77297+
77298+ if (unlikely(!gr_acl_is_enabled()))
77299+ return;
77300+
77301+ curracl = task->acl;
77302+ curr_ip = task->signal->curr_ip;
77303+
77304+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
77305+ read_lock(&tasklist_lock);
77306+ do_each_thread(p2, p) {
77307+ if (p->signal->curr_ip == curr_ip)
77308+ gr_fake_force_sig(SIGKILL, p);
77309+ } while_each_thread(p2, p);
77310+ read_unlock(&tasklist_lock);
77311+ } else if (curracl->mode & GR_KILLPROC)
77312+ gr_fake_force_sig(SIGKILL, task);
77313+
77314+ return;
77315+}
77316diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
77317new file mode 100644
77318index 0000000..6b0c9cc
77319--- /dev/null
77320+++ b/grsecurity/gracl_shm.c
77321@@ -0,0 +1,40 @@
77322+#include <linux/kernel.h>
77323+#include <linux/mm.h>
77324+#include <linux/sched.h>
77325+#include <linux/file.h>
77326+#include <linux/ipc.h>
77327+#include <linux/gracl.h>
77328+#include <linux/grsecurity.h>
77329+#include <linux/grinternal.h>
77330+
77331+int
77332+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77333+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
77334+{
77335+ struct task_struct *task;
77336+
77337+ if (!gr_acl_is_enabled())
77338+ return 1;
77339+
77340+ rcu_read_lock();
77341+ read_lock(&tasklist_lock);
77342+
77343+ task = find_task_by_vpid(shm_cprid);
77344+
77345+ if (unlikely(!task))
77346+ task = find_task_by_vpid(shm_lapid);
77347+
77348+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
77349+ (task_pid_nr(task) == shm_lapid)) &&
77350+ (task->acl->mode & GR_PROTSHM) &&
77351+ (task->acl != current->acl))) {
77352+ read_unlock(&tasklist_lock);
77353+ rcu_read_unlock();
77354+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
77355+ return 0;
77356+ }
77357+ read_unlock(&tasklist_lock);
77358+ rcu_read_unlock();
77359+
77360+ return 1;
77361+}
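+
+/*
+ * shmat() is refused when the segment's creator (or, if the creator pid
+ * is gone, the last attacher) is still alive, predates the segment (the
+ * start_time comparison guards against pid reuse), runs under a
+ * GR_PROTSHM subject, and that subject differs from the caller's - i.e.
+ * other subjects cannot attach a protected daemon's shared memory.
+ * Hypothetical call-site sketch, assuming the shmid_kernel fields this
+ * patch adds elsewhere (shm_cprid/shm_lapid/shm_createtime):
+ *
+ *	if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid,
+ *			     shp->shm_createtime, shp->shm_perm.cuid, shmid))
+ *		err = -EACCES;
+ */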
77362diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
77363new file mode 100644
77364index 0000000..bc0be01
77365--- /dev/null
77366+++ b/grsecurity/grsec_chdir.c
77367@@ -0,0 +1,19 @@
77368+#include <linux/kernel.h>
77369+#include <linux/sched.h>
77370+#include <linux/fs.h>
77371+#include <linux/file.h>
77372+#include <linux/grsecurity.h>
77373+#include <linux/grinternal.h>
77374+
77375+void
77376+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
77377+{
77378+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77379+ if ((grsec_enable_chdir && grsec_enable_group &&
77380+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
77381+ !grsec_enable_group)) {
77382+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
77383+ }
77384+#endif
77385+ return;
77386+}
77387diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
77388new file mode 100644
77389index 0000000..114ea4f
77390--- /dev/null
77391+++ b/grsecurity/grsec_chroot.c
77392@@ -0,0 +1,467 @@
77393+#include <linux/kernel.h>
77394+#include <linux/module.h>
77395+#include <linux/sched.h>
77396+#include <linux/file.h>
77397+#include <linux/fs.h>
77398+#include <linux/mount.h>
77399+#include <linux/types.h>
77400+#include "../fs/mount.h"
77401+#include <linux/grsecurity.h>
77402+#include <linux/grinternal.h>
77403+
77404+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77405+int gr_init_ran;
77406+#endif
77407+
77408+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
77409+{
77410+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77411+ struct dentry *tmpd = dentry;
77412+
77413+ read_seqlock_excl(&mount_lock);
77414+ write_seqlock(&rename_lock);
77415+
77416+ while (tmpd != mnt->mnt_root) {
77417+ atomic_inc(&tmpd->chroot_refcnt);
77418+ tmpd = tmpd->d_parent;
77419+ }
77420+ atomic_inc(&tmpd->chroot_refcnt);
77421+
77422+ write_sequnlock(&rename_lock);
77423+ read_sequnlock_excl(&mount_lock);
77424+#endif
77425+}
77426+
77427+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
77428+{
77429+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77430+ struct dentry *tmpd = dentry;
77431+
77432+ read_seqlock_excl(&mount_lock);
77433+ write_seqlock(&rename_lock);
77434+
77435+ while (tmpd != mnt->mnt_root) {
77436+ atomic_dec(&tmpd->chroot_refcnt);
77437+ tmpd = tmpd->d_parent;
77438+ }
77439+ atomic_dec(&tmpd->chroot_refcnt);
77440+
77441+ write_sequnlock(&rename_lock);
77442+ read_sequnlock_excl(&mount_lock);
77443+#endif
77444+}
77445+
77446+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77447+static struct dentry *get_closest_chroot(struct dentry *dentry)
77448+{
77449+ write_seqlock(&rename_lock);
77450+ do {
77451+ if (atomic_read(&dentry->chroot_refcnt)) {
77452+ write_sequnlock(&rename_lock);
77453+ return dentry;
77454+ }
77455+ dentry = dentry->d_parent;
77456+ } while (!IS_ROOT(dentry));
77457+ write_sequnlock(&rename_lock);
77458+ return NULL;
77459+}
77460+#endif
77461+
77462+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
77463+ struct dentry *newdentry, struct vfsmount *newmnt)
77464+{
77465+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77466+ struct dentry *chroot;
77467+
77468+ if (unlikely(!grsec_enable_chroot_rename))
77469+ return 0;
77470+
77471+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
77472+ return 0;
77473+
77474+ chroot = get_closest_chroot(olddentry);
77475+
77476+ if (chroot == NULL)
77477+ return 0;
77478+
77479+ if (is_subdir(newdentry, chroot))
77480+ return 0;
77481+
77482+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
77483+
77484+ return 1;
77485+#else
77486+ return 0;
77487+#endif
77488+}
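+
+/*
+ * Rename-protection rationale: every active chroot pins a chroot_refcnt
+ * on its root dentry and that dentry's ancestors (see the inc/dec
+ * helpers above). get_closest_chroot() walks olddentry upward to the
+ * nearest dentry with a live refcount, and the rename is rejected when
+ * it would move a path from inside that chroot to outside of it - a
+ * move that could otherwise be used to confuse chrooted processes about
+ * what their root contains.
+ */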
77489+
77490+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
77491+{
77492+#ifdef CONFIG_GRKERNSEC
77493+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
77494+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
77495+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77496+ && gr_init_ran
77497+#endif
77498+ )
77499+ task->gr_is_chrooted = 1;
77500+ else {
77501+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77502+ if (task_pid_nr(task) == 1 && !gr_init_ran)
77503+ gr_init_ran = 1;
77504+#endif
77505+ task->gr_is_chrooted = 0;
77506+ }
77507+
77508+ task->gr_chroot_dentry = path->dentry;
77509+#endif
77510+ return;
77511+}
77512+
77513+void gr_clear_chroot_entries(struct task_struct *task)
77514+{
77515+#ifdef CONFIG_GRKERNSEC
77516+ task->gr_is_chrooted = 0;
77517+ task->gr_chroot_dentry = NULL;
77518+#endif
77519+ return;
77520+}
77521+
77522+int
77523+gr_handle_chroot_unix(const pid_t pid)
77524+{
77525+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77526+ struct task_struct *p;
77527+
77528+ if (unlikely(!grsec_enable_chroot_unix))
77529+ return 1;
77530+
77531+ if (likely(!proc_is_chrooted(current)))
77532+ return 1;
77533+
77534+ rcu_read_lock();
77535+ read_lock(&tasklist_lock);
77536+ p = find_task_by_vpid_unrestricted(pid);
77537+ if (unlikely(p && !have_same_root(current, p))) {
77538+ read_unlock(&tasklist_lock);
77539+ rcu_read_unlock();
77540+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
77541+ return 0;
77542+ }
77543+ read_unlock(&tasklist_lock);
77544+ rcu_read_unlock();
77545+#endif
77546+ return 1;
77547+}
77548+
77549+int
77550+gr_handle_chroot_nice(void)
77551+{
77552+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77553+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
77554+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
77555+ return -EPERM;
77556+ }
77557+#endif
77558+ return 0;
77559+}
77560+
77561+int
77562+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
77563+{
77564+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77565+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
77566+ && proc_is_chrooted(current)) {
77567+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
77568+ return -EACCES;
77569+ }
77570+#endif
77571+ return 0;
77572+}
77573+
77574+int
77575+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
77576+{
77577+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77578+ struct task_struct *p;
77579+ int ret = 0;
77580+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
77581+ return ret;
77582+
77583+ read_lock(&tasklist_lock);
77584+ do_each_pid_task(pid, type, p) {
77585+ if (!have_same_root(current, p)) {
77586+ ret = 1;
77587+ goto out;
77588+ }
77589+ } while_each_pid_task(pid, type, p);
77590+out:
77591+ read_unlock(&tasklist_lock);
77592+ return ret;
77593+#endif
77594+ return 0;
77595+}
77596+
77597+int
77598+gr_pid_is_chrooted(struct task_struct *p)
77599+{
77600+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77601+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
77602+ return 0;
77603+
77604+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
77605+ !have_same_root(current, p)) {
77606+ return 1;
77607+ }
77608+#endif
77609+ return 0;
77610+}
77611+
77612+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
77613+
77614+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
77615+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
77616+{
77617+ struct path path, currentroot;
77618+ int ret = 0;
77619+
77620+ path.dentry = (struct dentry *)u_dentry;
77621+ path.mnt = (struct vfsmount *)u_mnt;
77622+ get_fs_root(current->fs, &currentroot);
77623+ if (path_is_under(&path, &currentroot))
77624+ ret = 1;
77625+ path_put(&currentroot);
77626+
77627+ return ret;
77628+}
77629+#endif
77630+
77631+int
77632+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
77633+{
77634+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77635+ if (!grsec_enable_chroot_fchdir)
77636+ return 1;
77637+
77638+ if (!proc_is_chrooted(current))
77639+ return 1;
77640+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
77641+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
77642+ return 0;
77643+ }
77644+#endif
77645+ return 1;
77646+}
77647+
77648+int
77649+gr_chroot_fhandle(void)
77650+{
77651+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77652+ if (!grsec_enable_chroot_fchdir)
77653+ return 1;
77654+
77655+ if (!proc_is_chrooted(current))
77656+ return 1;
77657+ else {
77658+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
77659+ return 0;
77660+ }
77661+#endif
77662+ return 1;
77663+}
77664+
77665+int
77666+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77667+ const u64 shm_createtime)
77668+{
77669+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77670+ struct task_struct *p;
77671+
77672+ if (unlikely(!grsec_enable_chroot_shmat))
77673+ return 1;
77674+
77675+ if (likely(!proc_is_chrooted(current)))
77676+ return 1;
77677+
77678+ rcu_read_lock();
77679+ read_lock(&tasklist_lock);
77680+
77681+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
77682+ if (time_before_eq64(p->start_time, shm_createtime)) {
77683+ if (have_same_root(current, p)) {
77684+ goto allow;
77685+ } else {
77686+ read_unlock(&tasklist_lock);
77687+ rcu_read_unlock();
77688+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77689+ return 0;
77690+ }
77691+ }
77692+		/* creator has exited or its pid was reused; fall through to the next check */
77693+ }
77694+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
77695+ if (unlikely(!have_same_root(current, p))) {
77696+ read_unlock(&tasklist_lock);
77697+ rcu_read_unlock();
77698+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77699+ return 0;
77700+ }
77701+ }
77702+
77703+allow:
77704+ read_unlock(&tasklist_lock);
77705+ rcu_read_unlock();
77706+#endif
77707+ return 1;
77708+}
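+
+/*
+ * Note the two-stage lookup above: if the creator pid is alive but its
+ * start_time postdates shm_createtime, the pid has been reused, so the
+ * decision deliberately falls through to the last-attach pid instead of
+ * judging the segment by a stale identity.
+ */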
77709+
77710+void
77711+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77712+{
77713+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77714+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77715+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77716+#endif
77717+ return;
77718+}
77719+
77720+int
77721+gr_handle_chroot_mknod(const struct dentry *dentry,
77722+ const struct vfsmount *mnt, const int mode)
77723+{
77724+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77725+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77726+ proc_is_chrooted(current)) {
77727+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77728+ return -EPERM;
77729+ }
77730+#endif
77731+ return 0;
77732+}
77733+
77734+int
77735+gr_handle_chroot_mount(const struct dentry *dentry,
77736+ const struct vfsmount *mnt, const char *dev_name)
77737+{
77738+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77739+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77740+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77741+ return -EPERM;
77742+ }
77743+#endif
77744+ return 0;
77745+}
77746+
77747+int
77748+gr_handle_chroot_pivot(void)
77749+{
77750+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77751+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77752+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77753+ return -EPERM;
77754+ }
77755+#endif
77756+ return 0;
77757+}
77758+
77759+int
77760+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77761+{
77762+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77763+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77764+ !gr_is_outside_chroot(dentry, mnt)) {
77765+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77766+ return -EPERM;
77767+ }
77768+#endif
77769+ return 0;
77770+}
77771+
77772+extern const char *captab_log[];
77773+extern int captab_log_entries;
77774+
77775+int
77776+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77777+{
77778+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77779+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77780+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77781+ if (cap_raised(chroot_caps, cap)) {
77782+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77783+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77784+ }
77785+ return 0;
77786+ }
77787+ }
77788+#endif
77789+ return 1;
77790+}
77791+
77792+int
77793+gr_chroot_is_capable(const int cap)
77794+{
77795+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77796+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77797+#endif
77798+ return 1;
77799+}
77800+
77801+int
77802+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77803+{
77804+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77805+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77806+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77807+ if (cap_raised(chroot_caps, cap)) {
77808+ return 0;
77809+ }
77810+ }
77811+#endif
77812+ return 1;
77813+}
77814+
77815+int
77816+gr_chroot_is_capable_nolog(const int cap)
77817+{
77818+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77819+ return gr_task_chroot_is_capable_nolog(current, cap);
77820+#endif
77821+ return 1;
77822+}
77823+
77824+int
77825+gr_handle_chroot_sysctl(const int op)
77826+{
77827+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77828+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77829+ proc_is_chrooted(current))
77830+ return -EACCES;
77831+#endif
77832+ return 0;
77833+}
77834+
77835+void
77836+gr_handle_chroot_chdir(const struct path *path)
77837+{
77838+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77839+ if (grsec_enable_chroot_chdir)
77840+ set_fs_pwd(current->fs, path);
77841+#endif
77842+ return;
77843+}
77844+
77845+int
77846+gr_handle_chroot_chmod(const struct dentry *dentry,
77847+ const struct vfsmount *mnt, const int mode)
77848+{
77849+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77850+ /* allow chmod +s on directories, but not files */
77851+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
77852+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
77853+ proc_is_chrooted(current)) {
77854+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
77855+ return -EPERM;
77856+ }
77857+#endif
77858+ return 0;
77859+}
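+
+/*
+ * On the S_ISGID test in gr_handle_chroot_chmod(): setgid is only
+ * refused together with S_IXGRP because on regular files setgid without
+ * group-execute does not grant privilege at all - it marks the file for
+ * mandatory locking - so only the genuinely dangerous combination is
+ * blocked inside a chroot.
+ */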
77860diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
77861new file mode 100644
77862index 0000000..946f750
77863--- /dev/null
77864+++ b/grsecurity/grsec_disabled.c
77865@@ -0,0 +1,445 @@
77866+#include <linux/kernel.h>
77867+#include <linux/module.h>
77868+#include <linux/sched.h>
77869+#include <linux/file.h>
77870+#include <linux/fs.h>
77871+#include <linux/kdev_t.h>
77872+#include <linux/net.h>
77873+#include <linux/in.h>
77874+#include <linux/ip.h>
77875+#include <linux/skbuff.h>
77876+#include <linux/sysctl.h>
77877+
77878+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
77879+void
77880+pax_set_initial_flags(struct linux_binprm *bprm)
77881+{
77882+ return;
77883+}
77884+#endif
77885+
77886+#ifdef CONFIG_SYSCTL
77887+__u32
77888+gr_handle_sysctl(const struct ctl_table * table, const int op)
77889+{
77890+ return 0;
77891+}
77892+#endif
77893+
77894+#ifdef CONFIG_TASKSTATS
77895+int gr_is_taskstats_denied(int pid)
77896+{
77897+ return 0;
77898+}
77899+#endif
77900+
77901+int
77902+gr_acl_is_enabled(void)
77903+{
77904+ return 0;
77905+}
77906+
77907+int
77908+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
77909+{
77910+ return 0;
77911+}
77912+
77913+void
77914+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77915+{
77916+ return;
77917+}
77918+
77919+int
77920+gr_handle_rawio(const struct inode *inode)
77921+{
77922+ return 0;
77923+}
77924+
77925+void
77926+gr_acl_handle_psacct(struct task_struct *task, const long code)
77927+{
77928+ return;
77929+}
77930+
77931+int
77932+gr_handle_ptrace(struct task_struct *task, const long request)
77933+{
77934+ return 0;
77935+}
77936+
77937+int
77938+gr_handle_proc_ptrace(struct task_struct *task)
77939+{
77940+ return 0;
77941+}
77942+
77943+int
77944+gr_set_acls(const int type)
77945+{
77946+ return 0;
77947+}
77948+
77949+int
77950+gr_check_hidden_task(const struct task_struct *tsk)
77951+{
77952+ return 0;
77953+}
77954+
77955+int
77956+gr_check_protected_task(const struct task_struct *task)
77957+{
77958+ return 0;
77959+}
77960+
77961+int
77962+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77963+{
77964+ return 0;
77965+}
77966+
77967+void
77968+gr_copy_label(struct task_struct *tsk)
77969+{
77970+ return;
77971+}
77972+
77973+void
77974+gr_set_pax_flags(struct task_struct *task)
77975+{
77976+ return;
77977+}
77978+
77979+int
77980+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77981+ const int unsafe_share)
77982+{
77983+ return 0;
77984+}
77985+
77986+void
77987+gr_handle_delete(const u64 ino, const dev_t dev)
77988+{
77989+ return;
77990+}
77991+
77992+void
77993+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77994+{
77995+ return;
77996+}
77997+
77998+void
77999+gr_handle_crash(struct task_struct *task, const int sig)
78000+{
78001+ return;
78002+}
78003+
78004+int
78005+gr_check_crash_exec(const struct file *filp)
78006+{
78007+ return 0;
78008+}
78009+
78010+int
78011+gr_check_crash_uid(const kuid_t uid)
78012+{
78013+ return 0;
78014+}
78015+
78016+void
78017+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
78018+ struct dentry *old_dentry,
78019+ struct dentry *new_dentry,
78020+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
78021+{
78022+ return;
78023+}
78024+
78025+int
78026+gr_search_socket(const int family, const int type, const int protocol)
78027+{
78028+ return 1;
78029+}
78030+
78031+int
78032+gr_search_connectbind(const int mode, const struct socket *sock,
78033+ const struct sockaddr_in *addr)
78034+{
78035+ return 0;
78036+}
78037+
78038+void
78039+gr_handle_alertkill(struct task_struct *task)
78040+{
78041+ return;
78042+}
78043+
78044+__u32
78045+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
78046+{
78047+ return 1;
78048+}
78049+
78050+__u32
78051+gr_acl_handle_hidden_file(const struct dentry * dentry,
78052+ const struct vfsmount * mnt)
78053+{
78054+ return 1;
78055+}
78056+
78057+__u32
78058+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
78059+ int acc_mode)
78060+{
78061+ return 1;
78062+}
78063+
78064+__u32
78065+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
78066+{
78067+ return 1;
78068+}
78069+
78070+__u32
78071+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
78072+{
78073+ return 1;
78074+}
78075+
78076+int
78077+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
78078+ unsigned int *vm_flags)
78079+{
78080+ return 1;
78081+}
78082+
78083+__u32
78084+gr_acl_handle_truncate(const struct dentry * dentry,
78085+ const struct vfsmount * mnt)
78086+{
78087+ return 1;
78088+}
78089+
78090+__u32
78091+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
78092+{
78093+ return 1;
78094+}
78095+
78096+__u32
78097+gr_acl_handle_access(const struct dentry * dentry,
78098+ const struct vfsmount * mnt, const int fmode)
78099+{
78100+ return 1;
78101+}
78102+
78103+__u32
78104+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
78105+ umode_t *mode)
78106+{
78107+ return 1;
78108+}
78109+
78110+__u32
78111+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
78112+{
78113+ return 1;
78114+}
78115+
78116+__u32
78117+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
78118+{
78119+ return 1;
78120+}
78121+
78122+__u32
78123+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
78124+{
78125+ return 1;
78126+}
78127+
78128+void
78129+grsecurity_init(void)
78130+{
78131+ return;
78132+}
78133+
78134+umode_t gr_acl_umask(void)
78135+{
78136+ return 0;
78137+}
78138+
78139+__u32
78140+gr_acl_handle_mknod(const struct dentry * new_dentry,
78141+ const struct dentry * parent_dentry,
78142+ const struct vfsmount * parent_mnt,
78143+ const int mode)
78144+{
78145+ return 1;
78146+}
78147+
78148+__u32
78149+gr_acl_handle_mkdir(const struct dentry * new_dentry,
78150+ const struct dentry * parent_dentry,
78151+ const struct vfsmount * parent_mnt)
78152+{
78153+ return 1;
78154+}
78155+
78156+__u32
78157+gr_acl_handle_symlink(const struct dentry * new_dentry,
78158+ const struct dentry * parent_dentry,
78159+ const struct vfsmount * parent_mnt, const struct filename *from)
78160+{
78161+ return 1;
78162+}
78163+
78164+__u32
78165+gr_acl_handle_link(const struct dentry * new_dentry,
78166+ const struct dentry * parent_dentry,
78167+ const struct vfsmount * parent_mnt,
78168+ const struct dentry * old_dentry,
78169+ const struct vfsmount * old_mnt, const struct filename *to)
78170+{
78171+ return 1;
78172+}
78173+
78174+int
78175+gr_acl_handle_rename(const struct dentry *new_dentry,
78176+ const struct dentry *parent_dentry,
78177+ const struct vfsmount *parent_mnt,
78178+ const struct dentry *old_dentry,
78179+ const struct inode *old_parent_inode,
78180+ const struct vfsmount *old_mnt, const struct filename *newname,
78181+ unsigned int flags)
78182+{
78183+ return 0;
78184+}
78185+
78186+int
78187+gr_acl_handle_filldir(const struct file *file, const char *name,
78188+ const int namelen, const u64 ino)
78189+{
78190+ return 1;
78191+}
78192+
78193+int
78194+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
78195+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
78196+{
78197+ return 1;
78198+}
78199+
78200+int
78201+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
78202+{
78203+ return 0;
78204+}
78205+
78206+int
78207+gr_search_accept(const struct socket *sock)
78208+{
78209+ return 0;
78210+}
78211+
78212+int
78213+gr_search_listen(const struct socket *sock)
78214+{
78215+ return 0;
78216+}
78217+
78218+int
78219+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
78220+{
78221+ return 0;
78222+}
78223+
78224+__u32
78225+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
78226+{
78227+ return 1;
78228+}
78229+
78230+__u32
78231+gr_acl_handle_creat(const struct dentry * dentry,
78232+ const struct dentry * p_dentry,
78233+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
78234+ const int imode)
78235+{
78236+ return 1;
78237+}
78238+
78239+void
78240+gr_acl_handle_exit(void)
78241+{
78242+ return;
78243+}
78244+
78245+int
78246+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78247+{
78248+ return 1;
78249+}
78250+
78251+void
78252+gr_set_role_label(const kuid_t uid, const kgid_t gid)
78253+{
78254+ return;
78255+}
78256+
78257+int
78258+gr_acl_handle_procpidmem(const struct task_struct *task)
78259+{
78260+ return 0;
78261+}
78262+
78263+int
78264+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
78265+{
78266+ return 0;
78267+}
78268+
78269+int
78270+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
78271+{
78272+ return 0;
78273+}
78274+
78275+int
78276+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
78277+{
78278+ return 0;
78279+}
78280+
78281+int
78282+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
78283+{
78284+ return 0;
78285+}
78286+
78287+int gr_acl_enable_at_secure(void)
78288+{
78289+ return 0;
78290+}
78291+
78292+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
78293+{
78294+ return dentry->d_sb->s_dev;
78295+}
78296+
78297+u64 gr_get_ino_from_dentry(struct dentry *dentry)
78298+{
78299+ return dentry->d_inode->i_ino;
78300+}
78301+
78302+void gr_put_exec_file(struct task_struct *task)
78303+{
78304+ return;
78305+}
78306+
78307+#ifdef CONFIG_SECURITY
78308+EXPORT_SYMBOL_GPL(gr_check_user_change);
78309+EXPORT_SYMBOL_GPL(gr_check_group_change);
78310+#endif
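+
+/*
+ * All of the stubs above exist so call sites link unchanged when
+ * grsecurity is compiled out. Note the two return conventions: the
+ * gr_acl_handle_*() permission hooks generally return 1 ("allowed"),
+ * while the gr_handle_*()/gr_check_*() style hooks return 0 ("nothing
+ * denied / nothing protected").
+ */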
78311diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
78312new file mode 100644
78313index 0000000..fb7531e
78314--- /dev/null
78315+++ b/grsecurity/grsec_exec.c
78316@@ -0,0 +1,189 @@
78317+#include <linux/kernel.h>
78318+#include <linux/sched.h>
78319+#include <linux/file.h>
78320+#include <linux/binfmts.h>
78321+#include <linux/fs.h>
78322+#include <linux/types.h>
78323+#include <linux/grdefs.h>
78324+#include <linux/grsecurity.h>
78325+#include <linux/grinternal.h>
78326+#include <linux/capability.h>
78327+#include <linux/module.h>
78328+#include <linux/compat.h>
78329+
78330+#include <asm/uaccess.h>
78331+
78332+#ifdef CONFIG_GRKERNSEC_EXECLOG
78333+static char gr_exec_arg_buf[132];
78334+static DEFINE_MUTEX(gr_exec_arg_mutex);
78335+#endif
78336+
78337+struct user_arg_ptr {
78338+#ifdef CONFIG_COMPAT
78339+ bool is_compat;
78340+#endif
78341+ union {
78342+ const char __user *const __user *native;
78343+#ifdef CONFIG_COMPAT
78344+ const compat_uptr_t __user *compat;
78345+#endif
78346+ } ptr;
78347+};
78348+
78349+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
78350+
78351+void
78352+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
78353+{
78354+#ifdef CONFIG_GRKERNSEC_EXECLOG
78355+ char *grarg = gr_exec_arg_buf;
78356+ unsigned int i, x, execlen = 0;
78357+ char c;
78358+
78359+ if (!((grsec_enable_execlog && grsec_enable_group &&
78360+ in_group_p(grsec_audit_gid))
78361+ || (grsec_enable_execlog && !grsec_enable_group)))
78362+ return;
78363+
78364+ mutex_lock(&gr_exec_arg_mutex);
78365+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
78366+
78367+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
78368+ const char __user *p;
78369+ unsigned int len;
78370+
78371+ p = get_user_arg_ptr(argv, i);
78372+ if (IS_ERR(p))
78373+ goto log;
78374+
78375+ len = strnlen_user(p, 128 - execlen);
78376+ if (len > 128 - execlen)
78377+ len = 128 - execlen;
78378+ else if (len > 0)
78379+ len--;
78380+ if (copy_from_user(grarg + execlen, p, len))
78381+ goto log;
78382+
78383+ /* rewrite unprintable characters */
78384+ for (x = 0; x < len; x++) {
78385+ c = *(grarg + execlen + x);
78386+ if (c < 32 || c > 126)
78387+ *(grarg + execlen + x) = ' ';
78388+ }
78389+
78390+ execlen += len;
78391+ *(grarg + execlen) = ' ';
78392+ *(grarg + execlen + 1) = '\0';
78393+ execlen++;
78394+ }
78395+
78396+ log:
78397+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
78398+ bprm->file->f_path.mnt, grarg);
78399+ mutex_unlock(&gr_exec_arg_mutex);
78400+#endif
78401+ return;
78402+}
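+
+/*
+ * Exec-log assembly above: up to 128 bytes of argv are copied into one
+ * static buffer under gr_exec_arg_mutex, non-printable bytes are
+ * rewritten to spaces so crafted arguments cannot inject control
+ * characters into the kernel log, arguments are joined with single
+ * spaces, and anything beyond the 128-byte budget is silently truncated.
+ */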
78403+
78404+#ifdef CONFIG_GRKERNSEC
78405+extern int gr_acl_is_capable(const int cap);
78406+extern int gr_acl_is_capable_nolog(const int cap);
78407+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78408+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
78409+extern int gr_chroot_is_capable(const int cap);
78410+extern int gr_chroot_is_capable_nolog(const int cap);
78411+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78412+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
78413+#endif
78414+
78415+const char *captab_log[] = {
78416+ "CAP_CHOWN",
78417+ "CAP_DAC_OVERRIDE",
78418+ "CAP_DAC_READ_SEARCH",
78419+ "CAP_FOWNER",
78420+ "CAP_FSETID",
78421+ "CAP_KILL",
78422+ "CAP_SETGID",
78423+ "CAP_SETUID",
78424+ "CAP_SETPCAP",
78425+ "CAP_LINUX_IMMUTABLE",
78426+ "CAP_NET_BIND_SERVICE",
78427+ "CAP_NET_BROADCAST",
78428+ "CAP_NET_ADMIN",
78429+ "CAP_NET_RAW",
78430+ "CAP_IPC_LOCK",
78431+ "CAP_IPC_OWNER",
78432+ "CAP_SYS_MODULE",
78433+ "CAP_SYS_RAWIO",
78434+ "CAP_SYS_CHROOT",
78435+ "CAP_SYS_PTRACE",
78436+ "CAP_SYS_PACCT",
78437+ "CAP_SYS_ADMIN",
78438+ "CAP_SYS_BOOT",
78439+ "CAP_SYS_NICE",
78440+ "CAP_SYS_RESOURCE",
78441+ "CAP_SYS_TIME",
78442+ "CAP_SYS_TTY_CONFIG",
78443+ "CAP_MKNOD",
78444+ "CAP_LEASE",
78445+ "CAP_AUDIT_WRITE",
78446+ "CAP_AUDIT_CONTROL",
78447+ "CAP_SETFCAP",
78448+ "CAP_MAC_OVERRIDE",
78449+ "CAP_MAC_ADMIN",
78450+ "CAP_SYSLOG",
78451+ "CAP_WAKE_ALARM",
78452+ "CAP_BLOCK_SUSPEND",
78453+ "CAP_AUDIT_READ"
78454+};
78455+
78456+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
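+
+/*
+ * captab_log[] is indexed directly by capability number and therefore
+ * has to stay in step with the kernel's capability list; the callers'
+ * "cap < captab_log_entries" guards mean a capability newer than this
+ * table is simply not named in the log rather than overrunning it.
+ */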
78457+
78458+int gr_is_capable(const int cap)
78459+{
78460+#ifdef CONFIG_GRKERNSEC
78461+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
78462+ return 1;
78463+ return 0;
78464+#else
78465+ return 1;
78466+#endif
78467+}
78468+
78469+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
78470+{
78471+#ifdef CONFIG_GRKERNSEC
78472+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
78473+ return 1;
78474+ return 0;
78475+#else
78476+ return 1;
78477+#endif
78478+}
78479+
78480+int gr_is_capable_nolog(const int cap)
78481+{
78482+#ifdef CONFIG_GRKERNSEC
78483+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
78484+ return 1;
78485+ return 0;
78486+#else
78487+ return 1;
78488+#endif
78489+}
78490+
78491+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
78492+{
78493+#ifdef CONFIG_GRKERNSEC
78494+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
78495+ return 1;
78496+ return 0;
78497+#else
78498+ return 1;
78499+#endif
78500+}
78501+
78502+EXPORT_SYMBOL_GPL(gr_is_capable);
78503+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
78504+EXPORT_SYMBOL_GPL(gr_task_is_capable);
78505+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
78506diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
78507new file mode 100644
78508index 0000000..06cc6ea
78509--- /dev/null
78510+++ b/grsecurity/grsec_fifo.c
78511@@ -0,0 +1,24 @@
78512+#include <linux/kernel.h>
78513+#include <linux/sched.h>
78514+#include <linux/fs.h>
78515+#include <linux/file.h>
78516+#include <linux/grinternal.h>
78517+
78518+int
78519+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
78520+ const struct dentry *dir, const int flag, const int acc_mode)
78521+{
78522+#ifdef CONFIG_GRKERNSEC_FIFO
78523+ const struct cred *cred = current_cred();
78524+
78525+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
78526+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
78527+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
78528+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
78529+ if (!inode_permission(dentry->d_inode, acc_mode))
78530+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
78531+ return -EACCES;
78532+ }
78533+#endif
78534+ return 0;
78535+}
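+
+/*
+ * FIFO protection above targets the classic trick of planting a FIFO in
+ * a sticky directory (+t, e.g. /tmp): the open is denied when the FIFO
+ * is owned by neither the directory owner nor the opener, unless O_EXCL
+ * was used. The inner inode_permission() call gates only the *logging*,
+ * so that just the opens which would otherwise have succeeded get
+ * reported; once the outer condition holds, the open is denied either
+ * way.
+ */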
78536diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
78537new file mode 100644
78538index 0000000..8ca18bf
78539--- /dev/null
78540+++ b/grsecurity/grsec_fork.c
78541@@ -0,0 +1,23 @@
78542+#include <linux/kernel.h>
78543+#include <linux/sched.h>
78544+#include <linux/grsecurity.h>
78545+#include <linux/grinternal.h>
78546+#include <linux/errno.h>
78547+
78548+void
78549+gr_log_forkfail(const int retval)
78550+{
78551+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78552+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
78553+ switch (retval) {
78554+ case -EAGAIN:
78555+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
78556+ break;
78557+ case -ENOMEM:
78558+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
78559+ break;
78560+ }
78561+ }
78562+#endif
78563+ return;
78564+}
78565diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
78566new file mode 100644
78567index 0000000..4ed9e7d
78568--- /dev/null
78569+++ b/grsecurity/grsec_init.c
78570@@ -0,0 +1,290 @@
78571+#include <linux/kernel.h>
78572+#include <linux/sched.h>
78573+#include <linux/mm.h>
78574+#include <linux/gracl.h>
78575+#include <linux/slab.h>
78576+#include <linux/vmalloc.h>
78577+#include <linux/percpu.h>
78578+#include <linux/module.h>
78579+
78580+int grsec_enable_ptrace_readexec;
78581+int grsec_enable_setxid;
78582+int grsec_enable_symlinkown;
78583+kgid_t grsec_symlinkown_gid;
78584+int grsec_enable_brute;
78585+int grsec_enable_link;
78586+int grsec_enable_dmesg;
78587+int grsec_enable_harden_ptrace;
78588+int grsec_enable_harden_ipc;
78589+int grsec_enable_fifo;
78590+int grsec_enable_execlog;
78591+int grsec_enable_signal;
78592+int grsec_enable_forkfail;
78593+int grsec_enable_audit_ptrace;
78594+int grsec_enable_time;
78595+int grsec_enable_group;
78596+kgid_t grsec_audit_gid;
78597+int grsec_enable_chdir;
78598+int grsec_enable_mount;
78599+int grsec_enable_rofs;
78600+int grsec_deny_new_usb;
78601+int grsec_enable_chroot_findtask;
78602+int grsec_enable_chroot_mount;
78603+int grsec_enable_chroot_shmat;
78604+int grsec_enable_chroot_fchdir;
78605+int grsec_enable_chroot_double;
78606+int grsec_enable_chroot_pivot;
78607+int grsec_enable_chroot_chdir;
78608+int grsec_enable_chroot_chmod;
78609+int grsec_enable_chroot_mknod;
78610+int grsec_enable_chroot_nice;
78611+int grsec_enable_chroot_execlog;
78612+int grsec_enable_chroot_caps;
78613+int grsec_enable_chroot_rename;
78614+int grsec_enable_chroot_sysctl;
78615+int grsec_enable_chroot_unix;
78616+int grsec_enable_tpe;
78617+kgid_t grsec_tpe_gid;
78618+int grsec_enable_blackhole;
78619+#ifdef CONFIG_IPV6_MODULE
78620+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
78621+#endif
78622+int grsec_lastack_retries;
78623+int grsec_enable_tpe_all;
78624+int grsec_enable_tpe_invert;
78625+int grsec_enable_socket_all;
78626+kgid_t grsec_socket_all_gid;
78627+int grsec_enable_socket_client;
78628+kgid_t grsec_socket_client_gid;
78629+int grsec_enable_socket_server;
78630+kgid_t grsec_socket_server_gid;
78631+int grsec_resource_logging;
78632+int grsec_disable_privio;
78633+int grsec_enable_log_rwxmaps;
78634+int grsec_lock;
78635+
78636+DEFINE_SPINLOCK(grsec_alert_lock);
78637+unsigned long grsec_alert_wtime = 0;
78638+unsigned long grsec_alert_fyet = 0;
78639+
78640+DEFINE_SPINLOCK(grsec_audit_lock);
78641+
78642+DEFINE_RWLOCK(grsec_exec_file_lock);
78643+
78644+char *gr_shared_page[4];
78645+
78646+char *gr_alert_log_fmt;
78647+char *gr_audit_log_fmt;
78648+char *gr_alert_log_buf;
78649+char *gr_audit_log_buf;
78650+
78651+extern struct gr_arg *gr_usermode;
78652+extern unsigned char *gr_system_salt;
78653+extern unsigned char *gr_system_sum;
78654+
78655+void __init
78656+grsecurity_init(void)
78657+{
78658+ int j;
78659+ /* create the per-cpu shared pages */
78660+
78661+#ifdef CONFIG_X86
78662+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
78663+#endif
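+	/*
+	 * The memset above scrubs part of the x86 BIOS data area: 0x41a
+	 * holds the keyboard buffer head/tail pointers followed by the
+	 * 32-byte buffer itself (2 + 2 + 32 = 36 bytes), so this
+	 * presumably wipes keystrokes - e.g. a boot passphrase - typed
+	 * before the kernel took over.
+	 */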
78664+
78665+ for (j = 0; j < 4; j++) {
78666+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
78667+ if (gr_shared_page[j] == NULL) {
78668+ panic("Unable to allocate grsecurity shared page");
78669+ return;
78670+ }
78671+ }
78672+
78673+ /* allocate log buffers */
78674+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
78675+ if (!gr_alert_log_fmt) {
78676+ panic("Unable to allocate grsecurity alert log format buffer");
78677+ return;
78678+ }
78679+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
78680+ if (!gr_audit_log_fmt) {
78681+ panic("Unable to allocate grsecurity audit log format buffer");
78682+ return;
78683+ }
78684+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78685+ if (!gr_alert_log_buf) {
78686+ panic("Unable to allocate grsecurity alert log buffer");
78687+ return;
78688+ }
78689+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78690+ if (!gr_audit_log_buf) {
78691+ panic("Unable to allocate grsecurity audit log buffer");
78692+ return;
78693+ }
78694+
78695+ /* allocate memory for authentication structure */
78696+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
78697+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
78698+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
78699+
78700+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
78701+ panic("Unable to allocate grsecurity authentication structure");
78702+ return;
78703+ }
78704+
78705+#ifdef CONFIG_GRKERNSEC_IO
78706+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
78707+ grsec_disable_privio = 1;
78708+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78709+ grsec_disable_privio = 1;
78710+#else
78711+ grsec_disable_privio = 0;
78712+#endif
78713+#endif
78714+
78715+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78716+	/* for backward compatibility, tpe_invert always defaults to on
78717+	 * when TPE_INVERT is enabled in the kernel config
78718+	 */
78719+ grsec_enable_tpe_invert = 1;
78720+#endif
78721+
78722+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78723+#ifndef CONFIG_GRKERNSEC_SYSCTL
78724+ grsec_lock = 1;
78725+#endif
78726+
78727+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78728+ grsec_enable_log_rwxmaps = 1;
78729+#endif
78730+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78731+ grsec_enable_group = 1;
78732+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78733+#endif
78734+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78735+ grsec_enable_ptrace_readexec = 1;
78736+#endif
78737+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78738+ grsec_enable_chdir = 1;
78739+#endif
78740+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78741+ grsec_enable_harden_ptrace = 1;
78742+#endif
78743+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78744+ grsec_enable_harden_ipc = 1;
78745+#endif
78746+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78747+ grsec_enable_mount = 1;
78748+#endif
78749+#ifdef CONFIG_GRKERNSEC_LINK
78750+ grsec_enable_link = 1;
78751+#endif
78752+#ifdef CONFIG_GRKERNSEC_BRUTE
78753+ grsec_enable_brute = 1;
78754+#endif
78755+#ifdef CONFIG_GRKERNSEC_DMESG
78756+ grsec_enable_dmesg = 1;
78757+#endif
78758+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78759+ grsec_enable_blackhole = 1;
78760+ grsec_lastack_retries = 4;
78761+#endif
78762+#ifdef CONFIG_GRKERNSEC_FIFO
78763+ grsec_enable_fifo = 1;
78764+#endif
78765+#ifdef CONFIG_GRKERNSEC_EXECLOG
78766+ grsec_enable_execlog = 1;
78767+#endif
78768+#ifdef CONFIG_GRKERNSEC_SETXID
78769+ grsec_enable_setxid = 1;
78770+#endif
78771+#ifdef CONFIG_GRKERNSEC_SIGNAL
78772+ grsec_enable_signal = 1;
78773+#endif
78774+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78775+ grsec_enable_forkfail = 1;
78776+#endif
78777+#ifdef CONFIG_GRKERNSEC_TIME
78778+ grsec_enable_time = 1;
78779+#endif
78780+#ifdef CONFIG_GRKERNSEC_RESLOG
78781+ grsec_resource_logging = 1;
78782+#endif
78783+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78784+ grsec_enable_chroot_findtask = 1;
78785+#endif
78786+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78787+ grsec_enable_chroot_unix = 1;
78788+#endif
78789+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78790+ grsec_enable_chroot_mount = 1;
78791+#endif
78792+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78793+ grsec_enable_chroot_fchdir = 1;
78794+#endif
78795+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78796+ grsec_enable_chroot_shmat = 1;
78797+#endif
78798+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78799+ grsec_enable_audit_ptrace = 1;
78800+#endif
78801+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78802+ grsec_enable_chroot_double = 1;
78803+#endif
78804+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78805+ grsec_enable_chroot_pivot = 1;
78806+#endif
78807+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78808+ grsec_enable_chroot_chdir = 1;
78809+#endif
78810+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78811+ grsec_enable_chroot_chmod = 1;
78812+#endif
78813+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78814+ grsec_enable_chroot_mknod = 1;
78815+#endif
78816+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78817+ grsec_enable_chroot_nice = 1;
78818+#endif
78819+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78820+ grsec_enable_chroot_execlog = 1;
78821+#endif
78822+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78823+ grsec_enable_chroot_caps = 1;
78824+#endif
78825+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78826+ grsec_enable_chroot_rename = 1;
78827+#endif
78828+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78829+ grsec_enable_chroot_sysctl = 1;
78830+#endif
78831+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78832+ grsec_enable_symlinkown = 1;
78833+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78834+#endif
78835+#ifdef CONFIG_GRKERNSEC_TPE
78836+ grsec_enable_tpe = 1;
78837+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78838+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78839+ grsec_enable_tpe_all = 1;
78840+#endif
78841+#endif
78842+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78843+ grsec_enable_socket_all = 1;
78844+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78845+#endif
78846+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78847+ grsec_enable_socket_client = 1;
78848+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78849+#endif
78850+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78851+ grsec_enable_socket_server = 1;
78852+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78853+#endif
78854+#endif
78855+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78856+ grsec_deny_new_usb = 1;
78857+#endif
78858+
78859+ return;
78860+}
78861diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
78862new file mode 100644
78863index 0000000..1773300
78864--- /dev/null
78865+++ b/grsecurity/grsec_ipc.c
78866@@ -0,0 +1,48 @@
78867+#include <linux/kernel.h>
78868+#include <linux/mm.h>
78869+#include <linux/sched.h>
78870+#include <linux/file.h>
78871+#include <linux/ipc.h>
78872+#include <linux/ipc_namespace.h>
78873+#include <linux/grsecurity.h>
78874+#include <linux/grinternal.h>
78875+
78876+int
78877+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
78878+{
78879+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78880+ int write;
78881+ int orig_granted_mode;
78882+ kuid_t euid;
78883+ kgid_t egid;
78884+
78885+ if (!grsec_enable_harden_ipc)
78886+ return 1;
78887+
78888+ euid = current_euid();
78889+ egid = current_egid();
78890+
78891+ write = requested_mode & 00002;
78892+ orig_granted_mode = ipcp->mode;
78893+
78894+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
78895+ orig_granted_mode >>= 6;
78896+ else {
78897+		/* world bits set means the permissions are likely wrong: grant non-owners nothing */
78898+ if (orig_granted_mode & 0007)
78899+ orig_granted_mode = 0;
78900+		/* otherwise do an egid-only check */
78901+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
78902+ orig_granted_mode >>= 3;
78903+ /* otherwise, no access */
78904+ else
78905+ orig_granted_mode = 0;
78906+ }
78907+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
78908+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
78909+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
78910+ return 0;
78911+ }
78912+#endif
78913+ return 1;
78914+}
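+
+/*
+ * Worked example for the mode handling above: ipcp->mode == 0666 and the
+ * caller is neither creator nor owner. The world bits (0007) are set, so
+ * the object is treated as misconfigured and orig_granted_mode collapses
+ * to 0. A write that plain DAC already granted (requested_mode &
+ * ~granted_mode & 0007 == 0) but that the hardened mode would not grant
+ * is then refused unless the caller holds CAP_IPC_OWNER in the IPC
+ * namespace's user namespace.
+ */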
78915diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
78916new file mode 100644
78917index 0000000..5e05e20
78918--- /dev/null
78919+++ b/grsecurity/grsec_link.c
78920@@ -0,0 +1,58 @@
78921+#include <linux/kernel.h>
78922+#include <linux/sched.h>
78923+#include <linux/fs.h>
78924+#include <linux/file.h>
78925+#include <linux/grinternal.h>
78926+
78927+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
78928+{
78929+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78930+ const struct inode *link_inode = link->dentry->d_inode;
78931+
78932+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
78933+ /* ignore root-owned links, e.g. /proc/self */
78934+ gr_is_global_nonroot(link_inode->i_uid) && target &&
78935+ !uid_eq(link_inode->i_uid, target->i_uid)) {
78936+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
78937+ return 1;
78938+ }
78939+#endif
78940+ return 0;
78941+}
78942+
78943+int
78944+gr_handle_follow_link(const struct inode *parent,
78945+ const struct inode *inode,
78946+ const struct dentry *dentry, const struct vfsmount *mnt)
78947+{
78948+#ifdef CONFIG_GRKERNSEC_LINK
78949+ const struct cred *cred = current_cred();
78950+
78951+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
78952+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
78953+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
78954+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
78955+ return -EACCES;
78956+ }
78957+#endif
78958+ return 0;
78959+}
78960+
78961+int
78962+gr_handle_hardlink(const struct dentry *dentry,
78963+ const struct vfsmount *mnt,
78964+ struct inode *inode, const int mode, const struct filename *to)
78965+{
78966+#ifdef CONFIG_GRKERNSEC_LINK
78967+ const struct cred *cred = current_cred();
78968+
78969+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
78970+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
78971+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
78972+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
78973+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
78974+ return -EPERM;
78975+ }
78976+#endif
78977+ return 0;
78978+}
78979diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
78980new file mode 100644
78981index 0000000..dbe0a6b
78982--- /dev/null
78983+++ b/grsecurity/grsec_log.c
78984@@ -0,0 +1,341 @@
78985+#include <linux/kernel.h>
78986+#include <linux/sched.h>
78987+#include <linux/file.h>
78988+#include <linux/tty.h>
78989+#include <linux/fs.h>
78990+#include <linux/mm.h>
78991+#include <linux/grinternal.h>
78992+
78993+#ifdef CONFIG_TREE_PREEMPT_RCU
78994+#define DISABLE_PREEMPT() preempt_disable()
78995+#define ENABLE_PREEMPT() preempt_enable()
78996+#else
78997+#define DISABLE_PREEMPT()
78998+#define ENABLE_PREEMPT()
78999+#endif
79000+
79001+#define BEGIN_LOCKS(x) \
79002+ DISABLE_PREEMPT(); \
79003+ rcu_read_lock(); \
79004+ read_lock(&tasklist_lock); \
79005+ read_lock(&grsec_exec_file_lock); \
79006+ if (x != GR_DO_AUDIT) \
79007+ spin_lock(&grsec_alert_lock); \
79008+ else \
79009+ spin_lock(&grsec_audit_lock)
79010+
79011+#define END_LOCKS(x) \
79012+ if (x != GR_DO_AUDIT) \
79013+ spin_unlock(&grsec_alert_lock); \
79014+ else \
79015+ spin_unlock(&grsec_audit_lock); \
79016+ read_unlock(&grsec_exec_file_lock); \
79017+ read_unlock(&tasklist_lock); \
79018+ rcu_read_unlock(); \
79019+ ENABLE_PREEMPT(); \
79020+ if (x == GR_DONT_AUDIT) \
79021+ gr_handle_alertkill(current)
79022+
79023+enum {
79024+ FLOODING,
79025+ NO_FLOODING
79026+};
79027+
79028+extern char *gr_alert_log_fmt;
79029+extern char *gr_audit_log_fmt;
79030+extern char *gr_alert_log_buf;
79031+extern char *gr_audit_log_buf;
79032+
79033+static int gr_log_start(int audit)
79034+{
79035+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
79036+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
79037+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79038+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
79039+ unsigned long curr_secs = get_seconds();
79040+
79041+ if (audit == GR_DO_AUDIT)
79042+ goto set_fmt;
79043+
79044+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
79045+ grsec_alert_wtime = curr_secs;
79046+ grsec_alert_fyet = 0;
79047+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
79048+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
79049+ grsec_alert_fyet++;
79050+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
79051+ grsec_alert_wtime = curr_secs;
79052+ grsec_alert_fyet++;
79053+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
79054+ return FLOODING;
79055+ }
79056+ else return FLOODING;
79057+
79058+set_fmt:
79059+#endif
79060+ memset(buf, 0, PAGE_SIZE);
79061+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
79062+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
79063+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79064+ } else if (current->signal->curr_ip) {
79065+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
79066+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
79067+ } else if (gr_acl_is_enabled()) {
79068+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
79069+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79070+ } else {
79071+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
79072+ strcpy(buf, fmt);
79073+ }
79074+
79075+ return NO_FLOODING;
79076+}
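+
+/* worked example of the flood window above, assuming the usual Kconfig
+ defaults of FLOODTIME=10 and FLOODBURST=6 (the defaults are an
+ assumption here, not taken from this file):
+ t=100 alert 1: window opens, wtime=100, fyet=0, logged
+ t=101..105 alerts 2..7: fyet climbs 1..6, all logged
+ t=106 alert 8: fyet == FLOODBURST, wtime renewed to 106, fyet=7,
+ "logging disabled for 10 seconds" printed, alert dropped
+ t=107..115 later alerts hit the final else and are dropped silently
+ t=117 time_after(117, 106 + 10) holds: window resets, logging resumes
+ */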
79077+
79078+static void gr_log_middle(int audit, const char *msg, va_list ap)
79079+ __attribute__ ((format (printf, 2, 0)));
79080+
79081+static void gr_log_middle(int audit, const char *msg, va_list ap)
79082+{
79083+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79084+ unsigned int len = strlen(buf);
79085+
79086+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79087+
79088+ return;
79089+}
79090+
79091+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79092+ __attribute__ ((format (printf, 2, 3)));
79093+
79094+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79095+{
79096+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79097+ unsigned int len = strlen(buf);
79098+ va_list ap;
79099+
79100+ va_start(ap, msg);
79101+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79102+ va_end(ap);
79103+
79104+ return;
79105+}
79106+
79107+static void gr_log_end(int audit, int append_default)
79108+{
79109+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79110+ if (append_default) {
79111+ struct task_struct *task = current;
79112+ struct task_struct *parent = task->real_parent;
79113+ const struct cred *cred = __task_cred(task);
79114+ const struct cred *pcred = __task_cred(parent);
79115+ unsigned int len = strlen(buf);
79116+
79117+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79118+ }
79119+
79120+ printk("%s\n", buf);
79121+
79122+ return;
79123+}
79124+
79125+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
79126+{
79127+ int logtype;
79128+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
79129+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
79130+ void *voidptr = NULL;
79131+ int num1 = 0, num2 = 0;
79132+ unsigned long ulong1 = 0, ulong2 = 0;
79133+ struct dentry *dentry = NULL;
79134+ struct vfsmount *mnt = NULL;
79135+ struct file *file = NULL;
79136+ struct task_struct *task = NULL;
79137+ struct vm_area_struct *vma = NULL;
79138+ const struct cred *cred, *pcred;
79139+ va_list ap;
79140+
79141+ BEGIN_LOCKS(audit);
79142+ logtype = gr_log_start(audit);
79143+ if (logtype == FLOODING) {
79144+ END_LOCKS(audit);
79145+ return;
79146+ }
79147+ va_start(ap, argtypes);
79148+ switch (argtypes) {
79149+ case GR_TTYSNIFF:
79150+ task = va_arg(ap, struct task_struct *);
79151+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
79152+ break;
79153+ case GR_SYSCTL_HIDDEN:
79154+ str1 = va_arg(ap, char *);
79155+ gr_log_middle_varargs(audit, msg, result, str1);
79156+ break;
79157+ case GR_RBAC:
79158+ dentry = va_arg(ap, struct dentry *);
79159+ mnt = va_arg(ap, struct vfsmount *);
79160+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
79161+ break;
79162+ case GR_RBAC_STR:
79163+ dentry = va_arg(ap, struct dentry *);
79164+ mnt = va_arg(ap, struct vfsmount *);
79165+ str1 = va_arg(ap, char *);
79166+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
79167+ break;
79168+ case GR_STR_RBAC:
79169+ str1 = va_arg(ap, char *);
79170+ dentry = va_arg(ap, struct dentry *);
79171+ mnt = va_arg(ap, struct vfsmount *);
79172+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
79173+ break;
79174+ case GR_RBAC_MODE2:
79175+ dentry = va_arg(ap, struct dentry *);
79176+ mnt = va_arg(ap, struct vfsmount *);
79177+ str1 = va_arg(ap, char *);
79178+ str2 = va_arg(ap, char *);
79179+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
79180+ break;
79181+ case GR_RBAC_MODE3:
79182+ dentry = va_arg(ap, struct dentry *);
79183+ mnt = va_arg(ap, struct vfsmount *);
79184+ str1 = va_arg(ap, char *);
79185+ str2 = va_arg(ap, char *);
79186+ str3 = va_arg(ap, char *);
79187+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
79188+ break;
79189+ case GR_FILENAME:
79190+ dentry = va_arg(ap, struct dentry *);
79191+ mnt = va_arg(ap, struct vfsmount *);
79192+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
79193+ break;
79194+ case GR_STR_FILENAME:
79195+ str1 = va_arg(ap, char *);
79196+ dentry = va_arg(ap, struct dentry *);
79197+ mnt = va_arg(ap, struct vfsmount *);
79198+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
79199+ break;
79200+ case GR_FILENAME_STR:
79201+ dentry = va_arg(ap, struct dentry *);
79202+ mnt = va_arg(ap, struct vfsmount *);
79203+ str1 = va_arg(ap, char *);
79204+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
79205+ break;
79206+ case GR_FILENAME_TWO_INT:
79207+ dentry = va_arg(ap, struct dentry *);
79208+ mnt = va_arg(ap, struct vfsmount *);
79209+ num1 = va_arg(ap, int);
79210+ num2 = va_arg(ap, int);
79211+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
79212+ break;
79213+ case GR_FILENAME_TWO_INT_STR:
79214+ dentry = va_arg(ap, struct dentry *);
79215+ mnt = va_arg(ap, struct vfsmount *);
79216+ num1 = va_arg(ap, int);
79217+ num2 = va_arg(ap, int);
79218+ str1 = va_arg(ap, char *);
79219+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
79220+ break;
79221+ case GR_TEXTREL:
79222+ file = va_arg(ap, struct file *);
79223+ ulong1 = va_arg(ap, unsigned long);
79224+ ulong2 = va_arg(ap, unsigned long);
79225+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
79226+ break;
79227+ case GR_PTRACE:
79228+ task = va_arg(ap, struct task_struct *);
79229+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
79230+ break;
79231+ case GR_RESOURCE:
79232+ task = va_arg(ap, struct task_struct *);
79233+ cred = __task_cred(task);
79234+ pcred = __task_cred(task->real_parent);
79235+ ulong1 = va_arg(ap, unsigned long);
79236+ str1 = va_arg(ap, char *);
79237+ ulong2 = va_arg(ap, unsigned long);
79238+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79239+ break;
79240+ case GR_CAP:
79241+ task = va_arg(ap, struct task_struct *);
79242+ cred = __task_cred(task);
79243+ pcred = __task_cred(task->real_parent);
79244+ str1 = va_arg(ap, char *);
79245+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79246+ break;
79247+ case GR_SIG:
79248+ str1 = va_arg(ap, char *);
79249+ voidptr = va_arg(ap, void *);
79250+ gr_log_middle_varargs(audit, msg, str1, voidptr);
79251+ break;
79252+ case GR_SIG2:
79253+ task = va_arg(ap, struct task_struct *);
79254+ cred = __task_cred(task);
79255+ pcred = __task_cred(task->real_parent);
79256+ num1 = va_arg(ap, int);
79257+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79258+ break;
79259+ case GR_CRASH1:
79260+ task = va_arg(ap, struct task_struct *);
79261+ cred = __task_cred(task);
79262+ pcred = __task_cred(task->real_parent);
79263+ ulong1 = va_arg(ap, unsigned long);
79264+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
79265+ break;
79266+ case GR_CRASH2:
79267+ task = va_arg(ap, struct task_struct *);
79268+ cred = __task_cred(task);
79269+ pcred = __task_cred(task->real_parent);
79270+ ulong1 = va_arg(ap, unsigned long);
79271+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
79272+ break;
79273+ case GR_RWXMAP:
79274+ file = va_arg(ap, struct file *);
79275+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
79276+ break;
79277+ case GR_RWXMAPVMA:
79278+ vma = va_arg(ap, struct vm_area_struct *);
79279+ if (vma->vm_file)
79280+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
79281+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
79282+ str1 = "<stack>";
79283+ else if (vma->vm_start <= current->mm->brk &&
79284+ vma->vm_end >= current->mm->start_brk)
79285+ str1 = "<heap>";
79286+ else
79287+ str1 = "<anonymous mapping>";
79288+ gr_log_middle_varargs(audit, msg, str1);
79289+ break;
79290+ case GR_PSACCT:
79291+ {
79292+ unsigned int wday, cday;
79293+ __u8 whr, chr;
79294+ __u8 wmin, cmin;
79295+ __u8 wsec, csec;
79296+ char cur_tty[64] = { 0 };
79297+ char parent_tty[64] = { 0 };
79298+
79299+ task = va_arg(ap, struct task_struct *);
79300+ wday = va_arg(ap, unsigned int);
79301+ cday = va_arg(ap, unsigned int);
79302+ whr = va_arg(ap, int);
79303+ chr = va_arg(ap, int);
79304+ wmin = va_arg(ap, int);
79305+ cmin = va_arg(ap, int);
79306+ wsec = va_arg(ap, int);
79307+ csec = va_arg(ap, int);
79308+ ulong1 = va_arg(ap, unsigned long);
79309+ cred = __task_cred(task);
79310+ pcred = __task_cred(task->real_parent);
79311+
79312+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79313+ }
79314+ break;
79315+ default:
79316+ gr_log_middle(audit, msg, ap);
79317+ }
79318+ va_end(ap);
79319+ // these don't need DEFAULTSECARGS printed on the end
79320+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
79321+ gr_log_end(audit, 0);
79322+ else
79323+ gr_log_end(audit, 1);
79324+ END_LOCKS(audit);
79325+}
79326diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
79327new file mode 100644
79328index 0000000..0e39d8c
79329--- /dev/null
79330+++ b/grsecurity/grsec_mem.c
79331@@ -0,0 +1,48 @@
79332+#include <linux/kernel.h>
79333+#include <linux/sched.h>
79334+#include <linux/mm.h>
79335+#include <linux/mman.h>
79336+#include <linux/module.h>
79337+#include <linux/grinternal.h>
79338+
79339+void gr_handle_msr_write(void)
79340+{
79341+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
79342+ return;
79343+}
79344+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
79345+
79346+void
79347+gr_handle_ioperm(void)
79348+{
79349+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
79350+ return;
79351+}
79352+
79353+void
79354+gr_handle_iopl(void)
79355+{
79356+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
79357+ return;
79358+}
79359+
79360+void
79361+gr_handle_mem_readwrite(u64 from, u64 to)
79362+{
79363+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
79364+ return;
79365+}
79366+
79367+void
79368+gr_handle_vm86(void)
79369+{
79370+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
79371+ return;
79372+}
79373+
79374+void
79375+gr_log_badprocpid(const char *entry)
79376+{
79377+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
79378+ return;
79379+}
79380diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
79381new file mode 100644
79382index 0000000..6f9eb73
79383--- /dev/null
79384+++ b/grsecurity/grsec_mount.c
79385@@ -0,0 +1,65 @@
79386+#include <linux/kernel.h>
79387+#include <linux/sched.h>
79388+#include <linux/mount.h>
79389+#include <linux/major.h>
79390+#include <linux/grsecurity.h>
79391+#include <linux/grinternal.h>
79392+
79393+void
79394+gr_log_remount(const char *devname, const int retval)
79395+{
79396+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79397+ if (grsec_enable_mount && (retval >= 0))
79398+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
79399+#endif
79400+ return;
79401+}
79402+
79403+void
79404+gr_log_unmount(const char *devname, const int retval)
79405+{
79406+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79407+ if (grsec_enable_mount && (retval >= 0))
79408+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
79409+#endif
79410+ return;
79411+}
79412+
79413+void
79414+gr_log_mount(const char *from, struct path *to, const int retval)
79415+{
79416+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79417+ if (grsec_enable_mount && (retval >= 0))
79418+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
79419+#endif
79420+ return;
79421+}
79422+
79423+int
79424+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
79425+{
79426+#ifdef CONFIG_GRKERNSEC_ROFS
79427+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
79428+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
79429+ return -EPERM;
79430+ } else
79431+ return 0;
79432+#endif
79433+ return 0;
79434+}
79435+
79436+int
79437+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
79438+{
79439+#ifdef CONFIG_GRKERNSEC_ROFS
79440+ struct inode *inode = dentry->d_inode;
79441+
79442+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
79443+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
79444+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
79445+ return -EPERM;
79446+ } else
79447+ return 0;
79448+#endif
79449+ return 0;
79450+}
79451diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
79452new file mode 100644
79453index 0000000..6ee9d50
79454--- /dev/null
79455+++ b/grsecurity/grsec_pax.c
79456@@ -0,0 +1,45 @@
79457+#include <linux/kernel.h>
79458+#include <linux/sched.h>
79459+#include <linux/mm.h>
79460+#include <linux/file.h>
79461+#include <linux/grinternal.h>
79462+#include <linux/grsecurity.h>
79463+
79464+void
79465+gr_log_textrel(struct vm_area_struct * vma)
79466+{
79467+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79468+ if (grsec_enable_log_rwxmaps)
79469+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
79470+#endif
79471+ return;
79472+}
79473+
79474+void gr_log_ptgnustack(struct file *file)
79475+{
79476+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79477+ if (grsec_enable_log_rwxmaps)
79478+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
79479+#endif
79480+ return;
79481+}
79482+
79483+void
79484+gr_log_rwxmmap(struct file *file)
79485+{
79486+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79487+ if (grsec_enable_log_rwxmaps)
79488+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
79489+#endif
79490+ return;
79491+}
79492+
79493+void
79494+gr_log_rwxmprotect(struct vm_area_struct *vma)
79495+{
79496+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79497+ if (grsec_enable_log_rwxmaps)
79498+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
79499+#endif
79500+ return;
79501+}
79502diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
79503new file mode 100644
79504index 0000000..2005a3a
79505--- /dev/null
79506+++ b/grsecurity/grsec_proc.c
79507@@ -0,0 +1,20 @@
79508+#include <linux/kernel.h>
79509+#include <linux/sched.h>
79510+#include <linux/grsecurity.h>
79511+#include <linux/grinternal.h>
79512+
79513+int gr_proc_is_restricted(void)
79514+{
79515+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79516+ const struct cred *cred = current_cred();
79517+#endif
79518+
79519+#ifdef CONFIG_GRKERNSEC_PROC_USER
79520+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
79521+ return -EACCES;
79522+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79523+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
79524+ return -EACCES;
79525+#endif
79526+ return 0;
79527+}
79528diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
79529new file mode 100644
79530index 0000000..f7f29aa
79531--- /dev/null
79532+++ b/grsecurity/grsec_ptrace.c
79533@@ -0,0 +1,30 @@
79534+#include <linux/kernel.h>
79535+#include <linux/sched.h>
79536+#include <linux/grinternal.h>
79537+#include <linux/security.h>
79538+
79539+void
79540+gr_audit_ptrace(struct task_struct *task)
79541+{
79542+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79543+ if (grsec_enable_audit_ptrace)
79544+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
79545+#endif
79546+ return;
79547+}
79548+
79549+int
79550+gr_ptrace_readexec(struct file *file, int unsafe_flags)
79551+{
79552+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79553+ const struct dentry *dentry = file->f_path.dentry;
79554+ const struct vfsmount *mnt = file->f_path.mnt;
79555+
79556+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
79557+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
79558+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
79559+ return -EACCES;
79560+ }
79561+#endif
79562+ return 0;
79563+}
79564diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
79565new file mode 100644
79566index 0000000..3860c7e
79567--- /dev/null
79568+++ b/grsecurity/grsec_sig.c
79569@@ -0,0 +1,236 @@
79570+#include <linux/kernel.h>
79571+#include <linux/sched.h>
79572+#include <linux/fs.h>
79573+#include <linux/delay.h>
79574+#include <linux/grsecurity.h>
79575+#include <linux/grinternal.h>
79576+#include <linux/hardirq.h>
79577+
79578+char *signames[] = {
79579+ [SIGSEGV] = "Segmentation fault",
79580+ [SIGILL] = "Illegal instruction",
79581+ [SIGABRT] = "Abort",
79582+ [SIGBUS] = "Invalid alignment/Bus error"
79583+};
79584+
79585+void
79586+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
79587+{
79588+#ifdef CONFIG_GRKERNSEC_SIGNAL
79589+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
79590+ (sig == SIGABRT) || (sig == SIGBUS))) {
79591+ if (task_pid_nr(t) == task_pid_nr(current)) {
79592+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
79593+ } else {
79594+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
79595+ }
79596+ }
79597+#endif
79598+ return;
79599+}
79600+
79601+int
79602+gr_handle_signal(const struct task_struct *p, const int sig)
79603+{
79604+#ifdef CONFIG_GRKERNSEC
79605+ /* ignore the 0 signal for protected task checks */
79606+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
79607+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
79608+ return -EPERM;
79609+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
79610+ return -EPERM;
79611+ }
79612+#endif
79613+ return 0;
79614+}
79615+
79616+#ifdef CONFIG_GRKERNSEC
79617+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
79618+
79619+int gr_fake_force_sig(int sig, struct task_struct *t)
79620+{
79621+ unsigned long int flags;
79622+ int ret, blocked, ignored;
79623+ struct k_sigaction *action;
79624+
79625+ spin_lock_irqsave(&t->sighand->siglock, flags);
79626+ action = &t->sighand->action[sig-1];
79627+ ignored = action->sa.sa_handler == SIG_IGN;
79628+ blocked = sigismember(&t->blocked, sig);
79629+ if (blocked || ignored) {
79630+ action->sa.sa_handler = SIG_DFL;
79631+ if (blocked) {
79632+ sigdelset(&t->blocked, sig);
79633+ recalc_sigpending_and_wake(t);
79634+ }
79635+ }
79636+ if (action->sa.sa_handler == SIG_DFL)
79637+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
79638+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
79639+
79640+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
79641+
79642+ return ret;
79643+}
79644+#endif
79645+
79646+#define GR_USER_BAN_TIME (15 * 60)
79647+#define GR_DAEMON_BRUTE_TIME (30 * 60)
79648+
79649+void gr_handle_brute_attach(int dumpable)
79650+{
79651+#ifdef CONFIG_GRKERNSEC_BRUTE
79652+ struct task_struct *p = current;
79653+ kuid_t uid = GLOBAL_ROOT_UID;
79654+ int daemon = 0;
79655+
79656+ if (!grsec_enable_brute)
79657+ return;
79658+
79659+ rcu_read_lock();
79660+ read_lock(&tasklist_lock);
79661+ read_lock(&grsec_exec_file_lock);
79662+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
79663+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
79664+ p->real_parent->brute = 1;
79665+ daemon = 1;
79666+ } else {
79667+ const struct cred *cred = __task_cred(p), *cred2;
79668+ struct task_struct *tsk, *tsk2;
79669+
79670+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
79671+ struct user_struct *user;
79672+
79673+ uid = cred->uid;
79674+
79675+ /* find_user() takes a reference; it is put on the next exec once the ban has expired (see suid_ban_expired()) */
79676+ user = find_user(uid);
79677+ if (user == NULL)
79678+ goto unlock;
79679+ user->suid_banned = 1;
79680+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
79681+ if (user->suid_ban_expires == ~0UL)
79682+ user->suid_ban_expires--;
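+ /* ~0UL is reserved as a "never expires" marker, which
+ suid_ban_expired() skips, so an expiry landing exactly
+ on ~0UL is nudged down one second */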
79683+
79684+ /* only kill other threads of the same binary, from the same user */
79685+ do_each_thread(tsk2, tsk) {
79686+ cred2 = __task_cred(tsk);
79687+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
79688+ gr_fake_force_sig(SIGKILL, tsk);
79689+ } while_each_thread(tsk2, tsk);
79690+ }
79691+ }
79692+unlock:
79693+ read_unlock(&grsec_exec_file_lock);
79694+ read_unlock(&tasklist_lock);
79695+ rcu_read_unlock();
79696+
79697+ if (gr_is_global_nonroot(uid))
79698+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
79699+ else if (daemon)
79700+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
79701+
79702+#endif
79703+ return;
79704+}
79705+
79706+void gr_handle_brute_check(void)
79707+{
79708+#ifdef CONFIG_GRKERNSEC_BRUTE
79709+ struct task_struct *p = current;
79710+
79711+ if (unlikely(p->brute)) {
79712+ if (!grsec_enable_brute)
79713+ p->brute = 0;
79714+ else if (time_before(get_seconds(), p->brute_expires))
79715+ msleep(30 * 1000);
79716+ }
79717+#endif
79718+ return;
79719+}
79720+
79721+void gr_handle_kernel_exploit(void)
79722+{
79723+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79724+ const struct cred *cred;
79725+ struct task_struct *tsk, *tsk2;
79726+ struct user_struct *user;
79727+ kuid_t uid;
79728+
79729+ if (in_irq() || in_serving_softirq() || in_nmi())
79730+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79731+
79732+ uid = current_uid();
79733+
79734+ if (gr_is_global_root(uid))
79735+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79736+ else {
79737+ /* kill all the processes of this user, hold a reference
79738+ to their creds struct, and prevent them from creating
79739+ another process until system reset
79740+ */
79741+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79742+ GR_GLOBAL_UID(uid));
79743+ /* we intentionally leak this ref */
79744+ user = get_uid(current->cred->user);
79745+ if (user)
79746+ user->kernel_banned = 1;
79747+
79748+ /* kill all processes of this user */
79749+ read_lock(&tasklist_lock);
79750+ do_each_thread(tsk2, tsk) {
79751+ cred = __task_cred(tsk);
79752+ if (uid_eq(cred->uid, uid))
79753+ gr_fake_force_sig(SIGKILL, tsk);
79754+ } while_each_thread(tsk2, tsk);
79755+ read_unlock(&tasklist_lock);
79756+ }
79757+#endif
79758+}
79759+
79760+#ifdef CONFIG_GRKERNSEC_BRUTE
79761+static bool suid_ban_expired(struct user_struct *user)
79762+{
79763+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79764+ user->suid_banned = 0;
79765+ user->suid_ban_expires = 0;
79766+ free_uid(user);
79767+ return true;
79768+ }
79769+
79770+ return false;
79771+}
79772+#endif
79773+
79774+int gr_process_kernel_exec_ban(void)
79775+{
79776+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79777+ if (unlikely(current->cred->user->kernel_banned))
79778+ return -EPERM;
79779+#endif
79780+ return 0;
79781+}
79782+
79783+int gr_process_kernel_setuid_ban(struct user_struct *user)
79784+{
79785+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79786+ if (unlikely(user->kernel_banned))
79787+ gr_fake_force_sig(SIGKILL, current);
79788+#endif
79789+ return 0;
79790+}
79791+
79792+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79793+{
79794+#ifdef CONFIG_GRKERNSEC_BRUTE
79795+ struct user_struct *user = current->cred->user;
79796+ if (unlikely(user->suid_banned)) {
79797+ if (suid_ban_expired(user))
79798+ return 0;
79799+ /* disallow execution of suid binaries only */
79800+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79801+ return -EPERM;
79802+ }
79803+#endif
79804+ return 0;
79805+}
79806diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79807new file mode 100644
79808index 0000000..e3650b6
79809--- /dev/null
79810+++ b/grsecurity/grsec_sock.c
79811@@ -0,0 +1,244 @@
79812+#include <linux/kernel.h>
79813+#include <linux/module.h>
79814+#include <linux/sched.h>
79815+#include <linux/file.h>
79816+#include <linux/net.h>
79817+#include <linux/in.h>
79818+#include <linux/ip.h>
79819+#include <net/sock.h>
79820+#include <net/inet_sock.h>
79821+#include <linux/grsecurity.h>
79822+#include <linux/grinternal.h>
79823+#include <linux/gracl.h>
79824+
79825+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79826+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79827+
79828+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79829+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79830+
79831+#ifdef CONFIG_UNIX_MODULE
79832+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79833+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79834+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79835+EXPORT_SYMBOL_GPL(gr_handle_create);
79836+#endif
79837+
79838+#ifdef CONFIG_GRKERNSEC
79839+#define gr_conn_table_size 32749
79840+struct conn_table_entry {
79841+ struct conn_table_entry *next;
79842+ struct signal_struct *sig;
79843+};
79844+
79845+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79846+DEFINE_SPINLOCK(gr_conn_table_lock);
79847+
79848+extern const char * gr_socktype_to_name(unsigned char type);
79849+extern const char * gr_proto_to_name(unsigned char proto);
79850+extern const char * gr_sockfamily_to_name(unsigned char family);
79851+
79852+static __inline__ int
79853+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79854+{
79855+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79856+}
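+
+/* illustration (addresses and ports are made up): a connection
+ 10.0.0.1:1025 -> 10.0.0.2:80 lands in bucket
+ (daddr + saddr + (1025 << 8) + (80 << 16)) % 32749; the prime
+ table size keeps the modulo reasonably well distributed
+ */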
79857+
79858+static __inline__ int
79859+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
79860+ __u16 sport, __u16 dport)
79861+{
79862+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
79863+ sig->gr_sport == sport && sig->gr_dport == dport))
79864+ return 1;
79865+ else
79866+ return 0;
79867+}
79868+
79869+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
79870+{
79871+ struct conn_table_entry **match;
79872+ unsigned int index;
79873+
79874+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79875+ sig->gr_sport, sig->gr_dport,
79876+ gr_conn_table_size);
79877+
79878+ newent->sig = sig;
79879+
79880+ match = &gr_conn_table[index];
79881+ newent->next = *match;
79882+ *match = newent;
79883+
79884+ return;
79885+}
79886+
79887+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
79888+{
79889+ struct conn_table_entry *match, *last = NULL;
79890+ unsigned int index;
79891+
79892+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79893+ sig->gr_sport, sig->gr_dport,
79894+ gr_conn_table_size);
79895+
79896+ match = gr_conn_table[index];
79897+ while (match && !conn_match(match->sig,
79898+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
79899+ sig->gr_dport)) {
79900+ last = match;
79901+ match = match->next;
79902+ }
79903+
79904+ if (match) {
79905+ if (last)
79906+ last->next = match->next;
79907+ else
79908+ gr_conn_table[index] = NULL;
79909+ kfree(match);
79910+ }
79911+
79912+ return;
79913+}
79914+
79915+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
79916+ __u16 sport, __u16 dport)
79917+{
79918+ struct conn_table_entry *match;
79919+ unsigned int index;
79920+
79921+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
79922+
79923+ match = gr_conn_table[index];
79924+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
79925+ match = match->next;
79926+
79927+ if (match)
79928+ return match->sig;
79929+ else
79930+ return NULL;
79931+}
79932+
79933+#endif
79934+
79935+void gr_update_task_in_ip_table(const struct inet_sock *inet)
79936+{
79937+#ifdef CONFIG_GRKERNSEC
79938+ struct signal_struct *sig = current->signal;
79939+ struct conn_table_entry *newent;
79940+
79941+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
79942+ if (newent == NULL)
79943+ return;
79944+ /* no bh lock needed since we are called with bh disabled */
79945+ spin_lock(&gr_conn_table_lock);
79946+ gr_del_task_from_ip_table_nolock(sig);
79947+ sig->gr_saddr = inet->inet_rcv_saddr;
79948+ sig->gr_daddr = inet->inet_daddr;
79949+ sig->gr_sport = inet->inet_sport;
79950+ sig->gr_dport = inet->inet_dport;
79951+ gr_add_to_task_ip_table_nolock(sig, newent);
79952+ spin_unlock(&gr_conn_table_lock);
79953+#endif
79954+ return;
79955+}
79956+
79957+void gr_del_task_from_ip_table(struct task_struct *task)
79958+{
79959+#ifdef CONFIG_GRKERNSEC
79960+ spin_lock_bh(&gr_conn_table_lock);
79961+ gr_del_task_from_ip_table_nolock(task->signal);
79962+ spin_unlock_bh(&gr_conn_table_lock);
79963+#endif
79964+ return;
79965+}
79966+
79967+void
79968+gr_attach_curr_ip(const struct sock *sk)
79969+{
79970+#ifdef CONFIG_GRKERNSEC
79971+ struct signal_struct *p, *set;
79972+ const struct inet_sock *inet = inet_sk(sk);
79973+
79974+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
79975+ return;
79976+
79977+ set = current->signal;
79978+
79979+ spin_lock_bh(&gr_conn_table_lock);
79980+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
79981+ inet->inet_dport, inet->inet_sport);
79982+ if (unlikely(p != NULL)) {
79983+ set->curr_ip = p->curr_ip;
79984+ set->used_accept = 1;
79985+ gr_del_task_from_ip_table_nolock(p);
79986+ spin_unlock_bh(&gr_conn_table_lock);
79987+ return;
79988+ }
79989+ spin_unlock_bh(&gr_conn_table_lock);
79990+
79991+ set->curr_ip = inet->inet_daddr;
79992+ set->used_accept = 1;
79993+#endif
79994+ return;
79995+}
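+
+/* note on the lookup above: the accepting task passes its 4-tuple with
+ the endpoints swapped (daddr, rcv_saddr, dport, sport), which is the
+ (saddr, daddr, sport, dport) tuple the peer recorded from its own
+ side in gr_update_task_in_ip_table(), so on a match the accepting
+ process inherits the recorded task's curr_ip
+ */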
79996+
79997+int
79998+gr_handle_sock_all(const int family, const int type, const int protocol)
79999+{
80000+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80001+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
80002+ (family != AF_UNIX)) {
80003+ if (family == AF_INET)
80004+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
80005+ else
80006+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
80007+ return -EACCES;
80008+ }
80009+#endif
80010+ return 0;
80011+}
80012+
80013+int
80014+gr_handle_sock_server(const struct sockaddr *sck)
80015+{
80016+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80017+ if (grsec_enable_socket_server &&
80018+ in_group_p(grsec_socket_server_gid) &&
80019+ sck && (sck->sa_family != AF_UNIX) &&
80020+ (sck->sa_family != AF_LOCAL)) {
80021+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
80022+ return -EACCES;
80023+ }
80024+#endif
80025+ return 0;
80026+}
80027+
80028+int
80029+gr_handle_sock_server_other(const struct sock *sck)
80030+{
80031+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80032+ if (grsec_enable_socket_server &&
80033+ in_group_p(grsec_socket_server_gid) &&
80034+ sck && (sck->sk_family != AF_UNIX) &&
80035+ (sck->sk_family != AF_LOCAL)) {
80036+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
80037+ return -EACCES;
80038+ }
80039+#endif
80040+ return 0;
80041+}
80042+
80043+int
80044+gr_handle_sock_client(const struct sockaddr *sck)
80045+{
80046+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80047+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
80048+ sck && (sck->sa_family != AF_UNIX) &&
80049+ (sck->sa_family != AF_LOCAL)) {
80050+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
80051+ return -EACCES;
80052+ }
80053+#endif
80054+ return 0;
80055+}
80056diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
80057new file mode 100644
80058index 0000000..cce889e
80059--- /dev/null
80060+++ b/grsecurity/grsec_sysctl.c
80061@@ -0,0 +1,488 @@
80062+#include <linux/kernel.h>
80063+#include <linux/sched.h>
80064+#include <linux/sysctl.h>
80065+#include <linux/grsecurity.h>
80066+#include <linux/grinternal.h>
80067+
80068+int
80069+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
80070+{
80071+#ifdef CONFIG_GRKERNSEC_SYSCTL
80072+ if (dirname == NULL || name == NULL)
80073+ return 0;
80074+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
80075+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
80076+ return -EACCES;
80077+ }
80078+#endif
80079+ return 0;
80080+}
80081+
80082+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
80083+static int __maybe_unused __read_only one = 1;
80084+#endif
80085+
80086+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
80087+ defined(CONFIG_GRKERNSEC_DENYUSB)
80088+struct ctl_table grsecurity_table[] = {
80089+#ifdef CONFIG_GRKERNSEC_SYSCTL
80090+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
80091+#ifdef CONFIG_GRKERNSEC_IO
80092+ {
80093+ .procname = "disable_priv_io",
80094+ .data = &grsec_disable_privio,
80095+ .maxlen = sizeof(int),
80096+ .mode = 0600,
80097+ .proc_handler = &proc_dointvec,
80098+ },
80099+#endif
80100+#endif
80101+#ifdef CONFIG_GRKERNSEC_LINK
80102+ {
80103+ .procname = "linking_restrictions",
80104+ .data = &grsec_enable_link,
80105+ .maxlen = sizeof(int),
80106+ .mode = 0600,
80107+ .proc_handler = &proc_dointvec,
80108+ },
80109+#endif
80110+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
80111+ {
80112+ .procname = "enforce_symlinksifowner",
80113+ .data = &grsec_enable_symlinkown,
80114+ .maxlen = sizeof(int),
80115+ .mode = 0600,
80116+ .proc_handler = &proc_dointvec,
80117+ },
80118+ {
80119+ .procname = "symlinkown_gid",
80120+ .data = &grsec_symlinkown_gid,
80121+ .maxlen = sizeof(int),
80122+ .mode = 0600,
80123+ .proc_handler = &proc_dointvec,
80124+ },
80125+#endif
80126+#ifdef CONFIG_GRKERNSEC_BRUTE
80127+ {
80128+ .procname = "deter_bruteforce",
80129+ .data = &grsec_enable_brute,
80130+ .maxlen = sizeof(int),
80131+ .mode = 0600,
80132+ .proc_handler = &proc_dointvec,
80133+ },
80134+#endif
80135+#ifdef CONFIG_GRKERNSEC_FIFO
80136+ {
80137+ .procname = "fifo_restrictions",
80138+ .data = &grsec_enable_fifo,
80139+ .maxlen = sizeof(int),
80140+ .mode = 0600,
80141+ .proc_handler = &proc_dointvec,
80142+ },
80143+#endif
80144+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
80145+ {
80146+ .procname = "ptrace_readexec",
80147+ .data = &grsec_enable_ptrace_readexec,
80148+ .maxlen = sizeof(int),
80149+ .mode = 0600,
80150+ .proc_handler = &proc_dointvec,
80151+ },
80152+#endif
80153+#ifdef CONFIG_GRKERNSEC_SETXID
80154+ {
80155+ .procname = "consistent_setxid",
80156+ .data = &grsec_enable_setxid,
80157+ .maxlen = sizeof(int),
80158+ .mode = 0600,
80159+ .proc_handler = &proc_dointvec,
80160+ },
80161+#endif
80162+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80163+ {
80164+ .procname = "ip_blackhole",
80165+ .data = &grsec_enable_blackhole,
80166+ .maxlen = sizeof(int),
80167+ .mode = 0600,
80168+ .proc_handler = &proc_dointvec,
80169+ },
80170+ {
80171+ .procname = "lastack_retries",
80172+ .data = &grsec_lastack_retries,
80173+ .maxlen = sizeof(int),
80174+ .mode = 0600,
80175+ .proc_handler = &proc_dointvec,
80176+ },
80177+#endif
80178+#ifdef CONFIG_GRKERNSEC_EXECLOG
80179+ {
80180+ .procname = "exec_logging",
80181+ .data = &grsec_enable_execlog,
80182+ .maxlen = sizeof(int),
80183+ .mode = 0600,
80184+ .proc_handler = &proc_dointvec,
80185+ },
80186+#endif
80187+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
80188+ {
80189+ .procname = "rwxmap_logging",
80190+ .data = &grsec_enable_log_rwxmaps,
80191+ .maxlen = sizeof(int),
80192+ .mode = 0600,
80193+ .proc_handler = &proc_dointvec,
80194+ },
80195+#endif
80196+#ifdef CONFIG_GRKERNSEC_SIGNAL
80197+ {
80198+ .procname = "signal_logging",
80199+ .data = &grsec_enable_signal,
80200+ .maxlen = sizeof(int),
80201+ .mode = 0600,
80202+ .proc_handler = &proc_dointvec,
80203+ },
80204+#endif
80205+#ifdef CONFIG_GRKERNSEC_FORKFAIL
80206+ {
80207+ .procname = "forkfail_logging",
80208+ .data = &grsec_enable_forkfail,
80209+ .maxlen = sizeof(int),
80210+ .mode = 0600,
80211+ .proc_handler = &proc_dointvec,
80212+ },
80213+#endif
80214+#ifdef CONFIG_GRKERNSEC_TIME
80215+ {
80216+ .procname = "timechange_logging",
80217+ .data = &grsec_enable_time,
80218+ .maxlen = sizeof(int),
80219+ .mode = 0600,
80220+ .proc_handler = &proc_dointvec,
80221+ },
80222+#endif
80223+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80224+ {
80225+ .procname = "chroot_deny_shmat",
80226+ .data = &grsec_enable_chroot_shmat,
80227+ .maxlen = sizeof(int),
80228+ .mode = 0600,
80229+ .proc_handler = &proc_dointvec,
80230+ },
80231+#endif
80232+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80233+ {
80234+ .procname = "chroot_deny_unix",
80235+ .data = &grsec_enable_chroot_unix,
80236+ .maxlen = sizeof(int),
80237+ .mode = 0600,
80238+ .proc_handler = &proc_dointvec,
80239+ },
80240+#endif
80241+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
80242+ {
80243+ .procname = "chroot_deny_mount",
80244+ .data = &grsec_enable_chroot_mount,
80245+ .maxlen = sizeof(int),
80246+ .mode = 0600,
80247+ .proc_handler = &proc_dointvec,
80248+ },
80249+#endif
80250+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80251+ {
80252+ .procname = "chroot_deny_fchdir",
80253+ .data = &grsec_enable_chroot_fchdir,
80254+ .maxlen = sizeof(int),
80255+ .mode = 0600,
80256+ .proc_handler = &proc_dointvec,
80257+ },
80258+#endif
80259+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
80260+ {
80261+ .procname = "chroot_deny_chroot",
80262+ .data = &grsec_enable_chroot_double,
80263+ .maxlen = sizeof(int),
80264+ .mode = 0600,
80265+ .proc_handler = &proc_dointvec,
80266+ },
80267+#endif
80268+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
80269+ {
80270+ .procname = "chroot_deny_pivot",
80271+ .data = &grsec_enable_chroot_pivot,
80272+ .maxlen = sizeof(int),
80273+ .mode = 0600,
80274+ .proc_handler = &proc_dointvec,
80275+ },
80276+#endif
80277+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
80278+ {
80279+ .procname = "chroot_enforce_chdir",
80280+ .data = &grsec_enable_chroot_chdir,
80281+ .maxlen = sizeof(int),
80282+ .mode = 0600,
80283+ .proc_handler = &proc_dointvec,
80284+ },
80285+#endif
80286+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
80287+ {
80288+ .procname = "chroot_deny_chmod",
80289+ .data = &grsec_enable_chroot_chmod,
80290+ .maxlen = sizeof(int),
80291+ .mode = 0600,
80292+ .proc_handler = &proc_dointvec,
80293+ },
80294+#endif
80295+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
80296+ {
80297+ .procname = "chroot_deny_mknod",
80298+ .data = &grsec_enable_chroot_mknod,
80299+ .maxlen = sizeof(int),
80300+ .mode = 0600,
80301+ .proc_handler = &proc_dointvec,
80302+ },
80303+#endif
80304+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80305+ {
80306+ .procname = "chroot_restrict_nice",
80307+ .data = &grsec_enable_chroot_nice,
80308+ .maxlen = sizeof(int),
80309+ .mode = 0600,
80310+ .proc_handler = &proc_dointvec,
80311+ },
80312+#endif
80313+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
80314+ {
80315+ .procname = "chroot_execlog",
80316+ .data = &grsec_enable_chroot_execlog,
80317+ .maxlen = sizeof(int),
80318+ .mode = 0600,
80319+ .proc_handler = &proc_dointvec,
80320+ },
80321+#endif
80322+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80323+ {
80324+ .procname = "chroot_caps",
80325+ .data = &grsec_enable_chroot_caps,
80326+ .maxlen = sizeof(int),
80327+ .mode = 0600,
80328+ .proc_handler = &proc_dointvec,
80329+ },
80330+#endif
80331+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80332+ {
80333+ .procname = "chroot_deny_bad_rename",
80334+ .data = &grsec_enable_chroot_rename,
80335+ .maxlen = sizeof(int),
80336+ .mode = 0600,
80337+ .proc_handler = &proc_dointvec,
80338+ },
80339+#endif
80340+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
80341+ {
80342+ .procname = "chroot_deny_sysctl",
80343+ .data = &grsec_enable_chroot_sysctl,
80344+ .maxlen = sizeof(int),
80345+ .mode = 0600,
80346+ .proc_handler = &proc_dointvec,
80347+ },
80348+#endif
80349+#ifdef CONFIG_GRKERNSEC_TPE
80350+ {
80351+ .procname = "tpe",
80352+ .data = &grsec_enable_tpe,
80353+ .maxlen = sizeof(int),
80354+ .mode = 0600,
80355+ .proc_handler = &proc_dointvec,
80356+ },
80357+ {
80358+ .procname = "tpe_gid",
80359+ .data = &grsec_tpe_gid,
80360+ .maxlen = sizeof(int),
80361+ .mode = 0600,
80362+ .proc_handler = &proc_dointvec,
80363+ },
80364+#endif
80365+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80366+ {
80367+ .procname = "tpe_invert",
80368+ .data = &grsec_enable_tpe_invert,
80369+ .maxlen = sizeof(int),
80370+ .mode = 0600,
80371+ .proc_handler = &proc_dointvec,
80372+ },
80373+#endif
80374+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80375+ {
80376+ .procname = "tpe_restrict_all",
80377+ .data = &grsec_enable_tpe_all,
80378+ .maxlen = sizeof(int),
80379+ .mode = 0600,
80380+ .proc_handler = &proc_dointvec,
80381+ },
80382+#endif
80383+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80384+ {
80385+ .procname = "socket_all",
80386+ .data = &grsec_enable_socket_all,
80387+ .maxlen = sizeof(int),
80388+ .mode = 0600,
80389+ .proc_handler = &proc_dointvec,
80390+ },
80391+ {
80392+ .procname = "socket_all_gid",
80393+ .data = &grsec_socket_all_gid,
80394+ .maxlen = sizeof(int),
80395+ .mode = 0600,
80396+ .proc_handler = &proc_dointvec,
80397+ },
80398+#endif
80399+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80400+ {
80401+ .procname = "socket_client",
80402+ .data = &grsec_enable_socket_client,
80403+ .maxlen = sizeof(int),
80404+ .mode = 0600,
80405+ .proc_handler = &proc_dointvec,
80406+ },
80407+ {
80408+ .procname = "socket_client_gid",
80409+ .data = &grsec_socket_client_gid,
80410+ .maxlen = sizeof(int),
80411+ .mode = 0600,
80412+ .proc_handler = &proc_dointvec,
80413+ },
80414+#endif
80415+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80416+ {
80417+ .procname = "socket_server",
80418+ .data = &grsec_enable_socket_server,
80419+ .maxlen = sizeof(int),
80420+ .mode = 0600,
80421+ .proc_handler = &proc_dointvec,
80422+ },
80423+ {
80424+ .procname = "socket_server_gid",
80425+ .data = &grsec_socket_server_gid,
80426+ .maxlen = sizeof(int),
80427+ .mode = 0600,
80428+ .proc_handler = &proc_dointvec,
80429+ },
80430+#endif
80431+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
80432+ {
80433+ .procname = "audit_group",
80434+ .data = &grsec_enable_group,
80435+ .maxlen = sizeof(int),
80436+ .mode = 0600,
80437+ .proc_handler = &proc_dointvec,
80438+ },
80439+ {
80440+ .procname = "audit_gid",
80441+ .data = &grsec_audit_gid,
80442+ .maxlen = sizeof(int),
80443+ .mode = 0600,
80444+ .proc_handler = &proc_dointvec,
80445+ },
80446+#endif
80447+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80448+ {
80449+ .procname = "audit_chdir",
80450+ .data = &grsec_enable_chdir,
80451+ .maxlen = sizeof(int),
80452+ .mode = 0600,
80453+ .proc_handler = &proc_dointvec,
80454+ },
80455+#endif
80456+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
80457+ {
80458+ .procname = "audit_mount",
80459+ .data = &grsec_enable_mount,
80460+ .maxlen = sizeof(int),
80461+ .mode = 0600,
80462+ .proc_handler = &proc_dointvec,
80463+ },
80464+#endif
80465+#ifdef CONFIG_GRKERNSEC_DMESG
80466+ {
80467+ .procname = "dmesg",
80468+ .data = &grsec_enable_dmesg,
80469+ .maxlen = sizeof(int),
80470+ .mode = 0600,
80471+ .proc_handler = &proc_dointvec,
80472+ },
80473+#endif
80474+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80475+ {
80476+ .procname = "chroot_findtask",
80477+ .data = &grsec_enable_chroot_findtask,
80478+ .maxlen = sizeof(int),
80479+ .mode = 0600,
80480+ .proc_handler = &proc_dointvec,
80481+ },
80482+#endif
80483+#ifdef CONFIG_GRKERNSEC_RESLOG
80484+ {
80485+ .procname = "resource_logging",
80486+ .data = &grsec_resource_logging,
80487+ .maxlen = sizeof(int),
80488+ .mode = 0600,
80489+ .proc_handler = &proc_dointvec,
80490+ },
80491+#endif
80492+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
80493+ {
80494+ .procname = "audit_ptrace",
80495+ .data = &grsec_enable_audit_ptrace,
80496+ .maxlen = sizeof(int),
80497+ .mode = 0600,
80498+ .proc_handler = &proc_dointvec,
80499+ },
80500+#endif
80501+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80502+ {
80503+ .procname = "harden_ptrace",
80504+ .data = &grsec_enable_harden_ptrace,
80505+ .maxlen = sizeof(int),
80506+ .mode = 0600,
80507+ .proc_handler = &proc_dointvec,
80508+ },
80509+#endif
80510+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
80511+ {
80512+ .procname = "harden_ipc",
80513+ .data = &grsec_enable_harden_ipc,
80514+ .maxlen = sizeof(int),
80515+ .mode = 0600,
80516+ .proc_handler = &proc_dointvec,
80517+ },
80518+#endif
80519+ {
80520+ .procname = "grsec_lock",
80521+ .data = &grsec_lock,
80522+ .maxlen = sizeof(int),
80523+ .mode = 0600,
80524+ .proc_handler = &proc_dointvec,
80525+ },
80526+#endif
80527+#ifdef CONFIG_GRKERNSEC_ROFS
80528+ {
80529+ .procname = "romount_protect",
80530+ .data = &grsec_enable_rofs,
80531+ .maxlen = sizeof(int),
80532+ .mode = 0600,
80533+ .proc_handler = &proc_dointvec_minmax,
80534+ .extra1 = &one,
80535+ .extra2 = &one,
80536+ },
80537+#endif
80538+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
80539+ {
80540+ .procname = "deny_new_usb",
80541+ .data = &grsec_deny_new_usb,
80542+ .maxlen = sizeof(int),
80543+ .mode = 0600,
80544+ .proc_handler = &proc_dointvec,
80545+ },
80546+#endif
80547+ { }
80548+};
80549+#endif
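+
+/* these entries surface as /proc/sys/kernel/grsecurity/<procname> (the
+ path is assumed from the "grsecurity" dirname check in
+ gr_handle_sysctl_mod() above); a typical hardening sequence sets the
+ desired knobs and then seals the table against further writes:
+ sysctl -w kernel.grsecurity.deter_bruteforce=1
+ sysctl -w kernel.grsecurity.grsec_lock=1
+ */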
80550diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
80551new file mode 100644
80552index 0000000..61b514e
80553--- /dev/null
80554+++ b/grsecurity/grsec_time.c
80555@@ -0,0 +1,16 @@
80556+#include <linux/kernel.h>
80557+#include <linux/sched.h>
80558+#include <linux/grinternal.h>
80559+#include <linux/module.h>
80560+
80561+void
80562+gr_log_timechange(void)
80563+{
80564+#ifdef CONFIG_GRKERNSEC_TIME
80565+ if (grsec_enable_time)
80566+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
80567+#endif
80568+ return;
80569+}
80570+
80571+EXPORT_SYMBOL_GPL(gr_log_timechange);
80572diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
80573new file mode 100644
80574index 0000000..d1953de
80575--- /dev/null
80576+++ b/grsecurity/grsec_tpe.c
80577@@ -0,0 +1,78 @@
80578+#include <linux/kernel.h>
80579+#include <linux/sched.h>
80580+#include <linux/file.h>
80581+#include <linux/fs.h>
80582+#include <linux/grinternal.h>
80583+
80584+extern int gr_acl_tpe_check(void);
80585+
80586+int
80587+gr_tpe_allow(const struct file *file)
80588+{
80589+#ifdef CONFIG_GRKERNSEC
80590+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
80591+ struct inode *file_inode = file->f_path.dentry->d_inode;
80592+ const struct cred *cred = current_cred();
80593+ char *msg = NULL;
80594+ char *msg2 = NULL;
80595+
80596+ // never restrict root
80597+ if (gr_is_global_root(cred->uid))
80598+ return 1;
80599+
80600+ if (grsec_enable_tpe) {
80601+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80602+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
80603+ msg = "not being in trusted group";
80604+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
80605+ msg = "being in untrusted group";
80606+#else
80607+ if (in_group_p(grsec_tpe_gid))
80608+ msg = "being in untrusted group";
80609+#endif
80610+ }
80611+ if (!msg && gr_acl_tpe_check())
80612+ msg = "being in untrusted role";
80613+
80614+ // not in any affected group/role
80615+ if (!msg)
80616+ goto next_check;
80617+
80618+ if (gr_is_global_nonroot(inode->i_uid))
80619+ msg2 = "file in non-root-owned directory";
80620+ else if (inode->i_mode & S_IWOTH)
80621+ msg2 = "file in world-writable directory";
80622+ else if (inode->i_mode & S_IWGRP)
80623+ msg2 = "file in group-writable directory";
80624+ else if (file_inode->i_mode & S_IWOTH)
80625+ msg2 = "file is world-writable";
80626+
80627+ if (msg && msg2) {
80628+ char fullmsg[70] = {0};
80629+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
80630+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
80631+ return 0;
80632+ }
80633+ msg = NULL;
80634+next_check:
80635+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80636+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
80637+ return 1;
80638+
80639+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
80640+ msg = "directory not owned by user";
80641+ else if (inode->i_mode & S_IWOTH)
80642+ msg = "file in world-writable directory";
80643+ else if (inode->i_mode & S_IWGRP)
80644+ msg = "file in group-writable directory";
80645+ else if (file_inode->i_mode & S_IWOTH)
80646+ msg = "file is world-writable";
80647+
80648+ if (msg) {
80649+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
80650+ return 0;
80651+ }
80652+#endif
80653+#endif
80654+ return 1;
80655+}
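+
+/* example (gid 1005 is illustrative): with grsec_enable_tpe set and
+ tpe_gid=1005, a gid-1005 user execing a binary from a root-owned
+ mode-1777 directory is denied and logged as "being in untrusted group
+ and file in world-writable directory"; root always passes, and a user
+ outside the group passes unless TPE_ALL is on and a next_check test
+ fires
+ */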
80656diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
80657new file mode 100644
80658index 0000000..ae02d8e
80659--- /dev/null
80660+++ b/grsecurity/grsec_usb.c
80661@@ -0,0 +1,15 @@
80662+#include <linux/kernel.h>
80663+#include <linux/grinternal.h>
80664+#include <linux/module.h>
80665+
80666+int gr_handle_new_usb(void)
80667+{
80668+#ifdef CONFIG_GRKERNSEC_DENYUSB
80669+ if (grsec_deny_new_usb) {
80670+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
80671+ return 1;
80672+ }
80673+#endif
80674+ return 0;
80675+}
80676+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
80677diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
80678new file mode 100644
80679index 0000000..158b330
80680--- /dev/null
80681+++ b/grsecurity/grsum.c
80682@@ -0,0 +1,64 @@
80683+#include <linux/err.h>
80684+#include <linux/kernel.h>
80685+#include <linux/sched.h>
80686+#include <linux/mm.h>
80687+#include <linux/scatterlist.h>
80688+#include <linux/crypto.h>
80689+#include <linux/gracl.h>
80690+
80691+
80692+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
80693+#error "crypto and sha256 must be built into the kernel"
80694+#endif
80695+
80696+int
80697+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
80698+{
80699+ struct crypto_hash *tfm;
80700+ struct hash_desc desc;
80701+ struct scatterlist sg[2];
80702+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
80703+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
80704+ unsigned long *sumptr = (unsigned long *)sum;
80705+ int cryptres;
80706+ int retval = 1;
80707+ volatile int mismatched = 0;
80708+ volatile int dummy = 0;
80709+ unsigned int i;
80710+
80711+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
80712+ if (IS_ERR(tfm)) {
80713+ /* should never happen, since sha256 should be built in */
80714+ memset(entry->pw, 0, GR_PW_LEN);
80715+ return 1;
80716+ }
80717+
80718+ sg_init_table(sg, 2);
80719+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
80720+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
80721+
80722+ desc.tfm = tfm;
80723+ desc.flags = 0;
80724+
80725+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
80726+ temp_sum);
80727+
80728+ memset(entry->pw, 0, GR_PW_LEN);
80729+
80730+ if (cryptres)
80731+ goto out;
80732+
80733+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80734+ if (sumptr[i] != tmpsumptr[i])
80735+ mismatched = 1;
80736+ else
80737+ dummy = 1; // waste a cycle so both branches cost the same
80738+
80739+ if (!mismatched)
80740+ retval = dummy - 1;
80741+
80742+out:
80743+ crypto_free_hash(tfm);
80744+
80745+ return retval;
80746+}
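+
+/* the compare loop above deliberately touches every word of both
+ digests instead of bailing at the first mismatch, and the dummy write
+ balances the two branches, so a wrong password takes roughly as long
+ to reject as a right one (a timing-attack hedge); on a full match
+ mismatched stays 0 and dummy is 1, so retval = dummy - 1 yields the
+ 0 success code
+ */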
80747diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80748index 5bdab6b..9ae82fe 100644
80749--- a/include/asm-generic/4level-fixup.h
80750+++ b/include/asm-generic/4level-fixup.h
80751@@ -14,8 +14,10 @@
80752 #define pmd_alloc(mm, pud, address) \
80753 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80754 NULL: pmd_offset(pud, address))
80755+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80756
80757 #define pud_alloc(mm, pgd, address) (pgd)
80758+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80759 #define pud_offset(pgd, start) (pgd)
80760 #define pud_none(pud) 0
80761 #define pud_bad(pud) 0
80762diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80763index b7babf0..1e4b4f1 100644
80764--- a/include/asm-generic/atomic-long.h
80765+++ b/include/asm-generic/atomic-long.h
80766@@ -22,6 +22,12 @@
80767
80768 typedef atomic64_t atomic_long_t;
80769
80770+#ifdef CONFIG_PAX_REFCOUNT
80771+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80772+#else
80773+typedef atomic64_t atomic_long_unchecked_t;
80774+#endif
80775+
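+/* under PaX REFCOUNT the checked atomic_long_t operations trap on
+ overflow; atomic_long_unchecked_t is the opt-out for counters that
+ may legitimately wrap (statistics, sequence cookies), and without
+ CONFIG_PAX_REFCOUNT it simply aliases atomic64_t
+ */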
80776 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80777
80778 static inline long atomic_long_read(atomic_long_t *l)
80779@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80780 return (long)atomic64_read(v);
80781 }
80782
80783+#ifdef CONFIG_PAX_REFCOUNT
80784+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80785+{
80786+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80787+
80788+ return (long)atomic64_read_unchecked(v);
80789+}
80790+#endif
80791+
80792 static inline void atomic_long_set(atomic_long_t *l, long i)
80793 {
80794 atomic64_t *v = (atomic64_t *)l;
80795@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80796 atomic64_set(v, i);
80797 }
80798
80799+#ifdef CONFIG_PAX_REFCOUNT
80800+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80801+{
80802+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80803+
80804+ atomic64_set_unchecked(v, i);
80805+}
80806+#endif
80807+
80808 static inline void atomic_long_inc(atomic_long_t *l)
80809 {
80810 atomic64_t *v = (atomic64_t *)l;
80811@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80812 atomic64_inc(v);
80813 }
80814
80815+#ifdef CONFIG_PAX_REFCOUNT
80816+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80817+{
80818+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80819+
80820+ atomic64_inc_unchecked(v);
80821+}
80822+#endif
80823+
80824 static inline void atomic_long_dec(atomic_long_t *l)
80825 {
80826 atomic64_t *v = (atomic64_t *)l;
80827@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80828 atomic64_dec(v);
80829 }
80830
80831+#ifdef CONFIG_PAX_REFCOUNT
80832+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80833+{
80834+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80835+
80836+ atomic64_dec_unchecked(v);
80837+}
80838+#endif
80839+
80840 static inline void atomic_long_add(long i, atomic_long_t *l)
80841 {
80842 atomic64_t *v = (atomic64_t *)l;
80843@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80844 atomic64_add(i, v);
80845 }
80846
80847+#ifdef CONFIG_PAX_REFCOUNT
80848+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80849+{
80850+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80851+
80852+ atomic64_add_unchecked(i, v);
80853+}
80854+#endif
80855+
80856 static inline void atomic_long_sub(long i, atomic_long_t *l)
80857 {
80858 atomic64_t *v = (atomic64_t *)l;
80859@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80860 atomic64_sub(i, v);
80861 }
80862
80863+#ifdef CONFIG_PAX_REFCOUNT
80864+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80865+{
80866+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80867+
80868+ atomic64_sub_unchecked(i, v);
80869+}
80870+#endif
80871+
80872 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80873 {
80874 atomic64_t *v = (atomic64_t *)l;
80875@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80876 return atomic64_add_negative(i, v);
80877 }
80878
80879-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80880+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80881 {
80882 atomic64_t *v = (atomic64_t *)l;
80883
80884 return (long)atomic64_add_return(i, v);
80885 }
80886
80887+#ifdef CONFIG_PAX_REFCOUNT
80888+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80889+{
80890+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80891+
80892+ return (long)atomic64_add_return_unchecked(i, v);
80893+}
80894+#endif
80895+
80896 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80897 {
80898 atomic64_t *v = (atomic64_t *)l;
80899@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80900 return (long)atomic64_inc_return(v);
80901 }
80902
80903+#ifdef CONFIG_PAX_REFCOUNT
80904+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80905+{
80906+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80907+
80908+ return (long)atomic64_inc_return_unchecked(v);
80909+}
80910+#endif
80911+
80912 static inline long atomic_long_dec_return(atomic_long_t *l)
80913 {
80914 atomic64_t *v = (atomic64_t *)l;
80915@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80916
80917 typedef atomic_t atomic_long_t;
80918
80919+#ifdef CONFIG_PAX_REFCOUNT
80920+typedef atomic_unchecked_t atomic_long_unchecked_t;
80921+#else
80922+typedef atomic_t atomic_long_unchecked_t;
80923+#endif
80924+
80925 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
80926 static inline long atomic_long_read(atomic_long_t *l)
80927 {
80928@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80929 return (long)atomic_read(v);
80930 }
80931
80932+#ifdef CONFIG_PAX_REFCOUNT
80933+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80934+{
80935+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80936+
80937+ return (long)atomic_read_unchecked(v);
80938+}
80939+#endif
80940+
80941 static inline void atomic_long_set(atomic_long_t *l, long i)
80942 {
80943 atomic_t *v = (atomic_t *)l;
80944@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80945 atomic_set(v, i);
80946 }
80947
80948+#ifdef CONFIG_PAX_REFCOUNT
80949+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80950+{
80951+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80952+
80953+ atomic_set_unchecked(v, i);
80954+}
80955+#endif
80956+
80957 static inline void atomic_long_inc(atomic_long_t *l)
80958 {
80959 atomic_t *v = (atomic_t *)l;
80960@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80961 atomic_inc(v);
80962 }
80963
80964+#ifdef CONFIG_PAX_REFCOUNT
80965+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80966+{
80967+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80968+
80969+ atomic_inc_unchecked(v);
80970+}
80971+#endif
80972+
80973 static inline void atomic_long_dec(atomic_long_t *l)
80974 {
80975 atomic_t *v = (atomic_t *)l;
80976@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80977 atomic_dec(v);
80978 }
80979
80980+#ifdef CONFIG_PAX_REFCOUNT
80981+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80982+{
80983+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80984+
80985+ atomic_dec_unchecked(v);
80986+}
80987+#endif
80988+
80989 static inline void atomic_long_add(long i, atomic_long_t *l)
80990 {
80991 atomic_t *v = (atomic_t *)l;
80992@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80993 atomic_add(i, v);
80994 }
80995
80996+#ifdef CONFIG_PAX_REFCOUNT
80997+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80998+{
80999+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81000+
81001+ atomic_add_unchecked(i, v);
81002+}
81003+#endif
81004+
81005 static inline void atomic_long_sub(long i, atomic_long_t *l)
81006 {
81007 atomic_t *v = (atomic_t *)l;
81008@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
81009 atomic_sub(i, v);
81010 }
81011
81012+#ifdef CONFIG_PAX_REFCOUNT
81013+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
81014+{
81015+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81016+
81017+ atomic_sub_unchecked(i, v);
81018+}
81019+#endif
81020+
81021 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
81022 {
81023 atomic_t *v = (atomic_t *)l;
81024@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
81025 return atomic_add_negative(i, v);
81026 }
81027
81028-static inline long atomic_long_add_return(long i, atomic_long_t *l)
81029+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
81030 {
81031 atomic_t *v = (atomic_t *)l;
81032
81033 return (long)atomic_add_return(i, v);
81034 }
81035
81036+#ifdef CONFIG_PAX_REFCOUNT
81037+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
81038+{
81039+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81040+
81041+ return (long)atomic_add_return_unchecked(i, v);
81042+}
81043+
81044+#endif
81045+
81046 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
81047 {
81048 atomic_t *v = (atomic_t *)l;
81049@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
81050 return (long)atomic_inc_return(v);
81051 }
81052
81053+#ifdef CONFIG_PAX_REFCOUNT
81054+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
81055+{
81056+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81057+
81058+ return (long)atomic_inc_return_unchecked(v);
81059+}
81060+#endif
81061+
81062 static inline long atomic_long_dec_return(atomic_long_t *l)
81063 {
81064 atomic_t *v = (atomic_t *)l;
81065@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
81066
81067 #endif /* BITS_PER_LONG == 64 */
81068
81069+#ifdef CONFIG_PAX_REFCOUNT
81070+static inline void pax_refcount_needs_these_functions(void)
81071+{
81072+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
81073+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
81074+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
81075+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
81076+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
81077+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
81078+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
81079+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
81080+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
81081+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
81082+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
81083+#ifdef CONFIG_X86
81084+ atomic_clear_mask_unchecked(0, NULL);
81085+ atomic_set_mask_unchecked(0, NULL);
81086+#endif
81087+
81088+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
81089+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
81090+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
81091+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
81092+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
81093+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
81094+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
81095+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
81096+}
81097+#else
81098+#define atomic_read_unchecked(v) atomic_read(v)
81099+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
81100+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
81101+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
81102+#define atomic_inc_unchecked(v) atomic_inc(v)
81103+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
81104+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
81105+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
81106+#define atomic_dec_unchecked(v) atomic_dec(v)
81107+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
81108+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
81109+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
81110+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
81111+
81112+#define atomic_long_read_unchecked(v) atomic_long_read(v)
81113+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
81114+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
81115+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
81116+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
81117+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
81118+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
81119+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
81120+#endif
81121+
81122 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
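
[Editor's note: under PAX_REFCOUNT the regular atomic_long_* operations are instrumented to trap on overflow; the *_unchecked variants added here let code opt out for counters whose wrap-around is harmless (statistics, sequence numbers) rather than a sign of a reference leak. A hypothetical user, with an invented counter name; note that without PAX_REFCOUNT the fallback macros at the end of the hunk map the _unchecked names straight back to the normal operations, so the same code compiles either way:

	/* Hypothetical statistics counter: wrap-around is benign, so the
	 * _unchecked type keeps PAX_REFCOUNT's overflow trap out of the way. */
	static atomic_long_unchecked_t example_events = ATOMIC_LONG_INIT(0);

	static void example_count_event(void)
	{
		atomic_long_inc_unchecked(&example_events);
	}

	static long example_events_seen(void)
	{
		return atomic_long_read_unchecked(&example_events);
	}
]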
81123diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
81124index 30ad9c8..c70c170 100644
81125--- a/include/asm-generic/atomic64.h
81126+++ b/include/asm-generic/atomic64.h
81127@@ -16,6 +16,8 @@ typedef struct {
81128 long long counter;
81129 } atomic64_t;
81130
81131+typedef atomic64_t atomic64_unchecked_t;
81132+
81133 #define ATOMIC64_INIT(i) { (i) }
81134
81135 extern long long atomic64_read(const atomic64_t *v);
81136@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
81137 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
81138 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
81139
81140+#define atomic64_read_unchecked(v) atomic64_read(v)
81141+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
81142+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
81143+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
81144+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
81145+#define atomic64_inc_unchecked(v) atomic64_inc(v)
81146+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
81147+#define atomic64_dec_unchecked(v) atomic64_dec(v)
81148+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
81149+
81150 #endif /* _ASM_GENERIC_ATOMIC64_H */
81151diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
81152index f5c40b0..e902f9d 100644
81153--- a/include/asm-generic/barrier.h
81154+++ b/include/asm-generic/barrier.h
81155@@ -82,7 +82,7 @@
81156 do { \
81157 compiletime_assert_atomic_type(*p); \
81158 smp_mb(); \
81159- ACCESS_ONCE(*p) = (v); \
81160+ ACCESS_ONCE_RW(*p) = (v); \
81161 } while (0)
81162
81163 #define smp_load_acquire(p) \
81164diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
81165index a60a7cc..0fe12f2 100644
81166--- a/include/asm-generic/bitops/__fls.h
81167+++ b/include/asm-generic/bitops/__fls.h
81168@@ -9,7 +9,7 @@
81169 *
81170 * Undefined if no set bit exists, so code should check against 0 first.
81171 */
81172-static __always_inline unsigned long __fls(unsigned long word)
81173+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
81174 {
81175 int num = BITS_PER_LONG - 1;
81176
81177diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
81178index 0576d1f..dad6c71 100644
81179--- a/include/asm-generic/bitops/fls.h
81180+++ b/include/asm-generic/bitops/fls.h
81181@@ -9,7 +9,7 @@
81182 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
81183 */
81184
81185-static __always_inline int fls(int x)
81186+static __always_inline int __intentional_overflow(-1) fls(int x)
81187 {
81188 int r = 32;
81189
81190diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
81191index b097cf8..3d40e14 100644
81192--- a/include/asm-generic/bitops/fls64.h
81193+++ b/include/asm-generic/bitops/fls64.h
81194@@ -15,7 +15,7 @@
81195 * at position 64.
81196 */
81197 #if BITS_PER_LONG == 32
81198-static __always_inline int fls64(__u64 x)
81199+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81200 {
81201 __u32 h = x >> 32;
81202 if (h)
81203@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
81204 return fls(x);
81205 }
81206 #elif BITS_PER_LONG == 64
81207-static __always_inline int fls64(__u64 x)
81208+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81209 {
81210 if (x == 0)
81211 return 0;
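
[Editor's note: the __intentional_overflow(-1) annotation on the fls family tells the size_overflow plugin that arithmetic involving these return values may wrap by design and must not be instrumented. For reference, a standalone model of what the generic fls() computes, the 1-based index of the most significant set bit, with fls(0) == 0:

	/* Standalone model: fls_model(0) == 0, fls_model(1) == 1,
	 * fls_model(0x80000000) == 32. Unsigned is used here for clarity;
	 * the kernel helper operates on int. */
	static int fls_model(unsigned int x)
	{
		int r = 32;

		if (!x)
			return 0;
		if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
		if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
		if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
		if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
		if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
		return r;
	}
]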
81212diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
81213index 1bfcfe5..e04c5c9 100644
81214--- a/include/asm-generic/cache.h
81215+++ b/include/asm-generic/cache.h
81216@@ -6,7 +6,7 @@
81217 * cache lines need to provide their own cache.h.
81218 */
81219
81220-#define L1_CACHE_SHIFT 5
81221-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
81222+#define L1_CACHE_SHIFT 5UL
81223+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
81224
81225 #endif /* __ASM_GENERIC_CACHE_H */
81226diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
81227index 0d68a1e..b74a761 100644
81228--- a/include/asm-generic/emergency-restart.h
81229+++ b/include/asm-generic/emergency-restart.h
81230@@ -1,7 +1,7 @@
81231 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
81232 #define _ASM_GENERIC_EMERGENCY_RESTART_H
81233
81234-static inline void machine_emergency_restart(void)
81235+static inline __noreturn void machine_emergency_restart(void)
81236 {
81237 machine_restart(NULL);
81238 }
81239diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
81240index 90f99c7..00ce236 100644
81241--- a/include/asm-generic/kmap_types.h
81242+++ b/include/asm-generic/kmap_types.h
81243@@ -2,9 +2,9 @@
81244 #define _ASM_GENERIC_KMAP_TYPES_H
81245
81246 #ifdef __WITH_KM_FENCE
81247-# define KM_TYPE_NR 41
81248+# define KM_TYPE_NR 42
81249 #else
81250-# define KM_TYPE_NR 20
81251+# define KM_TYPE_NR 21
81252 #endif
81253
81254 #endif
81255diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
81256index 9ceb03b..62b0b8f 100644
81257--- a/include/asm-generic/local.h
81258+++ b/include/asm-generic/local.h
81259@@ -23,24 +23,37 @@ typedef struct
81260 atomic_long_t a;
81261 } local_t;
81262
81263+typedef struct {
81264+ atomic_long_unchecked_t a;
81265+} local_unchecked_t;
81266+
81267 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
81268
81269 #define local_read(l) atomic_long_read(&(l)->a)
81270+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
81271 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
81272+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
81273 #define local_inc(l) atomic_long_inc(&(l)->a)
81274+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
81275 #define local_dec(l) atomic_long_dec(&(l)->a)
81276+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
81277 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
81278+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
81279 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
81280+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
81281
81282 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
81283 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
81284 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
81285 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
81286 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
81287+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
81288 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
81289 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
81290+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
81291
81292 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81293+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81294 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
81295 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
81296 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
81297diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
81298index 725612b..9cc513a 100644
81299--- a/include/asm-generic/pgtable-nopmd.h
81300+++ b/include/asm-generic/pgtable-nopmd.h
81301@@ -1,14 +1,19 @@
81302 #ifndef _PGTABLE_NOPMD_H
81303 #define _PGTABLE_NOPMD_H
81304
81305-#ifndef __ASSEMBLY__
81306-
81307 #include <asm-generic/pgtable-nopud.h>
81308
81309-struct mm_struct;
81310-
81311 #define __PAGETABLE_PMD_FOLDED
81312
81313+#define PMD_SHIFT PUD_SHIFT
81314+#define PTRS_PER_PMD 1
81315+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
81316+#define PMD_MASK (~(PMD_SIZE-1))
81317+
81318+#ifndef __ASSEMBLY__
81319+
81320+struct mm_struct;
81321+
81322 /*
81323 * Having the pmd type consist of a pud gets the size right, and allows
81324 * us to conceptually access the pud entry that this pmd is folded into
81325@@ -16,11 +21,6 @@ struct mm_struct;
81326 */
81327 typedef struct { pud_t pud; } pmd_t;
81328
81329-#define PMD_SHIFT PUD_SHIFT
81330-#define PTRS_PER_PMD 1
81331-#define PMD_SIZE (1UL << PMD_SHIFT)
81332-#define PMD_MASK (~(PMD_SIZE-1))
81333-
81334 /*
81335 * The "pud_xxx()" functions here are trivial for a folded two-level
81336 * setup: the pmd is never bad, and a pmd always exists (as it's folded
81337diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
81338index 810431d..0ec4804f 100644
81339--- a/include/asm-generic/pgtable-nopud.h
81340+++ b/include/asm-generic/pgtable-nopud.h
81341@@ -1,10 +1,15 @@
81342 #ifndef _PGTABLE_NOPUD_H
81343 #define _PGTABLE_NOPUD_H
81344
81345-#ifndef __ASSEMBLY__
81346-
81347 #define __PAGETABLE_PUD_FOLDED
81348
81349+#define PUD_SHIFT PGDIR_SHIFT
81350+#define PTRS_PER_PUD 1
81351+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
81352+#define PUD_MASK (~(PUD_SIZE-1))
81353+
81354+#ifndef __ASSEMBLY__
81355+
81356 /*
81357 * Having the pud type consist of a pgd gets the size right, and allows
81358 * us to conceptually access the pgd entry that this pud is folded into
81359@@ -12,11 +17,6 @@
81360 */
81361 typedef struct { pgd_t pgd; } pud_t;
81362
81363-#define PUD_SHIFT PGDIR_SHIFT
81364-#define PTRS_PER_PUD 1
81365-#define PUD_SIZE (1UL << PUD_SHIFT)
81366-#define PUD_MASK (~(PUD_SIZE-1))
81367-
81368 /*
81369 * The "pgd_xxx()" functions here are trivial for a folded two-level
81370 * setup: the pud is never bad, and a pud always exists (as it's folded
81371@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
81372 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
81373
81374 #define pgd_populate(mm, pgd, pud) do { } while (0)
81375+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
81376 /*
81377 * (puds are folded into pgds so this doesn't get actually called,
81378 * but the define is needed for a generic inline function.)
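
[Editor's note: both the nopmd and nopud hunks hoist the PMD_*/PUD_* constants out of the #ifndef __ASSEMBLY__ block and switch 1UL to _AC(1,UL), so the same definitions become usable from assembly files, which cannot parse the UL suffix. The _AC() helper, reproduced from <uapi/linux/const.h> for reference, pastes the suffix only when compiling C:

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* assembler sees the bare constant */
	#else
	#define __AC(X, Y)	(X##Y)		/* C sees 1UL, 4096UL, ... */
	#define _AC(X, Y)	__AC(X, Y)
	#endif

	#define EXAMPLE_SIZE	(_AC(1, UL) << 12)	/* 4096UL in C, 1 << 12 in asm */
]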
81379diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
81380index 4d46085..f4e92ef 100644
81381--- a/include/asm-generic/pgtable.h
81382+++ b/include/asm-generic/pgtable.h
81383@@ -689,6 +689,22 @@ static inline int pmd_protnone(pmd_t pmd)
81384 }
81385 #endif /* CONFIG_NUMA_BALANCING */
81386
81387+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
81388+#ifdef CONFIG_PAX_KERNEXEC
81389+#error KERNEXEC requires pax_open_kernel
81390+#else
81391+static inline unsigned long pax_open_kernel(void) { return 0; }
81392+#endif
81393+#endif
81394+
81395+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
81396+#ifdef CONFIG_PAX_KERNEXEC
81397+#error KERNEXEC requires pax_close_kernel
81398+#else
81399+static inline unsigned long pax_close_kernel(void) { return 0; }
81400+#endif
81401+#endif
81402+
81403 #endif /* CONFIG_MMU */
81404
81405 #endif /* !__ASSEMBLY__ */
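
[Editor's note: these stubs make pax_open_kernel()/pax_close_kernel() universally available. On KERNEXEC architectures they temporarily lift kernel write protection (the #error forces such architectures to supply real implementations); everywhere else they compile to nothing. A sketch of the usage pattern seen throughout the patch, not a real call site:

	static void example_update_ro(unsigned long *ro_word, unsigned long val)
	{
		pax_open_kernel();	/* allow writes to read-only kernel data */
		*ro_word = val;
		pax_close_kernel();	/* re-arm the write protection */
	}
]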
81406diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
81407index 72d8803..cb9749c 100644
81408--- a/include/asm-generic/uaccess.h
81409+++ b/include/asm-generic/uaccess.h
81410@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
81411 return __clear_user(to, n);
81412 }
81413
81414+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
81415+#ifdef CONFIG_PAX_MEMORY_UDEREF
81416+#error UDEREF requires pax_open_userland
81417+#else
81418+static inline unsigned long pax_open_userland(void) { return 0; }
81419+#endif
81420+#endif
81421+
81422+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
81423+#ifdef CONFIG_PAX_MEMORY_UDEREF
81424+#error UDEREF requires pax_close_userland
81425+#else
81426+static inline unsigned long pax_close_userland(void) { return 0; }
81427+#endif
81428+#endif
81429+
81430 #endif /* __ASM_GENERIC_UACCESS_H */
81431diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
81432index ac78910..775a306 100644
81433--- a/include/asm-generic/vmlinux.lds.h
81434+++ b/include/asm-generic/vmlinux.lds.h
81435@@ -234,6 +234,7 @@
81436 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
81437 VMLINUX_SYMBOL(__start_rodata) = .; \
81438 *(.rodata) *(.rodata.*) \
81439+ *(.data..read_only) \
81440 *(__vermagic) /* Kernel version magic */ \
81441 . = ALIGN(8); \
81442 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
81443@@ -727,17 +728,18 @@
81444 * section in the linker script will go there too. @phdr should have
81445 * a leading colon.
81446 *
81447- * Note that this macros defines __per_cpu_load as an absolute symbol.
81448+ * Note that this macro defines per_cpu_load as an absolute symbol.
81449 * If there is no need to put the percpu section at a predetermined
81450 * address, use PERCPU_SECTION.
81451 */
81452 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
81453- VMLINUX_SYMBOL(__per_cpu_load) = .; \
81454- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
81455+ per_cpu_load = .; \
81456+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
81457 - LOAD_OFFSET) { \
81458+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
81459 PERCPU_INPUT(cacheline) \
81460 } phdr \
81461- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
81462+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
81463
81464 /**
81465 * PERCPU_SECTION - define output section for percpu area, simple version
81466diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
81467index 623a59c..1e79ab9 100644
81468--- a/include/crypto/algapi.h
81469+++ b/include/crypto/algapi.h
81470@@ -34,7 +34,7 @@ struct crypto_type {
81471 unsigned int maskclear;
81472 unsigned int maskset;
81473 unsigned int tfmsize;
81474-};
81475+} __do_const;
81476
81477 struct crypto_instance {
81478 struct crypto_alg alg;
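
[Editor's note: this is the first of many ops structures in this section to gain __do_const. The constify gcc plugin treats such function-pointer-only structures as implicitly const, moving every instance into read-only memory; __no_const (seen on other types below) opts a type back out when an instance must be written at runtime. A hand-written equivalent of the transformation, with illustrative names:

	struct example_ops {
		int (*probe)(void);
		void (*remove)(void);
	};

	static int example_probe(void) { return 0; }
	static void example_remove(void) { }

	/* What declaring the type __do_const effectively yields: the
	 * instance is const and lands in .rodata, out of reach of
	 * runtime tampering. */
	static const struct example_ops example_ops = {
		.probe	= example_probe,
		.remove	= example_remove,
	};
]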
81479diff --git a/include/drm/drmP.h b/include/drm/drmP.h
81480index e928625..ff97886 100644
81481--- a/include/drm/drmP.h
81482+++ b/include/drm/drmP.h
81483@@ -59,6 +59,7 @@
81484
81485 #include <asm/mman.h>
81486 #include <asm/pgalloc.h>
81487+#include <asm/local.h>
81488 #include <asm/uaccess.h>
81489
81490 #include <uapi/drm/drm.h>
81491@@ -133,17 +134,18 @@ void drm_err(const char *format, ...);
81492 /*@{*/
81493
81494 /* driver capabilities and requirements mask */
81495-#define DRIVER_USE_AGP 0x1
81496-#define DRIVER_PCI_DMA 0x8
81497-#define DRIVER_SG 0x10
81498-#define DRIVER_HAVE_DMA 0x20
81499-#define DRIVER_HAVE_IRQ 0x40
81500-#define DRIVER_IRQ_SHARED 0x80
81501-#define DRIVER_GEM 0x1000
81502-#define DRIVER_MODESET 0x2000
81503-#define DRIVER_PRIME 0x4000
81504-#define DRIVER_RENDER 0x8000
81505-#define DRIVER_ATOMIC 0x10000
81506+#define DRIVER_USE_AGP 0x1
81507+#define DRIVER_PCI_DMA 0x8
81508+#define DRIVER_SG 0x10
81509+#define DRIVER_HAVE_DMA 0x20
81510+#define DRIVER_HAVE_IRQ 0x40
81511+#define DRIVER_IRQ_SHARED 0x80
81512+#define DRIVER_GEM 0x1000
81513+#define DRIVER_MODESET 0x2000
81514+#define DRIVER_PRIME 0x4000
81515+#define DRIVER_RENDER 0x8000
81516+#define DRIVER_ATOMIC 0x10000
81517+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
81518
81519 /***********************************************************************/
81520 /** \name Macros to make printk easier */
81521@@ -224,10 +226,12 @@ void drm_err(const char *format, ...);
81522 * \param cmd command.
81523 * \param arg argument.
81524 */
81525-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
81526+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
81527+ struct drm_file *file_priv);
81528+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
81529 struct drm_file *file_priv);
81530
81531-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81532+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
81533 unsigned long arg);
81534
81535 #define DRM_IOCTL_NR(n) _IOC_NR(n)
81536@@ -243,10 +247,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81537 struct drm_ioctl_desc {
81538 unsigned int cmd;
81539 int flags;
81540- drm_ioctl_t *func;
81541+ drm_ioctl_t func;
81542 unsigned int cmd_drv;
81543 const char *name;
81544-};
81545+} __do_const;
81546
81547 /**
81548 * Creates a driver or general drm_ioctl_desc array entry for the given
81549@@ -632,7 +636,8 @@ struct drm_info_list {
81550 int (*show)(struct seq_file*, void*); /** show callback */
81551 u32 driver_features; /**< Required driver features for this entry */
81552 void *data;
81553-};
81554+} __do_const;
81555+typedef struct drm_info_list __no_const drm_info_list_no_const;
81556
81557 /**
81558 * debugfs node structure. This structure represents a debugfs file.
81559@@ -716,7 +721,7 @@ struct drm_device {
81560
81561 /** \name Usage Counters */
81562 /*@{ */
81563- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81564+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81565 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
81566 int buf_use; /**< Buffers in use -- cannot alloc */
81567 atomic_t buf_alloc; /**< Buffer allocation in progress */
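
[Editor's note: besides constifying the ioctl descriptor table, this hunk converts open_count from a plain int to local_t, so call sites in the drm core move from ++/-- to the local_* operations extended earlier in this section. A sketched call-site change, modeled on the drm core rather than copied from it:

	static void example_drm_open(struct drm_device *dev)
	{
		local_inc(&dev->open_count);		/* was: dev->open_count++ */
	}

	static void example_drm_release(struct drm_device *dev)
	{
		local_dec(&dev->open_count);		/* was: dev->open_count-- */
		if (!local_read(&dev->open_count))
			;	/* last opener gone: tear-down path runs here */
	}
]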
81568diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
81569index c250a22..59d2094 100644
81570--- a/include/drm/drm_crtc_helper.h
81571+++ b/include/drm/drm_crtc_helper.h
81572@@ -160,7 +160,7 @@ struct drm_encoder_helper_funcs {
81573 int (*atomic_check)(struct drm_encoder *encoder,
81574 struct drm_crtc_state *crtc_state,
81575 struct drm_connector_state *conn_state);
81576-};
81577+} __no_const;
81578
81579 /**
81580 * struct drm_connector_helper_funcs - helper operations for connectors
81581diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
81582index d016dc5..3951fe0 100644
81583--- a/include/drm/i915_pciids.h
81584+++ b/include/drm/i915_pciids.h
81585@@ -37,7 +37,7 @@
81586 */
81587 #define INTEL_VGA_DEVICE(id, info) { \
81588 0x8086, id, \
81589- ~0, ~0, \
81590+ PCI_ANY_ID, PCI_ANY_ID, \
81591 0x030000, 0xff0000, \
81592 (unsigned long) info }
81593
81594diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
81595index 72dcbe8..8db58d7 100644
81596--- a/include/drm/ttm/ttm_memory.h
81597+++ b/include/drm/ttm/ttm_memory.h
81598@@ -48,7 +48,7 @@
81599
81600 struct ttm_mem_shrink {
81601 int (*do_shrink) (struct ttm_mem_shrink *);
81602-};
81603+} __no_const;
81604
81605 /**
81606 * struct ttm_mem_global - Global memory accounting structure.
81607diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
81608index 49a8284..9643967 100644
81609--- a/include/drm/ttm/ttm_page_alloc.h
81610+++ b/include/drm/ttm/ttm_page_alloc.h
81611@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
81612 */
81613 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
81614
81615+struct device;
81616 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81617 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81618
81619diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
81620index 4b840e8..155d235 100644
81621--- a/include/keys/asymmetric-subtype.h
81622+++ b/include/keys/asymmetric-subtype.h
81623@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
81624 /* Verify the signature on a key of this subtype (optional) */
81625 int (*verify_signature)(const struct key *key,
81626 const struct public_key_signature *sig);
81627-};
81628+} __do_const;
81629
81630 /**
81631 * asymmetric_key_subtype - Get the subtype from an asymmetric key
81632diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
81633index c1da539..1dcec55 100644
81634--- a/include/linux/atmdev.h
81635+++ b/include/linux/atmdev.h
81636@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
81637 #endif
81638
81639 struct k_atm_aal_stats {
81640-#define __HANDLE_ITEM(i) atomic_t i
81641+#define __HANDLE_ITEM(i) atomic_unchecked_t i
81642 __AAL_STAT_ITEMS
81643 #undef __HANDLE_ITEM
81644 };
81645@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
81646 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
81647 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
81648 struct module *owner;
81649-};
81650+} __do_const;
81651
81652 struct atmphy_ops {
81653 int (*start)(struct atm_dev *dev);
81654diff --git a/include/linux/atomic.h b/include/linux/atomic.h
81655index 5b08a85..60922fb 100644
81656--- a/include/linux/atomic.h
81657+++ b/include/linux/atomic.h
81658@@ -12,7 +12,7 @@
81659 * Atomically adds @a to @v, so long as @v was not already @u.
81660 * Returns non-zero if @v was not @u, and zero otherwise.
81661 */
81662-static inline int atomic_add_unless(atomic_t *v, int a, int u)
81663+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
81664 {
81665 return __atomic_add_unless(v, a, u) != u;
81666 }
81667diff --git a/include/linux/audit.h b/include/linux/audit.h
81668index c2e7e3a..8bfc0e1 100644
81669--- a/include/linux/audit.h
81670+++ b/include/linux/audit.h
81671@@ -223,7 +223,7 @@ static inline void audit_ptrace(struct task_struct *t)
81672 extern unsigned int audit_serial(void);
81673 extern int auditsc_get_stamp(struct audit_context *ctx,
81674 struct timespec *t, unsigned int *serial);
81675-extern int audit_set_loginuid(kuid_t loginuid);
81676+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
81677
81678 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
81679 {
81680diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
81681index 576e463..28fd926 100644
81682--- a/include/linux/binfmts.h
81683+++ b/include/linux/binfmts.h
81684@@ -44,7 +44,7 @@ struct linux_binprm {
81685 unsigned interp_flags;
81686 unsigned interp_data;
81687 unsigned long loader, exec;
81688-};
81689+} __randomize_layout;
81690
81691 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
81692 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
81693@@ -77,8 +77,10 @@ struct linux_binfmt {
81694 int (*load_binary)(struct linux_binprm *);
81695 int (*load_shlib)(struct file *);
81696 int (*core_dump)(struct coredump_params *cprm);
81697+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
81698+ void (*handle_mmap)(struct file *);
81699 unsigned long min_coredump; /* minimal dump size */
81700-};
81701+} __do_const __randomize_layout;
81702
81703 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
81704
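
[Editor's note: __randomize_layout hands the structure to the randstruct plugin, which shuffles field order per build so exploit code can no longer rely on fixed offsets, while the new handle_mprotect/handle_mmap hooks let a binary format veto or adjust mappings. A conceptual sketch of the layout effect on an illustrative structure:

	/* With the randstruct plugin active, the compiled offsets of these
	 * fields differ from build to build; without the plugin the marker
	 * expands to nothing (see the compiler.h fallbacks later on). */
	struct example_private {
		unsigned int id;
		void *security;
		unsigned long flags;
	} __randomize_layout;
]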
81705diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
81706index dbfbf49..10be372 100644
81707--- a/include/linux/bitmap.h
81708+++ b/include/linux/bitmap.h
81709@@ -299,7 +299,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
81710 return __bitmap_full(src, nbits);
81711 }
81712
81713-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
81714+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
81715 {
81716 if (small_const_nbits(nbits))
81717 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
81718diff --git a/include/linux/bitops.h b/include/linux/bitops.h
81719index 5d858e0..336c1d9 100644
81720--- a/include/linux/bitops.h
81721+++ b/include/linux/bitops.h
81722@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
81723 * @word: value to rotate
81724 * @shift: bits to roll
81725 */
81726-static inline __u32 rol32(__u32 word, unsigned int shift)
81727+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
81728 {
81729 return (word << shift) | (word >> (32 - shift));
81730 }
81731@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
81732 * @word: value to rotate
81733 * @shift: bits to roll
81734 */
81735-static inline __u32 ror32(__u32 word, unsigned int shift)
81736+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
81737 {
81738 return (word >> shift) | (word << (32 - shift));
81739 }
81740@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
81741 return (__s32)(value << shift) >> shift;
81742 }
81743
81744-static inline unsigned fls_long(unsigned long l)
81745+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
81746 {
81747 if (sizeof(l) == 4)
81748 return fls(l);
81749diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
81750index 7f9a516..8889453 100644
81751--- a/include/linux/blkdev.h
81752+++ b/include/linux/blkdev.h
81753@@ -1616,7 +1616,7 @@ struct block_device_operations {
81754 /* this callback is with swap_lock and sometimes page table lock held */
81755 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
81756 struct module *owner;
81757-};
81758+} __do_const;
81759
81760 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81761 unsigned long);
81762diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81763index afc1343..9735539 100644
81764--- a/include/linux/blktrace_api.h
81765+++ b/include/linux/blktrace_api.h
81766@@ -25,7 +25,7 @@ struct blk_trace {
81767 struct dentry *dropped_file;
81768 struct dentry *msg_file;
81769 struct list_head running_list;
81770- atomic_t dropped;
81771+ atomic_unchecked_t dropped;
81772 };
81773
81774 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81775diff --git a/include/linux/cache.h b/include/linux/cache.h
81776index 17e7e82..1d7da26 100644
81777--- a/include/linux/cache.h
81778+++ b/include/linux/cache.h
81779@@ -16,6 +16,14 @@
81780 #define __read_mostly
81781 #endif
81782
81783+#ifndef __read_only
81784+#ifdef CONFIG_PAX_KERNEXEC
81785+#error KERNEXEC requires __read_only
81786+#else
81787+#define __read_only __read_mostly
81788+#endif
81789+#endif
81790+
81791 #ifndef ____cacheline_aligned
81792 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81793 #endif
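
[Editor's note: this fallback pairs with the .data..read_only input section added to the rodata rule in vmlinux.lds.h earlier in this section. On KERNEXEC architectures __read_only places a variable in that section, write-protected after boot (the #error forces those architectures to define it); elsewhere it degrades to __read_mostly. A sketch of the arch-side definition and a typical user, both illustrative and assuming only the section name from the linker-script hunk:

	/* Arch-side definition implied by the vmlinux.lds.h change
	 * (sketch; the real definition lives in the arch cache.h): */
	#define __read_only	__attribute__((__section__(".data..read_only")))

	/* Typical user: set once during init, immutable afterwards. */
	static int example_feature_enabled __read_only;
]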
81794diff --git a/include/linux/capability.h b/include/linux/capability.h
81795index aa93e5e..985a1b0 100644
81796--- a/include/linux/capability.h
81797+++ b/include/linux/capability.h
81798@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81799 extern bool capable(int cap);
81800 extern bool ns_capable(struct user_namespace *ns, int cap);
81801 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81802+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81803 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81804+extern bool capable_nolog(int cap);
81805+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81806
81807 /* audit system wants to get cap info from files as well */
81808 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81809
81810+extern int is_privileged_binary(const struct dentry *dentry);
81811+
81812 #endif /* !_LINUX_CAPABILITY_H */
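
[Editor's note: the _nolog variants perform the same capability check as capable()/ns_capable() but skip grsecurity's audit logging, which matters for speculative probes whose failure is expected and would otherwise flood the log. Illustrative use, not a real call site:

	static int example_may_tune(void)
	{
		/* silent probe: a failure here is expected and not log-worthy */
		return capable_nolog(CAP_SYS_ADMIN);
	}
]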
81813diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81814index 8609d57..86e4d79 100644
81815--- a/include/linux/cdrom.h
81816+++ b/include/linux/cdrom.h
81817@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81818
81819 /* driver specifications */
81820 const int capability; /* capability flags */
81821- int n_minors; /* number of active minor devices */
81822 /* handle uniform packets for scsi type devices (scsi,atapi) */
81823 int (*generic_packet) (struct cdrom_device_info *,
81824 struct packet_command *);
81825diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81826index 4ce9056..86caac6 100644
81827--- a/include/linux/cleancache.h
81828+++ b/include/linux/cleancache.h
81829@@ -31,7 +31,7 @@ struct cleancache_ops {
81830 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81831 void (*invalidate_inode)(int, struct cleancache_filekey);
81832 void (*invalidate_fs)(int);
81833-};
81834+} __no_const;
81835
81836 extern struct cleancache_ops *
81837 cleancache_register_ops(struct cleancache_ops *ops);
81838diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81839index 5591ea7..61b77ce 100644
81840--- a/include/linux/clk-provider.h
81841+++ b/include/linux/clk-provider.h
81842@@ -195,6 +195,7 @@ struct clk_ops {
81843 void (*init)(struct clk_hw *hw);
81844 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81845 };
81846+typedef struct clk_ops __no_const clk_ops_no_const;
81847
81848 /**
81849 * struct clk_init_data - holds init data that's common to all clocks and is
81850diff --git a/include/linux/compat.h b/include/linux/compat.h
81851index ab25814..d1540d1 100644
81852--- a/include/linux/compat.h
81853+++ b/include/linux/compat.h
81854@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81855 compat_size_t __user *len_ptr);
81856
81857 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81858-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81859+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81860 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81861 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81862 compat_ssize_t msgsz, int msgflg);
81863@@ -325,7 +325,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
81864 long compat_sys_msgctl(int first, int second, void __user *uptr);
81865 long compat_sys_shmctl(int first, int second, void __user *uptr);
81866 long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
81867- unsigned nsems, const struct compat_timespec __user *timeout);
81868+ compat_long_t nsems, const struct compat_timespec __user *timeout);
81869 asmlinkage long compat_sys_keyctl(u32 option,
81870 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
81871 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
81872@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81873 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81874 compat_ulong_t addr, compat_ulong_t data);
81875 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81876- compat_long_t addr, compat_long_t data);
81877+ compat_ulong_t addr, compat_ulong_t data);
81878
81879 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81880 /*
81881diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81882index 769e198..f670585 100644
81883--- a/include/linux/compiler-gcc4.h
81884+++ b/include/linux/compiler-gcc4.h
81885@@ -39,9 +39,34 @@
81886 # define __compiletime_warning(message) __attribute__((warning(message)))
81887 # define __compiletime_error(message) __attribute__((error(message)))
81888 #endif /* __CHECKER__ */
81889+
81890+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81891+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81892+#define __bos0(ptr) __bos((ptr), 0)
81893+#define __bos1(ptr) __bos((ptr), 1)
81894 #endif /* GCC_VERSION >= 40300 */
81895
81896 #if GCC_VERSION >= 40500
81897+
81898+#ifdef RANDSTRUCT_PLUGIN
81899+#define __randomize_layout __attribute__((randomize_layout))
81900+#define __no_randomize_layout __attribute__((no_randomize_layout))
81901+#endif
81902+
81903+#ifdef CONSTIFY_PLUGIN
81904+#define __no_const __attribute__((no_const))
81905+#define __do_const __attribute__((do_const))
81906+#endif
81907+
81908+#ifdef SIZE_OVERFLOW_PLUGIN
81909+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81910+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81911+#endif
81912+
81913+#ifdef LATENT_ENTROPY_PLUGIN
81914+#define __latent_entropy __attribute__((latent_entropy))
81915+#endif
81916+
81917 /*
81918 * Mark a position in code as unreachable. This can be used to
81919 * suppress control flow warnings after asm blocks that transfer
81920diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
81921index efee493..c388661 100644
81922--- a/include/linux/compiler-gcc5.h
81923+++ b/include/linux/compiler-gcc5.h
81924@@ -28,6 +28,25 @@
81925 # define __compiletime_error(message) __attribute__((error(message)))
81926 #endif /* __CHECKER__ */
81927
81928+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81929+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81930+#define __bos0(ptr) __bos((ptr), 0)
81931+#define __bos1(ptr) __bos((ptr), 1)
81932+
81933+#ifdef CONSTIFY_PLUGIN
81934+#define __no_const __attribute__((no_const))
81935+#define __do_const __attribute__((do_const))
81936+#endif
81937+
81938+#ifdef SIZE_OVERFLOW_PLUGIN
81939+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81940+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81941+#endif
81942+
81943+#ifdef LATENT_ENTROPY_PLUGIN
81944+#define __latent_entropy __attribute__((latent_entropy))
81945+#endif
81946+
81947 /*
81948 * Mark a position in code as unreachable. This can be used to
81949 * suppress control flow warnings after asm blocks that transfer
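
[Editor's note: both gcc version headers now expose __alloc_size and the __bos*() wrappers around __builtin_object_size(), which the size-overflow and fortify machinery elsewhere in the patch builds on. A standalone model of the kind of check this enables; the function is illustrative, compiles with gcc or clang, and precise results need optimization enabled:

	#include <string.h>

	static void copy_checked(char *dst, const char *src, size_t n)
	{
		size_t avail = __builtin_object_size(dst, 0);	/* i.e. __bos0(dst) */

		/* (size_t)-1 means "unknown at compile time"; otherwise a
		 * too-large copy can be refused before memory is corrupted. */
		if (avail != (size_t)-1 && n > avail)
			return;
		memcpy(dst, src, n);
	}
]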
81950diff --git a/include/linux/compiler.h b/include/linux/compiler.h
81951index 1b45e4a..33028cd 100644
81952--- a/include/linux/compiler.h
81953+++ b/include/linux/compiler.h
81954@@ -5,11 +5,14 @@
81955
81956 #ifdef __CHECKER__
81957 # define __user __attribute__((noderef, address_space(1)))
81958+# define __force_user __force __user
81959 # define __kernel __attribute__((address_space(0)))
81960+# define __force_kernel __force __kernel
81961 # define __safe __attribute__((safe))
81962 # define __force __attribute__((force))
81963 # define __nocast __attribute__((nocast))
81964 # define __iomem __attribute__((noderef, address_space(2)))
81965+# define __force_iomem __force __iomem
81966 # define __must_hold(x) __attribute__((context(x,1,1)))
81967 # define __acquires(x) __attribute__((context(x,0,1)))
81968 # define __releases(x) __attribute__((context(x,1,0)))
81969@@ -17,20 +20,37 @@
81970 # define __release(x) __context__(x,-1)
81971 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
81972 # define __percpu __attribute__((noderef, address_space(3)))
81973+# define __force_percpu __force __percpu
81974 #ifdef CONFIG_SPARSE_RCU_POINTER
81975 # define __rcu __attribute__((noderef, address_space(4)))
81976+# define __force_rcu __force __rcu
81977 #else
81978 # define __rcu
81979+# define __force_rcu
81980 #endif
81981 extern void __chk_user_ptr(const volatile void __user *);
81982 extern void __chk_io_ptr(const volatile void __iomem *);
81983 #else
81984-# define __user
81985-# define __kernel
81986+# ifdef CHECKER_PLUGIN
81987+//# define __user
81988+//# define __force_user
81989+//# define __kernel
81990+//# define __force_kernel
81991+# else
81992+# ifdef STRUCTLEAK_PLUGIN
81993+# define __user __attribute__((user))
81994+# else
81995+# define __user
81996+# endif
81997+# define __force_user
81998+# define __kernel
81999+# define __force_kernel
82000+# endif
82001 # define __safe
82002 # define __force
82003 # define __nocast
82004 # define __iomem
82005+# define __force_iomem
82006 # define __chk_user_ptr(x) (void)0
82007 # define __chk_io_ptr(x) (void)0
82008 # define __builtin_warning(x, y...) (1)
82009@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
82010 # define __release(x) (void)0
82011 # define __cond_lock(x,c) (c)
82012 # define __percpu
82013+# define __force_percpu
82014 # define __rcu
82015+# define __force_rcu
82016 #endif
82017
82018 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
82019@@ -205,32 +227,32 @@ static __always_inline void data_access_exceeds_word_size(void)
82020 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
82021 {
82022 switch (size) {
82023- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
82024- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
82025- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
82026+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
82027+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
82028+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
82029 #ifdef CONFIG_64BIT
82030- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
82031+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
82032 #endif
82033 default:
82034 barrier();
82035- __builtin_memcpy((void *)res, (const void *)p, size);
82036+ __builtin_memcpy(res, (const void *)p, size);
82037 data_access_exceeds_word_size();
82038 barrier();
82039 }
82040 }
82041
82042-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
82043+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
82044 {
82045 switch (size) {
82046- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
82047- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
82048- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
82049+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
82050+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
82051+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
82052 #ifdef CONFIG_64BIT
82053- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
82054+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
82055 #endif
82056 default:
82057 barrier();
82058- __builtin_memcpy((void *)p, (const void *)res, size);
82059+ __builtin_memcpy((void *)p, res, size);
82060 data_access_exceeds_word_size();
82061 barrier();
82062 }
82063@@ -364,6 +386,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82064 # define __attribute_const__ /* unimplemented */
82065 #endif
82066
82067+#ifndef __randomize_layout
82068+# define __randomize_layout
82069+#endif
82070+
82071+#ifndef __no_randomize_layout
82072+# define __no_randomize_layout
82073+#endif
82074+
82075+#ifndef __no_const
82076+# define __no_const
82077+#endif
82078+
82079+#ifndef __do_const
82080+# define __do_const
82081+#endif
82082+
82083+#ifndef __size_overflow
82084+# define __size_overflow(...)
82085+#endif
82086+
82087+#ifndef __intentional_overflow
82088+# define __intentional_overflow(...)
82089+#endif
82090+
82091+#ifndef __latent_entropy
82092+# define __latent_entropy
82093+#endif
82094+
82095 /*
82096 * Tell gcc if a function is cold. The compiler will assume any path
82097 * directly leading to the call is unlikely.
82098@@ -373,6 +423,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82099 #define __cold
82100 #endif
82101
82102+#ifndef __alloc_size
82103+#define __alloc_size(...)
82104+#endif
82105+
82106+#ifndef __bos
82107+#define __bos(ptr, arg)
82108+#endif
82109+
82110+#ifndef __bos0
82111+#define __bos0(ptr)
82112+#endif
82113+
82114+#ifndef __bos1
82115+#define __bos1(ptr)
82116+#endif
82117+
82118 /* Simple shorthand for a section definition */
82119 #ifndef __section
82120 # define __section(S) __attribute__ ((__section__(#S)))
82121@@ -387,6 +453,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82122 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
82123 #endif
82124
82125+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
82126+
82127 /* Is this type a native word size -- useful for atomic operations */
82128 #ifndef __native_word
82129 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
82130@@ -466,8 +534,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82131 */
82132 #define __ACCESS_ONCE(x) ({ \
82133 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
82134- (volatile typeof(x) *)&(x); })
82135+ (volatile const typeof(x) *)&(x); })
82136 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
82137+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
82138
82139 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
82140 #ifdef CONFIG_KPROBES
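
[Editor's note: this compiler.h hunk is the counterpart of the barrier.h change earlier in the section: ACCESS_ONCE() now produces a volatile const lvalue, so accidental writes through it fail to compile, and intentional writes must use the new ACCESS_ONCE_RW(). A standalone model of the split, with macro names prefixed to avoid clashing with the real ones:

	#define MODEL_ACCESS_ONCE(x)	(*(volatile const __typeof__(x) *)&(x))
	#define MODEL_ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

	static int example_flag;

	static int example_reader(void)
	{
		return MODEL_ACCESS_ONCE(example_flag);		/* reads still work */
	}

	static void example_writer(int v)
	{
		MODEL_ACCESS_ONCE_RW(example_flag) = v;		/* writes need _RW */
		/* MODEL_ACCESS_ONCE(example_flag) = v;	   would not compile */
	}
]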
82141diff --git a/include/linux/completion.h b/include/linux/completion.h
82142index 5d5aaae..0ea9b84 100644
82143--- a/include/linux/completion.h
82144+++ b/include/linux/completion.h
82145@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
82146
82147 extern void wait_for_completion(struct completion *);
82148 extern void wait_for_completion_io(struct completion *);
82149-extern int wait_for_completion_interruptible(struct completion *x);
82150-extern int wait_for_completion_killable(struct completion *x);
82151+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
82152+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
82153 extern unsigned long wait_for_completion_timeout(struct completion *x,
82154- unsigned long timeout);
82155+ unsigned long timeout) __intentional_overflow(-1);
82156 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
82157- unsigned long timeout);
82158+ unsigned long timeout) __intentional_overflow(-1);
82159 extern long wait_for_completion_interruptible_timeout(
82160- struct completion *x, unsigned long timeout);
82161+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
82162 extern long wait_for_completion_killable_timeout(
82163- struct completion *x, unsigned long timeout);
82164+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
82165 extern bool try_wait_for_completion(struct completion *x);
82166 extern bool completion_done(struct completion *x);
82167
82168diff --git a/include/linux/configfs.h b/include/linux/configfs.h
82169index 34025df..d94bbbc 100644
82170--- a/include/linux/configfs.h
82171+++ b/include/linux/configfs.h
82172@@ -125,7 +125,7 @@ struct configfs_attribute {
82173 const char *ca_name;
82174 struct module *ca_owner;
82175 umode_t ca_mode;
82176-};
82177+} __do_const;
82178
82179 /*
82180 * Users often need to create attribute structures for their configurable
82181diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
82182index 2ee4888..0451f5e 100644
82183--- a/include/linux/cpufreq.h
82184+++ b/include/linux/cpufreq.h
82185@@ -207,6 +207,7 @@ struct global_attr {
82186 ssize_t (*store)(struct kobject *a, struct attribute *b,
82187 const char *c, size_t count);
82188 };
82189+typedef struct global_attr __no_const global_attr_no_const;
82190
82191 #define define_one_global_ro(_name) \
82192 static struct global_attr _name = \
82193@@ -278,7 +279,7 @@ struct cpufreq_driver {
82194 bool boost_supported;
82195 bool boost_enabled;
82196 int (*set_boost)(int state);
82197-};
82198+} __do_const;
82199
82200 /* flags */
82201 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
82202diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
82203index 9c5e892..feb34e0 100644
82204--- a/include/linux/cpuidle.h
82205+++ b/include/linux/cpuidle.h
82206@@ -59,7 +59,8 @@ struct cpuidle_state {
82207 void (*enter_freeze) (struct cpuidle_device *dev,
82208 struct cpuidle_driver *drv,
82209 int index);
82210-};
82211+} __do_const;
82212+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
82213
82214 /* Idle State Flags */
82215 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
82216@@ -227,7 +228,7 @@ struct cpuidle_governor {
82217 void (*reflect) (struct cpuidle_device *dev, int index);
82218
82219 struct module *owner;
82220-};
82221+} __do_const;
82222
82223 #ifdef CONFIG_CPU_IDLE
82224 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
82225diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
82226index 086549a..a572d94 100644
82227--- a/include/linux/cpumask.h
82228+++ b/include/linux/cpumask.h
82229@@ -126,17 +126,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82230 }
82231
82232 /* Valid inputs for n are -1 and 0. */
82233-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82234+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82235 {
82236 return n+1;
82237 }
82238
82239-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82240+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82241 {
82242 return n+1;
82243 }
82244
82245-static inline unsigned int cpumask_next_and(int n,
82246+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
82247 const struct cpumask *srcp,
82248 const struct cpumask *andp)
82249 {
82250@@ -182,7 +182,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82251 *
82252 * Returns >= nr_cpu_ids if no further cpus set.
82253 */
82254-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82255+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82256 {
82257 /* -1 is a legal arg here. */
82258 if (n != -1)
82259@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82260 *
82261 * Returns >= nr_cpu_ids if no further cpus unset.
82262 */
82263-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82264+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82265 {
82266 /* -1 is a legal arg here. */
82267 if (n != -1)
82268@@ -205,7 +205,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82269 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
82270 }
82271
82272-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
82273+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
82274 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
82275 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
82276
82277@@ -472,7 +472,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
82278 * cpumask_weight - Count of bits in *srcp
82279 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
82280 */
82281-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
82282+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
82283 {
82284 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
82285 }
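
[Editor's note] The __intentional_overflow(-1) markings above exist because n == -1 is a documented starting argument: the unsigned n+1 wrap to 0 is deliberate, and the annotation keeps the size_overflow gcc plugin from instrumenting it. A minimal userspace sketch (not from the patch; NR_CPUS and the bitmap stand-in are invented) of the iteration idiom these helpers serve:

#include <stdio.h>

#define NR_CPUS 8
static unsigned long cpu_bits = 0xA6;      /* cpus 1, 2, 5, 7 set */

static unsigned int cpumask_next_demo(int n, unsigned long bits)
{
    unsigned int i;
    for (i = n + 1; i < NR_CPUS; i++)      /* n == -1 wraps to i == 0 */
        if (bits & (1UL << i))
            return i;
    return NR_CPUS;                        /* >= NR_CPUS: no further cpus */
}

int main(void)
{
    unsigned int cpu;
    for (cpu = cpumask_next_demo(-1, cpu_bits);
         cpu < NR_CPUS;
         cpu = cpumask_next_demo(cpu, cpu_bits))
        printf("cpu %u is set\n", cpu);    /* prints 1, 2, 5, 7 */
    return 0;
}
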
82286diff --git a/include/linux/cred.h b/include/linux/cred.h
82287index 2fb2ca2..d6a3340 100644
82288--- a/include/linux/cred.h
82289+++ b/include/linux/cred.h
82290@@ -35,7 +35,7 @@ struct group_info {
82291 int nblocks;
82292 kgid_t small_block[NGROUPS_SMALL];
82293 kgid_t *blocks[0];
82294-};
82295+} __randomize_layout;
82296
82297 /**
82298 * get_group_info - Get a reference to a group info structure
82299@@ -137,7 +137,7 @@ struct cred {
82300 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
82301 struct group_info *group_info; /* supplementary groups for euid/fsgid */
82302 struct rcu_head rcu; /* RCU deletion hook */
82303-};
82304+} __randomize_layout;
82305
82306 extern void __put_cred(struct cred *);
82307 extern void exit_creds(struct task_struct *);
82308@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
82309 static inline void validate_process_creds(void)
82310 {
82311 }
82312+static inline void validate_task_creds(struct task_struct *task)
82313+{
82314+}
82315 #endif
82316
82317 /**
82318@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
82319
82320 #define task_uid(task) (task_cred_xxx((task), uid))
82321 #define task_euid(task) (task_cred_xxx((task), euid))
82322+#define task_securebits(task) (task_cred_xxx((task), securebits))
82323
82324 #define current_cred_xxx(xxx) \
82325 ({ \
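
[Editor's note] task_securebits() joins the existing task_uid()/task_euid() accessors built on task_cred_xxx(), which reads the task's credentials under RCU. A hedged in-tree sketch of a plausible caller, not code from the patch (demo_task_keeps_caps() is invented; issecure_mask() and SECURE_KEEP_CAPS come from <linux/securebits.h>):

#include <linux/cred.h>
#include <linux/securebits.h>

/* invented caller, for illustration only */
static inline bool demo_task_keeps_caps(struct task_struct *task)
{
    return !!(task_securebits(task) & issecure_mask(SECURE_KEEP_CAPS));
}
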
82326diff --git a/include/linux/crypto.h b/include/linux/crypto.h
82327index fb5ef16..05d1e59 100644
82328--- a/include/linux/crypto.h
82329+++ b/include/linux/crypto.h
82330@@ -626,7 +626,7 @@ struct cipher_tfm {
82331 const u8 *key, unsigned int keylen);
82332 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82333 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82334-};
82335+} __no_const;
82336
82337 struct hash_tfm {
82338 int (*init)(struct hash_desc *desc);
82339@@ -647,13 +647,13 @@ struct compress_tfm {
82340 int (*cot_decompress)(struct crypto_tfm *tfm,
82341 const u8 *src, unsigned int slen,
82342 u8 *dst, unsigned int *dlen);
82343-};
82344+} __no_const;
82345
82346 struct rng_tfm {
82347 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
82348 unsigned int dlen);
82349 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
82350-};
82351+} __no_const;
82352
82353 #define crt_ablkcipher crt_u.ablkcipher
82354 #define crt_aead crt_u.aead
82355diff --git a/include/linux/ctype.h b/include/linux/ctype.h
82356index 653589e..4ef254a 100644
82357--- a/include/linux/ctype.h
82358+++ b/include/linux/ctype.h
82359@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
82360 * Fast implementation of tolower() for internal usage. Do not use in your
82361 * code.
82362 */
82363-static inline char _tolower(const char c)
82364+static inline unsigned char _tolower(const unsigned char c)
82365 {
82366 return c | 0x20;
82367 }
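
[Editor's note] Widening _tolower() to unsigned char matters for bytes >= 0x80: as a signed char they promote to a negative int before the OR, and a negative result used as a table index would read out of bounds. A small userspace demonstration (not from the patch) of the difference:

#include <stdio.h>

static char _tolower_signed(const char c)                     { return c | 0x20; }
static unsigned char _tolower_unsigned(const unsigned char c) { return c | 0x20; }

int main(void)
{
    char high = (char)0xC4;  /* non-ASCII byte; negative as a signed char */

    /* sign-extends through the int promotion: prints -28 */
    printf("signed:   %d\n", _tolower_signed(high));
    /* stays in 0..255: prints 228 (0xE4) */
    printf("unsigned: %d\n", _tolower_unsigned((unsigned char)high));
    return 0;
}
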
82368diff --git a/include/linux/dcache.h b/include/linux/dcache.h
82369index d835879..c8e5b92 100644
82370--- a/include/linux/dcache.h
82371+++ b/include/linux/dcache.h
82372@@ -123,6 +123,9 @@ struct dentry {
82373 unsigned long d_time; /* used by d_revalidate */
82374 void *d_fsdata; /* fs-specific data */
82375
82376+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
82377+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
82378+#endif
82379 struct list_head d_lru; /* LRU list */
82380 struct list_head d_child; /* child of parent list */
82381 struct list_head d_subdirs; /* our children */
82382@@ -133,7 +136,7 @@ struct dentry {
82383 struct hlist_node d_alias; /* inode alias list */
82384 struct rcu_head d_rcu;
82385 } d_u;
82386-};
82387+} __randomize_layout;
82388
82389 /*
82390 * dentry->d_lock spinlock nesting subclasses:
82391@@ -319,7 +322,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
82392
82393 static inline unsigned d_count(const struct dentry *dentry)
82394 {
82395- return dentry->d_lockref.count;
82396+ return __lockref_read(&dentry->d_lockref);
82397 }
82398
82399 /*
82400@@ -347,7 +350,7 @@ extern char *dentry_path(struct dentry *, char *, int);
82401 static inline struct dentry *dget_dlock(struct dentry *dentry)
82402 {
82403 if (dentry)
82404- dentry->d_lockref.count++;
82405+ __lockref_inc(&dentry->d_lockref);
82406 return dentry;
82407 }
82408
82409diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
82410index 7925bf0..d5143d2 100644
82411--- a/include/linux/decompress/mm.h
82412+++ b/include/linux/decompress/mm.h
82413@@ -77,7 +77,7 @@ static void free(void *where)
82414 * warnings when not needed (indeed large_malloc / large_free are not
82415 * needed by inflate */
82416
82417-#define malloc(a) kmalloc(a, GFP_KERNEL)
82418+#define malloc(a) kmalloc((a), GFP_KERNEL)
82419 #define free(a) kfree(a)
82420
82421 #define large_malloc(a) vmalloc(a)
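
[Editor's note] The extra parentheses around the malloc() argument are defensive macro hygiene; in this particular expansion the argument already sits safely between '(' and ',', but unparenthesized macro arguments bite as soon as the expansion places them next to an operator. The classic illustration (invented demo macros, not from the patch):

#include <stdio.h>

#define DOUBLE_BAD(x)  x * 2        /* expands without protecting x */
#define DOUBLE_GOOD(x) ((x) * 2)

int main(void)
{
    printf("%d\n", DOUBLE_BAD(1 + 3));  /* expands to 1 + 3 * 2 = 7 */
    printf("%d\n", DOUBLE_GOOD(1 + 3)); /* expands to ((1 + 3) * 2) = 8 */
    return 0;
}
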
82422diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
82423index ce447f0..83c66bd 100644
82424--- a/include/linux/devfreq.h
82425+++ b/include/linux/devfreq.h
82426@@ -114,7 +114,7 @@ struct devfreq_governor {
82427 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
82428 int (*event_handler)(struct devfreq *devfreq,
82429 unsigned int event, void *data);
82430-};
82431+} __do_const;
82432
82433 /**
82434 * struct devfreq - Device devfreq structure
82435diff --git a/include/linux/device.h b/include/linux/device.h
82436index 0eb8ee2..c603b6a 100644
82437--- a/include/linux/device.h
82438+++ b/include/linux/device.h
82439@@ -311,7 +311,7 @@ struct subsys_interface {
82440 struct list_head node;
82441 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
82442 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
82443-};
82444+} __do_const;
82445
82446 int subsys_interface_register(struct subsys_interface *sif);
82447 void subsys_interface_unregister(struct subsys_interface *sif);
82448@@ -507,7 +507,7 @@ struct device_type {
82449 void (*release)(struct device *dev);
82450
82451 const struct dev_pm_ops *pm;
82452-};
82453+} __do_const;
82454
82455 /* interface for exporting device attributes */
82456 struct device_attribute {
82457@@ -517,11 +517,12 @@ struct device_attribute {
82458 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
82459 const char *buf, size_t count);
82460 };
82461+typedef struct device_attribute __no_const device_attribute_no_const;
82462
82463 struct dev_ext_attribute {
82464 struct device_attribute attr;
82465 void *var;
82466-};
82467+} __do_const;
82468
82469 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
82470 char *buf);
82471diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
82472index c3007cb..43efc8c 100644
82473--- a/include/linux/dma-mapping.h
82474+++ b/include/linux/dma-mapping.h
82475@@ -60,7 +60,7 @@ struct dma_map_ops {
82476 u64 (*get_required_mask)(struct device *dev);
82477 #endif
82478 int is_phys;
82479-};
82480+} __do_const;
82481
82482 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
82483
82484diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
82485index b6997a0..108be6c 100644
82486--- a/include/linux/dmaengine.h
82487+++ b/include/linux/dmaengine.h
82488@@ -1133,9 +1133,9 @@ struct dma_pinned_list {
82489 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
82490 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
82491
82492-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82493+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82494 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
82495-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82496+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82497 struct dma_pinned_list *pinned_list, struct page *page,
82498 unsigned int offset, size_t len);
82499
82500diff --git a/include/linux/efi.h b/include/linux/efi.h
82501index cf7e431..d239dce 100644
82502--- a/include/linux/efi.h
82503+++ b/include/linux/efi.h
82504@@ -1056,6 +1056,7 @@ struct efivar_operations {
82505 efi_set_variable_nonblocking_t *set_variable_nonblocking;
82506 efi_query_variable_store_t *query_variable_store;
82507 };
82508+typedef struct efivar_operations __no_const efivar_operations_no_const;
82509
82510 struct efivars {
82511 /*
82512diff --git a/include/linux/elf.h b/include/linux/elf.h
82513index 20fa8d8..3d0dd18 100644
82514--- a/include/linux/elf.h
82515+++ b/include/linux/elf.h
82516@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
82517 #define elf_note elf32_note
82518 #define elf_addr_t Elf32_Off
82519 #define Elf_Half Elf32_Half
82520+#define elf_dyn Elf32_Dyn
82521
82522 #else
82523
82524@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
82525 #define elf_note elf64_note
82526 #define elf_addr_t Elf64_Off
82527 #define Elf_Half Elf64_Half
82528+#define elf_dyn Elf64_Dyn
82529
82530 #endif
82531
82532diff --git a/include/linux/err.h b/include/linux/err.h
82533index a729120..6ede2c9 100644
82534--- a/include/linux/err.h
82535+++ b/include/linux/err.h
82536@@ -20,12 +20,12 @@
82537
82538 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
82539
82540-static inline void * __must_check ERR_PTR(long error)
82541+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
82542 {
82543 return (void *) error;
82544 }
82545
82546-static inline long __must_check PTR_ERR(__force const void *ptr)
82547+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
82548 {
82549 return (long) ptr;
82550 }
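
[Editor's note] ERR_PTR()/PTR_ERR() deliberately round-trip a small negative errno through the top of the pointer range, so the casts look like overflows to the size_overflow plugin; __intentional_overflow(-1) whitelists them. A self-contained userspace restatement (constants copied from the header; IS_ERR() added for the demo):

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
    void *p = ERR_PTR(-12 /* -ENOMEM */);    /* pointer near ULONG_MAX */
    if (IS_ERR(p))
        printf("error code: %ld\n", PTR_ERR(p)); /* prints -12 */
    return 0;
}
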
82551diff --git a/include/linux/extcon.h b/include/linux/extcon.h
82552index 36f49c4..a2a1f4c 100644
82553--- a/include/linux/extcon.h
82554+++ b/include/linux/extcon.h
82555@@ -135,7 +135,7 @@ struct extcon_dev {
82556 /* /sys/class/extcon/.../mutually_exclusive/... */
82557 struct attribute_group attr_g_muex;
82558 struct attribute **attrs_muex;
82559- struct device_attribute *d_attrs_muex;
82560+ device_attribute_no_const *d_attrs_muex;
82561 };
82562
82563 /**
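
[Editor's note] This hunk is the consumer side of the device_attribute_no_const typedef introduced in linux/device.h above: __do_const lets the PaX constify plugin force every instance of a hooks-only struct read-only, while the *_no_const typedef re-opens the rare object that must be filled in at runtime, as extcon does for its mutually-exclusive attributes. A hedged sketch of the pattern (demo names invented; the attributes are no-ops without the plugin):

#include <sys/types.h>

#ifndef __do_const
#define __do_const      /* no-op without the PaX constify plugin */
#endif
#ifndef __no_const
#define __no_const
#endif

struct demo_attribute {
    const char *name;
    ssize_t (*show)(char *buf);
} __do_const;                       /* plugin makes instances read-only */

typedef struct demo_attribute __no_const demo_attribute_no_const;

static demo_attribute_no_const runtime_attr;  /* opted out: writable */

static void demo_attach(const char *name, ssize_t (*show)(char *buf))
{
    runtime_attr.name = name;       /* legal only because of the typedef */
    runtime_attr.show = show;
}

static ssize_t show_muex(char *buf) { (void)buf; return 0; }

int main(void)
{
    demo_attach("muex", show_muex);
    return runtime_attr.show(0) == 0 ? 0 : 1;
}
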
82564diff --git a/include/linux/fb.h b/include/linux/fb.h
82565index 043f328..180ccbf 100644
82566--- a/include/linux/fb.h
82567+++ b/include/linux/fb.h
82568@@ -305,7 +305,8 @@ struct fb_ops {
82569 /* called at KDB enter and leave time to prepare the console */
82570 int (*fb_debug_enter)(struct fb_info *info);
82571 int (*fb_debug_leave)(struct fb_info *info);
82572-};
82573+} __do_const;
82574+typedef struct fb_ops __no_const fb_ops_no_const;
82575
82576 #ifdef CONFIG_FB_TILEBLITTING
82577 #define FB_TILE_CURSOR_NONE 0
82578diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
82579index 230f87b..1fd0485 100644
82580--- a/include/linux/fdtable.h
82581+++ b/include/linux/fdtable.h
82582@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
82583 void put_files_struct(struct files_struct *fs);
82584 void reset_files_struct(struct files_struct *);
82585 int unshare_files(struct files_struct **);
82586-struct files_struct *dup_fd(struct files_struct *, int *);
82587+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
82588 void do_close_on_exec(struct files_struct *);
82589 int iterate_fd(struct files_struct *, unsigned,
82590 int (*)(const void *, struct file *, unsigned),
82591diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
82592index 8293262..2b3b8bd 100644
82593--- a/include/linux/frontswap.h
82594+++ b/include/linux/frontswap.h
82595@@ -11,7 +11,7 @@ struct frontswap_ops {
82596 int (*load)(unsigned, pgoff_t, struct page *);
82597 void (*invalidate_page)(unsigned, pgoff_t);
82598 void (*invalidate_area)(unsigned);
82599-};
82600+} __no_const;
82601
82602 extern bool frontswap_enabled;
82603 extern struct frontswap_ops *
82604diff --git a/include/linux/fs.h b/include/linux/fs.h
82605index 52cc449..58b25c9 100644
82606--- a/include/linux/fs.h
82607+++ b/include/linux/fs.h
82608@@ -410,7 +410,7 @@ struct address_space {
82609 spinlock_t private_lock; /* for use by the address_space */
82610 struct list_head private_list; /* ditto */
82611 void *private_data; /* ditto */
82612-} __attribute__((aligned(sizeof(long))));
82613+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
82614 /*
82615 * On most architectures that alignment is already the case; but
82616 * must be enforced here for CRIS, to let the least significant bit
82617@@ -453,7 +453,7 @@ struct block_device {
82618 int bd_fsfreeze_count;
82619 /* Mutex for freeze */
82620 struct mutex bd_fsfreeze_mutex;
82621-};
82622+} __randomize_layout;
82623
82624 /*
82625 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
82626@@ -639,7 +639,7 @@ struct inode {
82627 #endif
82628
82629 void *i_private; /* fs or device private pointer */
82630-};
82631+} __randomize_layout;
82632
82633 static inline int inode_unhashed(struct inode *inode)
82634 {
82635@@ -834,7 +834,7 @@ struct file {
82636 struct list_head f_tfile_llink;
82637 #endif /* #ifdef CONFIG_EPOLL */
82638 struct address_space *f_mapping;
82639-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
82640+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
82641
82642 struct file_handle {
82643 __u32 handle_bytes;
82644@@ -962,7 +962,7 @@ struct file_lock {
82645 int state; /* state of grant or error if -ve */
82646 } afs;
82647 } fl_u;
82648-};
82649+} __randomize_layout;
82650
82651 struct file_lock_context {
82652 spinlock_t flc_lock;
82653@@ -1316,7 +1316,7 @@ struct super_block {
82654 * Indicates how deep in a filesystem stack this SB is
82655 */
82656 int s_stack_depth;
82657-};
82658+} __randomize_layout;
82659
82660 extern struct timespec current_fs_time(struct super_block *sb);
82661
82662@@ -1570,7 +1570,8 @@ struct file_operations {
82663 #ifndef CONFIG_MMU
82664 unsigned (*mmap_capabilities)(struct file *);
82665 #endif
82666-};
82667+} __do_const __randomize_layout;
82668+typedef struct file_operations __no_const file_operations_no_const;
82669
82670 struct inode_operations {
82671 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
82672@@ -2918,4 +2919,14 @@ static inline bool dir_relax(struct inode *inode)
82673 return !IS_DEADDIR(inode);
82674 }
82675
82676+static inline bool is_sidechannel_device(const struct inode *inode)
82677+{
82678+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
82679+ umode_t mode = inode->i_mode;
82680+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
82681+#else
82682+ return false;
82683+#endif
82684+}
82685+
82686 #endif /* _LINUX_FS_H */
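
[Editor's note] is_sidechannel_device() flags world-accessible character or block devices, whose access and modification events could otherwise leak activity timing to other users; the fsnotify hooks below consult it. The predicate itself is plain mode arithmetic, restated here as a runnable userspace check (/dev/ptmx is just a convenient world-writable example; on a typical system this prints 1):

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/* userspace restatement of the new helper's test: a character or
 * block device that is world-readable or world-writable */
static int is_sidechannel_mode(mode_t mode)
{
    return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
    struct stat st;
    if (stat("/dev/ptmx", &st) == 0)
        printf("/dev/ptmx side-channel candidate: %d\n",
               is_sidechannel_mode(st.st_mode));
    return 0;
}
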
82687diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
82688index 0efc3e6..fd23610 100644
82689--- a/include/linux/fs_struct.h
82690+++ b/include/linux/fs_struct.h
82691@@ -6,13 +6,13 @@
82692 #include <linux/seqlock.h>
82693
82694 struct fs_struct {
82695- int users;
82696+ atomic_t users;
82697 spinlock_t lock;
82698 seqcount_t seq;
82699 int umask;
82700 int in_exec;
82701 struct path root, pwd;
82702-};
82703+} __randomize_layout;
82704
82705 extern struct kmem_cache *fs_cachep;
82706
82707diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
82708index 7714849..a4a5c7a 100644
82709--- a/include/linux/fscache-cache.h
82710+++ b/include/linux/fscache-cache.h
82711@@ -113,7 +113,7 @@ struct fscache_operation {
82712 fscache_operation_release_t release;
82713 };
82714
82715-extern atomic_t fscache_op_debug_id;
82716+extern atomic_unchecked_t fscache_op_debug_id;
82717 extern void fscache_op_work_func(struct work_struct *work);
82718
82719 extern void fscache_enqueue_operation(struct fscache_operation *);
82720@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
82721 INIT_WORK(&op->work, fscache_op_work_func);
82722 atomic_set(&op->usage, 1);
82723 op->state = FSCACHE_OP_ST_INITIALISED;
82724- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
82725+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
82726 op->processor = processor;
82727 op->release = release;
82728 INIT_LIST_HEAD(&op->pend_link);
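
[Editor's note] fscache_op_debug_id is a monotonically increasing debug ID, not a reference count, so wrapping it is harmless; converting it to atomic_unchecked_t with the *_unchecked accessors opts it out of the PAX_REFCOUNT overflow trap that protects real refcounts. A userspace stand-in (not from the patch) showing the acceptable wrap:

#include <stdio.h>
#include <limits.h>
#include <stdatomic.h>

static atomic_uint debug_id = UINT_MAX - 1;   /* near the wrap on purpose */

int main(void)
{
    /* like atomic_inc_return_unchecked(): a wrap is fine for an ID */
    printf("id=%u\n", atomic_fetch_add(&debug_id, 1) + 1); /* 4294967295 */
    printf("id=%u\n", atomic_fetch_add(&debug_id, 1) + 1); /* wraps to 0 */
    return 0;
}
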
82729diff --git a/include/linux/fscache.h b/include/linux/fscache.h
82730index 115bb81..e7b812b 100644
82731--- a/include/linux/fscache.h
82732+++ b/include/linux/fscache.h
82733@@ -152,7 +152,7 @@ struct fscache_cookie_def {
82734 * - this is mandatory for any object that may have data
82735 */
82736 void (*now_uncached)(void *cookie_netfs_data);
82737-};
82738+} __do_const;
82739
82740 /*
82741 * fscache cached network filesystem type
82742diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
82743index 7ee1774..72505b8 100644
82744--- a/include/linux/fsnotify.h
82745+++ b/include/linux/fsnotify.h
82746@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
82747 struct inode *inode = file_inode(file);
82748 __u32 mask = FS_ACCESS;
82749
82750+ if (is_sidechannel_device(inode))
82751+ return;
82752+
82753 if (S_ISDIR(inode->i_mode))
82754 mask |= FS_ISDIR;
82755
82756@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
82757 struct inode *inode = file_inode(file);
82758 __u32 mask = FS_MODIFY;
82759
82760+ if (is_sidechannel_device(inode))
82761+ return;
82762+
82763 if (S_ISDIR(inode->i_mode))
82764 mask |= FS_ISDIR;
82765
82766@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
82767 */
82768 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
82769 {
82770- return kstrdup(name, GFP_KERNEL);
82771+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
82772 }
82773
82774 /*
82775diff --git a/include/linux/genhd.h b/include/linux/genhd.h
82776index ec274e0..e678159 100644
82777--- a/include/linux/genhd.h
82778+++ b/include/linux/genhd.h
82779@@ -194,7 +194,7 @@ struct gendisk {
82780 struct kobject *slave_dir;
82781
82782 struct timer_rand_state *random;
82783- atomic_t sync_io; /* RAID */
82784+ atomic_unchecked_t sync_io; /* RAID */
82785 struct disk_events *ev;
82786 #ifdef CONFIG_BLK_DEV_INTEGRITY
82787 struct blk_integrity *integrity;
82788@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
82789 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
82790
82791 /* drivers/char/random.c */
82792-extern void add_disk_randomness(struct gendisk *disk);
82793+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
82794 extern void rand_initialize_disk(struct gendisk *disk);
82795
82796 static inline sector_t get_start_sect(struct block_device *bdev)
82797diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
82798index 667c311..abac2a7 100644
82799--- a/include/linux/genl_magic_func.h
82800+++ b/include/linux/genl_magic_func.h
82801@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
82802 },
82803
82804 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
82805-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
82806+static struct genl_ops ZZZ_genl_ops[] = {
82807 #include GENL_MAGIC_INCLUDE_FILE
82808 };
82809
82810diff --git a/include/linux/gfp.h b/include/linux/gfp.h
82811index 51bd1e7..0486343 100644
82812--- a/include/linux/gfp.h
82813+++ b/include/linux/gfp.h
82814@@ -34,6 +34,13 @@ struct vm_area_struct;
82815 #define ___GFP_NO_KSWAPD 0x400000u
82816 #define ___GFP_OTHER_NODE 0x800000u
82817 #define ___GFP_WRITE 0x1000000u
82818+
82819+#ifdef CONFIG_PAX_USERCOPY_SLABS
82820+#define ___GFP_USERCOPY 0x2000000u
82821+#else
82822+#define ___GFP_USERCOPY 0
82823+#endif
82824+
82825 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
82826
82827 /*
82828@@ -90,6 +97,7 @@ struct vm_area_struct;
82829 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
82830 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
82831 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
82832+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
82833
82834 /*
82835 * This may seem redundant, but it's a way of annotating false positives vs.
82836@@ -97,7 +105,7 @@ struct vm_area_struct;
82837 */
82838 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
82839
82840-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
82841+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
82842 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
82843
82844 /* This equals 0, but use constants in case they ever change */
82845@@ -152,6 +160,8 @@ struct vm_area_struct;
82846 /* 4GB DMA on some platforms */
82847 #define GFP_DMA32 __GFP_DMA32
82848
82849+#define GFP_USERCOPY __GFP_USERCOPY
82850+
82851 /* Convert GFP flags to their corresponding migrate type */
82852 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
82853 {
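
[Editor's note] The new ___GFP_USERCOPY bit (0x2000000, i.e. bit 25) only fits because __GFP_BITS_SHIFT grows from 25 to 26. A one-line sanity check of that arithmetic:

#include <stdio.h>

int main(void)
{
    unsigned int gfp_bits_shift = 26;                       /* was 25 */
    unsigned int gfp_bits_mask  = (1u << gfp_bits_shift) - 1;
    unsigned int gfp_usercopy   = 0x2000000u;               /* bit 25 */

    printf("usercopy bit inside mask: %s\n",
           (gfp_usercopy & gfp_bits_mask) ? "yes" : "no");  /* yes */
    return 0;
}
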
82854diff --git a/include/linux/gracl.h b/include/linux/gracl.h
82855new file mode 100644
82856index 0000000..91858e4
82857--- /dev/null
82858+++ b/include/linux/gracl.h
82859@@ -0,0 +1,342 @@
82860+#ifndef GR_ACL_H
82861+#define GR_ACL_H
82862+
82863+#include <linux/grdefs.h>
82864+#include <linux/resource.h>
82865+#include <linux/capability.h>
82866+#include <linux/dcache.h>
82867+#include <asm/resource.h>
82868+
82869+/* Major status information */
82870+
82871+#define GR_VERSION "grsecurity 3.1"
82872+#define GRSECURITY_VERSION 0x3100
82873+
82874+enum {
82875+ GR_SHUTDOWN = 0,
82876+ GR_ENABLE = 1,
82877+ GR_SPROLE = 2,
82878+ GR_OLDRELOAD = 3,
82879+ GR_SEGVMOD = 4,
82880+ GR_STATUS = 5,
82881+ GR_UNSPROLE = 6,
82882+ GR_PASSSET = 7,
82883+ GR_SPROLEPAM = 8,
82884+ GR_RELOAD = 9,
82885+};
82886+
82887+/* Password setup definitions
82888+ * kernel/grhash.c */
82889+enum {
82890+ GR_PW_LEN = 128,
82891+ GR_SALT_LEN = 16,
82892+ GR_SHA_LEN = 32,
82893+};
82894+
82895+enum {
82896+ GR_SPROLE_LEN = 64,
82897+};
82898+
82899+enum {
82900+ GR_NO_GLOB = 0,
82901+ GR_REG_GLOB,
82902+ GR_CREATE_GLOB
82903+};
82904+
82905+#define GR_NLIMITS 32
82906+
82907+/* Begin Data Structures */
82908+
82909+struct sprole_pw {
82910+ unsigned char *rolename;
82911+ unsigned char salt[GR_SALT_LEN];
82912+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
82913+};
82914+
82915+struct name_entry {
82916+ __u32 key;
82917+ u64 inode;
82918+ dev_t device;
82919+ char *name;
82920+ __u16 len;
82921+ __u8 deleted;
82922+ struct name_entry *prev;
82923+ struct name_entry *next;
82924+};
82925+
82926+struct inodev_entry {
82927+ struct name_entry *nentry;
82928+ struct inodev_entry *prev;
82929+ struct inodev_entry *next;
82930+};
82931+
82932+struct acl_role_db {
82933+ struct acl_role_label **r_hash;
82934+ __u32 r_size;
82935+};
82936+
82937+struct inodev_db {
82938+ struct inodev_entry **i_hash;
82939+ __u32 i_size;
82940+};
82941+
82942+struct name_db {
82943+ struct name_entry **n_hash;
82944+ __u32 n_size;
82945+};
82946+
82947+struct crash_uid {
82948+ uid_t uid;
82949+ unsigned long expires;
82950+};
82951+
82952+struct gr_hash_struct {
82953+ void **table;
82954+ void **nametable;
82955+ void *first;
82956+ __u32 table_size;
82957+ __u32 used_size;
82958+ int type;
82959+};
82960+
82961+/* Userspace Grsecurity ACL data structures */
82962+
82963+struct acl_subject_label {
82964+ char *filename;
82965+ u64 inode;
82966+ dev_t device;
82967+ __u32 mode;
82968+ kernel_cap_t cap_mask;
82969+ kernel_cap_t cap_lower;
82970+ kernel_cap_t cap_invert_audit;
82971+
82972+ struct rlimit res[GR_NLIMITS];
82973+ __u32 resmask;
82974+
82975+ __u8 user_trans_type;
82976+ __u8 group_trans_type;
82977+ uid_t *user_transitions;
82978+ gid_t *group_transitions;
82979+ __u16 user_trans_num;
82980+ __u16 group_trans_num;
82981+
82982+ __u32 sock_families[2];
82983+ __u32 ip_proto[8];
82984+ __u32 ip_type;
82985+ struct acl_ip_label **ips;
82986+ __u32 ip_num;
82987+ __u32 inaddr_any_override;
82988+
82989+ __u32 crashes;
82990+ unsigned long expires;
82991+
82992+ struct acl_subject_label *parent_subject;
82993+ struct gr_hash_struct *hash;
82994+ struct acl_subject_label *prev;
82995+ struct acl_subject_label *next;
82996+
82997+ struct acl_object_label **obj_hash;
82998+ __u32 obj_hash_size;
82999+ __u16 pax_flags;
83000+};
83001+
83002+struct role_allowed_ip {
83003+ __u32 addr;
83004+ __u32 netmask;
83005+
83006+ struct role_allowed_ip *prev;
83007+ struct role_allowed_ip *next;
83008+};
83009+
83010+struct role_transition {
83011+ char *rolename;
83012+
83013+ struct role_transition *prev;
83014+ struct role_transition *next;
83015+};
83016+
83017+struct acl_role_label {
83018+ char *rolename;
83019+ uid_t uidgid;
83020+ __u16 roletype;
83021+
83022+ __u16 auth_attempts;
83023+ unsigned long expires;
83024+
83025+ struct acl_subject_label *root_label;
83026+ struct gr_hash_struct *hash;
83027+
83028+ struct acl_role_label *prev;
83029+ struct acl_role_label *next;
83030+
83031+ struct role_transition *transitions;
83032+ struct role_allowed_ip *allowed_ips;
83033+ uid_t *domain_children;
83034+ __u16 domain_child_num;
83035+
83036+ umode_t umask;
83037+
83038+ struct acl_subject_label **subj_hash;
83039+ __u32 subj_hash_size;
83040+};
83041+
83042+struct user_acl_role_db {
83043+ struct acl_role_label **r_table;
83044+ __u32 num_pointers; /* Number of allocations to track */
83045+ __u32 num_roles; /* Number of roles */
83046+ __u32 num_domain_children; /* Number of domain children */
83047+ __u32 num_subjects; /* Number of subjects */
83048+ __u32 num_objects; /* Number of objects */
83049+};
83050+
83051+struct acl_object_label {
83052+ char *filename;
83053+ u64 inode;
83054+ dev_t device;
83055+ __u32 mode;
83056+
83057+ struct acl_subject_label *nested;
83058+ struct acl_object_label *globbed;
83059+
83060+ /* next two structures not used */
83061+
83062+ struct acl_object_label *prev;
83063+ struct acl_object_label *next;
83064+};
83065+
83066+struct acl_ip_label {
83067+ char *iface;
83068+ __u32 addr;
83069+ __u32 netmask;
83070+ __u16 low, high;
83071+ __u8 mode;
83072+ __u32 type;
83073+ __u32 proto[8];
83074+
83075+ /* next two structures not used */
83076+
83077+ struct acl_ip_label *prev;
83078+ struct acl_ip_label *next;
83079+};
83080+
83081+struct gr_arg {
83082+ struct user_acl_role_db role_db;
83083+ unsigned char pw[GR_PW_LEN];
83084+ unsigned char salt[GR_SALT_LEN];
83085+ unsigned char sum[GR_SHA_LEN];
83086+ unsigned char sp_role[GR_SPROLE_LEN];
83087+ struct sprole_pw *sprole_pws;
83088+ dev_t segv_device;
83089+ u64 segv_inode;
83090+ uid_t segv_uid;
83091+ __u16 num_sprole_pws;
83092+ __u16 mode;
83093+};
83094+
83095+struct gr_arg_wrapper {
83096+ struct gr_arg *arg;
83097+ __u32 version;
83098+ __u32 size;
83099+};
83100+
83101+struct subject_map {
83102+ struct acl_subject_label *user;
83103+ struct acl_subject_label *kernel;
83104+ struct subject_map *prev;
83105+ struct subject_map *next;
83106+};
83107+
83108+struct acl_subj_map_db {
83109+ struct subject_map **s_hash;
83110+ __u32 s_size;
83111+};
83112+
83113+struct gr_policy_state {
83114+ struct sprole_pw **acl_special_roles;
83115+ __u16 num_sprole_pws;
83116+ struct acl_role_label *kernel_role;
83117+ struct acl_role_label *role_list;
83118+ struct acl_role_label *default_role;
83119+ struct acl_role_db acl_role_set;
83120+ struct acl_subj_map_db subj_map_set;
83121+ struct name_db name_set;
83122+ struct inodev_db inodev_set;
83123+};
83124+
83125+struct gr_alloc_state {
83126+ unsigned long alloc_stack_next;
83127+ unsigned long alloc_stack_size;
83128+ void **alloc_stack;
83129+};
83130+
83131+struct gr_reload_state {
83132+ struct gr_policy_state oldpolicy;
83133+ struct gr_alloc_state oldalloc;
83134+ struct gr_policy_state newpolicy;
83135+ struct gr_alloc_state newalloc;
83136+ struct gr_policy_state *oldpolicy_ptr;
83137+ struct gr_alloc_state *oldalloc_ptr;
83138+ unsigned char oldmode;
83139+};
83140+
83141+/* End Data Structures Section */
83142+
83143+/* Hash functions generated by empirical testing by Brad Spengler.
83144+   They make good use of the low bits of the inode: generally 0-1 loop
83145+   iterations for a successful match, 0-3 for an unsuccessful match.
83146+   Shift/add algorithm with modulus of table size and an XOR. */
83147+
83148+static __inline__ unsigned int
83149+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
83150+{
83151+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
83152+}
83153+
83154+static __inline__ unsigned int
83155+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
83156+{
83157+	return ((unsigned long)userp % sz);
83158+}
83159+
83160+static __inline__ unsigned int
83161+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
83162+{
83163+ unsigned int rem;
83164+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
83165+ return rem;
83166+}
83167+
83168+static __inline__ unsigned int
83169+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
83170+{
83171+ return full_name_hash((const unsigned char *)name, len) % sz;
83172+}
83173+
83174+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
83175+ subj = NULL; \
83176+ iter = 0; \
83177+ while (iter < role->subj_hash_size) { \
83178+ if (subj == NULL) \
83179+ subj = role->subj_hash[iter]; \
83180+ if (subj == NULL) { \
83181+ iter++; \
83182+ continue; \
83183+ }
83184+
83185+#define FOR_EACH_SUBJECT_END(subj,iter) \
83186+ subj = subj->next; \
83187+ if (subj == NULL) \
83188+ iter++; \
83189+ }
83190+
83191+
83192+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
83193+ subj = role->hash->first; \
83194+ while (subj != NULL) {
83195+
83196+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
83197+ subj = subj->next; \
83198+ }
83199+
83200+#endif
83201+
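
[Editor's note] The role hash is open-hashed with chaining (hence the prev/next pointers in the labels above), and gr_rhash() spreads uid/type pairs across buckets with a shift/XOR/modulus mix. A standalone restatement for experimentation (the role-type inputs are illustrative, not gradm's actual values; a prime bucket count keeps the modulus from degenerating to the low bits):

#include <stdio.h>
#include <stdint.h>

/* userspace restatement of gr_rhash(): shift/add, XOR, then modulus */
static unsigned int gr_rhash_demo(uint32_t uid, uint16_t type, unsigned int sz)
{
    return ((((uid + type) << (16 + type)) ^ uid) % sz);
}

int main(void)
{
    unsigned int sz = 211;   /* prime table size, illustrative */
    printf("uid 0,    type 1: bucket %u\n", gr_rhash_demo(0, 1, sz));
    printf("uid 1000, type 1: bucket %u\n", gr_rhash_demo(1000, 1, sz));
    printf("uid 1000, type 2: bucket %u\n", gr_rhash_demo(1000, 2, sz));
    return 0;
}
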
83202diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
83203new file mode 100644
83204index 0000000..af64092
83205--- /dev/null
83206+++ b/include/linux/gracl_compat.h
83207@@ -0,0 +1,156 @@
83208+#ifndef GR_ACL_COMPAT_H
83209+#define GR_ACL_COMPAT_H
83210+
83211+#include <linux/resource.h>
83212+#include <asm/resource.h>
83213+
83214+struct sprole_pw_compat {
83215+ compat_uptr_t rolename;
83216+ unsigned char salt[GR_SALT_LEN];
83217+ unsigned char sum[GR_SHA_LEN];
83218+};
83219+
83220+struct gr_hash_struct_compat {
83221+ compat_uptr_t table;
83222+ compat_uptr_t nametable;
83223+ compat_uptr_t first;
83224+ __u32 table_size;
83225+ __u32 used_size;
83226+ int type;
83227+};
83228+
83229+struct acl_subject_label_compat {
83230+ compat_uptr_t filename;
83231+ compat_u64 inode;
83232+ __u32 device;
83233+ __u32 mode;
83234+ kernel_cap_t cap_mask;
83235+ kernel_cap_t cap_lower;
83236+ kernel_cap_t cap_invert_audit;
83237+
83238+ struct compat_rlimit res[GR_NLIMITS];
83239+ __u32 resmask;
83240+
83241+ __u8 user_trans_type;
83242+ __u8 group_trans_type;
83243+ compat_uptr_t user_transitions;
83244+ compat_uptr_t group_transitions;
83245+ __u16 user_trans_num;
83246+ __u16 group_trans_num;
83247+
83248+ __u32 sock_families[2];
83249+ __u32 ip_proto[8];
83250+ __u32 ip_type;
83251+ compat_uptr_t ips;
83252+ __u32 ip_num;
83253+ __u32 inaddr_any_override;
83254+
83255+ __u32 crashes;
83256+ compat_ulong_t expires;
83257+
83258+ compat_uptr_t parent_subject;
83259+ compat_uptr_t hash;
83260+ compat_uptr_t prev;
83261+ compat_uptr_t next;
83262+
83263+ compat_uptr_t obj_hash;
83264+ __u32 obj_hash_size;
83265+ __u16 pax_flags;
83266+};
83267+
83268+struct role_allowed_ip_compat {
83269+ __u32 addr;
83270+ __u32 netmask;
83271+
83272+ compat_uptr_t prev;
83273+ compat_uptr_t next;
83274+};
83275+
83276+struct role_transition_compat {
83277+ compat_uptr_t rolename;
83278+
83279+ compat_uptr_t prev;
83280+ compat_uptr_t next;
83281+};
83282+
83283+struct acl_role_label_compat {
83284+ compat_uptr_t rolename;
83285+ uid_t uidgid;
83286+ __u16 roletype;
83287+
83288+ __u16 auth_attempts;
83289+ compat_ulong_t expires;
83290+
83291+ compat_uptr_t root_label;
83292+ compat_uptr_t hash;
83293+
83294+ compat_uptr_t prev;
83295+ compat_uptr_t next;
83296+
83297+ compat_uptr_t transitions;
83298+ compat_uptr_t allowed_ips;
83299+ compat_uptr_t domain_children;
83300+ __u16 domain_child_num;
83301+
83302+ umode_t umask;
83303+
83304+ compat_uptr_t subj_hash;
83305+ __u32 subj_hash_size;
83306+};
83307+
83308+struct user_acl_role_db_compat {
83309+ compat_uptr_t r_table;
83310+ __u32 num_pointers;
83311+ __u32 num_roles;
83312+ __u32 num_domain_children;
83313+ __u32 num_subjects;
83314+ __u32 num_objects;
83315+};
83316+
83317+struct acl_object_label_compat {
83318+ compat_uptr_t filename;
83319+ compat_u64 inode;
83320+ __u32 device;
83321+ __u32 mode;
83322+
83323+ compat_uptr_t nested;
83324+ compat_uptr_t globbed;
83325+
83326+ compat_uptr_t prev;
83327+ compat_uptr_t next;
83328+};
83329+
83330+struct acl_ip_label_compat {
83331+ compat_uptr_t iface;
83332+ __u32 addr;
83333+ __u32 netmask;
83334+ __u16 low, high;
83335+ __u8 mode;
83336+ __u32 type;
83337+ __u32 proto[8];
83338+
83339+ compat_uptr_t prev;
83340+ compat_uptr_t next;
83341+};
83342+
83343+struct gr_arg_compat {
83344+ struct user_acl_role_db_compat role_db;
83345+ unsigned char pw[GR_PW_LEN];
83346+ unsigned char salt[GR_SALT_LEN];
83347+ unsigned char sum[GR_SHA_LEN];
83348+ unsigned char sp_role[GR_SPROLE_LEN];
83349+ compat_uptr_t sprole_pws;
83350+ __u32 segv_device;
83351+ compat_u64 segv_inode;
83352+ uid_t segv_uid;
83353+ __u16 num_sprole_pws;
83354+ __u16 mode;
83355+};
83356+
83357+struct gr_arg_wrapper_compat {
83358+ compat_uptr_t arg;
83359+ __u32 version;
83360+ __u32 size;
83361+};
83362+
83363+#endif
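
[Editor's note] Each *_compat mirror replaces native pointers with compat_uptr_t and longs with compat_ulong_t so a 64-bit kernel can parse a policy uploaded by 32-bit gradm. A hedged in-tree sketch of the widening step (demo_widen_role() is invented; real code keeps the __user annotation and copies the pointed-to data in with copy_from_user()):

#include <linux/compat.h>

/* invented helper, for illustration only */
static void demo_widen_role(struct acl_role_label *role,
                            const struct acl_role_label_compat *rolecp)
{
    /* scalar fields copy straight across */
    role->uidgid         = rolecp->uidgid;
    role->roletype       = rolecp->roletype;
    role->umask          = rolecp->umask;
    role->subj_hash_size = rolecp->subj_hash_size;
    /* pointer-sized fields cannot be copied bitwise: widen with compat_ptr();
     * the result is still a userland pointer that must be copied in */
    role->rolename = (char *)compat_ptr(rolecp->rolename);
}
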
83364diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
83365new file mode 100644
83366index 0000000..323ecf2
83367--- /dev/null
83368+++ b/include/linux/gralloc.h
83369@@ -0,0 +1,9 @@
83370+#ifndef __GRALLOC_H
83371+#define __GRALLOC_H
83372+
83373+void acl_free_all(void);
83374+int acl_alloc_stack_init(unsigned long size);
83375+void *acl_alloc(unsigned long len);
83376+void *acl_alloc_num(unsigned long num, unsigned long len);
83377+
83378+#endif
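
[Editor's note] The interface implies a region-style allocator: every acl_alloc() is recorded so acl_free_all() can tear down an entire policy in one sweep, which suits the load/reload cycle in gr_reload_state. A userspace mock of that implied contract (an illustration, not grsecurity's implementation):

#include <stdio.h>
#include <stdlib.h>

static void **alloc_stack;
static unsigned long alloc_stack_next, alloc_stack_size;

int acl_alloc_stack_init(unsigned long size)
{
    alloc_stack = calloc(size, sizeof(void *));
    alloc_stack_size = size;
    alloc_stack_next = 0;
    return alloc_stack != NULL;
}

void *acl_alloc(unsigned long len)
{
    void *p;
    if (alloc_stack_next >= alloc_stack_size)
        return NULL;                       /* stack full */
    p = malloc(len);
    if (p)
        alloc_stack[alloc_stack_next++] = p;  /* record for bulk free */
    return p;
}

void acl_free_all(void)
{
    while (alloc_stack_next)
        free(alloc_stack[--alloc_stack_next]);
    free(alloc_stack);
    alloc_stack = NULL;
}

int main(void)
{
    if (!acl_alloc_stack_init(16))
        return 1;
    char *name = acl_alloc(64);
    if (name)
        snprintf(name, 64, "role buffer");
    printf("%s\n", name ? name : "(alloc failed)");
    acl_free_all();        /* frees everything pushed since init */
    return 0;
}
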
83379diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
83380new file mode 100644
83381index 0000000..be66033
83382--- /dev/null
83383+++ b/include/linux/grdefs.h
83384@@ -0,0 +1,140 @@
83385+#ifndef GRDEFS_H
83386+#define GRDEFS_H
83387+
83388+/* Begin grsecurity status declarations */
83389+
83390+enum {
83391+ GR_READY = 0x01,
83392+ GR_STATUS_INIT = 0x00 // disabled state
83393+};
83394+
83395+/* Begin ACL declarations */
83396+
83397+/* Role flags */
83398+
83399+enum {
83400+ GR_ROLE_USER = 0x0001,
83401+ GR_ROLE_GROUP = 0x0002,
83402+ GR_ROLE_DEFAULT = 0x0004,
83403+ GR_ROLE_SPECIAL = 0x0008,
83404+ GR_ROLE_AUTH = 0x0010,
83405+ GR_ROLE_NOPW = 0x0020,
83406+ GR_ROLE_GOD = 0x0040,
83407+ GR_ROLE_LEARN = 0x0080,
83408+ GR_ROLE_TPE = 0x0100,
83409+ GR_ROLE_DOMAIN = 0x0200,
83410+ GR_ROLE_PAM = 0x0400,
83411+ GR_ROLE_PERSIST = 0x0800
83412+};
83413+
83414+/* ACL Subject and Object mode flags */
83415+enum {
83416+ GR_DELETED = 0x80000000
83417+};
83418+
83419+/* ACL Object-only mode flags */
83420+enum {
83421+ GR_READ = 0x00000001,
83422+ GR_APPEND = 0x00000002,
83423+ GR_WRITE = 0x00000004,
83424+ GR_EXEC = 0x00000008,
83425+ GR_FIND = 0x00000010,
83426+ GR_INHERIT = 0x00000020,
83427+ GR_SETID = 0x00000040,
83428+ GR_CREATE = 0x00000080,
83429+ GR_DELETE = 0x00000100,
83430+ GR_LINK = 0x00000200,
83431+ GR_AUDIT_READ = 0x00000400,
83432+ GR_AUDIT_APPEND = 0x00000800,
83433+ GR_AUDIT_WRITE = 0x00001000,
83434+ GR_AUDIT_EXEC = 0x00002000,
83435+ GR_AUDIT_FIND = 0x00004000,
83436+ GR_AUDIT_INHERIT= 0x00008000,
83437+ GR_AUDIT_SETID = 0x00010000,
83438+ GR_AUDIT_CREATE = 0x00020000,
83439+ GR_AUDIT_DELETE = 0x00040000,
83440+ GR_AUDIT_LINK = 0x00080000,
83441+ GR_PTRACERD = 0x00100000,
83442+ GR_NOPTRACE = 0x00200000,
83443+ GR_SUPPRESS = 0x00400000,
83444+ GR_NOLEARN = 0x00800000,
83445+ GR_INIT_TRANSFER= 0x01000000
83446+};
83447+
83448+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
83449+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
83450+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
83451+
83452+/* ACL subject-only mode flags */
83453+enum {
83454+ GR_KILL = 0x00000001,
83455+ GR_VIEW = 0x00000002,
83456+ GR_PROTECTED = 0x00000004,
83457+ GR_LEARN = 0x00000008,
83458+ GR_OVERRIDE = 0x00000010,
83459+ /* just a placeholder, this mode is only used in userspace */
83460+ GR_DUMMY = 0x00000020,
83461+ GR_PROTSHM = 0x00000040,
83462+ GR_KILLPROC = 0x00000080,
83463+ GR_KILLIPPROC = 0x00000100,
83464+ /* just a placeholder, this mode is only used in userspace */
83465+ GR_NOTROJAN = 0x00000200,
83466+ GR_PROTPROCFD = 0x00000400,
83467+ GR_PROCACCT = 0x00000800,
83468+ GR_RELAXPTRACE = 0x00001000,
83469+ //GR_NESTED = 0x00002000,
83470+ GR_INHERITLEARN = 0x00004000,
83471+ GR_PROCFIND = 0x00008000,
83472+ GR_POVERRIDE = 0x00010000,
83473+ GR_KERNELAUTH = 0x00020000,
83474+ GR_ATSECURE = 0x00040000,
83475+ GR_SHMEXEC = 0x00080000
83476+};
83477+
83478+enum {
83479+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
83480+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
83481+ GR_PAX_ENABLE_MPROTECT = 0x0004,
83482+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
83483+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
83484+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
83485+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
83486+ GR_PAX_DISABLE_MPROTECT = 0x0400,
83487+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
83488+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
83489+};
83490+
83491+enum {
83492+ GR_ID_USER = 0x01,
83493+ GR_ID_GROUP = 0x02,
83494+};
83495+
83496+enum {
83497+ GR_ID_ALLOW = 0x01,
83498+ GR_ID_DENY = 0x02,
83499+};
83500+
83501+#define GR_CRASH_RES 31
83502+#define GR_UIDTABLE_MAX 500
83503+
83504+/* begin resource learning section */
83505+enum {
83506+ GR_RLIM_CPU_BUMP = 60,
83507+ GR_RLIM_FSIZE_BUMP = 50000,
83508+ GR_RLIM_DATA_BUMP = 10000,
83509+ GR_RLIM_STACK_BUMP = 1000,
83510+ GR_RLIM_CORE_BUMP = 10000,
83511+ GR_RLIM_RSS_BUMP = 500000,
83512+ GR_RLIM_NPROC_BUMP = 1,
83513+ GR_RLIM_NOFILE_BUMP = 5,
83514+ GR_RLIM_MEMLOCK_BUMP = 50000,
83515+ GR_RLIM_AS_BUMP = 500000,
83516+ GR_RLIM_LOCKS_BUMP = 2,
83517+ GR_RLIM_SIGPENDING_BUMP = 5,
83518+ GR_RLIM_MSGQUEUE_BUMP = 10000,
83519+ GR_RLIM_NICE_BUMP = 1,
83520+ GR_RLIM_RTPRIO_BUMP = 1,
83521+ GR_RLIM_RTTIME_BUMP = 1000000
83522+};
83523+
83524+#endif
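
[Editor's note] Note the encoding: every GR_AUDIT_* object flag is its base access flag shifted left by 10 bits (GR_READ 0x1 -> GR_AUDIT_READ 0x400, GR_WRITE 0x4 -> GR_AUDIT_WRITE 0x1000, and so on), which is presumably the relationship to_gr_audit() relies on. A runnable demonstration with a copied subset of the flags:

#include <stdio.h>

#define GR_READ         0x00000001u
#define GR_WRITE        0x00000004u
#define GR_AUDIT_READ   0x00000400u
#define GR_AUDIT_WRITE  0x00001000u

/* each audit flag is its base access flag shifted left by 10 bits */
static unsigned int to_gr_audit_demo(unsigned int reqmode)
{
    return reqmode << 10;
}

int main(void)
{
    unsigned int obj_mode = GR_READ | GR_AUDIT_WRITE; /* read ok, audit writes */
    unsigned int req = GR_WRITE;

    if (!(obj_mode & req))
        printf("write denied\n");
    if (obj_mode & to_gr_audit_demo(req))
        printf("write would be audited\n");
    return 0;
}
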
83525diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
83526new file mode 100644
83527index 0000000..fb1de5d
83528--- /dev/null
83529+++ b/include/linux/grinternal.h
83530@@ -0,0 +1,230 @@
83531+#ifndef __GRINTERNAL_H
83532+#define __GRINTERNAL_H
83533+
83534+#ifdef CONFIG_GRKERNSEC
83535+
83536+#include <linux/fs.h>
83537+#include <linux/mnt_namespace.h>
83538+#include <linux/nsproxy.h>
83539+#include <linux/gracl.h>
83540+#include <linux/grdefs.h>
83541+#include <linux/grmsg.h>
83542+
83543+void gr_add_learn_entry(const char *fmt, ...)
83544+ __attribute__ ((format (printf, 1, 2)));
83545+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
83546+ const struct vfsmount *mnt);
83547+__u32 gr_check_create(const struct dentry *new_dentry,
83548+ const struct dentry *parent,
83549+ const struct vfsmount *mnt, const __u32 mode);
83550+int gr_check_protected_task(const struct task_struct *task);
83551+__u32 to_gr_audit(const __u32 reqmode);
83552+int gr_set_acls(const int type);
83553+int gr_acl_is_enabled(void);
83554+char gr_roletype_to_char(void);
83555+
83556+void gr_handle_alertkill(struct task_struct *task);
83557+char *gr_to_filename(const struct dentry *dentry,
83558+ const struct vfsmount *mnt);
83559+char *gr_to_filename1(const struct dentry *dentry,
83560+ const struct vfsmount *mnt);
83561+char *gr_to_filename2(const struct dentry *dentry,
83562+ const struct vfsmount *mnt);
83563+char *gr_to_filename3(const struct dentry *dentry,
83564+ const struct vfsmount *mnt);
83565+
83566+extern int grsec_enable_ptrace_readexec;
83567+extern int grsec_enable_harden_ptrace;
83568+extern int grsec_enable_link;
83569+extern int grsec_enable_fifo;
83570+extern int grsec_enable_execve;
83571+extern int grsec_enable_shm;
83572+extern int grsec_enable_execlog;
83573+extern int grsec_enable_signal;
83574+extern int grsec_enable_audit_ptrace;
83575+extern int grsec_enable_forkfail;
83576+extern int grsec_enable_time;
83577+extern int grsec_enable_rofs;
83578+extern int grsec_deny_new_usb;
83579+extern int grsec_enable_chroot_shmat;
83580+extern int grsec_enable_chroot_mount;
83581+extern int grsec_enable_chroot_double;
83582+extern int grsec_enable_chroot_pivot;
83583+extern int grsec_enable_chroot_chdir;
83584+extern int grsec_enable_chroot_chmod;
83585+extern int grsec_enable_chroot_mknod;
83586+extern int grsec_enable_chroot_fchdir;
83587+extern int grsec_enable_chroot_nice;
83588+extern int grsec_enable_chroot_execlog;
83589+extern int grsec_enable_chroot_caps;
83590+extern int grsec_enable_chroot_rename;
83591+extern int grsec_enable_chroot_sysctl;
83592+extern int grsec_enable_chroot_unix;
83593+extern int grsec_enable_symlinkown;
83594+extern kgid_t grsec_symlinkown_gid;
83595+extern int grsec_enable_tpe;
83596+extern kgid_t grsec_tpe_gid;
83597+extern int grsec_enable_tpe_all;
83598+extern int grsec_enable_tpe_invert;
83599+extern int grsec_enable_socket_all;
83600+extern kgid_t grsec_socket_all_gid;
83601+extern int grsec_enable_socket_client;
83602+extern kgid_t grsec_socket_client_gid;
83603+extern int grsec_enable_socket_server;
83604+extern kgid_t grsec_socket_server_gid;
83605+extern kgid_t grsec_audit_gid;
83606+extern int grsec_enable_group;
83607+extern int grsec_enable_log_rwxmaps;
83608+extern int grsec_enable_mount;
83609+extern int grsec_enable_chdir;
83610+extern int grsec_resource_logging;
83611+extern int grsec_enable_blackhole;
83612+extern int grsec_lastack_retries;
83613+extern int grsec_enable_brute;
83614+extern int grsec_enable_harden_ipc;
83615+extern int grsec_lock;
83616+
83617+extern spinlock_t grsec_alert_lock;
83618+extern unsigned long grsec_alert_wtime;
83619+extern unsigned long grsec_alert_fyet;
83620+
83621+extern spinlock_t grsec_audit_lock;
83622+
83623+extern rwlock_t grsec_exec_file_lock;
83624+
83625+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
83626+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
83627+ (tsk)->exec_file->f_path.mnt) : "/")
83628+
83629+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
83630+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
83631+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83632+
83633+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
83634+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
83635+ (tsk)->exec_file->f_path.mnt) : "/")
83636+
83637+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
83638+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
83639+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83640+
83641+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
83642+
83643+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
83644+
83645+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
83646+{
83647+ if (file1 && file2) {
83648+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
83649+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
83650+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
83651+ return true;
83652+ }
83653+
83654+ return false;
83655+}
83656+
83657+#define GR_CHROOT_CAPS {{ \
83658+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
83659+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
83660+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
83661+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
83662+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
83663+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
83664+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
83665+
83666+#define security_learn(normal_msg,args...) \
83667+({ \
83668+ read_lock(&grsec_exec_file_lock); \
83669+ gr_add_learn_entry(normal_msg "\n", ## args); \
83670+ read_unlock(&grsec_exec_file_lock); \
83671+})
83672+
83673+enum {
83674+ GR_DO_AUDIT,
83675+ GR_DONT_AUDIT,
83676+ /* used for non-audit messages that we shouldn't kill the task on */
83677+ GR_DONT_AUDIT_GOOD
83678+};
83679+
83680+enum {
83681+ GR_TTYSNIFF,
83682+ GR_RBAC,
83683+ GR_RBAC_STR,
83684+ GR_STR_RBAC,
83685+ GR_RBAC_MODE2,
83686+ GR_RBAC_MODE3,
83687+ GR_FILENAME,
83688+ GR_SYSCTL_HIDDEN,
83689+ GR_NOARGS,
83690+ GR_ONE_INT,
83691+ GR_ONE_INT_TWO_STR,
83692+ GR_ONE_STR,
83693+ GR_STR_INT,
83694+ GR_TWO_STR_INT,
83695+ GR_TWO_INT,
83696+ GR_TWO_U64,
83697+ GR_THREE_INT,
83698+ GR_FIVE_INT_TWO_STR,
83699+ GR_TWO_STR,
83700+ GR_THREE_STR,
83701+ GR_FOUR_STR,
83702+ GR_STR_FILENAME,
83703+ GR_FILENAME_STR,
83704+ GR_FILENAME_TWO_INT,
83705+ GR_FILENAME_TWO_INT_STR,
83706+ GR_TEXTREL,
83707+ GR_PTRACE,
83708+ GR_RESOURCE,
83709+ GR_CAP,
83710+ GR_SIG,
83711+ GR_SIG2,
83712+ GR_CRASH1,
83713+ GR_CRASH2,
83714+ GR_PSACCT,
83715+ GR_RWXMAP,
83716+ GR_RWXMAPVMA
83717+};
83718+
83719+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
83720+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
83721+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
83722+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
83723+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
83724+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
83725+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
83726+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
83727+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
83728+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
83729+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
83730+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
83731+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
83732+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
83733+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
83734+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
83735+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
83736+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
83737+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
83738+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
83739+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
83740+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
83741+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
83742+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
83743+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
83744+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
83745+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
83746+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
83747+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
83748+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
83749+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
83750+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
83751+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
83752+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
83753+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
83754+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
83755+
83756+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
83757+
83758+#endif
83759+
83760+#endif
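
[Editor's note] All the gr_log_* wrappers funnel into gr_log_varargs(), whose argtype tag tells it how to format the trailing arguments before the task identity implied by the messages' trailing "by " (see grmsg.h below). A hedged in-tree sketch of a call site (the fork-failure caller is invented; GR_FAILFORK_MSG is defined in grmsg.h):

/* invented call site, for illustration only; expands to:
 * gr_log_varargs(GR_DONT_AUDIT, GR_FAILFORK_MSG, GR_ONE_STR, "ENOMEM"); */
static void demo_report_fork_failure(void)
{
    gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
}
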
83761diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
83762new file mode 100644
83763index 0000000..26ef560
83764--- /dev/null
83765+++ b/include/linux/grmsg.h
83766@@ -0,0 +1,118 @@
83767+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
83768+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
83769+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
83770+#define GR_STOPMOD_MSG "denied modification of module state by "
83771+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
83772+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
83773+#define GR_IOPERM_MSG "denied use of ioperm() by "
83774+#define GR_IOPL_MSG "denied use of iopl() by "
83775+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
83776+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
83777+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
83778+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
83779+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
83780+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
83781+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
83782+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
83783+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
83784+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
83785+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
83786+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
83787+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
83788+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
83789+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
83790+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
83791+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
83792+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
83793+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
83794+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
83795+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
83796+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
83797+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
83798+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
83799+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
83800+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
83801+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
83802+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
83803+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
83804+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
83805+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
83806+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
83807+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
83808+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
83809+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
83810+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
83811+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
83812+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
83813+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
83814+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
83815+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
83816+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
83817+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
83818+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
83819+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
83820+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
83821+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
83822+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
83823+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
83824+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
83825+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
83826+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
83827+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
83828+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
83829+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
83830+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
83831+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
83832+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
83833+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
83834+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
83835+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
83836+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
83837+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
83838+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
83839+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
83840+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
83841+#define GR_FAILFORK_MSG "failed fork with errno %s by "
83842+#define GR_NICE_CHROOT_MSG "denied priority change by "
83843+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
83844+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
83845+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
83846+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
83847+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
83848+#define GR_TIME_MSG "time set by "
83849+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
83850+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
83851+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
83852+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
83853+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
83854+#define GR_BIND_MSG "denied bind() by "
83855+#define GR_CONNECT_MSG "denied connect() by "
83856+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
83857+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
83858+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
83859+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
83860+#define GR_CAP_ACL_MSG "use of %s denied for "
83861+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
83862+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
83863+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
83864+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
83865+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
83866+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
83867+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
83868+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
83869+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
83870+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
83871+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
83872+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
83873+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
83874+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
83875+#define GR_VM86_MSG "denied use of vm86 by "
83876+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
83877+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
83878+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
83879+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
83880+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
83881+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
83882+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
83883+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
83884+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
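Most of the message macros above deliberately end mid-sentence ("... by "): the logger appends the acting task's identity via compile-time string-literal concatenation. A minimal userspace sketch of that mechanism, using a hypothetical DEFAULTSECMSG_SKETCH stand-in (the real DEFAULTSECMSG and logging helpers are defined elsewhere in this patch):

#include <stdio.h>

/* Hypothetical stand-in: the real DEFAULTSECMSG format lives elsewhere
 * in the patch; this one is illustrative only. */
#define DEFAULTSECMSG_SKETCH "%.256s[%.16s:%d]"
#define GR_VM86_MSG "denied use of vm86 by "

int main(void)
{
	/* The trailing "by " lets the logger append the task description
	 * through C string-literal concatenation, with no runtime cost. */
	printf("grsec: " GR_VM86_MSG DEFAULTSECMSG_SKETCH "\n",
	       "/bin/bash", "bash", 1234);
	return 0;
}

This prints "grsec: denied use of vm86 by /bin/bash[bash:1234]", matching the shape of the kernel log lines these macros produce.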
83885diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
83886new file mode 100644
83887index 0000000..63c1850
83888--- /dev/null
83889+++ b/include/linux/grsecurity.h
83890@@ -0,0 +1,250 @@
83891+#ifndef GR_SECURITY_H
83892+#define GR_SECURITY_H
83893+#include <linux/fs.h>
83894+#include <linux/fs_struct.h>
83895+#include <linux/binfmts.h>
83896+#include <linux/gracl.h>
83897+
83898+/* notify of brain-dead configs */
83899+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83900+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
83901+#endif
83902+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83903+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
83904+#endif
83905+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
83906+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
83907+#endif
83908+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
83909+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
83910+#endif
83911+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
83912+#error "CONFIG_PAX enabled, but no PaX options are enabled."
83913+#endif
83914+
83915+int gr_handle_new_usb(void);
83916+
83917+void gr_handle_brute_attach(int dumpable);
83918+void gr_handle_brute_check(void);
83919+void gr_handle_kernel_exploit(void);
83920+
83921+char gr_roletype_to_char(void);
83922+
83923+int gr_proc_is_restricted(void);
83924+
83925+int gr_acl_enable_at_secure(void);
83926+
83927+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
83928+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
83929+
83930+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
83931+
83932+void gr_del_task_from_ip_table(struct task_struct *p);
83933+
83934+int gr_pid_is_chrooted(struct task_struct *p);
83935+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
83936+int gr_handle_chroot_nice(void);
83937+int gr_handle_chroot_sysctl(const int op);
83938+int gr_handle_chroot_setpriority(struct task_struct *p,
83939+ const int niceval);
83940+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
83941+int gr_chroot_fhandle(void);
83942+int gr_handle_chroot_chroot(const struct dentry *dentry,
83943+ const struct vfsmount *mnt);
83944+void gr_handle_chroot_chdir(const struct path *path);
83945+int gr_handle_chroot_chmod(const struct dentry *dentry,
83946+ const struct vfsmount *mnt, const int mode);
83947+int gr_handle_chroot_mknod(const struct dentry *dentry,
83948+ const struct vfsmount *mnt, const int mode);
83949+int gr_handle_chroot_mount(const struct dentry *dentry,
83950+ const struct vfsmount *mnt,
83951+ const char *dev_name);
83952+int gr_handle_chroot_pivot(void);
83953+int gr_handle_chroot_unix(const pid_t pid);
83954+
83955+int gr_handle_rawio(const struct inode *inode);
83956+
83957+void gr_handle_ioperm(void);
83958+void gr_handle_iopl(void);
83959+void gr_handle_msr_write(void);
83960+
83961+umode_t gr_acl_umask(void);
83962+
83963+int gr_tpe_allow(const struct file *file);
83964+
83965+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
83966+void gr_clear_chroot_entries(struct task_struct *task);
83967+
83968+void gr_log_forkfail(const int retval);
83969+void gr_log_timechange(void);
83970+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
83971+void gr_log_chdir(const struct dentry *dentry,
83972+ const struct vfsmount *mnt);
83973+void gr_log_chroot_exec(const struct dentry *dentry,
83974+ const struct vfsmount *mnt);
83975+void gr_log_remount(const char *devname, const int retval);
83976+void gr_log_unmount(const char *devname, const int retval);
83977+void gr_log_mount(const char *from, struct path *to, const int retval);
83978+void gr_log_textrel(struct vm_area_struct *vma);
83979+void gr_log_ptgnustack(struct file *file);
83980+void gr_log_rwxmmap(struct file *file);
83981+void gr_log_rwxmprotect(struct vm_area_struct *vma);
83982+
83983+int gr_handle_follow_link(const struct inode *parent,
83984+ const struct inode *inode,
83985+ const struct dentry *dentry,
83986+ const struct vfsmount *mnt);
83987+int gr_handle_fifo(const struct dentry *dentry,
83988+ const struct vfsmount *mnt,
83989+ const struct dentry *dir, const int flag,
83990+ const int acc_mode);
83991+int gr_handle_hardlink(const struct dentry *dentry,
83992+ const struct vfsmount *mnt,
83993+ struct inode *inode,
83994+ const int mode, const struct filename *to);
83995+
83996+int gr_is_capable(const int cap);
83997+int gr_is_capable_nolog(const int cap);
83998+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
83999+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
84000+
84001+void gr_copy_label(struct task_struct *tsk);
84002+void gr_handle_crash(struct task_struct *task, const int sig);
84003+int gr_handle_signal(const struct task_struct *p, const int sig);
84004+int gr_check_crash_uid(const kuid_t uid);
84005+int gr_check_protected_task(const struct task_struct *task);
84006+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
84007+int gr_acl_handle_mmap(const struct file *file,
84008+ const unsigned long prot);
84009+int gr_acl_handle_mprotect(const struct file *file,
84010+ const unsigned long prot);
84011+int gr_check_hidden_task(const struct task_struct *tsk);
84012+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
84013+ const struct vfsmount *mnt);
84014+__u32 gr_acl_handle_utime(const struct dentry *dentry,
84015+ const struct vfsmount *mnt);
84016+__u32 gr_acl_handle_access(const struct dentry *dentry,
84017+ const struct vfsmount *mnt, const int fmode);
84018+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
84019+ const struct vfsmount *mnt, umode_t *mode);
84020+__u32 gr_acl_handle_chown(const struct dentry *dentry,
84021+ const struct vfsmount *mnt);
84022+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
84023+ const struct vfsmount *mnt);
84024+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
84025+ const struct vfsmount *mnt);
84026+int gr_handle_ptrace(struct task_struct *task, const long request);
84027+int gr_handle_proc_ptrace(struct task_struct *task);
84028+__u32 gr_acl_handle_execve(const struct dentry *dentry,
84029+ const struct vfsmount *mnt);
84030+int gr_check_crash_exec(const struct file *filp);
84031+int gr_acl_is_enabled(void);
84032+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
84033+ const kgid_t gid);
84034+int gr_set_proc_label(const struct dentry *dentry,
84035+ const struct vfsmount *mnt,
84036+ const int unsafe_flags);
84037+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
84038+ const struct vfsmount *mnt);
84039+__u32 gr_acl_handle_open(const struct dentry *dentry,
84040+ const struct vfsmount *mnt, int acc_mode);
84041+__u32 gr_acl_handle_creat(const struct dentry *dentry,
84042+ const struct dentry *p_dentry,
84043+ const struct vfsmount *p_mnt,
84044+ int open_flags, int acc_mode, const int imode);
84045+void gr_handle_create(const struct dentry *dentry,
84046+ const struct vfsmount *mnt);
84047+void gr_handle_proc_create(const struct dentry *dentry,
84048+ const struct inode *inode);
84049+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
84050+ const struct dentry *parent_dentry,
84051+ const struct vfsmount *parent_mnt,
84052+ const int mode);
84053+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
84054+ const struct dentry *parent_dentry,
84055+ const struct vfsmount *parent_mnt);
84056+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
84057+ const struct vfsmount *mnt);
84058+void gr_handle_delete(const u64 ino, const dev_t dev);
84059+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
84060+ const struct vfsmount *mnt);
84061+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
84062+ const struct dentry *parent_dentry,
84063+ const struct vfsmount *parent_mnt,
84064+ const struct filename *from);
84065+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
84066+ const struct dentry *parent_dentry,
84067+ const struct vfsmount *parent_mnt,
84068+ const struct dentry *old_dentry,
84069+ const struct vfsmount *old_mnt, const struct filename *to);
84070+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
84071+int gr_acl_handle_rename(struct dentry *new_dentry,
84072+ struct dentry *parent_dentry,
84073+ const struct vfsmount *parent_mnt,
84074+ struct dentry *old_dentry,
84075+ struct inode *old_parent_inode,
84076+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
84077+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84078+ struct dentry *old_dentry,
84079+ struct dentry *new_dentry,
84080+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
84081+__u32 gr_check_link(const struct dentry *new_dentry,
84082+ const struct dentry *parent_dentry,
84083+ const struct vfsmount *parent_mnt,
84084+ const struct dentry *old_dentry,
84085+ const struct vfsmount *old_mnt);
84086+int gr_acl_handle_filldir(const struct file *file, const char *name,
84087+ const unsigned int namelen, const u64 ino);
84088+
84089+__u32 gr_acl_handle_unix(const struct dentry *dentry,
84090+ const struct vfsmount *mnt);
84091+void gr_acl_handle_exit(void);
84092+void gr_acl_handle_psacct(struct task_struct *task, const long code);
84093+int gr_acl_handle_procpidmem(const struct task_struct *task);
84094+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
84095+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
84096+void gr_audit_ptrace(struct task_struct *task);
84097+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
84098+u64 gr_get_ino_from_dentry(struct dentry *dentry);
84099+void gr_put_exec_file(struct task_struct *task);
84100+
84101+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
84102+
84103+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
84104+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
84105+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
84106+ struct dentry *newdentry, struct vfsmount *newmnt);
84107+
84108+#ifdef CONFIG_GRKERNSEC_RESLOG
84109+extern void gr_log_resource(const struct task_struct *task, const int res,
84110+ const unsigned long wanted, const int gt);
84111+#else
84112+static inline void gr_log_resource(const struct task_struct *task, const int res,
84113+ const unsigned long wanted, const int gt)
84114+{
84115+}
84116+#endif
84117+
84118+#ifdef CONFIG_GRKERNSEC
84119+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
84120+void gr_handle_vm86(void);
84121+void gr_handle_mem_readwrite(u64 from, u64 to);
84122+
84123+void gr_log_badprocpid(const char *entry);
84124+
84125+extern int grsec_enable_dmesg;
84126+extern int grsec_disable_privio;
84127+
84128+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84129+extern kgid_t grsec_proc_gid;
84130+#endif
84131+
84132+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84133+extern int grsec_enable_chroot_findtask;
84134+#endif
84135+#ifdef CONFIG_GRKERNSEC_SETXID
84136+extern int grsec_enable_setxid;
84137+#endif
84138+#endif
84139+
84140+#endif
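The CONFIG_GRKERNSEC_RESLOG block near the end of the header uses the standard extern-or-empty-inline pattern: with the option off, the hook collapses to a no-op static inline, so call sites never need an #ifdef. A self-contained model of the same pattern (FEATURE_RESLOG is a stand-in name for the config symbol):

#include <stdio.h>

/* Toggle this define to see the hook compile down to a no-op. */
#define FEATURE_RESLOG

#ifdef FEATURE_RESLOG
void log_resource(int res, unsigned long wanted)
{
	printf("resource %d: wanted %lu\n", res, wanted);
}
#else
static inline void log_resource(int res, unsigned long wanted)
{
}
#endif

int main(void)
{
	log_resource(6, 4096);	/* unconditional call site, as in the header */
	return 0;
}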
84141diff --git a/include/linux/grsock.h b/include/linux/grsock.h
84142new file mode 100644
84143index 0000000..e7ffaaf
84144--- /dev/null
84145+++ b/include/linux/grsock.h
84146@@ -0,0 +1,19 @@
84147+#ifndef __GRSOCK_H
84148+#define __GRSOCK_H
84149+
84150+extern void gr_attach_curr_ip(const struct sock *sk);
84151+extern int gr_handle_sock_all(const int family, const int type,
84152+ const int protocol);
84153+extern int gr_handle_sock_server(const struct sockaddr *sck);
84154+extern int gr_handle_sock_server_other(const struct sock *sck);
84155+extern int gr_handle_sock_client(const struct sockaddr *sck);
84156+extern int gr_search_connect(struct socket * sock,
84157+ struct sockaddr_in * addr);
84158+extern int gr_search_bind(struct socket * sock,
84159+ struct sockaddr_in * addr);
84160+extern int gr_search_listen(struct socket * sock);
84161+extern int gr_search_accept(struct socket * sock);
84162+extern int gr_search_socket(const int domain, const int type,
84163+ const int protocol);
84164+
84165+#endif
84166diff --git a/include/linux/highmem.h b/include/linux/highmem.h
84167index 9286a46..373f27f 100644
84168--- a/include/linux/highmem.h
84169+++ b/include/linux/highmem.h
84170@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
84171 kunmap_atomic(kaddr);
84172 }
84173
84174+static inline void sanitize_highpage(struct page *page)
84175+{
84176+ void *kaddr;
84177+ unsigned long flags;
84178+
84179+ local_irq_save(flags);
84180+ kaddr = kmap_atomic(page);
84181+ clear_page(kaddr);
84182+ kunmap_atomic(kaddr);
84183+ local_irq_restore(flags);
84184+}
84185+
84186 static inline void zero_user_segments(struct page *page,
84187 unsigned start1, unsigned end1,
84188 unsigned start2, unsigned end2)
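sanitize_highpage() above is essentially clear_highpage() with the temporary mapping bracketed by local_irq_save()/local_irq_restore(). A rough userspace model of the zeroing step only — PAGE_SZ is a stand-in for PAGE_SIZE, and the kernel-only mapping and irq calls survive purely as comments:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096	/* stand-in for PAGE_SIZE */

static void sanitize_page_sketch(unsigned char *page)
{
	/* local_irq_save(flags); kaddr = kmap_atomic(page); */
	memset(page, 0, PAGE_SZ);		/* clear_page(kaddr) */
	/* kunmap_atomic(kaddr); local_irq_restore(flags); */
}

int main(void)
{
	unsigned char page[PAGE_SZ];

	memset(page, 0xAA, sizeof(page));	/* simulate stale data */
	sanitize_page_sketch(page);
	printf("byte 0 after sanitize: %u\n", page[0]);
	return 0;
}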
84189diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
84190index 1c7b89a..7dda400 100644
84191--- a/include/linux/hwmon-sysfs.h
84192+++ b/include/linux/hwmon-sysfs.h
84193@@ -25,7 +25,8 @@
84194 struct sensor_device_attribute{
84195 struct device_attribute dev_attr;
84196 int index;
84197-};
84198+} __do_const;
84199+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
84200 #define to_sensor_dev_attr(_dev_attr) \
84201 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
84202
84203@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
84204 struct device_attribute dev_attr;
84205 u8 index;
84206 u8 nr;
84207-};
84208+} __do_const;
84209+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
84210 #define to_sensor_dev_attr_2(_dev_attr) \
84211 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
84212
84213diff --git a/include/linux/i2c.h b/include/linux/i2c.h
84214index f17da50..2f8b203 100644
84215--- a/include/linux/i2c.h
84216+++ b/include/linux/i2c.h
84217@@ -409,6 +409,7 @@ struct i2c_algorithm {
84218 int (*unreg_slave)(struct i2c_client *client);
84219 #endif
84220 };
84221+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
84222
84223 /**
84224 * struct i2c_bus_recovery_info - I2C bus recovery information
84225diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
84226index aff7ad8..3942bbd 100644
84227--- a/include/linux/if_pppox.h
84228+++ b/include/linux/if_pppox.h
84229@@ -76,7 +76,7 @@ struct pppox_proto {
84230 int (*ioctl)(struct socket *sock, unsigned int cmd,
84231 unsigned long arg);
84232 struct module *owner;
84233-};
84234+} __do_const;
84235
84236 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
84237 extern void unregister_pppox_proto(int proto_num);
84238diff --git a/include/linux/init.h b/include/linux/init.h
84239index 2df8e8d..3e1280d 100644
84240--- a/include/linux/init.h
84241+++ b/include/linux/init.h
84242@@ -37,9 +37,17 @@
84243 * section.
84244 */
84245
84246+#define add_init_latent_entropy __latent_entropy
84247+
84248+#ifdef CONFIG_MEMORY_HOTPLUG
84249+#define add_meminit_latent_entropy
84250+#else
84251+#define add_meminit_latent_entropy __latent_entropy
84252+#endif
84253+
84254 /* These are for everybody (although not all archs will actually
84255 discard it in modules) */
84256-#define __init __section(.init.text) __cold notrace
84257+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
84258 #define __initdata __section(.init.data)
84259 #define __initconst __constsection(.init.rodata)
84260 #define __exitdata __section(.exit.data)
84261@@ -100,7 +108,7 @@
84262 #define __cpuexitconst
84263
84264 /* Used for MEMORY_HOTPLUG */
84265-#define __meminit __section(.meminit.text) __cold notrace
84266+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
84267 #define __meminitdata __section(.meminit.data)
84268 #define __meminitconst __constsection(.meminit.rodata)
84269 #define __memexit __section(.memexit.text) __exitused __cold notrace
84270diff --git a/include/linux/init_task.h b/include/linux/init_task.h
84271index 696d223..6d6b39f 100644
84272--- a/include/linux/init_task.h
84273+++ b/include/linux/init_task.h
84274@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
84275
84276 #define INIT_TASK_COMM "swapper"
84277
84278+#ifdef CONFIG_X86
84279+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
84280+#else
84281+#define INIT_TASK_THREAD_INFO
84282+#endif
84283+
84284 #ifdef CONFIG_RT_MUTEXES
84285 # define INIT_RT_MUTEXES(tsk) \
84286 .pi_waiters = RB_ROOT, \
84287@@ -224,6 +230,7 @@ extern struct task_group root_task_group;
84288 RCU_POINTER_INITIALIZER(cred, &init_cred), \
84289 .comm = INIT_TASK_COMM, \
84290 .thread = INIT_THREAD, \
84291+ INIT_TASK_THREAD_INFO \
84292 .fs = &init_fs, \
84293 .files = &init_files, \
84294 .signal = &init_signals, \
84295diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
84296index 2e88580..f6a99a0 100644
84297--- a/include/linux/interrupt.h
84298+++ b/include/linux/interrupt.h
84299@@ -420,8 +420,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
84300
84301 struct softirq_action
84302 {
84303- void (*action)(struct softirq_action *);
84304-};
84305+ void (*action)(void);
84306+} __no_const;
84307
84308 asmlinkage void do_softirq(void);
84309 asmlinkage void __do_softirq(void);
84310@@ -435,7 +435,7 @@ static inline void do_softirq_own_stack(void)
84311 }
84312 #endif
84313
84314-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
84315+extern void open_softirq(int nr, void (*action)(void));
84316 extern void softirq_init(void);
84317 extern void __raise_softirq_irqoff(unsigned int nr);
84318
84319diff --git a/include/linux/iommu.h b/include/linux/iommu.h
84320index 38daa45..4de4317 100644
84321--- a/include/linux/iommu.h
84322+++ b/include/linux/iommu.h
84323@@ -147,7 +147,7 @@ struct iommu_ops {
84324
84325 unsigned long pgsize_bitmap;
84326 void *priv;
84327-};
84328+} __do_const;
84329
84330 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
84331 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
84332diff --git a/include/linux/ioport.h b/include/linux/ioport.h
84333index 2c525022..345b106 100644
84334--- a/include/linux/ioport.h
84335+++ b/include/linux/ioport.h
84336@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
84337 int adjust_resource(struct resource *res, resource_size_t start,
84338 resource_size_t size);
84339 resource_size_t resource_alignment(struct resource *res);
84340-static inline resource_size_t resource_size(const struct resource *res)
84341+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
84342 {
84343 return res->end - res->start + 1;
84344 }
84345diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
84346index 1eee6bc..9cf4912 100644
84347--- a/include/linux/ipc_namespace.h
84348+++ b/include/linux/ipc_namespace.h
84349@@ -60,7 +60,7 @@ struct ipc_namespace {
84350 struct user_namespace *user_ns;
84351
84352 struct ns_common ns;
84353-};
84354+} __randomize_layout;
84355
84356 extern struct ipc_namespace init_ipc_ns;
84357 extern atomic_t nr_ipc_ns;
84358diff --git a/include/linux/irq.h b/include/linux/irq.h
84359index d09ec7a..f373eb5 100644
84360--- a/include/linux/irq.h
84361+++ b/include/linux/irq.h
84362@@ -364,7 +364,8 @@ struct irq_chip {
84363 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
84364
84365 unsigned long flags;
84366-};
84367+} __do_const;
84368+typedef struct irq_chip __no_const irq_chip_no_const;
84369
84370 /*
84371 * irq_chip specific flags
84372diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
84373index 71d706d..817cdec 100644
84374--- a/include/linux/irqchip/arm-gic.h
84375+++ b/include/linux/irqchip/arm-gic.h
84376@@ -95,7 +95,7 @@
84377
84378 struct device_node;
84379
84380-extern struct irq_chip gic_arch_extn;
84381+extern irq_chip_no_const gic_arch_extn;
84382
84383 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
84384 u32 offset, struct device_node *);
84385diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
84386index dd1109f..4f4fdda 100644
84387--- a/include/linux/irqdesc.h
84388+++ b/include/linux/irqdesc.h
84389@@ -61,7 +61,7 @@ struct irq_desc {
84390 unsigned int irq_count; /* For detecting broken IRQs */
84391 unsigned long last_unhandled; /* Aging timer for unhandled count */
84392 unsigned int irqs_unhandled;
84393- atomic_t threads_handled;
84394+ atomic_unchecked_t threads_handled;
84395 int threads_handled_last;
84396 raw_spinlock_t lock;
84397 struct cpumask *percpu_enabled;
84398diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
84399index 676d730..5e05daec 100644
84400--- a/include/linux/irqdomain.h
84401+++ b/include/linux/irqdomain.h
84402@@ -40,6 +40,7 @@ struct device_node;
84403 struct irq_domain;
84404 struct of_device_id;
84405 struct irq_chip;
84406+struct irq_chip_no_const;
84407 struct irq_data;
84408
84409 /* Number of irqs reserved for a legacy isa controller */
84410diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
84411index c367cbd..c9b79e6 100644
84412--- a/include/linux/jiffies.h
84413+++ b/include/linux/jiffies.h
84414@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
84415 /*
84416 * Convert various time units to each other:
84417 */
84418-extern unsigned int jiffies_to_msecs(const unsigned long j);
84419-extern unsigned int jiffies_to_usecs(const unsigned long j);
84420+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
84421+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
84422
84423-static inline u64 jiffies_to_nsecs(const unsigned long j)
84424+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
84425 {
84426 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
84427 }
84428
84429-extern unsigned long msecs_to_jiffies(const unsigned int m);
84430-extern unsigned long usecs_to_jiffies(const unsigned int u);
84431+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
84432+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
84433 extern unsigned long timespec_to_jiffies(const struct timespec *value);
84434 extern void jiffies_to_timespec(const unsigned long jiffies,
84435- struct timespec *value);
84436-extern unsigned long timeval_to_jiffies(const struct timeval *value);
84437+ struct timespec *value) __intentional_overflow(-1);
84438+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
84439 extern void jiffies_to_timeval(const unsigned long jiffies,
84440 struct timeval *value);
84441
84442diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
84443index 6883e19..e854fcb 100644
84444--- a/include/linux/kallsyms.h
84445+++ b/include/linux/kallsyms.h
84446@@ -15,7 +15,8 @@
84447
84448 struct module;
84449
84450-#ifdef CONFIG_KALLSYMS
84451+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
84452+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
84453 /* Lookup the address for a symbol. Returns 0 if not found. */
84454 unsigned long kallsyms_lookup_name(const char *name);
84455
84456@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
84457 /* Stupid that this does nothing, but I didn't create this mess. */
84458 #define __print_symbol(fmt, addr)
84459 #endif /*CONFIG_KALLSYMS*/
84460+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
84461+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
84462+extern unsigned long kallsyms_lookup_name(const char *name);
84463+extern void __print_symbol(const char *fmt, unsigned long address);
84464+extern int sprint_backtrace(char *buffer, unsigned long address);
84465+extern int sprint_symbol(char *buffer, unsigned long address);
84466+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
84467+const char *kallsyms_lookup(unsigned long addr,
84468+ unsigned long *symbolsize,
84469+ unsigned long *offset,
84470+ char **modname, char *namebuf);
84471+extern int kallsyms_lookup_size_offset(unsigned long addr,
84472+ unsigned long *symbolsize,
84473+ unsigned long *offset);
84474+#endif
84475
84476 /* This macro allows us to keep printk typechecking */
84477 static __printf(1, 2)
84478diff --git a/include/linux/kernel.h b/include/linux/kernel.h
84479index d6d630d..feea1f5 100644
84480--- a/include/linux/kernel.h
84481+++ b/include/linux/kernel.h
84482@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
84483 /* Obsolete, do not use. Use kstrto<foo> instead */
84484
84485 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
84486-extern long simple_strtol(const char *,char **,unsigned int);
84487+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
84488 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
84489 extern long long simple_strtoll(const char *,char **,unsigned int);
84490
84491diff --git a/include/linux/key-type.h b/include/linux/key-type.h
84492index ff9f1d3..6712be5 100644
84493--- a/include/linux/key-type.h
84494+++ b/include/linux/key-type.h
84495@@ -152,7 +152,7 @@ struct key_type {
84496 /* internal fields */
84497 struct list_head link; /* link in types list */
84498 struct lock_class_key lock_class; /* key->sem lock class */
84499-};
84500+} __do_const;
84501
84502 extern struct key_type key_type_keyring;
84503
84504diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
84505index e465bb1..19f605fd 100644
84506--- a/include/linux/kgdb.h
84507+++ b/include/linux/kgdb.h
84508@@ -52,7 +52,7 @@ extern int kgdb_connected;
84509 extern int kgdb_io_module_registered;
84510
84511 extern atomic_t kgdb_setting_breakpoint;
84512-extern atomic_t kgdb_cpu_doing_single_step;
84513+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
84514
84515 extern struct task_struct *kgdb_usethread;
84516 extern struct task_struct *kgdb_contthread;
84517@@ -254,7 +254,7 @@ struct kgdb_arch {
84518 void (*correct_hw_break)(void);
84519
84520 void (*enable_nmi)(bool on);
84521-};
84522+} __do_const;
84523
84524 /**
84525 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
84526@@ -279,7 +279,7 @@ struct kgdb_io {
84527 void (*pre_exception) (void);
84528 void (*post_exception) (void);
84529 int is_console;
84530-};
84531+} __do_const;
84532
84533 extern struct kgdb_arch arch_kgdb_ops;
84534
84535diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
84536index e705467..a92471d 100644
84537--- a/include/linux/kmemleak.h
84538+++ b/include/linux/kmemleak.h
84539@@ -27,7 +27,7 @@
84540
84541 extern void kmemleak_init(void) __ref;
84542 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
84543- gfp_t gfp) __ref;
84544+ gfp_t gfp) __ref __size_overflow(2);
84545 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
84546 extern void kmemleak_free(const void *ptr) __ref;
84547 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
84548@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
84549 static inline void kmemleak_init(void)
84550 {
84551 }
84552-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
84553+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
84554 gfp_t gfp)
84555 {
84556 }
84557diff --git a/include/linux/kmod.h b/include/linux/kmod.h
84558index 0555cc6..40116ce 100644
84559--- a/include/linux/kmod.h
84560+++ b/include/linux/kmod.h
84561@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
84562 * usually useless though. */
84563 extern __printf(2, 3)
84564 int __request_module(bool wait, const char *name, ...);
84565+extern __printf(3, 4)
84566+int ___request_module(bool wait, char *param_name, const char *name, ...);
84567 #define request_module(mod...) __request_module(true, mod)
84568 #define request_module_nowait(mod...) __request_module(false, mod)
84569 #define try_then_request_module(x, mod...) \
84570@@ -57,6 +59,9 @@ struct subprocess_info {
84571 struct work_struct work;
84572 struct completion *complete;
84573 char *path;
84574+#ifdef CONFIG_GRKERNSEC
84575+ char *origpath;
84576+#endif
84577 char **argv;
84578 char **envp;
84579 int wait;
84580diff --git a/include/linux/kobject.h b/include/linux/kobject.h
84581index 2d61b90..a1d0a13 100644
84582--- a/include/linux/kobject.h
84583+++ b/include/linux/kobject.h
84584@@ -118,7 +118,7 @@ struct kobj_type {
84585 struct attribute **default_attrs;
84586 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
84587 const void *(*namespace)(struct kobject *kobj);
84588-};
84589+} __do_const;
84590
84591 struct kobj_uevent_env {
84592 char *argv[3];
84593@@ -142,6 +142,7 @@ struct kobj_attribute {
84594 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
84595 const char *buf, size_t count);
84596 };
84597+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
84598
84599 extern const struct sysfs_ops kobj_sysfs_ops;
84600
84601@@ -169,7 +170,7 @@ struct kset {
84602 spinlock_t list_lock;
84603 struct kobject kobj;
84604 const struct kset_uevent_ops *uevent_ops;
84605-};
84606+} __randomize_layout;
84607
84608 extern void kset_init(struct kset *kset);
84609 extern int __must_check kset_register(struct kset *kset);
84610diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
84611index df32d25..fb52e27 100644
84612--- a/include/linux/kobject_ns.h
84613+++ b/include/linux/kobject_ns.h
84614@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
84615 const void *(*netlink_ns)(struct sock *sk);
84616 const void *(*initial_ns)(void);
84617 void (*drop_ns)(void *);
84618-};
84619+} __do_const;
84620
84621 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
84622 int kobj_ns_type_registered(enum kobj_ns_type type);
84623diff --git a/include/linux/kref.h b/include/linux/kref.h
84624index 484604d..0f6c5b6 100644
84625--- a/include/linux/kref.h
84626+++ b/include/linux/kref.h
84627@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
84628 static inline int kref_sub(struct kref *kref, unsigned int count,
84629 void (*release)(struct kref *kref))
84630 {
84631- WARN_ON(release == NULL);
84632+ BUG_ON(release == NULL);
84633
84634 if (atomic_sub_and_test((int) count, &kref->refcount)) {
84635 release(kref);
84636diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
84637index d12b210..d91fd76 100644
84638--- a/include/linux/kvm_host.h
84639+++ b/include/linux/kvm_host.h
84640@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
84641 {
84642 }
84643 #endif
84644-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84645+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84646 struct module *module);
84647 void kvm_exit(void);
84648
84649@@ -633,7 +633,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
84650 struct kvm_guest_debug *dbg);
84651 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
84652
84653-int kvm_arch_init(void *opaque);
84654+int kvm_arch_init(const void *opaque);
84655 void kvm_arch_exit(void);
84656
84657 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
84658diff --git a/include/linux/libata.h b/include/linux/libata.h
84659index 6b08cc1..248c5e9 100644
84660--- a/include/linux/libata.h
84661+++ b/include/linux/libata.h
84662@@ -980,7 +980,7 @@ struct ata_port_operations {
84663 * fields must be pointers.
84664 */
84665 const struct ata_port_operations *inherits;
84666-};
84667+} __do_const;
84668
84669 struct ata_port_info {
84670 unsigned long flags;
84671diff --git a/include/linux/linkage.h b/include/linux/linkage.h
84672index a6a42dd..6c5ebce 100644
84673--- a/include/linux/linkage.h
84674+++ b/include/linux/linkage.h
84675@@ -36,6 +36,7 @@
84676 #endif
84677
84678 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
84679+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
84680 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
84681
84682 /*
84683diff --git a/include/linux/list.h b/include/linux/list.h
84684index feb773c..98f3075 100644
84685--- a/include/linux/list.h
84686+++ b/include/linux/list.h
84687@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
84688 extern void list_del(struct list_head *entry);
84689 #endif
84690
84691+extern void __pax_list_add(struct list_head *new,
84692+ struct list_head *prev,
84693+ struct list_head *next);
84694+static inline void pax_list_add(struct list_head *new, struct list_head *head)
84695+{
84696+ __pax_list_add(new, head, head->next);
84697+}
84698+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
84699+{
84700+ __pax_list_add(new, head->prev, head);
84701+}
84702+extern void pax_list_del(struct list_head *entry);
84703+
84704 /**
84705 * list_replace - replace old entry by new one
84706 * @old : the element to be replaced
84707@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
84708 INIT_LIST_HEAD(entry);
84709 }
84710
84711+extern void pax_list_del_init(struct list_head *entry);
84712+
84713 /**
84714 * list_move - delete from one list and add as another's head
84715 * @list: the entry to move
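The pax_list_* wrappers added above pick their neighbours exactly like list_add()/list_add_tail() and funnel every insertion through a single out-of-line __pax_list_add() — which, elsewhere in this patch, is presumably where the KERNEXEC open/close of read-only memory happens. A runnable userspace model of the splice, with __pax_list_add() reduced to a plain link-in:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void __pax_list_add(struct list_head *new,
			   struct list_head *prev, struct list_head *next)
{
	/* In the kernel this is the one place that writes the links,
	 * so write protection can be lifted and restored here alone. */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

static inline void pax_list_add(struct list_head *new, struct list_head *head)
{
	__pax_list_add(new, head, head->next);	/* insert at head */
}

static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	__pax_list_add(new, head->prev, head);	/* insert at tail */
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty circular list */
	struct list_head a, b;

	pax_list_add(&a, &head);	/* a becomes first */
	pax_list_add_tail(&b, &head);	/* b becomes last */
	printf("first==a: %d, last==b: %d\n",
	       head.next == &a, head.prev == &b);
	return 0;
}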
84716diff --git a/include/linux/lockref.h b/include/linux/lockref.h
84717index b10b122..d37b3de 100644
84718--- a/include/linux/lockref.h
84719+++ b/include/linux/lockref.h
84720@@ -28,7 +28,7 @@ struct lockref {
84721 #endif
84722 struct {
84723 spinlock_t lock;
84724- int count;
84725+ atomic_t count;
84726 };
84727 };
84728 };
84729@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
84730 extern int lockref_get_not_dead(struct lockref *);
84731
84732 /* Must be called under spinlock for reliable results */
84733-static inline int __lockref_is_dead(const struct lockref *l)
84734+static inline int __lockref_is_dead(const struct lockref *lockref)
84735 {
84736- return ((int)l->count < 0);
84737+ return atomic_read(&lockref->count) < 0;
84738+}
84739+
84740+static inline int __lockref_read(const struct lockref *lockref)
84741+{
84742+ return atomic_read(&lockref->count);
84743+}
84744+
84745+static inline void __lockref_set(struct lockref *lockref, int count)
84746+{
84747+ atomic_set(&lockref->count, count);
84748+}
84749+
84750+static inline void __lockref_inc(struct lockref *lockref)
84751+{
84752+ atomic_inc(&lockref->count);
84753+}
84754+
84755+static inline void __lockref_dec(struct lockref *lockref)
84756+{
84757+ atomic_dec(&lockref->count);
84758 }
84759
84760 #endif /* __LINUX_LOCKREF_H */
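The lockref change above turns the bare int count into an atomic_t and routes every access through tiny helpers (__lockref_read/_set/_inc/_dec), so no caller touches the field directly. A userspace model of the same funnelling with C11 atomics — the _sketch names are stand-ins:

#include <stdatomic.h>
#include <stdio.h>

struct lockref_sketch { atomic_int count; };

static inline int lockref_read(struct lockref_sketch *l)
{
	return atomic_load(&l->count);
}

static inline void lockref_inc(struct lockref_sketch *l)
{
	atomic_fetch_add(&l->count, 1);
}

static inline void lockref_dec(struct lockref_sketch *l)
{
	atomic_fetch_sub(&l->count, 1);
}

static inline int lockref_is_dead(struct lockref_sketch *l)
{
	return lockref_read(l) < 0;	/* mirrors __lockref_is_dead() */
}

int main(void)
{
	struct lockref_sketch l;

	atomic_init(&l.count, 1);
	lockref_inc(&l);
	lockref_dec(&l);
	printf("count=%d dead=%d\n", lockref_read(&l), lockref_is_dead(&l));
	return 0;
}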
84761diff --git a/include/linux/math64.h b/include/linux/math64.h
84762index c45c089..298841c 100644
84763--- a/include/linux/math64.h
84764+++ b/include/linux/math64.h
84765@@ -15,7 +15,7 @@
84766 * This is commonly provided by 32bit archs to provide an optimized 64bit
84767 * divide.
84768 */
84769-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84770+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84771 {
84772 *remainder = dividend % divisor;
84773 return dividend / divisor;
84774@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
84775 /**
84776 * div64_u64 - unsigned 64bit divide with 64bit divisor
84777 */
84778-static inline u64 div64_u64(u64 dividend, u64 divisor)
84779+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
84780 {
84781 return dividend / divisor;
84782 }
84783@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
84784 #define div64_ul(x, y) div_u64((x), (y))
84785
84786 #ifndef div_u64_rem
84787-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84788+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84789 {
84790 *remainder = do_div(dividend, divisor);
84791 return dividend;
84792@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
84793 #endif
84794
84795 #ifndef div64_u64
84796-extern u64 div64_u64(u64 dividend, u64 divisor);
84797+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
84798 #endif
84799
84800 #ifndef div64_s64
84801@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
84802 * divide.
84803 */
84804 #ifndef div_u64
84805-static inline u64 div_u64(u64 dividend, u32 divisor)
84806+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
84807 {
84808 u32 remainder;
84809 return div_u64_rem(dividend, divisor, &remainder);
84810diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
84811index 3d385c8..deacb6a 100644
84812--- a/include/linux/mempolicy.h
84813+++ b/include/linux/mempolicy.h
84814@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
84815 }
84816
84817 #define vma_policy(vma) ((vma)->vm_policy)
84818+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84819+{
84820+ vma->vm_policy = pol;
84821+}
84822
84823 static inline void mpol_get(struct mempolicy *pol)
84824 {
84825@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
84826 }
84827
84828 #define vma_policy(vma) NULL
84829+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84830+{
84831+}
84832
84833 static inline int
84834 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
84835diff --git a/include/linux/mm.h b/include/linux/mm.h
84836index 47a9392..ef645bc 100644
84837--- a/include/linux/mm.h
84838+++ b/include/linux/mm.h
84839@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
84840
84841 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
84842 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
84843+
84844+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
84845+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
84846+#endif
84847+
84848 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
84849 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
84850 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
84851@@ -254,8 +259,8 @@ struct vm_operations_struct {
84852 /* called by access_process_vm when get_user_pages() fails, typically
84853 * for use by special VMAs that can switch between memory and hardware
84854 */
84855- int (*access)(struct vm_area_struct *vma, unsigned long addr,
84856- void *buf, int len, int write);
84857+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
84858+ void *buf, size_t len, int write);
84859
84860 /* Called by the /proc/PID/maps code to ask the vma whether it
84861 * has a special name. Returning non-NULL will also cause this
84862@@ -293,6 +298,7 @@ struct vm_operations_struct {
84863 struct page *(*find_special_page)(struct vm_area_struct *vma,
84864 unsigned long addr);
84865 };
84866+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
84867
84868 struct mmu_gather;
84869 struct inode;
84870@@ -1213,8 +1219,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
84871 unsigned long *pfn);
84872 int follow_phys(struct vm_area_struct *vma, unsigned long address,
84873 unsigned int flags, unsigned long *prot, resource_size_t *phys);
84874-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84875- void *buf, int len, int write);
84876+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84877+ void *buf, size_t len, int write);
84878
84879 static inline void unmap_shared_mapping_range(struct address_space *mapping,
84880 loff_t const holebegin, loff_t const holelen)
84881@@ -1254,9 +1260,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
84882 }
84883 #endif
84884
84885-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
84886-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84887- void *buf, int len, int write);
84888+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
84889+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84890+ void *buf, size_t len, int write);
84891
84892 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84893 unsigned long start, unsigned long nr_pages,
84894@@ -1299,34 +1305,6 @@ int set_page_dirty_lock(struct page *page);
84895 int clear_page_dirty_for_io(struct page *page);
84896 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
84897
84898-/* Is the vma a continuation of the stack vma above it? */
84899-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
84900-{
84901- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
84902-}
84903-
84904-static inline int stack_guard_page_start(struct vm_area_struct *vma,
84905- unsigned long addr)
84906-{
84907- return (vma->vm_flags & VM_GROWSDOWN) &&
84908- (vma->vm_start == addr) &&
84909- !vma_growsdown(vma->vm_prev, addr);
84910-}
84911-
84912-/* Is the vma a continuation of the stack vma below it? */
84913-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
84914-{
84915- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
84916-}
84917-
84918-static inline int stack_guard_page_end(struct vm_area_struct *vma,
84919- unsigned long addr)
84920-{
84921- return (vma->vm_flags & VM_GROWSUP) &&
84922- (vma->vm_end == addr) &&
84923- !vma_growsup(vma->vm_next, addr);
84924-}
84925-
84926 extern struct task_struct *task_of_stack(struct task_struct *task,
84927 struct vm_area_struct *vma, bool in_group);
84928
84929@@ -1449,8 +1427,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
84930 {
84931 return 0;
84932 }
84933+
84934+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
84935+ unsigned long address)
84936+{
84937+ return 0;
84938+}
84939 #else
84940 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84941+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84942 #endif
84943
84944 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
84945@@ -1460,6 +1445,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
84946 return 0;
84947 }
84948
84949+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
84950+ unsigned long address)
84951+{
84952+ return 0;
84953+}
84954+
84955 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
84956
84957 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
84958@@ -1472,6 +1463,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
84959
84960 #else
84961 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
84962+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
84963
84964 static inline void mm_nr_pmds_init(struct mm_struct *mm)
84965 {
84966@@ -1509,11 +1501,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
84967 NULL: pud_offset(pgd, address);
84968 }
84969
84970+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
84971+{
84972+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
84973+ NULL: pud_offset(pgd, address);
84974+}
84975+
84976 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
84977 {
84978 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
84979 NULL: pmd_offset(pud, address);
84980 }
84981+
84982+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
84983+{
84984+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
84985+ NULL: pmd_offset(pud, address);
84986+}
84987 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
84988
84989 #if USE_SPLIT_PTE_PTLOCKS
84990@@ -1890,12 +1894,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
84991 bool *need_rmap_locks);
84992 extern void exit_mmap(struct mm_struct *);
84993
84994+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
84995+extern void gr_learn_resource(const struct task_struct *task, const int res,
84996+ const unsigned long wanted, const int gt);
84997+#else
84998+static inline void gr_learn_resource(const struct task_struct *task, const int res,
84999+ const unsigned long wanted, const int gt)
85000+{
85001+}
85002+#endif
85003+
85004 static inline int check_data_rlimit(unsigned long rlim,
85005 unsigned long new,
85006 unsigned long start,
85007 unsigned long end_data,
85008 unsigned long start_data)
85009 {
85010+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
85011 if (rlim < RLIM_INFINITY) {
85012 if (((new - start) + (end_data - start_data)) > rlim)
85013 return -ENOSPC;
85014@@ -1920,7 +1935,7 @@ extern int install_special_mapping(struct mm_struct *mm,
85015 unsigned long addr, unsigned long len,
85016 unsigned long flags, struct page **pages);
85017
85018-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
85019+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
85020
85021 extern unsigned long mmap_region(struct file *file, unsigned long addr,
85022 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
85023@@ -1928,6 +1943,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
85024 unsigned long len, unsigned long prot, unsigned long flags,
85025 unsigned long pgoff, unsigned long *populate);
85026 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
85027+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
85028
85029 #ifdef CONFIG_MMU
85030 extern int __mm_populate(unsigned long addr, unsigned long len,
85031@@ -1956,10 +1972,11 @@ struct vm_unmapped_area_info {
85032 unsigned long high_limit;
85033 unsigned long align_mask;
85034 unsigned long align_offset;
85035+ unsigned long threadstack_offset;
85036 };
85037
85038-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
85039-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85040+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
85041+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
85042
85043 /*
85044 * Search for an unmapped address range.
85045@@ -1971,7 +1988,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85046 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
85047 */
85048 static inline unsigned long
85049-vm_unmapped_area(struct vm_unmapped_area_info *info)
85050+vm_unmapped_area(const struct vm_unmapped_area_info *info)
85051 {
85052 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
85053 return unmapped_area(info);
85054@@ -2033,6 +2050,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
85055 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
85056 struct vm_area_struct **pprev);
85057
85058+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
85059+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
85060+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
85061+
85062 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
85063 NULL if none. Assume start_addr < end_addr. */
85064 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
85065@@ -2062,10 +2083,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
85066 }
85067
85068 #ifdef CONFIG_MMU
85069-pgprot_t vm_get_page_prot(unsigned long vm_flags);
85070+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
85071 void vma_set_page_prot(struct vm_area_struct *vma);
85072 #else
85073-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
85074+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
85075 {
85076 return __pgprot(0);
85077 }
85078@@ -2127,6 +2148,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
85079 static inline void vm_stat_account(struct mm_struct *mm,
85080 unsigned long flags, struct file *file, long pages)
85081 {
85082+
85083+#ifdef CONFIG_PAX_RANDMMAP
85084+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
85085+#endif
85086+
85087 mm->total_vm += pages;
85088 }
85089 #endif /* CONFIG_PROC_FS */
85090@@ -2229,7 +2255,7 @@ extern int unpoison_memory(unsigned long pfn);
85091 extern int sysctl_memory_failure_early_kill;
85092 extern int sysctl_memory_failure_recovery;
85093 extern void shake_page(struct page *p, int access);
85094-extern atomic_long_t num_poisoned_pages;
85095+extern atomic_long_unchecked_t num_poisoned_pages;
85096 extern int soft_offline_page(struct page *page, int flags);
85097
85098 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
85099@@ -2280,5 +2306,11 @@ void __init setup_nr_node_ids(void);
85100 static inline void setup_nr_node_ids(void) {}
85101 #endif
85102
85103+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
85104+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
85105+#else
85106+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
85107+#endif
85108+
85109 #endif /* __KERNEL__ */
85110 #endif /* _LINUX_MM_H */
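The check_data_rlimit() hunk above places the gr_learn_resource() call ahead of the limit test, so the learning side observes the requested size whether or not the request is then refused. A simplified, runnable model of that ordering — the argument list is collapsed to the essentials and learn_resource() is a stand-in for the hook:

#include <stdio.h>

static void learn_resource(unsigned long wanted)
{
	printf("learned: want %lu\n", wanted);	/* stand-in for gr_learn_resource() */
}

static int check_data_rlimit_sketch(unsigned long rlim, unsigned long want)
{
	learn_resource(want);	/* observed before enforcement, as in the hunk */
	if (want > rlim)
		return -1;	/* -ENOSPC in the patch */
	return 0;
}

int main(void)
{
	printf("within limit: %d\n", check_data_rlimit_sketch(8192, 4096));
	printf("over limit:   %d\n", check_data_rlimit_sketch(4096, 8192));
	return 0;
}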
85111diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
85112index 199a03a..7328440 100644
85113--- a/include/linux/mm_types.h
85114+++ b/include/linux/mm_types.h
85115@@ -313,7 +313,9 @@ struct vm_area_struct {
85116 #ifdef CONFIG_NUMA
85117 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
85118 #endif
85119-};
85120+
85121+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
85122+} __randomize_layout;
85123
85124 struct core_thread {
85125 struct task_struct *task;
85126@@ -464,7 +466,25 @@ struct mm_struct {
85127 /* address of the bounds directory */
85128 void __user *bd_addr;
85129 #endif
85130-};
85131+
85132+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85133+ unsigned long pax_flags;
85134+#endif
85135+
85136+#ifdef CONFIG_PAX_DLRESOLVE
85137+ unsigned long call_dl_resolve;
85138+#endif
85139+
85140+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
85141+ unsigned long call_syscall;
85142+#endif
85143+
85144+#ifdef CONFIG_PAX_ASLR
85145+ unsigned long delta_mmap; /* randomized offset */
85146+ unsigned long delta_stack; /* randomized offset */
85147+#endif
85148+
85149+} __randomize_layout;
85150
85151 static inline void mm_init_cpumask(struct mm_struct *mm)
85152 {
85153diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
85154index 160448f..7b332b7 100644
85155--- a/include/linux/mmc/core.h
85156+++ b/include/linux/mmc/core.h
85157@@ -79,7 +79,7 @@ struct mmc_command {
85158 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
85159
85160 unsigned int retries; /* max number of retries */
85161- unsigned int error; /* command error */
85162+ int error; /* command error */
85163
85164 /*
85165 * Standard errno values are used for errors, but some have specific
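
The mmc_command.error change above (unsigned int to int) matters because the field holds negative errno values; stored in an unsigned field they wrap to huge positive numbers, and an `error < 0` style check can never fire. A runnable sketch of the failure mode:

#include <stdio.h>
#include <errno.h>

int main(void)
{
	unsigned int uerr = -EINVAL; /* wraps to 0xffffffea */
	int serr = -EINVAL;

	printf("unsigned errno < 0? %d\n", uerr < 0); /* 0: check misses */
	printf("signed errno   < 0? %d\n", serr < 0); /* 1: check fires */
	return 0;
}
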
85166diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
85167index c5d5278..f0b68c8 100644
85168--- a/include/linux/mmiotrace.h
85169+++ b/include/linux/mmiotrace.h
85170@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
85171 /* Called from ioremap.c */
85172 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
85173 void __iomem *addr);
85174-extern void mmiotrace_iounmap(volatile void __iomem *addr);
85175+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
85176
85177 /* For anyone to insert markers. Remember trailing newline. */
85178 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
85179@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
85180 {
85181 }
85182
85183-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
85184+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
85185 {
85186 }
85187
85188diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
85189index 2782df4..abe756e 100644
85190--- a/include/linux/mmzone.h
85191+++ b/include/linux/mmzone.h
85192@@ -526,7 +526,7 @@ struct zone {
85193
85194 ZONE_PADDING(_pad3_)
85195 /* Zone statistics */
85196- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85197+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85198 } ____cacheline_internodealigned_in_smp;
85199
85200 enum zone_flags {
85201diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
85202index e530533..c9620c7 100644
85203--- a/include/linux/mod_devicetable.h
85204+++ b/include/linux/mod_devicetable.h
85205@@ -139,7 +139,7 @@ struct usb_device_id {
85206 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
85207 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
85208
85209-#define HID_ANY_ID (~0)
85210+#define HID_ANY_ID (~0U)
85211 #define HID_BUS_ANY 0xffff
85212 #define HID_GROUP_ANY 0x0000
85213
85214@@ -470,7 +470,7 @@ struct dmi_system_id {
85215 const char *ident;
85216 struct dmi_strmatch matches[4];
85217 void *driver_data;
85218-};
85219+} __do_const;
85220 /*
85221 * struct dmi_device_id appears during expansion of
85222 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
85223diff --git a/include/linux/module.h b/include/linux/module.h
85224index b03485b..a26974f 100644
85225--- a/include/linux/module.h
85226+++ b/include/linux/module.h
85227@@ -17,9 +17,11 @@
85228 #include <linux/moduleparam.h>
85229 #include <linux/jump_label.h>
85230 #include <linux/export.h>
85231+#include <linux/fs.h>
85232
85233 #include <linux/percpu.h>
85234 #include <asm/module.h>
85235+#include <asm/pgtable.h>
85236
85237 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
85238 #define MODULE_SIG_STRING "~Module signature appended~\n"
85239@@ -42,7 +44,7 @@ struct module_kobject {
85240 struct kobject *drivers_dir;
85241 struct module_param_attrs *mp;
85242 struct completion *kobj_completion;
85243-};
85244+} __randomize_layout;
85245
85246 struct module_attribute {
85247 struct attribute attr;
85248@@ -54,12 +56,13 @@ struct module_attribute {
85249 int (*test)(struct module *);
85250 void (*free)(struct module *);
85251 };
85252+typedef struct module_attribute __no_const module_attribute_no_const;
85253
85254 struct module_version_attribute {
85255 struct module_attribute mattr;
85256 const char *module_name;
85257 const char *version;
85258-} __attribute__ ((__aligned__(sizeof(void *))));
85259+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
85260
85261 extern ssize_t __modver_version_show(struct module_attribute *,
85262 struct module_kobject *, char *);
85263@@ -221,7 +224,7 @@ struct module {
85264
85265 /* Sysfs stuff. */
85266 struct module_kobject mkobj;
85267- struct module_attribute *modinfo_attrs;
85268+ module_attribute_no_const *modinfo_attrs;
85269 const char *version;
85270 const char *srcversion;
85271 struct kobject *holders_dir;
85272@@ -270,19 +273,16 @@ struct module {
85273 int (*init)(void);
85274
85275 /* If this is non-NULL, vfree after init() returns */
85276- void *module_init;
85277+ void *module_init_rx, *module_init_rw;
85278
85279 /* Here is the actual code + data, vfree'd on unload. */
85280- void *module_core;
85281+ void *module_core_rx, *module_core_rw;
85282
85283 /* Here are the sizes of the init and core sections */
85284- unsigned int init_size, core_size;
85285+ unsigned int init_size_rw, core_size_rw;
85286
85287 /* The size of the executable code in each section. */
85288- unsigned int init_text_size, core_text_size;
85289-
85290- /* Size of RO sections of the module (text+rodata) */
85291- unsigned int init_ro_size, core_ro_size;
85292+ unsigned int init_size_rx, core_size_rx;
85293
85294 /* Arch-specific module values */
85295 struct mod_arch_specific arch;
85296@@ -338,6 +338,10 @@ struct module {
85297 #ifdef CONFIG_EVENT_TRACING
85298 struct ftrace_event_call **trace_events;
85299 unsigned int num_trace_events;
85300+ struct file_operations trace_id;
85301+ struct file_operations trace_enable;
85302+ struct file_operations trace_format;
85303+ struct file_operations trace_filter;
85304 #endif
85305 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
85306 unsigned int num_ftrace_callsites;
85307@@ -365,7 +369,7 @@ struct module {
85308 ctor_fn_t *ctors;
85309 unsigned int num_ctors;
85310 #endif
85311-};
85312+} __randomize_layout;
85313 #ifndef MODULE_ARCH_INIT
85314 #define MODULE_ARCH_INIT {}
85315 #endif
85316@@ -386,18 +390,48 @@ bool is_module_address(unsigned long addr);
85317 bool is_module_percpu_address(unsigned long addr);
85318 bool is_module_text_address(unsigned long addr);
85319
85320+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
85321+{
85322+
85323+#ifdef CONFIG_PAX_KERNEXEC
85324+ if (ktla_ktva(addr) >= (unsigned long)start &&
85325+ ktla_ktva(addr) < (unsigned long)start + size)
85326+ return 1;
85327+#endif
85328+
85329+ return ((void *)addr >= start && (void *)addr < start + size);
85330+}
85331+
85332+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
85333+{
85334+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
85335+}
85336+
85337+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
85338+{
85339+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
85340+}
85341+
85342+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
85343+{
85344+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
85345+}
85346+
85347+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
85348+{
85349+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
85350+}
85351+
85352 static inline bool within_module_core(unsigned long addr,
85353 const struct module *mod)
85354 {
85355- return (unsigned long)mod->module_core <= addr &&
85356- addr < (unsigned long)mod->module_core + mod->core_size;
85357+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
85358 }
85359
85360 static inline bool within_module_init(unsigned long addr,
85361 const struct module *mod)
85362 {
85363- return (unsigned long)mod->module_init <= addr &&
85364- addr < (unsigned long)mod->module_init + mod->init_size;
85365+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
85366 }
85367
85368 static inline bool within_module(unsigned long addr, const struct module *mod)
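
The module.h rework above splits each module mapping into RX (code) and RW (data) halves so KERNEXEC can enforce W^X, and rebuilds the within_module_*() predicates on one half-open range check. A userspace sketch of that helper; the ktla_ktva() address re-basing used under CONFIG_PAX_KERNEXEC is omitted here:

#include <stdio.h>

static int within_range(unsigned long addr, void *start, unsigned long size)
{
	return (char *)addr >= (char *)start &&
	       (char *)addr <  (char *)start + size;
}

int main(void)
{
	char region[64];

	printf("%d\n", within_range((unsigned long)&region[0], region, 64));    /* 1 */
	printf("%d\n", within_range((unsigned long)&region[63], region, 64));   /* 1 */
	printf("%d\n", within_range((unsigned long)(region + 64), region, 64)); /* 0: end is exclusive */
	return 0;
}
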
85369diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
85370index 4d0cb9b..3169ac7 100644
85371--- a/include/linux/moduleloader.h
85372+++ b/include/linux/moduleloader.h
85373@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
85374 sections. Returns NULL on failure. */
85375 void *module_alloc(unsigned long size);
85376
85377+#ifdef CONFIG_PAX_KERNEXEC
85378+void *module_alloc_exec(unsigned long size);
85379+#else
85380+#define module_alloc_exec(x) module_alloc(x)
85381+#endif
85382+
85383 /* Free memory returned from module_alloc. */
85384 void module_memfree(void *module_region);
85385
85386+#ifdef CONFIG_PAX_KERNEXEC
85387+void module_memfree_exec(void *module_region);
85388+#else
85389+#define module_memfree_exec(x) module_memfree((x))
85390+#endif
85391+
85392 /*
85393 * Apply the given relocation to the (simplified) ELF. Return -error
85394 * or 0.
85395@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
85396 unsigned int relsec,
85397 struct module *me)
85398 {
85399+#ifdef CONFIG_MODULES
85400 printk(KERN_ERR "module %s: REL relocation unsupported\n",
85401 module_name(me));
85402+#endif
85403 return -ENOEXEC;
85404 }
85405 #endif
85406@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
85407 unsigned int relsec,
85408 struct module *me)
85409 {
85410+#ifdef CONFIG_MODULES
85411 printk(KERN_ERR "module %s: REL relocation unsupported\n",
85412 module_name(me));
85413+#endif
85414 return -ENOEXEC;
85415 }
85416 #endif
85417diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
85418index 1c9effa..1160bdd 100644
85419--- a/include/linux/moduleparam.h
85420+++ b/include/linux/moduleparam.h
85421@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
85422 * @len is usually just sizeof(string).
85423 */
85424 #define module_param_string(name, string, len, perm) \
85425- static const struct kparam_string __param_string_##name \
85426+ static const struct kparam_string __param_string_##name __used \
85427 = { len, string }; \
85428 __module_param_call(MODULE_PARAM_PREFIX, name, \
85429 &param_ops_string, \
85430@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
85431 */
85432 #define module_param_array_named(name, array, type, nump, perm) \
85433 param_check_##type(name, &(array)[0]); \
85434- static const struct kparam_array __param_arr_##name \
85435+ static const struct kparam_array __param_arr_##name __used \
85436 = { .max = ARRAY_SIZE(array), .num = nump, \
85437 .ops = &param_ops_##type, \
85438 .elemsize = sizeof(array[0]), .elem = array }; \
85439diff --git a/include/linux/mount.h b/include/linux/mount.h
85440index 564beee..653be6f 100644
85441--- a/include/linux/mount.h
85442+++ b/include/linux/mount.h
85443@@ -67,7 +67,7 @@ struct vfsmount {
85444 struct dentry *mnt_root; /* root of the mounted tree */
85445 struct super_block *mnt_sb; /* pointer to superblock */
85446 int mnt_flags;
85447-};
85448+} __randomize_layout;
85449
85450 struct file; /* forward dec */
85451 struct path;
85452diff --git a/include/linux/namei.h b/include/linux/namei.h
85453index c899077..b9a2010 100644
85454--- a/include/linux/namei.h
85455+++ b/include/linux/namei.h
85456@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
85457 extern void unlock_rename(struct dentry *, struct dentry *);
85458
85459 extern void nd_jump_link(struct nameidata *nd, struct path *path);
85460-extern void nd_set_link(struct nameidata *nd, char *path);
85461-extern char *nd_get_link(struct nameidata *nd);
85462+extern void nd_set_link(struct nameidata *nd, const char *path);
85463+extern const char *nd_get_link(const struct nameidata *nd);
85464
85465 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
85466 {
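
The nd_set_link()/nd_get_link() change above is a const-correctness fix: the stored symlink body is never modified through these accessors, so taking and returning const char * documents that and lets callers hand in read-only strings without casts. A minimal sketch of the pattern (names are illustrative):

#include <stdio.h>

static const char *saved_link;

static void set_link(const char *path) { saved_link = path; }
static const char *get_link(void)      { return saved_link; }

int main(void)
{
	set_link("target-of-symlink"); /* a string literal is fine now */
	printf("%s\n", get_link());
	return 0;
}
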
85467diff --git a/include/linux/net.h b/include/linux/net.h
85468index 17d8339..81656c0 100644
85469--- a/include/linux/net.h
85470+++ b/include/linux/net.h
85471@@ -192,7 +192,7 @@ struct net_proto_family {
85472 int (*create)(struct net *net, struct socket *sock,
85473 int protocol, int kern);
85474 struct module *owner;
85475-};
85476+} __do_const;
85477
85478 struct iovec;
85479 struct kvec;
85480diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
85481index 2787388..1dd8e88 100644
85482--- a/include/linux/netdevice.h
85483+++ b/include/linux/netdevice.h
85484@@ -1198,6 +1198,7 @@ struct net_device_ops {
85485 u8 state);
85486 #endif
85487 };
85488+typedef struct net_device_ops __no_const net_device_ops_no_const;
85489
85490 /**
85491 * enum net_device_priv_flags - &struct net_device priv_flags
85492@@ -1546,10 +1547,10 @@ struct net_device {
85493
85494 struct net_device_stats stats;
85495
85496- atomic_long_t rx_dropped;
85497- atomic_long_t tx_dropped;
85498+ atomic_long_unchecked_t rx_dropped;
85499+ atomic_long_unchecked_t tx_dropped;
85500
85501- atomic_t carrier_changes;
85502+ atomic_unchecked_t carrier_changes;
85503
85504 #ifdef CONFIG_WIRELESS_EXT
85505 const struct iw_handler_def * wireless_handlers;
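
The rx_dropped/tx_dropped/carrier_changes changes above are the flip side of PAX_REFCOUNT: reference counts trap on overflow to block refcount-wrap exploits, while pure statistics are allowed to wrap, so they move to the *_unchecked types that skip the trap. A userspace sketch of the two behaviours; __builtin_add_overflow stands in for the architecture-level trap:

#include <stdio.h>
#include <limits.h>

static long checked_add(long v, long d)
{
	long r;

	if (__builtin_add_overflow(v, d, &r)) {
		puts("overflow trapped"); /* the kernel would log and contain it */
		return v;
	}
	return r;
}

static long unchecked_add(long v, long d)
{
	return (long)((unsigned long)v + (unsigned long)d); /* wraps silently */
}

int main(void)
{
	printf("checked:   %ld\n", checked_add(LONG_MAX, 1));   /* trapped */
	printf("unchecked: %ld\n", unchecked_add(LONG_MAX, 1)); /* LONG_MIN */
	return 0;
}
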
85506diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
85507index 2517ece..0bbfcfb 100644
85508--- a/include/linux/netfilter.h
85509+++ b/include/linux/netfilter.h
85510@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
85511 #endif
85512 /* Use the module struct to lock set/get code in place */
85513 struct module *owner;
85514-};
85515+} __do_const;
85516
85517 /* Function to register/unregister hook points. */
85518 int nf_register_hook(struct nf_hook_ops *reg);
85519diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
85520index e955d47..04a5338 100644
85521--- a/include/linux/netfilter/nfnetlink.h
85522+++ b/include/linux/netfilter/nfnetlink.h
85523@@ -19,7 +19,7 @@ struct nfnl_callback {
85524 const struct nlattr * const cda[]);
85525 const struct nla_policy *policy; /* netlink attribute policy */
85526 const u_int16_t attr_count; /* number of nlattr's */
85527-};
85528+} __do_const;
85529
85530 struct nfnetlink_subsystem {
85531 const char *name;
85532diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
85533new file mode 100644
85534index 0000000..33f4af8
85535--- /dev/null
85536+++ b/include/linux/netfilter/xt_gradm.h
85537@@ -0,0 +1,9 @@
85538+#ifndef _LINUX_NETFILTER_XT_GRADM_H
85539+#define _LINUX_NETFILTER_XT_GRADM_H 1
85540+
85541+struct xt_gradm_mtinfo {
85542+ __u16 flags;
85543+ __u16 invflags;
85544+};
85545+
85546+#endif
85547diff --git a/include/linux/nls.h b/include/linux/nls.h
85548index 520681b..2b7fabb 100644
85549--- a/include/linux/nls.h
85550+++ b/include/linux/nls.h
85551@@ -31,7 +31,7 @@ struct nls_table {
85552 const unsigned char *charset2upper;
85553 struct module *owner;
85554 struct nls_table *next;
85555-};
85556+} __do_const;
85557
85558 /* this value hold the maximum octet of charset */
85559 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
85560@@ -46,7 +46,7 @@ enum utf16_endian {
85561 /* nls_base.c */
85562 extern int __register_nls(struct nls_table *, struct module *);
85563 extern int unregister_nls(struct nls_table *);
85564-extern struct nls_table *load_nls(char *);
85565+extern struct nls_table *load_nls(const char *);
85566 extern void unload_nls(struct nls_table *);
85567 extern struct nls_table *load_nls_default(void);
85568 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
85569diff --git a/include/linux/notifier.h b/include/linux/notifier.h
85570index d14a4c3..a078786 100644
85571--- a/include/linux/notifier.h
85572+++ b/include/linux/notifier.h
85573@@ -54,7 +54,8 @@ struct notifier_block {
85574 notifier_fn_t notifier_call;
85575 struct notifier_block __rcu *next;
85576 int priority;
85577-};
85578+} __do_const;
85579+typedef struct notifier_block __no_const notifier_block_no_const;
85580
85581 struct atomic_notifier_head {
85582 spinlock_t lock;
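
notifier_block above shows the recurring __do_const / __no_const pairing: the constify plugin makes ops-like structs read-only after initialization, and the _no_const typedef opts out for the few instances that must stay writable. The closest plain-C analogue is a const-qualified object; a sketch:

#include <stdio.h>

struct notifier_block_demo {
	int (*notifier_call)(int event);
	int priority;
};

static int demo_call(int event) { return event + 1; }

/* "constified": later writes to nb.priority are compile errors */
static const struct notifier_block_demo nb = {
	.notifier_call = demo_call,
	.priority = 10,
};

int main(void)
{
	printf("%d\n", nb.notifier_call(41)); /* 42 */
	/* nb.priority = 0;   <- rejected by the compiler, which is the point */
	return 0;
}
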
85583diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
85584index b2a0f15..4d7da32 100644
85585--- a/include/linux/oprofile.h
85586+++ b/include/linux/oprofile.h
85587@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
85588 int oprofilefs_create_ro_ulong(struct dentry * root,
85589 char const * name, ulong * val);
85590
85591-/** Create a file for read-only access to an atomic_t. */
85592+/** Create a file for read-only access to an atomic_unchecked_t. */
85593 int oprofilefs_create_ro_atomic(struct dentry * root,
85594- char const * name, atomic_t * val);
85595+ char const * name, atomic_unchecked_t * val);
85596
85597 /** create a directory */
85598 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
85599diff --git a/include/linux/padata.h b/include/linux/padata.h
85600index 4386946..f50c615 100644
85601--- a/include/linux/padata.h
85602+++ b/include/linux/padata.h
85603@@ -129,7 +129,7 @@ struct parallel_data {
85604 struct padata_serial_queue __percpu *squeue;
85605 atomic_t reorder_objects;
85606 atomic_t refcnt;
85607- atomic_t seq_nr;
85608+ atomic_unchecked_t seq_nr;
85609 struct padata_cpumask cpumask;
85610 spinlock_t lock ____cacheline_aligned;
85611 unsigned int processed;
85612diff --git a/include/linux/path.h b/include/linux/path.h
85613index d137218..be0c176 100644
85614--- a/include/linux/path.h
85615+++ b/include/linux/path.h
85616@@ -1,13 +1,15 @@
85617 #ifndef _LINUX_PATH_H
85618 #define _LINUX_PATH_H
85619
85620+#include <linux/compiler.h>
85621+
85622 struct dentry;
85623 struct vfsmount;
85624
85625 struct path {
85626 struct vfsmount *mnt;
85627 struct dentry *dentry;
85628-};
85629+} __randomize_layout;
85630
85631 extern void path_get(const struct path *);
85632 extern void path_put(const struct path *);
85633diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
85634index 8c78950..0d74ed9 100644
85635--- a/include/linux/pci_hotplug.h
85636+++ b/include/linux/pci_hotplug.h
85637@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
85638 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
85639 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
85640 int (*reset_slot) (struct hotplug_slot *slot, int probe);
85641-};
85642+} __do_const;
85643+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
85644
85645 /**
85646 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
85647diff --git a/include/linux/percpu.h b/include/linux/percpu.h
85648index caebf2a..4c3ae9d 100644
85649--- a/include/linux/percpu.h
85650+++ b/include/linux/percpu.h
85651@@ -34,7 +34,7 @@
85652 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
85653 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
85654 */
85655-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
85656+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
85657 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
85658
85659 /*
85660diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
85661index 2b62198..2b74233 100644
85662--- a/include/linux/perf_event.h
85663+++ b/include/linux/perf_event.h
85664@@ -343,8 +343,8 @@ struct perf_event {
85665
85666 enum perf_event_active_state state;
85667 unsigned int attach_state;
85668- local64_t count;
85669- atomic64_t child_count;
85670+ local64_t count; /* PaX: fix it one day */
85671+ atomic64_unchecked_t child_count;
85672
85673 /*
85674 * These are the total time in nanoseconds that the event
85675@@ -395,8 +395,8 @@ struct perf_event {
85676 * These accumulate total time (in nanoseconds) that children
85677 * events have been enabled and running, respectively.
85678 */
85679- atomic64_t child_total_time_enabled;
85680- atomic64_t child_total_time_running;
85681+ atomic64_unchecked_t child_total_time_enabled;
85682+ atomic64_unchecked_t child_total_time_running;
85683
85684 /*
85685 * Protect attach/detach and child_list:
85686@@ -752,7 +752,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
85687 entry->ip[entry->nr++] = ip;
85688 }
85689
85690-extern int sysctl_perf_event_paranoid;
85691+extern int sysctl_perf_event_legitimately_concerned;
85692 extern int sysctl_perf_event_mlock;
85693 extern int sysctl_perf_event_sample_rate;
85694 extern int sysctl_perf_cpu_time_max_percent;
85695@@ -767,19 +767,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
85696 loff_t *ppos);
85697
85698
85699+static inline bool perf_paranoid_any(void)
85700+{
85701+ return sysctl_perf_event_legitimately_concerned > 2;
85702+}
85703+
85704 static inline bool perf_paranoid_tracepoint_raw(void)
85705 {
85706- return sysctl_perf_event_paranoid > -1;
85707+ return sysctl_perf_event_legitimately_concerned > -1;
85708 }
85709
85710 static inline bool perf_paranoid_cpu(void)
85711 {
85712- return sysctl_perf_event_paranoid > 0;
85713+ return sysctl_perf_event_legitimately_concerned > 0;
85714 }
85715
85716 static inline bool perf_paranoid_kernel(void)
85717 {
85718- return sysctl_perf_event_paranoid > 1;
85719+ return sysctl_perf_event_legitimately_concerned > 1;
85720 }
85721
85722 extern void perf_event_init(void);
85723@@ -912,7 +917,7 @@ struct perf_pmu_events_attr {
85724 struct device_attribute attr;
85725 u64 id;
85726 const char *event_str;
85727-};
85728+} __do_const;
85729
85730 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
85731 char *page);
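
Beyond renaming the sysctl, the perf_event.h hunk above adds a fourth rung to the paranoia ladder: perf_paranoid_any() (level > 2) lets an admin disable perf for unprivileged users entirely. A sketch of the thresholds, with a plain int standing in for the sysctl:

#include <stdio.h>

static int perf_paranoia = 3; /* stand-in for the sysctl value */

static int paranoid_tracepoint_raw(void) { return perf_paranoia > -1; }
static int paranoid_cpu(void)            { return perf_paranoia > 0; }
static int paranoid_kernel(void)         { return perf_paranoia > 1; }
static int paranoid_any(void)            { return perf_paranoia > 2; }

int main(void)
{
	printf("raw=%d cpu=%d kernel=%d any=%d\n",
	       paranoid_tracepoint_raw(), paranoid_cpu(),
	       paranoid_kernel(), paranoid_any()); /* all 1 at level 3 */
	return 0;
}
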
85732diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
85733index 918b117..7af374b7 100644
85734--- a/include/linux/pid_namespace.h
85735+++ b/include/linux/pid_namespace.h
85736@@ -45,7 +45,7 @@ struct pid_namespace {
85737 int hide_pid;
85738 int reboot; /* group exit code if this pidns was rebooted */
85739 struct ns_common ns;
85740-};
85741+} __randomize_layout;
85742
85743 extern struct pid_namespace init_pid_ns;
85744
85745diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
85746index eb8b8ac..62649e1 100644
85747--- a/include/linux/pipe_fs_i.h
85748+++ b/include/linux/pipe_fs_i.h
85749@@ -47,10 +47,10 @@ struct pipe_inode_info {
85750 struct mutex mutex;
85751 wait_queue_head_t wait;
85752 unsigned int nrbufs, curbuf, buffers;
85753- unsigned int readers;
85754- unsigned int writers;
85755- unsigned int files;
85756- unsigned int waiting_writers;
85757+ atomic_t readers;
85758+ atomic_t writers;
85759+ atomic_t files;
85760+ atomic_t waiting_writers;
85761 unsigned int r_counter;
85762 unsigned int w_counter;
85763 struct page *tmp_page;
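
The pipe_inode_info counters above move from unsigned int to atomic_t because ++/-- is a non-atomic read-modify-write; concurrent openers and closers can lose updates. A runnable userspace demonstration of the race (C11 atomics and pthreads stand in for the kernel primitives; build with -pthread):

#include <stdio.h>
#include <pthread.h>
#include <stdatomic.h>

static int plain_readers;
static atomic_int atomic_readers;

static void *churn(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		plain_readers++;                      /* racy RMW */
		atomic_fetch_add(&atomic_readers, 1); /* serialized RMW */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, churn, NULL);
	pthread_create(&b, NULL, churn, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* plain is typically short of 2000000; the atomic count is exact */
	printf("plain=%d atomic=%d\n",
	       plain_readers, atomic_load(&atomic_readers));
	return 0;
}
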
85764diff --git a/include/linux/pm.h b/include/linux/pm.h
85765index e2f1be6..78a0506 100644
85766--- a/include/linux/pm.h
85767+++ b/include/linux/pm.h
85768@@ -608,6 +608,7 @@ struct dev_pm_domain {
85769 struct dev_pm_ops ops;
85770 void (*detach)(struct device *dev, bool power_off);
85771 };
85772+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
85773
85774 /*
85775 * The PM_EVENT_ messages are also used by drivers implementing the legacy
85776diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
85777index 080e778..cbdaef7 100644
85778--- a/include/linux/pm_domain.h
85779+++ b/include/linux/pm_domain.h
85780@@ -39,11 +39,11 @@ struct gpd_dev_ops {
85781 int (*save_state)(struct device *dev);
85782 int (*restore_state)(struct device *dev);
85783 bool (*active_wakeup)(struct device *dev);
85784-};
85785+} __no_const;
85786
85787 struct gpd_cpuidle_data {
85788 unsigned int saved_exit_latency;
85789- struct cpuidle_state *idle_state;
85790+ cpuidle_state_no_const *idle_state;
85791 };
85792
85793 struct generic_pm_domain {
85794diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
85795index 30e84d4..22278b4 100644
85796--- a/include/linux/pm_runtime.h
85797+++ b/include/linux/pm_runtime.h
85798@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
85799
85800 static inline void pm_runtime_mark_last_busy(struct device *dev)
85801 {
85802- ACCESS_ONCE(dev->power.last_busy) = jiffies;
85803+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
85804 }
85805
85806 static inline bool pm_runtime_is_irq_safe(struct device *dev)
85807diff --git a/include/linux/pnp.h b/include/linux/pnp.h
85808index 6512e9c..ec27fa2 100644
85809--- a/include/linux/pnp.h
85810+++ b/include/linux/pnp.h
85811@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
85812 struct pnp_fixup {
85813 char id[7];
85814 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
85815-};
85816+} __do_const;
85817
85818 /* config parameters */
85819 #define PNP_CONFIG_NORMAL 0x0001
85820diff --git a/include/linux/poison.h b/include/linux/poison.h
85821index 2110a81..13a11bb 100644
85822--- a/include/linux/poison.h
85823+++ b/include/linux/poison.h
85824@@ -19,8 +19,8 @@
85825 * under normal circumstances, used to verify that nobody uses
85826 * non-initialized list entries.
85827 */
85828-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
85829-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
85830+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
85831+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
85832
85833 /********** include/linux/timer.h **********/
85834 /*
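
The LIST_POISON values above change from the stock low addresses (0x00100100 + delta) to 0xFFFFFF01/0xFFFFFF02: a low poison address could, on configurations without a sufficient mmap_min_addr, be mapped by userland, turning a stray dereference of a deleted list entry into an exploit primitive, whereas the replacement values sit at the top of the 32-bit range where no user mapping can live. A sketch of the poison-on-delete pattern they protect:

#include <stdio.h>

#define LIST_POISON1 ((void *)(long)0xFFFFFF01)
#define LIST_POISON2 ((void *)(long)0xFFFFFF02)

struct list_head { struct list_head *next, *prev; };

static void list_del_demo(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = LIST_POISON1; /* any later use faults loudly */
	entry->prev = LIST_POISON2;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node;

	node.next = node.prev = &head;
	head.next = head.prev = &node;
	list_del_demo(&node);
	printf("poisoned next=%p prev=%p\n", node.next, node.prev);
	return 0;
}
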
85835diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
85836index d8b187c3..9a9257a 100644
85837--- a/include/linux/power/smartreflex.h
85838+++ b/include/linux/power/smartreflex.h
85839@@ -238,7 +238,7 @@ struct omap_sr_class_data {
85840 int (*notify)(struct omap_sr *sr, u32 status);
85841 u8 notify_flags;
85842 u8 class_type;
85843-};
85844+} __do_const;
85845
85846 /**
85847 * struct omap_sr_nvalue_table - Smartreflex n-target value info
85848diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
85849index 4ea1d37..80f4b33 100644
85850--- a/include/linux/ppp-comp.h
85851+++ b/include/linux/ppp-comp.h
85852@@ -84,7 +84,7 @@ struct compressor {
85853 struct module *owner;
85854 /* Extra skb space needed by the compressor algorithm */
85855 unsigned int comp_extra;
85856-};
85857+} __do_const;
85858
85859 /*
85860 * The return value from decompress routine is the length of the
85861diff --git a/include/linux/preempt.h b/include/linux/preempt.h
85862index de83b4e..c4b997d 100644
85863--- a/include/linux/preempt.h
85864+++ b/include/linux/preempt.h
85865@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
85866 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
85867 #endif
85868
85869+#define raw_preempt_count_add(val) __preempt_count_add(val)
85870+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
85871+
85872 #define __preempt_count_inc() __preempt_count_add(1)
85873 #define __preempt_count_dec() __preempt_count_sub(1)
85874
85875 #define preempt_count_inc() preempt_count_add(1)
85876+#define raw_preempt_count_inc() raw_preempt_count_add(1)
85877 #define preempt_count_dec() preempt_count_sub(1)
85878+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
85879
85880 #ifdef CONFIG_PREEMPT_COUNT
85881
85882@@ -41,6 +46,12 @@ do { \
85883 barrier(); \
85884 } while (0)
85885
85886+#define raw_preempt_disable() \
85887+do { \
85888+ raw_preempt_count_inc(); \
85889+ barrier(); \
85890+} while (0)
85891+
85892 #define sched_preempt_enable_no_resched() \
85893 do { \
85894 barrier(); \
85895@@ -49,6 +60,12 @@ do { \
85896
85897 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
85898
85899+#define raw_preempt_enable_no_resched() \
85900+do { \
85901+ barrier(); \
85902+ raw_preempt_count_dec(); \
85903+} while (0)
85904+
85905 #ifdef CONFIG_PREEMPT
85906 #define preempt_enable() \
85907 do { \
85908@@ -113,8 +130,10 @@ do { \
85909 * region.
85910 */
85911 #define preempt_disable() barrier()
85912+#define raw_preempt_disable() barrier()
85913 #define sched_preempt_enable_no_resched() barrier()
85914 #define preempt_enable_no_resched() barrier()
85915+#define raw_preempt_enable_no_resched() barrier()
85916 #define preempt_enable() barrier()
85917 #define preempt_check_resched() do { } while (0)
85918
85919@@ -128,11 +147,13 @@ do { \
85920 /*
85921 * Modules have no business playing preemption tricks.
85922 */
85923+#ifndef CONFIG_PAX_KERNEXEC
85924 #undef sched_preempt_enable_no_resched
85925 #undef preempt_enable_no_resched
85926 #undef preempt_enable_no_resched_notrace
85927 #undef preempt_check_resched
85928 #endif
85929+#endif
85930
85931 #define preempt_set_need_resched() \
85932 do { \
85933diff --git a/include/linux/printk.h b/include/linux/printk.h
85934index baa3f97..168cff1 100644
85935--- a/include/linux/printk.h
85936+++ b/include/linux/printk.h
85937@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
85938 #endif
85939
85940 typedef int(*printk_func_t)(const char *fmt, va_list args);
85941+extern int kptr_restrict;
85942
85943 #ifdef CONFIG_PRINTK
85944 asmlinkage __printf(5, 0)
85945@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
85946
85947 extern int printk_delay_msec;
85948 extern int dmesg_restrict;
85949-extern int kptr_restrict;
85950
85951 extern void wake_up_klogd(void);
85952
85953diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
85954index b97bf2e..f14c92d4 100644
85955--- a/include/linux/proc_fs.h
85956+++ b/include/linux/proc_fs.h
85957@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
85958 extern struct proc_dir_entry *proc_symlink(const char *,
85959 struct proc_dir_entry *, const char *);
85960 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
85961+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
85962 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
85963 struct proc_dir_entry *, void *);
85964+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
85965+ struct proc_dir_entry *, void *);
85966 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
85967 struct proc_dir_entry *);
85968
85969@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
85970 return proc_create_data(name, mode, parent, proc_fops, NULL);
85971 }
85972
85973+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
85974+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
85975+{
85976+#ifdef CONFIG_GRKERNSEC_PROC_USER
85977+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
85978+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85979+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
85980+#else
85981+ return proc_create_data(name, mode, parent, proc_fops, NULL);
85982+#endif
85983+}
85984+
85985+
85986 extern void proc_set_size(struct proc_dir_entry *, loff_t);
85987 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
85988 extern void *PDE_DATA(const struct inode *);
85989@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
85990 struct proc_dir_entry *parent,const char *dest) { return NULL;}
85991 static inline struct proc_dir_entry *proc_mkdir(const char *name,
85992 struct proc_dir_entry *parent) {return NULL;}
85993+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
85994+ struct proc_dir_entry *parent) { return NULL; }
85995 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
85996 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85997+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
85998+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85999 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
86000 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
86001 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
86002@@ -79,7 +99,7 @@ struct net;
86003 static inline struct proc_dir_entry *proc_net_mkdir(
86004 struct net *net, const char *name, struct proc_dir_entry *parent)
86005 {
86006- return proc_mkdir_data(name, 0, parent, net);
86007+ return proc_mkdir_data_restrict(name, 0, parent, net);
86008 }
86009
86010 #endif /* _LINUX_PROC_FS_H */
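
proc_create_grsec() above ignores the caller's requested mode when the stricter GRKERNSEC_PROC_* options are enabled, clamping new /proc entries to owner-only (or owner+group) readability. A sketch of just that mode selection; CONFIG_GRKERNSEC_PROC_USERGROUP is defined here purely to pick a branch:

#include <stdio.h>
#include <sys/stat.h>

#define CONFIG_GRKERNSEC_PROC_USERGROUP /* illustrative build choice */

static mode_t clamp_proc_mode(mode_t requested)
{
	(void)requested; /* unused when a clamping option is enabled */
#ifdef CONFIG_GRKERNSEC_PROC_USER
	return S_IRUSR;
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	return S_IRUSR | S_IRGRP;
#else
	return requested;
#endif
}

int main(void)
{
	printf("0%o -> 0%o\n", 0444u,
	       (unsigned)clamp_proc_mode(0444)); /* 0444 -> 0440 */
	return 0;
}
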
86011diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
86012index 42dfc61..8113a99 100644
86013--- a/include/linux/proc_ns.h
86014+++ b/include/linux/proc_ns.h
86015@@ -16,7 +16,7 @@ struct proc_ns_operations {
86016 struct ns_common *(*get)(struct task_struct *task);
86017 void (*put)(struct ns_common *ns);
86018 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
86019-};
86020+} __do_const __randomize_layout;
86021
86022 extern const struct proc_ns_operations netns_operations;
86023 extern const struct proc_ns_operations utsns_operations;
86024diff --git a/include/linux/quota.h b/include/linux/quota.h
86025index d534e8e..782e604 100644
86026--- a/include/linux/quota.h
86027+++ b/include/linux/quota.h
86028@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
86029
86030 extern bool qid_eq(struct kqid left, struct kqid right);
86031 extern bool qid_lt(struct kqid left, struct kqid right);
86032-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
86033+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
86034 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
86035 extern bool qid_valid(struct kqid qid);
86036
86037diff --git a/include/linux/random.h b/include/linux/random.h
86038index b05856e..0a9f14e 100644
86039--- a/include/linux/random.h
86040+++ b/include/linux/random.h
86041@@ -9,9 +9,19 @@
86042 #include <uapi/linux/random.h>
86043
86044 extern void add_device_randomness(const void *, unsigned int);
86045+
86046+static inline void add_latent_entropy(void)
86047+{
86048+
86049+#ifdef LATENT_ENTROPY_PLUGIN
86050+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
86051+#endif
86052+
86053+}
86054+
86055 extern void add_input_randomness(unsigned int type, unsigned int code,
86056- unsigned int value);
86057-extern void add_interrupt_randomness(int irq, int irq_flags);
86058+ unsigned int value) __latent_entropy;
86059+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
86060
86061 extern void get_random_bytes(void *buf, int nbytes);
86062 extern void get_random_bytes_arch(void *buf, int nbytes);
86063@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
86064 extern const struct file_operations random_fops, urandom_fops;
86065 #endif
86066
86067-unsigned int get_random_int(void);
86068+unsigned int __intentional_overflow(-1) get_random_int(void);
86069 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
86070
86071-u32 prandom_u32(void);
86072+u32 prandom_u32(void) __intentional_overflow(-1);
86073 void prandom_bytes(void *buf, size_t nbytes);
86074 void prandom_seed(u32 seed);
86075 void prandom_reseed_late(void);
86076@@ -37,6 +47,11 @@ struct rnd_state {
86077 u32 prandom_u32_state(struct rnd_state *state);
86078 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
86079
86080+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
86081+{
86082+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
86083+}
86084+
86085 /**
86086 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
86087 * @ep_ro: right open interval endpoint
86088@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
86089 *
86090 * Returns: pseudo-random number in interval [0, ep_ro)
86091 */
86092-static inline u32 prandom_u32_max(u32 ep_ro)
86093+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
86094 {
86095 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
86096 }
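
pax_get_random_long() above builds a full-width random long from two prandom_u32() draws: on 64-bit the second draw fills the high half, while on 32-bit the `sizeof(long) > 4` test is false at compile time and the shift arm is never evaluated. A userspace sketch of the same composition (rand32() is an illustrative stand-in, not the kernel PRNG):

#include <stdio.h>
#include <stdlib.h>

static unsigned int rand32(void)
{
	return ((unsigned int)rand() << 16) ^ (unsigned int)rand();
}

static unsigned long get_random_long(void)
{
	return rand32() +
	       (sizeof(long) > 4 ? (unsigned long)rand32() << 32 : 0);
}

int main(void)
{
	srand(1); /* fixed seed: demo only, no entropy claimed */
	printf("%#lx\n", get_random_long());
	return 0;
}
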
86097diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
86098index 378c5ee..aa84a47 100644
86099--- a/include/linux/rbtree_augmented.h
86100+++ b/include/linux/rbtree_augmented.h
86101@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
86102 old->rbaugmented = rbcompute(old); \
86103 } \
86104 rbstatic const struct rb_augment_callbacks rbname = { \
86105- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
86106+ .propagate = rbname ## _propagate, \
86107+ .copy = rbname ## _copy, \
86108+ .rotate = rbname ## _rotate \
86109 };
86110
86111
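
The rb_augment_callbacks change above is a positional-to-designated initializer conversion. With structure layout randomization in play (and as general hygiene), positional initializers silently bind to the wrong members if fields are ever reordered; designated initializers bind by name. A sketch:

#include <stdio.h>

struct callbacks {
	void (*propagate)(void);
	void (*copy)(void);
	void (*rotate)(void);
};

static void my_propagate(void) { puts("propagate"); }
static void my_copy(void)      { puts("copy"); }
static void my_rotate(void)    { puts("rotate"); }

static const struct callbacks cbs = {
	.propagate = my_propagate, /* immune to member reordering */
	.copy      = my_copy,
	.rotate    = my_rotate,
};

int main(void)
{
	cbs.propagate();
	return 0;
}
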
86112diff --git a/include/linux/rculist.h b/include/linux/rculist.h
86113index a18b16f..2683096 100644
86114--- a/include/linux/rculist.h
86115+++ b/include/linux/rculist.h
86116@@ -29,8 +29,8 @@
86117 */
86118 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
86119 {
86120- ACCESS_ONCE(list->next) = list;
86121- ACCESS_ONCE(list->prev) = list;
86122+ ACCESS_ONCE_RW(list->next) = list;
86123+ ACCESS_ONCE_RW(list->prev) = list;
86124 }
86125
86126 /*
86127@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
86128 struct list_head *prev, struct list_head *next);
86129 #endif
86130
86131+void __pax_list_add_rcu(struct list_head *new,
86132+ struct list_head *prev, struct list_head *next);
86133+
86134 /**
86135 * list_add_rcu - add a new entry to rcu-protected list
86136 * @new: new entry to be added
86137@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
86138 __list_add_rcu(new, head, head->next);
86139 }
86140
86141+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
86142+{
86143+ __pax_list_add_rcu(new, head, head->next);
86144+}
86145+
86146 /**
86147 * list_add_tail_rcu - add a new entry to rcu-protected list
86148 * @new: new entry to be added
86149@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
86150 __list_add_rcu(new, head->prev, head);
86151 }
86152
86153+static inline void pax_list_add_tail_rcu(struct list_head *new,
86154+ struct list_head *head)
86155+{
86156+ __pax_list_add_rcu(new, head->prev, head);
86157+}
86158+
86159 /**
86160 * list_del_rcu - deletes entry from list without re-initialization
86161 * @entry: the element to delete from the list.
86162@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
86163 entry->prev = LIST_POISON2;
86164 }
86165
86166+extern void pax_list_del_rcu(struct list_head *entry);
86167+
86168 /**
86169 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
86170 * @n: the element to delete from the hash list.
86171diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
86172index 7809749..1cd9315 100644
86173--- a/include/linux/rcupdate.h
86174+++ b/include/linux/rcupdate.h
86175@@ -333,7 +333,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
86176 do { \
86177 rcu_all_qs(); \
86178 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
86179- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
86180+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
86181 } while (0)
86182 #else /* #ifdef CONFIG_TASKS_RCU */
86183 #define TASKS_RCU(x) do { } while (0)
86184diff --git a/include/linux/reboot.h b/include/linux/reboot.h
86185index 67fc8fc..a90f7d8 100644
86186--- a/include/linux/reboot.h
86187+++ b/include/linux/reboot.h
86188@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
86189 */
86190
86191 extern void migrate_to_reboot_cpu(void);
86192-extern void machine_restart(char *cmd);
86193-extern void machine_halt(void);
86194-extern void machine_power_off(void);
86195+extern void machine_restart(char *cmd) __noreturn;
86196+extern void machine_halt(void) __noreturn;
86197+extern void machine_power_off(void) __noreturn;
86198
86199 extern void machine_shutdown(void);
86200 struct pt_regs;
86201@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
86202 */
86203
86204 extern void kernel_restart_prepare(char *cmd);
86205-extern void kernel_restart(char *cmd);
86206-extern void kernel_halt(void);
86207-extern void kernel_power_off(void);
86208+extern void kernel_restart(char *cmd) __noreturn;
86209+extern void kernel_halt(void) __noreturn;
86210+extern void kernel_power_off(void) __noreturn;
86211
86212 extern int C_A_D; /* for sysctl */
86213 void ctrl_alt_del(void);
86214@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
86215 * Emergency restart, callable from an interrupt handler.
86216 */
86217
86218-extern void emergency_restart(void);
86219+extern void emergency_restart(void) __noreturn;
86220 #include <asm/emergency-restart.h>
86221
86222 #endif /* _LINUX_REBOOT_H */
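
Annotating machine_halt(), kernel_power_off() and friends __noreturn, as above, is both an optimization and a correctness contract: the compiler may drop code after the call, and it can warn if a supposedly terminal function has a path that returns. A userspace sketch using the underlying GCC attribute:

#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn)) static void halt_demo(void)
{
	puts("halting");
	exit(0); /* honoring the contract: control never returns */
}

int main(void)
{
	halt_demo();
	/* anything here is provably dead code to the compiler */
}
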
86223diff --git a/include/linux/regset.h b/include/linux/regset.h
86224index 8e0c9fe..ac4d221 100644
86225--- a/include/linux/regset.h
86226+++ b/include/linux/regset.h
86227@@ -161,7 +161,8 @@ struct user_regset {
86228 unsigned int align;
86229 unsigned int bias;
86230 unsigned int core_note_type;
86231-};
86232+} __do_const;
86233+typedef struct user_regset __no_const user_regset_no_const;
86234
86235 /**
86236 * struct user_regset_view - available regsets
86237diff --git a/include/linux/relay.h b/include/linux/relay.h
86238index d7c8359..818daf5 100644
86239--- a/include/linux/relay.h
86240+++ b/include/linux/relay.h
86241@@ -157,7 +157,7 @@ struct rchan_callbacks
86242 * The callback should return 0 if successful, negative if not.
86243 */
86244 int (*remove_buf_file)(struct dentry *dentry);
86245-};
86246+} __no_const;
86247
86248 /*
86249 * CONFIG_RELAY kernel API, kernel/relay.c
86250diff --git a/include/linux/rio.h b/include/linux/rio.h
86251index 6bda06f..bf39a9b 100644
86252--- a/include/linux/rio.h
86253+++ b/include/linux/rio.h
86254@@ -358,7 +358,7 @@ struct rio_ops {
86255 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
86256 u64 rstart, u32 size, u32 flags);
86257 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
86258-};
86259+} __no_const;
86260
86261 #define RIO_RESOURCE_MEM 0x00000100
86262 #define RIO_RESOURCE_DOORBELL 0x00000200
86263diff --git a/include/linux/rmap.h b/include/linux/rmap.h
86264index c4c559a..6ba9a26 100644
86265--- a/include/linux/rmap.h
86266+++ b/include/linux/rmap.h
86267@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
86268 void anon_vma_init(void); /* create anon_vma_cachep */
86269 int anon_vma_prepare(struct vm_area_struct *);
86270 void unlink_anon_vmas(struct vm_area_struct *);
86271-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
86272-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
86273+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
86274+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
86275
86276 static inline void anon_vma_merge(struct vm_area_struct *vma,
86277 struct vm_area_struct *next)
86278diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
86279index ed8f9e70..999bc96 100644
86280--- a/include/linux/scatterlist.h
86281+++ b/include/linux/scatterlist.h
86282@@ -1,6 +1,7 @@
86283 #ifndef _LINUX_SCATTERLIST_H
86284 #define _LINUX_SCATTERLIST_H
86285
86286+#include <linux/sched.h>
86287 #include <linux/string.h>
86288 #include <linux/bug.h>
86289 #include <linux/mm.h>
86290@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
86291 #ifdef CONFIG_DEBUG_SG
86292 BUG_ON(!virt_addr_valid(buf));
86293 #endif
86294+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86295+ if (object_starts_on_stack(buf)) {
86296+ void *adjbuf = buf - current->stack + current->lowmem_stack;
86297+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
86298+ } else
86299+#endif
86300 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
86301 }
86302
86303diff --git a/include/linux/sched.h b/include/linux/sched.h
86304index 51348f7..8c8b0ba 100644
86305--- a/include/linux/sched.h
86306+++ b/include/linux/sched.h
86307@@ -133,6 +133,7 @@ struct fs_struct;
86308 struct perf_event_context;
86309 struct blk_plug;
86310 struct filename;
86311+struct linux_binprm;
86312
86313 #define VMACACHE_BITS 2
86314 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
86315@@ -420,7 +421,7 @@ extern char __sched_text_start[], __sched_text_end[];
86316 extern int in_sched_functions(unsigned long addr);
86317
86318 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
86319-extern signed long schedule_timeout(signed long timeout);
86320+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
86321 extern signed long schedule_timeout_interruptible(signed long timeout);
86322 extern signed long schedule_timeout_killable(signed long timeout);
86323 extern signed long schedule_timeout_uninterruptible(signed long timeout);
86324@@ -438,6 +439,19 @@ struct nsproxy;
86325 struct user_namespace;
86326
86327 #ifdef CONFIG_MMU
86328+
86329+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86330+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
86331+#else
86332+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86333+{
86334+ return 0;
86335+}
86336+#endif
86337+
86338+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
86339+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
86340+
86341 extern void arch_pick_mmap_layout(struct mm_struct *mm);
86342 extern unsigned long
86343 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
86344@@ -736,6 +750,17 @@ struct signal_struct {
86345 #ifdef CONFIG_TASKSTATS
86346 struct taskstats *stats;
86347 #endif
86348+
86349+#ifdef CONFIG_GRKERNSEC
86350+ u32 curr_ip;
86351+ u32 saved_ip;
86352+ u32 gr_saddr;
86353+ u32 gr_daddr;
86354+ u16 gr_sport;
86355+ u16 gr_dport;
86356+ u8 used_accept:1;
86357+#endif
86358+
86359 #ifdef CONFIG_AUDIT
86360 unsigned audit_tty;
86361 unsigned audit_tty_log_passwd;
86362@@ -762,7 +787,7 @@ struct signal_struct {
86363 struct mutex cred_guard_mutex; /* guard against foreign influences on
86364 * credential calculations
86365 * (notably. ptrace) */
86366-};
86367+} __randomize_layout;
86368
86369 /*
86370 * Bits in flags field of signal_struct.
86371@@ -815,6 +840,14 @@ struct user_struct {
86372 struct key *session_keyring; /* UID's default session keyring */
86373 #endif
86374
86375+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86376+ unsigned char kernel_banned;
86377+#endif
86378+#ifdef CONFIG_GRKERNSEC_BRUTE
86379+ unsigned char suid_banned;
86380+ unsigned long suid_ban_expires;
86381+#endif
86382+
86383 /* Hash table maintenance information */
86384 struct hlist_node uidhash_node;
86385 kuid_t uid;
86386@@ -822,7 +855,7 @@ struct user_struct {
86387 #ifdef CONFIG_PERF_EVENTS
86388 atomic_long_t locked_vm;
86389 #endif
86390-};
86391+} __randomize_layout;
86392
86393 extern int uids_sysfs_init(void);
86394
86395@@ -1286,6 +1319,9 @@ enum perf_event_task_context {
86396 struct task_struct {
86397 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
86398 void *stack;
86399+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86400+ void *lowmem_stack;
86401+#endif
86402 atomic_t usage;
86403 unsigned int flags; /* per process flags, defined below */
86404 unsigned int ptrace;
86405@@ -1419,8 +1455,8 @@ struct task_struct {
86406 struct list_head thread_node;
86407
86408 struct completion *vfork_done; /* for vfork() */
86409- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
86410- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86411+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
86412+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86413
86414 cputime_t utime, stime, utimescaled, stimescaled;
86415 cputime_t gtime;
86416@@ -1445,11 +1481,6 @@ struct task_struct {
86417 struct task_cputime cputime_expires;
86418 struct list_head cpu_timers[3];
86419
86420-/* process credentials */
86421- const struct cred __rcu *real_cred; /* objective and real subjective task
86422- * credentials (COW) */
86423- const struct cred __rcu *cred; /* effective (overridable) subjective task
86424- * credentials (COW) */
86425 char comm[TASK_COMM_LEN]; /* executable name excluding path
86426 - access with [gs]et_task_comm (which lock
86427 it with task_lock())
86428@@ -1467,6 +1498,10 @@ struct task_struct {
86429 #endif
86430 /* CPU-specific state of this task */
86431 struct thread_struct thread;
86432+/* thread_info moved to task_struct */
86433+#ifdef CONFIG_X86
86434+ struct thread_info tinfo;
86435+#endif
86436 /* filesystem information */
86437 struct fs_struct *fs;
86438 /* open file information */
86439@@ -1541,6 +1576,10 @@ struct task_struct {
86440 gfp_t lockdep_reclaim_gfp;
86441 #endif
86442
86443+/* process credentials */
86444+ const struct cred __rcu *real_cred; /* objective and real subjective task
86445+ * credentials (COW) */
86446+
86447 /* journalling filesystem info */
86448 void *journal_info;
86449
86450@@ -1579,6 +1618,10 @@ struct task_struct {
86451 /* cg_list protected by css_set_lock and tsk->alloc_lock */
86452 struct list_head cg_list;
86453 #endif
86454+
86455+ const struct cred __rcu *cred; /* effective (overridable) subjective task
86456+ * credentials (COW) */
86457+
86458 #ifdef CONFIG_FUTEX
86459 struct robust_list_head __user *robust_list;
86460 #ifdef CONFIG_COMPAT
86461@@ -1690,7 +1733,7 @@ struct task_struct {
86462 * Number of functions that haven't been traced
86463 * because of depth overrun.
86464 */
86465- atomic_t trace_overrun;
86466+ atomic_unchecked_t trace_overrun;
86467 /* Pause for the tracing */
86468 atomic_t tracing_graph_pause;
86469 #endif
86470@@ -1718,7 +1761,78 @@ struct task_struct {
86471 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
86472 unsigned long task_state_change;
86473 #endif
86474-};
86475+
86476+#ifdef CONFIG_GRKERNSEC
86477+ /* grsecurity */
86478+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86479+ u64 exec_id;
86480+#endif
86481+#ifdef CONFIG_GRKERNSEC_SETXID
86482+ const struct cred *delayed_cred;
86483+#endif
86484+ struct dentry *gr_chroot_dentry;
86485+ struct acl_subject_label *acl;
86486+ struct acl_subject_label *tmpacl;
86487+ struct acl_role_label *role;
86488+ struct file *exec_file;
86489+ unsigned long brute_expires;
86490+ u16 acl_role_id;
86491+ u8 inherited;
86492+ /* is this the task that authenticated to the special role */
86493+ u8 acl_sp_role;
86494+ u8 is_writable;
86495+ u8 brute;
86496+ u8 gr_is_chrooted;
86497+#endif
86498+
86499+} __randomize_layout;
86500+
86501+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
86502+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
86503+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
86504+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
86505+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
86506+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
86507+
86508+#ifdef CONFIG_PAX_SOFTMODE
86509+extern int pax_softmode;
86510+#endif
86511+
86512+extern int pax_check_flags(unsigned long *);
86513+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
86514+
86515+/* if tsk != current then task_lock must be held on it */
86516+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
86517+static inline unsigned long pax_get_flags(struct task_struct *tsk)
86518+{
86519+ if (likely(tsk->mm))
86520+ return tsk->mm->pax_flags;
86521+ else
86522+ return 0UL;
86523+}
86524+
86525+/* if tsk != current then task_lock must be held on it */
86526+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
86527+{
86528+ if (likely(tsk->mm)) {
86529+ tsk->mm->pax_flags = flags;
86530+ return 0;
86531+ }
86532+ return -EINVAL;
86533+}
86534+#endif
86535+
86536+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
86537+extern void pax_set_initial_flags(struct linux_binprm *bprm);
86538+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
86539+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
86540+#endif
86541+
86542+struct path;
86543+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
86544+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
86545+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
86546+extern void pax_report_refcount_overflow(struct pt_regs *regs);
86547
86548 /* Future-safe accessor for struct task_struct's cpus_allowed. */
86549 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
86550@@ -1801,7 +1915,7 @@ struct pid_namespace;
86551 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
86552 struct pid_namespace *ns);
86553
86554-static inline pid_t task_pid_nr(struct task_struct *tsk)
86555+static inline pid_t task_pid_nr(const struct task_struct *tsk)
86556 {
86557 return tsk->pid;
86558 }
86559@@ -2169,6 +2283,25 @@ extern u64 sched_clock_cpu(int cpu);
86560
86561 extern void sched_clock_init(void);
86562
86563+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86564+static inline void populate_stack(void)
86565+{
86566+ struct task_struct *curtask = current;
86567+ int c;
86568+ int *ptr = curtask->stack;
86569+ int *end = curtask->stack + THREAD_SIZE;
86570+
86571+ while (ptr < end) {
86572+ c = *(volatile int *)ptr;
86573+ ptr += PAGE_SIZE/sizeof(int);
86574+ }
86575+}
86576+#else
86577+static inline void populate_stack(void)
86578+{
86579+}
86580+#endif
86581+
86582 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
86583 static inline void sched_clock_tick(void)
86584 {
86585@@ -2302,7 +2435,9 @@ void yield(void);
86586 extern struct exec_domain default_exec_domain;
86587
86588 union thread_union {
86589+#ifndef CONFIG_X86
86590 struct thread_info thread_info;
86591+#endif
86592 unsigned long stack[THREAD_SIZE/sizeof(long)];
86593 };
86594
86595@@ -2335,6 +2470,7 @@ extern struct pid_namespace init_pid_ns;
86596 */
86597
86598 extern struct task_struct *find_task_by_vpid(pid_t nr);
86599+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
86600 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
86601 struct pid_namespace *ns);
86602
86603@@ -2499,7 +2635,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
86604 extern void exit_itimers(struct signal_struct *);
86605 extern void flush_itimer_signals(void);
86606
86607-extern void do_group_exit(int);
86608+extern __noreturn void do_group_exit(int);
86609
86610 extern int do_execve(struct filename *,
86611 const char __user * const __user *,
86612@@ -2720,9 +2856,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
86613 #define task_stack_end_corrupted(task) \
86614 (*(end_of_stack(task)) != STACK_END_MAGIC)
86615
86616-static inline int object_is_on_stack(void *obj)
86617+static inline int object_starts_on_stack(const void *obj)
86618 {
86619- void *stack = task_stack_page(current);
86620+ const void *stack = task_stack_page(current);
86621
86622 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
86623 }
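
populate_stack() above (under GRKERNSEC_KSTACKOVERFLOW) pre-faults every page of the kernel stack by reading one word per page through a volatile pointer, so later code cannot take a surprise stack fault. A userspace sketch of the same walk; PAGE_SZ = 4096 is an assumption and calloc stands in for the task stack:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096UL

static void populate(void *stack, unsigned long size)
{
	volatile int *ptr = stack;
	int *end = (int *)((char *)stack + size);

	while ((int *)ptr < end) {
		(void)*ptr;                   /* touch one word per page */
		ptr += PAGE_SZ / sizeof(int); /* advance a full page */
	}
}

int main(void)
{
	unsigned long size = 4 * PAGE_SZ;
	void *stack = calloc(1, size);

	if (!stack)
		return 1;
	populate(stack, size);
	puts("all stack pages faulted in");
	free(stack);
	return 0;
}
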
86624diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
86625index 596a0e0..bea77ec 100644
86626--- a/include/linux/sched/sysctl.h
86627+++ b/include/linux/sched/sysctl.h
86628@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
86629 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
86630
86631 extern int sysctl_max_map_count;
86632+extern unsigned long sysctl_heap_stack_gap;
86633
86634 extern unsigned int sysctl_sched_latency;
86635 extern unsigned int sysctl_sched_min_granularity;
86636diff --git a/include/linux/security.h b/include/linux/security.h
86637index a1b7dbd..036f47f 100644
86638--- a/include/linux/security.h
86639+++ b/include/linux/security.h
86640@@ -27,6 +27,7 @@
86641 #include <linux/slab.h>
86642 #include <linux/err.h>
86643 #include <linux/string.h>
86644+#include <linux/grsecurity.h>
86645
86646 struct linux_binprm;
86647 struct cred;
86648@@ -116,8 +117,6 @@ struct seq_file;
86649
86650 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
86651
86652-void reset_security_ops(void);
86653-
86654 #ifdef CONFIG_MMU
86655 extern unsigned long mmap_min_addr;
86656 extern unsigned long dac_mmap_min_addr;
86657@@ -1756,7 +1755,7 @@ struct security_operations {
86658 struct audit_context *actx);
86659 void (*audit_rule_free) (void *lsmrule);
86660 #endif /* CONFIG_AUDIT */
86661-};
86662+} __randomize_layout;
86663
86664 /* prototypes */
86665 extern int security_init(void);
86666diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
86667index dc368b8..e895209 100644
86668--- a/include/linux/semaphore.h
86669+++ b/include/linux/semaphore.h
86670@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
86671 }
86672
86673 extern void down(struct semaphore *sem);
86674-extern int __must_check down_interruptible(struct semaphore *sem);
86675+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
86676 extern int __must_check down_killable(struct semaphore *sem);
86677 extern int __must_check down_trylock(struct semaphore *sem);
86678 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
86679diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
86680index afbb1fd..e1d205d 100644
86681--- a/include/linux/seq_file.h
86682+++ b/include/linux/seq_file.h
86683@@ -27,6 +27,9 @@ struct seq_file {
86684 struct mutex lock;
86685 const struct seq_operations *op;
86686 int poll_event;
86687+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86688+ u64 exec_id;
86689+#endif
86690 #ifdef CONFIG_USER_NS
86691 struct user_namespace *user_ns;
86692 #endif
86693@@ -39,6 +42,7 @@ struct seq_operations {
86694 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
86695 int (*show) (struct seq_file *m, void *v);
86696 };
86697+typedef struct seq_operations __no_const seq_operations_no_const;
86698
86699 #define SEQ_SKIP 1
86700
86701@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
86702
86703 char *mangle_path(char *s, const char *p, const char *esc);
86704 int seq_open(struct file *, const struct seq_operations *);
86705+int seq_open_restrict(struct file *, const struct seq_operations *);
86706 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
86707 loff_t seq_lseek(struct file *, loff_t, int);
86708 int seq_release(struct inode *, struct file *);
86709@@ -128,6 +133,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
86710 const struct path *root, const char *esc);
86711
86712 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
86713+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
86714 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
86715 int single_release(struct inode *, struct file *);
86716 void *__seq_open_private(struct file *, const struct seq_operations *, int);
86717diff --git a/include/linux/shm.h b/include/linux/shm.h
86718index 6fb8016..ab4465e 100644
86719--- a/include/linux/shm.h
86720+++ b/include/linux/shm.h
86721@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
86722 /* The task created the shm object. NULL if the task is dead. */
86723 struct task_struct *shm_creator;
86724 struct list_head shm_clist; /* list by creator */
86725+#ifdef CONFIG_GRKERNSEC
86726+ u64 shm_createtime;
86727+ pid_t shm_lapid;
86728+#endif
86729 };
86730
86731 /* shm_mode upper byte flags */
86732diff --git a/include/linux/signal.h b/include/linux/signal.h
86733index ab1e039..ad4229e 100644
86734--- a/include/linux/signal.h
86735+++ b/include/linux/signal.h
86736@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
86737 * know it'll be handled, so that they don't get converted to
86738 * SIGKILL or just silently dropped.
86739 */
86740- kernel_sigaction(sig, (__force __sighandler_t)2);
86741+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
86742 }
86743
86744 static inline void disallow_signal(int sig)
86745diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
86746index bdccc4b..e9f8670 100644
86747--- a/include/linux/skbuff.h
86748+++ b/include/linux/skbuff.h
86749@@ -771,7 +771,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
86750 int node);
86751 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
86752 struct sk_buff *build_skb(void *data, unsigned int frag_size);
86753-static inline struct sk_buff *alloc_skb(unsigned int size,
86754+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
86755 gfp_t priority)
86756 {
86757 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
86758@@ -1967,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
86759 return skb->inner_transport_header - skb->inner_network_header;
86760 }
86761
86762-static inline int skb_network_offset(const struct sk_buff *skb)
86763+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
86764 {
86765 return skb_network_header(skb) - skb->data;
86766 }
86767@@ -2027,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
86768 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
86769 */
86770 #ifndef NET_SKB_PAD
86771-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
86772+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
86773 #endif
86774
86775 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
86776@@ -2669,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
86777 int *err);
86778 unsigned int datagram_poll(struct file *file, struct socket *sock,
86779 struct poll_table_struct *wait);
86780-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
86781+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
86782 struct iov_iter *to, int size);
86783-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
86784+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
86785 struct msghdr *msg, int size)
86786 {
86787 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
86788@@ -3193,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
86789 nf_bridge_put(skb->nf_bridge);
86790 skb->nf_bridge = NULL;
86791 #endif
86792+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
86793+ skb->nf_trace = 0;
86794+#endif
86795 }
86796
86797 static inline void nf_reset_trace(struct sk_buff *skb)
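NET_SKB_PAD switches from max(32, L1_CACHE_BYTES) to max(_AC(32,UL), L1_CACHE_BYTES), presumably so both operands have the same type on configurations where L1_CACHE_BYTES expands to an unsigned long: the kernel's max() rejects mixed types via a pointer comparison. A minimal sketch of that mechanism (the max() body mirrors include/linux/kernel.h; L1_CACHE_BYTES here is an assumed 64UL; GCC statement expressions required):

```c
#include <stdio.h>

#define max(x, y) ({                           \
	__typeof__(x) _max1 = (x);             \
	__typeof__(y) _max2 = (y);             \
	(void)(&_max1 == &_max2);              /* warns if types differ */ \
	_max1 > _max2 ? _max1 : _max2; })

#define _AC(X, Y)       (X##Y)                 /* simplified uapi const.h */
#define L1_CACHE_BYTES  64UL                   /* assumed cache-line size */

int main(void)
{
	/* max(32, L1_CACHE_BYTES) would compare int* with unsigned long*
	 * in the type check above; _AC(32,UL) makes both unsigned long. */
	printf("%lu\n", max(_AC(32, UL), L1_CACHE_BYTES));
	return 0;
}
```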
86798diff --git a/include/linux/slab.h b/include/linux/slab.h
86799index 76f1fee..d95e6d2 100644
86800--- a/include/linux/slab.h
86801+++ b/include/linux/slab.h
86802@@ -14,15 +14,29 @@
86803 #include <linux/gfp.h>
86804 #include <linux/types.h>
86805 #include <linux/workqueue.h>
86806-
86807+#include <linux/err.h>
86808
86809 /*
86810 * Flags to pass to kmem_cache_create().
86811 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
86812 */
86813 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
86814+
86815+#ifdef CONFIG_PAX_USERCOPY_SLABS
86816+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
86817+#else
86818+#define SLAB_USERCOPY 0x00000000UL
86819+#endif
86820+
86821 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
86822 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
86823+
86824+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86825+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
86826+#else
86827+#define SLAB_NO_SANITIZE 0x00000000UL
86828+#endif
86829+
86830 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
86831 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
86832 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
86833@@ -98,10 +112,13 @@
86834 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
86835 * Both make kfree a no-op.
86836 */
86837-#define ZERO_SIZE_PTR ((void *)16)
86838+#define ZERO_SIZE_PTR \
86839+({ \
86840+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
86841+ (void *)(-MAX_ERRNO-1L); \
86842+})
86843
86844-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
86845- (unsigned long)ZERO_SIZE_PTR)
86846+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
86847
86848 #include <linux/kmemleak.h>
86849 #include <linux/kasan.h>
86850@@ -143,6 +160,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
86851 void kfree(const void *);
86852 void kzfree(const void *);
86853 size_t ksize(const void *);
86854+const char *check_heap_object(const void *ptr, unsigned long n);
86855+bool is_usercopy_object(const void *ptr);
86856
86857 /*
86858 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
86859@@ -235,6 +254,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
86860 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86861 #endif
86862
86863+#ifdef CONFIG_PAX_USERCOPY_SLABS
86864+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
86865+#endif
86866+
86867 /*
86868 * Figure out which kmalloc slab an allocation of a certain size
86869 * belongs to.
86870@@ -243,7 +266,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86871 * 2 = 120 .. 192 bytes
86872 * n = 2^(n-1) .. 2^n -1
86873 */
86874-static __always_inline int kmalloc_index(size_t size)
86875+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
86876 {
86877 if (!size)
86878 return 0;
86879@@ -286,15 +309,15 @@ static __always_inline int kmalloc_index(size_t size)
86880 }
86881 #endif /* !CONFIG_SLOB */
86882
86883-void *__kmalloc(size_t size, gfp_t flags);
86884+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
86885 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
86886 void kmem_cache_free(struct kmem_cache *, void *);
86887
86888 #ifdef CONFIG_NUMA
86889-void *__kmalloc_node(size_t size, gfp_t flags, int node);
86890+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
86891 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
86892 #else
86893-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
86894+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
86895 {
86896 return __kmalloc(size, flags);
86897 }
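The slab.h hunks do two things worth unpacking. ZERO_SIZE_PTR moves from (void *)16 to (void *)(-MAX_ERRNO-1L), directly below the ERR_PTR range in the permanently unmapped top page (the BUILD_BUG_ON evidently guards that MAX_ERRNO stays within one page so this address can never be mapped), and ZERO_OR_NULL_PTR becomes a single unsigned comparison in which NULL wraps to ULONG_MAX. A compilable sketch of the comparison trick, assuming an LP64 target and the kernel's MAX_ERRNO of 4095:

```c
#include <assert.h>
#include <stdio.h>

#define MAX_ERRNO      4095UL                       /* as in linux/err.h */
#define ZERO_SIZE_PTR  ((void *)(-MAX_ERRNO - 1L))  /* 0xff..f000 on LP64 */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int obj;

	assert(ZERO_OR_NULL_PTR(NULL));           /* 0 - 1 wraps to ULONG_MAX */
	assert(ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));  /* the marker itself */
	assert(!ZERO_OR_NULL_PTR(&obj));          /* ordinary addresses pass */
	puts("ok");
	return 0;
}
```

Note the comparison is also true for the adjacent ERR_PTR range, so a stray error pointer reaching kfree() becomes a no-op rather than a wild free.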
86898diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
86899index 33d0490..70a6313 100644
86900--- a/include/linux/slab_def.h
86901+++ b/include/linux/slab_def.h
86902@@ -40,7 +40,7 @@ struct kmem_cache {
86903 /* 4) cache creation/removal */
86904 const char *name;
86905 struct list_head list;
86906- int refcount;
86907+ atomic_t refcount;
86908 int object_size;
86909 int align;
86910
86911@@ -56,10 +56,14 @@ struct kmem_cache {
86912 unsigned long node_allocs;
86913 unsigned long node_frees;
86914 unsigned long node_overflow;
86915- atomic_t allochit;
86916- atomic_t allocmiss;
86917- atomic_t freehit;
86918- atomic_t freemiss;
86919+ atomic_unchecked_t allochit;
86920+ atomic_unchecked_t allocmiss;
86921+ atomic_unchecked_t freehit;
86922+ atomic_unchecked_t freemiss;
86923+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86924+ atomic_unchecked_t sanitized;
86925+ atomic_unchecked_t not_sanitized;
86926+#endif
86927
86928 /*
86929 * If debugging is enabled, then the allocator can add additional
86930diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
86931index 3388511..6252f90 100644
86932--- a/include/linux/slub_def.h
86933+++ b/include/linux/slub_def.h
86934@@ -74,7 +74,7 @@ struct kmem_cache {
86935 struct kmem_cache_order_objects max;
86936 struct kmem_cache_order_objects min;
86937 gfp_t allocflags; /* gfp flags to use on each alloc */
86938- int refcount; /* Refcount for slab cache destroy */
86939+ atomic_t refcount; /* Refcount for slab cache destroy */
86940 void (*ctor)(void *);
86941 int inuse; /* Offset to metadata */
86942 int align; /* Alignment */
86943diff --git a/include/linux/smp.h b/include/linux/smp.h
86944index be91db2..3f23232 100644
86945--- a/include/linux/smp.h
86946+++ b/include/linux/smp.h
86947@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
86948 #endif
86949
86950 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
86951+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
86952 #define put_cpu() preempt_enable()
86953+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
86954
86955 /*
86956 * Callback to arch code if there's nosmp or maxcpus=0 on the
86957diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
86958index 46cca4c..3323536 100644
86959--- a/include/linux/sock_diag.h
86960+++ b/include/linux/sock_diag.h
86961@@ -11,7 +11,7 @@ struct sock;
86962 struct sock_diag_handler {
86963 __u8 family;
86964 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
86965-};
86966+} __do_const;
86967
86968 int sock_diag_register(const struct sock_diag_handler *h);
86969 void sock_diag_unregister(const struct sock_diag_handler *h);
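This is the first of many __do_const annotations in this section (rpc_procinfo, svc_procedure, auth_ops, syscore_ops, the sysfs attribute structs, and so on). The tag is consumed by grsecurity's constify GCC plugin, which forces such ops structures into read-only memory so their function pointers cannot be rewritten at run time. A hand-constified userspace analogue of the effect:

```c
#include <stdio.h>

/* With the table const, it lands in .rodata: overwriting .dump at run
 * time faults instead of silently redirecting control flow. */
struct diag_handler {
	unsigned char family;
	int (*dump)(const char *msg);
};

static int dump_impl(const char *msg)
{
	return printf("dump: %s\n", msg);
}

static const struct diag_handler handler = {
	.family = 2,            /* AF_INET stand-in */
	.dump   = dump_impl,
};

int main(void)
{
	return handler.dump("hello") < 0;
}
```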
86970diff --git a/include/linux/sonet.h b/include/linux/sonet.h
86971index 680f9a3..f13aeb0 100644
86972--- a/include/linux/sonet.h
86973+++ b/include/linux/sonet.h
86974@@ -7,7 +7,7 @@
86975 #include <uapi/linux/sonet.h>
86976
86977 struct k_sonet_stats {
86978-#define __HANDLE_ITEM(i) atomic_t i
86979+#define __HANDLE_ITEM(i) atomic_unchecked_t i
86980 __SONET_ITEMS
86981 #undef __HANDLE_ITEM
86982 };
86983diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
86984index 07d8e53..dc934c9 100644
86985--- a/include/linux/sunrpc/addr.h
86986+++ b/include/linux/sunrpc/addr.h
86987@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
86988 {
86989 switch (sap->sa_family) {
86990 case AF_INET:
86991- return ntohs(((struct sockaddr_in *)sap)->sin_port);
86992+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
86993 case AF_INET6:
86994- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
86995+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
86996 }
86997 return 0;
86998 }
86999@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
87000 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
87001 const struct sockaddr *src)
87002 {
87003- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
87004+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
87005 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
87006
87007 dsin->sin_family = ssin->sin_family;
87008@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
87009 if (sa->sa_family != AF_INET6)
87010 return 0;
87011
87012- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
87013+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
87014 }
87015
87016 #endif /* _LINUX_SUNRPC_ADDR_H */
87017diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
87018index 598ba80..d90cba6 100644
87019--- a/include/linux/sunrpc/clnt.h
87020+++ b/include/linux/sunrpc/clnt.h
87021@@ -100,7 +100,7 @@ struct rpc_procinfo {
87022 unsigned int p_timer; /* Which RTT timer to use */
87023 u32 p_statidx; /* Which procedure to account */
87024 const char * p_name; /* name of procedure */
87025-};
87026+} __do_const;
87027
87028 #ifdef __KERNEL__
87029
87030diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
87031index fae6fb9..023fbcd 100644
87032--- a/include/linux/sunrpc/svc.h
87033+++ b/include/linux/sunrpc/svc.h
87034@@ -420,7 +420,7 @@ struct svc_procedure {
87035 unsigned int pc_count; /* call count */
87036 unsigned int pc_cachetype; /* cache info (NFS) */
87037 unsigned int pc_xdrressize; /* maximum size of XDR reply */
87038-};
87039+} __do_const;
87040
87041 /*
87042 * Function prototypes.
87043diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
87044index df8edf8..d140fec 100644
87045--- a/include/linux/sunrpc/svc_rdma.h
87046+++ b/include/linux/sunrpc/svc_rdma.h
87047@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
87048 extern unsigned int svcrdma_max_requests;
87049 extern unsigned int svcrdma_max_req_size;
87050
87051-extern atomic_t rdma_stat_recv;
87052-extern atomic_t rdma_stat_read;
87053-extern atomic_t rdma_stat_write;
87054-extern atomic_t rdma_stat_sq_starve;
87055-extern atomic_t rdma_stat_rq_starve;
87056-extern atomic_t rdma_stat_rq_poll;
87057-extern atomic_t rdma_stat_rq_prod;
87058-extern atomic_t rdma_stat_sq_poll;
87059-extern atomic_t rdma_stat_sq_prod;
87060+extern atomic_unchecked_t rdma_stat_recv;
87061+extern atomic_unchecked_t rdma_stat_read;
87062+extern atomic_unchecked_t rdma_stat_write;
87063+extern atomic_unchecked_t rdma_stat_sq_starve;
87064+extern atomic_unchecked_t rdma_stat_rq_starve;
87065+extern atomic_unchecked_t rdma_stat_rq_poll;
87066+extern atomic_unchecked_t rdma_stat_rq_prod;
87067+extern atomic_unchecked_t rdma_stat_sq_poll;
87068+extern atomic_unchecked_t rdma_stat_sq_prod;
87069
87070 /*
87071 * Contexts are built when an RDMA request is created and are a
87072diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
87073index 8d71d65..f79586e 100644
87074--- a/include/linux/sunrpc/svcauth.h
87075+++ b/include/linux/sunrpc/svcauth.h
87076@@ -120,7 +120,7 @@ struct auth_ops {
87077 int (*release)(struct svc_rqst *rq);
87078 void (*domain_release)(struct auth_domain *);
87079 int (*set_client)(struct svc_rqst *rq);
87080-};
87081+} __do_const;
87082
87083 #define SVC_GARBAGE 1
87084 #define SVC_SYSERR 2
87085diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
87086index e7a018e..49f8b17 100644
87087--- a/include/linux/swiotlb.h
87088+++ b/include/linux/swiotlb.h
87089@@ -60,7 +60,8 @@ extern void
87090
87091 extern void
87092 swiotlb_free_coherent(struct device *hwdev, size_t size,
87093- void *vaddr, dma_addr_t dma_handle);
87094+ void *vaddr, dma_addr_t dma_handle,
87095+ struct dma_attrs *attrs);
87096
87097 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
87098 unsigned long offset, size_t size,
87099diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
87100index 76d1e38..200776e 100644
87101--- a/include/linux/syscalls.h
87102+++ b/include/linux/syscalls.h
87103@@ -102,7 +102,12 @@ union bpf_attr;
87104 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
87105 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
87106 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
87107-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
87108+#define __SC_LONG(t, a) __typeof__( \
87109+ __builtin_choose_expr( \
87110+ sizeof(t) > sizeof(int), \
87111+ (t) 0, \
87112+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
87113+ )) a
87114 #define __SC_CAST(t, a) (t) a
87115 #define __SC_ARGS(t, a) a
87116 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
87117@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
87118 asmlinkage long sys_fsync(unsigned int fd);
87119 asmlinkage long sys_fdatasync(unsigned int fd);
87120 asmlinkage long sys_bdflush(int func, long data);
87121-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
87122- char __user *type, unsigned long flags,
87123+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
87124+ const char __user *type, unsigned long flags,
87125 void __user *data);
87126-asmlinkage long sys_umount(char __user *name, int flags);
87127-asmlinkage long sys_oldumount(char __user *name);
87128+asmlinkage long sys_umount(const char __user *name, int flags);
87129+asmlinkage long sys_oldumount(const char __user *name);
87130 asmlinkage long sys_truncate(const char __user *path, long length);
87131 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
87132 asmlinkage long sys_stat(const char __user *filename,
87133@@ -604,7 +609,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
87134 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
87135 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
87136 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
87137- struct sockaddr __user *, int);
87138+ struct sockaddr __user *, int) __intentional_overflow(0);
87139 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
87140 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
87141 unsigned int vlen, unsigned flags);
87142@@ -663,10 +668,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
87143
87144 asmlinkage long sys_semget(key_t key, int nsems, int semflg);
87145 asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
87146- unsigned nsops);
87147+ long nsops);
87148 asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
87149 asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
87150- unsigned nsops,
87151+ long nsops,
87152 const struct timespec __user *timeout);
87153 asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
87154 asmlinkage long sys_shmget(key_t key, size_t size, int flag);
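The vanilla __SC_LONG widens every sub-long syscall argument through a signed long, which sign-extends unsigned 32-bit values on 64-bit kernels; the replacement picks 0UL or 0L according to the argument type's signedness. __type_is_unsigned() is provided elsewhere in this patch; the sketch below substitutes a hypothetical ((t)-1 > 0) test and needs GCC or Clang for __builtin_choose_expr:

```c
#include <stdio.h>

#define TYPE_IS_UNSIGNED(t) ((t)-1 > (t)0)  /* stand-in for __type_is_unsigned */
#define SC_LONG(t) __typeof__(                                 \
	__builtin_choose_expr(sizeof(t) > sizeof(int),         \
		(t)0,                                          \
		__builtin_choose_expr(TYPE_IS_UNSIGNED(t), 0UL, 0L)))

int main(void)
{
	SC_LONG(unsigned int) a = (unsigned int)-1;  /* zero-extends: 4294967295 */
	SC_LONG(int) b = -1;                         /* sign-extends: -1 */

	printf("%lu %ld\n", (unsigned long)a, (long)b);
	return 0;
}
```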
87155diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
87156index 27b3b0b..e093dd9 100644
87157--- a/include/linux/syscore_ops.h
87158+++ b/include/linux/syscore_ops.h
87159@@ -16,7 +16,7 @@ struct syscore_ops {
87160 int (*suspend)(void);
87161 void (*resume)(void);
87162 void (*shutdown)(void);
87163-};
87164+} __do_const;
87165
87166 extern void register_syscore_ops(struct syscore_ops *ops);
87167 extern void unregister_syscore_ops(struct syscore_ops *ops);
87168diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
87169index b7361f8..341a15a 100644
87170--- a/include/linux/sysctl.h
87171+++ b/include/linux/sysctl.h
87172@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
87173
87174 extern int proc_dostring(struct ctl_table *, int,
87175 void __user *, size_t *, loff_t *);
87176+extern int proc_dostring_modpriv(struct ctl_table *, int,
87177+ void __user *, size_t *, loff_t *);
87178 extern int proc_dointvec(struct ctl_table *, int,
87179 void __user *, size_t *, loff_t *);
87180 extern int proc_dointvec_minmax(struct ctl_table *, int,
87181@@ -113,7 +115,8 @@ struct ctl_table
87182 struct ctl_table_poll *poll;
87183 void *extra1;
87184 void *extra2;
87185-};
87186+} __do_const __randomize_layout;
87187+typedef struct ctl_table __no_const ctl_table_no_const;
87188
87189 struct ctl_node {
87190 struct rb_node node;
87191diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
87192index ddad161..a3efd26 100644
87193--- a/include/linux/sysfs.h
87194+++ b/include/linux/sysfs.h
87195@@ -34,7 +34,8 @@ struct attribute {
87196 struct lock_class_key *key;
87197 struct lock_class_key skey;
87198 #endif
87199-};
87200+} __do_const;
87201+typedef struct attribute __no_const attribute_no_const;
87202
87203 /**
87204 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
87205@@ -63,7 +64,8 @@ struct attribute_group {
87206 struct attribute *, int);
87207 struct attribute **attrs;
87208 struct bin_attribute **bin_attrs;
87209-};
87210+} __do_const;
87211+typedef struct attribute_group __no_const attribute_group_no_const;
87212
87213 /**
87214 * Use these macros to make defining attributes easier. See include/linux/device.h
87215@@ -137,7 +139,8 @@ struct bin_attribute {
87216 char *, loff_t, size_t);
87217 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
87218 struct vm_area_struct *vma);
87219-};
87220+} __do_const;
87221+typedef struct bin_attribute __no_const bin_attribute_no_const;
87222
87223 /**
87224 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
87225diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
87226index 387fa7d..3fcde6b 100644
87227--- a/include/linux/sysrq.h
87228+++ b/include/linux/sysrq.h
87229@@ -16,6 +16,7 @@
87230
87231 #include <linux/errno.h>
87232 #include <linux/types.h>
87233+#include <linux/compiler.h>
87234
87235 /* Possible values of bitmask for enabling sysrq functions */
87236 /* 0x0001 is reserved for enable everything */
87237@@ -33,7 +34,7 @@ struct sysrq_key_op {
87238 char *help_msg;
87239 char *action_msg;
87240 int enable_mask;
87241-};
87242+} __do_const;
87243
87244 #ifdef CONFIG_MAGIC_SYSRQ
87245
87246diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
87247index ff307b5..f1a4468 100644
87248--- a/include/linux/thread_info.h
87249+++ b/include/linux/thread_info.h
87250@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
87251 #error "no set_restore_sigmask() provided and default one won't work"
87252 #endif
87253
87254+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
87255+
87256+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
87257+{
87258+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
87259+}
87260+
87261 #endif /* __KERNEL__ */
87262
87263 #endif /* _LINUX_THREAD_INFO_H */
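check_object_size() is the PAX_USERCOPY entry point that the patched copy_to/from_user paths call before moving data across the user/kernel boundary; __check_object_size() (implemented elsewhere in this patch) walks slab and stack metadata to bound the kernel object. A self-contained sketch of the wrapper's shape, with a toy stub standing in for the real walk:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stub for the real metadata walk implemented elsewhere in the patch. */
static void __check_object_size(const void *ptr, unsigned long n,
				bool to_user, bool const_size)
{
	(void)to_user;
	if (const_size)		/* toy policy: constant sizes pre-vetted */
		return;
	if (ptr == NULL && n != 0) {
		fprintf(stderr, "usercopy violation (%lu bytes)\n", n);
		abort();
	}
}

static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{
	/* forward whether n is a compile-time constant, as the header does */
	__check_object_size(ptr, n, to_user, __builtin_constant_p(n));
}

int main(void)
{
	char buf[16];

	check_object_size(buf, sizeof(buf), true);  /* constant-size path */
	return 0;
}
```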
87264diff --git a/include/linux/tty.h b/include/linux/tty.h
87265index 358a337..8829c1f 100644
87266--- a/include/linux/tty.h
87267+++ b/include/linux/tty.h
87268@@ -225,7 +225,7 @@ struct tty_port {
87269 const struct tty_port_operations *ops; /* Port operations */
87270 spinlock_t lock; /* Lock protecting tty field */
87271 int blocked_open; /* Waiting to open */
87272- int count; /* Usage count */
87273+ atomic_t count; /* Usage count */
87274 wait_queue_head_t open_wait; /* Open waiters */
87275 wait_queue_head_t close_wait; /* Close waiters */
87276 wait_queue_head_t delta_msr_wait; /* Modem status change */
87277@@ -313,7 +313,7 @@ struct tty_struct {
87278 /* If the tty has a pending do_SAK, queue it here - akpm */
87279 struct work_struct SAK_work;
87280 struct tty_port *port;
87281-};
87282+} __randomize_layout;
87283
87284 /* Each of a tty's open files has private_data pointing to tty_file_private */
87285 struct tty_file_private {
87286@@ -572,7 +572,7 @@ extern int tty_port_open(struct tty_port *port,
87287 struct tty_struct *tty, struct file *filp);
87288 static inline int tty_port_users(struct tty_port *port)
87289 {
87290- return port->count + port->blocked_open;
87291+ return atomic_read(&port->count) + port->blocked_open;
87292 }
87293
87294 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
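tty_port.count moves from open-coded int arithmetic to atomic_t because the open, close, and hangup paths can race on it, and tty_port_users() correspondingly switches to atomic_read(). A userspace analogue with C11 atomics:

```c
#include <stdatomic.h>
#include <stdio.h>

struct tty_port_like {
	atomic_int count;       /* was a plain int in the vanilla header */
	int blocked_open;
};

static int tty_port_users_like(struct tty_port_like *port)
{
	/* mirrors the patched tty_port_users() */
	return atomic_load(&port->count) + port->blocked_open;
}

int main(void)
{
	struct tty_port_like port = { .count = 1, .blocked_open = 2 };

	atomic_fetch_add(&port.count, 1);   /* was: port->count++ */
	printf("%d users\n", tty_port_users_like(&port));
	return 0;
}
```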
87295diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
87296index 92e337c..f46757b 100644
87297--- a/include/linux/tty_driver.h
87298+++ b/include/linux/tty_driver.h
87299@@ -291,7 +291,7 @@ struct tty_operations {
87300 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
87301 #endif
87302 const struct file_operations *proc_fops;
87303-};
87304+} __do_const __randomize_layout;
87305
87306 struct tty_driver {
87307 int magic; /* magic number for this structure */
87308@@ -325,7 +325,7 @@ struct tty_driver {
87309
87310 const struct tty_operations *ops;
87311 struct list_head tty_drivers;
87312-};
87313+} __randomize_layout;
87314
87315 extern struct list_head tty_drivers;
87316
87317diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
87318index 00c9d68..bc0188b 100644
87319--- a/include/linux/tty_ldisc.h
87320+++ b/include/linux/tty_ldisc.h
87321@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
87322
87323 struct module *owner;
87324
87325- int refcount;
87326+ atomic_t refcount;
87327 };
87328
87329 struct tty_ldisc {
87330diff --git a/include/linux/types.h b/include/linux/types.h
87331index 6747247..fc7ec8b 100644
87332--- a/include/linux/types.h
87333+++ b/include/linux/types.h
87334@@ -174,10 +174,26 @@ typedef struct {
87335 int counter;
87336 } atomic_t;
87337
87338+#ifdef CONFIG_PAX_REFCOUNT
87339+typedef struct {
87340+ int counter;
87341+} atomic_unchecked_t;
87342+#else
87343+typedef atomic_t atomic_unchecked_t;
87344+#endif
87345+
87346 #ifdef CONFIG_64BIT
87347 typedef struct {
87348 long counter;
87349 } atomic64_t;
87350+
87351+#ifdef CONFIG_PAX_REFCOUNT
87352+typedef struct {
87353+ long counter;
87354+} atomic64_unchecked_t;
87355+#else
87356+typedef atomic64_t atomic64_unchecked_t;
87357+#endif
87358 #endif
87359
87360 struct list_head {
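This types.h hunk is the root of the many atomic_t to atomic_unchecked_t conversions in this section (sonet, svc_rdma, caif, iucv, ip_vs, uio, usb, vmstat, and others). Under PAX_REFCOUNT the ordinary atomic ops detect and abort on overflow to stop refcount-wraparound exploits, while the *_unchecked twins keep wrapping semantics for counters where overflow is harmless, such as statistics. A userspace sketch of the split (the kernel does the detection in asm, not with __builtin_add_overflow):

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	/* checked: trap on overflow instead of wrapping a refcount */
	if (__builtin_add_overflow(v->counter, 1, &v->counter)) {
		fprintf(stderr, "refcount overflow\n"); /* kernel: report+kill */
		abort();
	}
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* unchecked: well-defined wraparound, fine for packet counters */
	v->counter = (int)((unsigned)v->counter + 1);
}

int main(void)
{
	atomic_t ref = { 0 };
	atomic_unchecked_t pkts = { 0x7fffffff };

	atomic_inc(&ref);
	atomic_inc_unchecked(&pkts);         /* wraps to INT_MIN, by design */
	printf("%d %d\n", ref.counter, pkts.counter);
	return 0;
}
```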
87361diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
87362index ecd3319..8a36ded 100644
87363--- a/include/linux/uaccess.h
87364+++ b/include/linux/uaccess.h
87365@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
87366 long ret; \
87367 mm_segment_t old_fs = get_fs(); \
87368 \
87369- set_fs(KERNEL_DS); \
87370 pagefault_disable(); \
87371- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
87372- pagefault_enable(); \
87373+ set_fs(KERNEL_DS); \
87374+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
87375 set_fs(old_fs); \
87376+ pagefault_enable(); \
87377 ret; \
87378 })
87379
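The probe-read macro is reordered so the set_fs(KERNEL_DS) window nests strictly inside the pagefault_disable() window, presumably so the widened address limit can never be live while a fault could schedule; __force_user also replaces __force for grsecurity's sparse checking. A compilable sketch of the resulting nesting, with trivial stubs for the kernel primitives:

```c
#include <string.h>

static int fs;                          /* stand-in address limit */
#define KERNEL_DS 1
static int  get_fs(void)   { return fs; }
static void set_fs(int v)  { fs = v; }
static void pagefault_disable(void) {}
static void pagefault_enable(void)  {}

static long probe_kernel_read_like(void *dst, const void *src, size_t size)
{
	int old_fs = get_fs();

	pagefault_disable();    /* 1: open the no-fault window         */
	set_fs(KERNEL_DS);      /* 2: widen the limit inside it        */
	memcpy(dst, src, size); /*    models __copy_from_user_inatomic */
	set_fs(old_fs);         /* 3: restore the limit ...            */
	pagefault_enable();     /* 4: ... before faults come back      */
	return 0;
}

int main(void)
{
	int src = 42, dst = 0;

	probe_kernel_read_like(&dst, &src, sizeof(dst));
	return dst != 42;
}
```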
87380diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
87381index 2d1f9b6..d7a9fce 100644
87382--- a/include/linux/uidgid.h
87383+++ b/include/linux/uidgid.h
87384@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
87385
87386 #endif /* CONFIG_USER_NS */
87387
87388+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
87389+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
87390+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
87391+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
87392+
87393 #endif /* _LINUX_UIDGID_H */
87394diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
87395index 32c0e83..671eb35 100644
87396--- a/include/linux/uio_driver.h
87397+++ b/include/linux/uio_driver.h
87398@@ -67,7 +67,7 @@ struct uio_device {
87399 struct module *owner;
87400 struct device *dev;
87401 int minor;
87402- atomic_t event;
87403+ atomic_unchecked_t event;
87404 struct fasync_struct *async_queue;
87405 wait_queue_head_t wait;
87406 struct uio_info *info;
87407diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
87408index 99c1b4d..562e6f3 100644
87409--- a/include/linux/unaligned/access_ok.h
87410+++ b/include/linux/unaligned/access_ok.h
87411@@ -4,34 +4,34 @@
87412 #include <linux/kernel.h>
87413 #include <asm/byteorder.h>
87414
87415-static inline u16 get_unaligned_le16(const void *p)
87416+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
87417 {
87418- return le16_to_cpup((__le16 *)p);
87419+ return le16_to_cpup((const __le16 *)p);
87420 }
87421
87422-static inline u32 get_unaligned_le32(const void *p)
87423+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
87424 {
87425- return le32_to_cpup((__le32 *)p);
87426+ return le32_to_cpup((const __le32 *)p);
87427 }
87428
87429-static inline u64 get_unaligned_le64(const void *p)
87430+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
87431 {
87432- return le64_to_cpup((__le64 *)p);
87433+ return le64_to_cpup((const __le64 *)p);
87434 }
87435
87436-static inline u16 get_unaligned_be16(const void *p)
87437+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
87438 {
87439- return be16_to_cpup((__be16 *)p);
87440+ return be16_to_cpup((const __be16 *)p);
87441 }
87442
87443-static inline u32 get_unaligned_be32(const void *p)
87444+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
87445 {
87446- return be32_to_cpup((__be32 *)p);
87447+ return be32_to_cpup((const __be32 *)p);
87448 }
87449
87450-static inline u64 get_unaligned_be64(const void *p)
87451+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
87452 {
87453- return be64_to_cpup((__be64 *)p);
87454+ return be64_to_cpup((const __be64 *)p);
87455 }
87456
87457 static inline void put_unaligned_le16(u16 val, void *p)
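Besides const-qualifying the source pointers, each get_unaligned_* helper gains __intentional_overflow(-1), telling the size-overflow plugin that wraparound in these byte-assembly reads is expected. A standalone equivalent of get_unaligned_le32() that behaves the same regardless of host endianness:

```c
#include <stdint.h>
#include <stdio.h>

/* Assemble the bytes explicitly instead of casting, with a
 * const-qualified source as in the patched header. */
static uint32_t get_unaligned_le32_like(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0]       | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08x\n", get_unaligned_le32_like(buf)); /* 0x12345678 */
	return 0;
}
```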
87458diff --git a/include/linux/usb.h b/include/linux/usb.h
87459index 447fe29..9fc875f 100644
87460--- a/include/linux/usb.h
87461+++ b/include/linux/usb.h
87462@@ -592,7 +592,7 @@ struct usb_device {
87463 int maxchild;
87464
87465 u32 quirks;
87466- atomic_t urbnum;
87467+ atomic_unchecked_t urbnum;
87468
87469 unsigned long active_duration;
87470
87471@@ -1676,7 +1676,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
87472
87473 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
87474 __u8 request, __u8 requesttype, __u16 value, __u16 index,
87475- void *data, __u16 size, int timeout);
87476+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
87477 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
87478 void *data, int len, int *actual_length, int timeout);
87479 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
87480diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
87481index 9fd9e48..e2c5f35 100644
87482--- a/include/linux/usb/renesas_usbhs.h
87483+++ b/include/linux/usb/renesas_usbhs.h
87484@@ -39,7 +39,7 @@ enum {
87485 */
87486 struct renesas_usbhs_driver_callback {
87487 int (*notify_hotplug)(struct platform_device *pdev);
87488-};
87489+} __no_const;
87490
87491 /*
87492 * callback functions for platform
87493diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
87494index 8297e5b..0dfae27 100644
87495--- a/include/linux/user_namespace.h
87496+++ b/include/linux/user_namespace.h
87497@@ -39,7 +39,7 @@ struct user_namespace {
87498 struct key *persistent_keyring_register;
87499 struct rw_semaphore persistent_keyring_register_sem;
87500 #endif
87501-};
87502+} __randomize_layout;
87503
87504 extern struct user_namespace init_user_ns;
87505
87506diff --git a/include/linux/utsname.h b/include/linux/utsname.h
87507index 5093f58..c103e58 100644
87508--- a/include/linux/utsname.h
87509+++ b/include/linux/utsname.h
87510@@ -25,7 +25,7 @@ struct uts_namespace {
87511 struct new_utsname name;
87512 struct user_namespace *user_ns;
87513 struct ns_common ns;
87514-};
87515+} __randomize_layout;
87516 extern struct uts_namespace init_uts_ns;
87517
87518 #ifdef CONFIG_UTS_NS
87519diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
87520index 6f8fbcf..4efc177 100644
87521--- a/include/linux/vermagic.h
87522+++ b/include/linux/vermagic.h
87523@@ -25,9 +25,42 @@
87524 #define MODULE_ARCH_VERMAGIC ""
87525 #endif
87526
87527+#ifdef CONFIG_PAX_REFCOUNT
87528+#define MODULE_PAX_REFCOUNT "REFCOUNT "
87529+#else
87530+#define MODULE_PAX_REFCOUNT ""
87531+#endif
87532+
87533+#ifdef CONSTIFY_PLUGIN
87534+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
87535+#else
87536+#define MODULE_CONSTIFY_PLUGIN ""
87537+#endif
87538+
87539+#ifdef STACKLEAK_PLUGIN
87540+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
87541+#else
87542+#define MODULE_STACKLEAK_PLUGIN ""
87543+#endif
87544+
87545+#ifdef RANDSTRUCT_PLUGIN
87546+#include <generated/randomize_layout_hash.h>
87547+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
87548+#else
87549+#define MODULE_RANDSTRUCT_PLUGIN
87550+#endif
87551+
87552+#ifdef CONFIG_GRKERNSEC
87553+#define MODULE_GRSEC "GRSEC "
87554+#else
87555+#define MODULE_GRSEC ""
87556+#endif
87557+
87558 #define VERMAGIC_STRING \
87559 UTS_RELEASE " " \
87560 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
87561 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
87562- MODULE_ARCH_VERMAGIC
87563+ MODULE_ARCH_VERMAGIC \
87564+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
87565+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
87566
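VERMAGIC_STRING grows per-feature tags so that a module built without the matching hardening options fails the vermagic comparison at load time instead of running against an incompatible ABI (note the randstruct tag embeds a hash of the seed, not the seed itself). An illustration of the concatenation with two of the tags enabled, using a hypothetical release string:

```c
#include <stdio.h>

#define UTS_RELEASE         "4.0.4-grsec"   /* hypothetical */
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#define MODULE_GRSEC        "GRSEC "

#define VERMAGIC_STRING UTS_RELEASE " " MODULE_PAX_REFCOUNT MODULE_GRSEC

int main(void)
{
	puts(VERMAGIC_STRING);   /* "4.0.4-grsec REFCOUNT GRSEC " */
	return 0;
}
```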
87567diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
87568index b483abd..af305ad 100644
87569--- a/include/linux/vga_switcheroo.h
87570+++ b/include/linux/vga_switcheroo.h
87571@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
87572
87573 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
87574
87575-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
87576+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
87577 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
87578-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
87579+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
87580 #else
87581
87582 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
87583@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
87584
87585 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
87586
87587-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87588+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87589 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
87590-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87591+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87592
87593 #endif
87594 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
87595diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
87596index 0ec5983..cc61051 100644
87597--- a/include/linux/vmalloc.h
87598+++ b/include/linux/vmalloc.h
87599@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
87600 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
87601 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
87602 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
87603+
87604+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
87605+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
87606+#endif
87607+
87608 /* bits [20..32] reserved for arch specific ioremap internals */
87609
87610 /*
87611@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
87612 unsigned long flags, pgprot_t prot);
87613 extern void vunmap(const void *addr);
87614
87615+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
87616+extern void unmap_process_stacks(struct task_struct *task);
87617+#endif
87618+
87619 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
87620 unsigned long uaddr, void *kaddr,
87621 unsigned long size);
87622@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
87623
87624 /* for /dev/kmem */
87625 extern long vread(char *buf, char *addr, unsigned long count);
87626-extern long vwrite(char *buf, char *addr, unsigned long count);
87627+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
87628
87629 /*
87630 * Internals. Don't use..
87631diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
87632index 82e7db7..f8ce3d0 100644
87633--- a/include/linux/vmstat.h
87634+++ b/include/linux/vmstat.h
87635@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
87636 /*
87637 * Zone based page accounting with per cpu differentials.
87638 */
87639-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87640+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87641
87642 static inline void zone_page_state_add(long x, struct zone *zone,
87643 enum zone_stat_item item)
87644 {
87645- atomic_long_add(x, &zone->vm_stat[item]);
87646- atomic_long_add(x, &vm_stat[item]);
87647+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
87648+ atomic_long_add_unchecked(x, &vm_stat[item]);
87649 }
87650
87651-static inline unsigned long global_page_state(enum zone_stat_item item)
87652+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
87653 {
87654- long x = atomic_long_read(&vm_stat[item]);
87655+ long x = atomic_long_read_unchecked(&vm_stat[item]);
87656 #ifdef CONFIG_SMP
87657 if (x < 0)
87658 x = 0;
87659@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
87660 return x;
87661 }
87662
87663-static inline unsigned long zone_page_state(struct zone *zone,
87664+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
87665 enum zone_stat_item item)
87666 {
87667- long x = atomic_long_read(&zone->vm_stat[item]);
87668+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87669 #ifdef CONFIG_SMP
87670 if (x < 0)
87671 x = 0;
87672@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
87673 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
87674 enum zone_stat_item item)
87675 {
87676- long x = atomic_long_read(&zone->vm_stat[item]);
87677+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87678
87679 #ifdef CONFIG_SMP
87680 int cpu;
87681@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
87682
87683 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
87684 {
87685- atomic_long_inc(&zone->vm_stat[item]);
87686- atomic_long_inc(&vm_stat[item]);
87687+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
87688+ atomic_long_inc_unchecked(&vm_stat[item]);
87689 }
87690
87691 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
87692 {
87693- atomic_long_dec(&zone->vm_stat[item]);
87694- atomic_long_dec(&vm_stat[item]);
87695+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
87696+ atomic_long_dec_unchecked(&vm_stat[item]);
87697 }
87698
87699 static inline void __inc_zone_page_state(struct page *page,
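The vm_stat counters are pure statistics, so they move to the unchecked atomics (overflow here is not a security signal) and the readers gain __intentional_overflow(-1). Note what the patch keeps: the SMP clamp, since per-CPU deltas can make the summed counter transiently negative. A sketch of that clamp:

```c
#include <stdio.h>

/* Negative snapshots are reported as zero rather than wrapping into a
 * huge unsigned value, mirroring the patched global_page_state(). */
static unsigned long global_page_state_like(long raw)
{
	long x = raw;

	if (x < 0)      /* transient per-CPU underflow */
		x = 0;
	return (unsigned long)x;
}

int main(void)
{
	printf("%lu %lu\n", global_page_state_like(-3),
	       global_page_state_like(42));    /* 0 42 */
	return 0;
}
```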
87700diff --git a/include/linux/xattr.h b/include/linux/xattr.h
87701index 91b0a68..0e9adf6 100644
87702--- a/include/linux/xattr.h
87703+++ b/include/linux/xattr.h
87704@@ -28,7 +28,7 @@ struct xattr_handler {
87705 size_t size, int handler_flags);
87706 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
87707 size_t size, int flags, int handler_flags);
87708-};
87709+} __do_const;
87710
87711 struct xattr {
87712 const char *name;
87713@@ -37,6 +37,9 @@ struct xattr {
87714 };
87715
87716 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
87717+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87718+ssize_t pax_getxattr(struct dentry *, void *, size_t);
87719+#endif
87720 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
87721 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
87722 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
87723diff --git a/include/linux/zlib.h b/include/linux/zlib.h
87724index 92dbbd3..13ab0b3 100644
87725--- a/include/linux/zlib.h
87726+++ b/include/linux/zlib.h
87727@@ -31,6 +31,7 @@
87728 #define _ZLIB_H
87729
87730 #include <linux/zconf.h>
87731+#include <linux/compiler.h>
87732
87733 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
87734 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
87735@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
87736
87737 /* basic functions */
87738
87739-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
87740+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
87741 /*
87742 Returns the number of bytes that needs to be allocated for a per-
87743 stream workspace with the specified parameters. A pointer to this
87744diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
87745index 3e4fddf..5ec9104 100644
87746--- a/include/media/v4l2-dev.h
87747+++ b/include/media/v4l2-dev.h
87748@@ -75,7 +75,7 @@ struct v4l2_file_operations {
87749 int (*mmap) (struct file *, struct vm_area_struct *);
87750 int (*open) (struct file *);
87751 int (*release) (struct file *);
87752-};
87753+} __do_const;
87754
87755 /*
87756 * Newer version of video_device, handled by videodev2.c
87757diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
87758index ffb69da..040393e 100644
87759--- a/include/media/v4l2-device.h
87760+++ b/include/media/v4l2-device.h
87761@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
87762 this function returns 0. If the name ends with a digit (e.g. cx18),
87763 then the name will be set to cx18-0 since cx180 looks really odd. */
87764 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
87765- atomic_t *instance);
87766+ atomic_unchecked_t *instance);
87767
87768 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
87769 Since the parent disappears this ensures that v4l2_dev doesn't have an
87770diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
87771index 2a25dec..bf6dd8a 100644
87772--- a/include/net/9p/transport.h
87773+++ b/include/net/9p/transport.h
87774@@ -62,7 +62,7 @@ struct p9_trans_module {
87775 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
87776 int (*zc_request)(struct p9_client *, struct p9_req_t *,
87777 char *, char *, int , int, int, int);
87778-};
87779+} __do_const;
87780
87781 void v9fs_register_trans(struct p9_trans_module *m);
87782 void v9fs_unregister_trans(struct p9_trans_module *m);
87783diff --git a/include/net/af_unix.h b/include/net/af_unix.h
87784index a175ba4..196eb8242 100644
87785--- a/include/net/af_unix.h
87786+++ b/include/net/af_unix.h
87787@@ -36,7 +36,7 @@ struct unix_skb_parms {
87788 u32 secid; /* Security ID */
87789 #endif
87790 u32 consumed;
87791-};
87792+} __randomize_layout;
87793
87794 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
87795 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
87796diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
87797index 2239a37..a83461f 100644
87798--- a/include/net/bluetooth/l2cap.h
87799+++ b/include/net/bluetooth/l2cap.h
87800@@ -609,7 +609,7 @@ struct l2cap_ops {
87801 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
87802 unsigned long hdr_len,
87803 unsigned long len, int nb);
87804-};
87805+} __do_const;
87806
87807 struct l2cap_conn {
87808 struct hci_conn *hcon;
87809diff --git a/include/net/bonding.h b/include/net/bonding.h
87810index fda6fee..dbdf83c 100644
87811--- a/include/net/bonding.h
87812+++ b/include/net/bonding.h
87813@@ -665,7 +665,7 @@ extern struct rtnl_link_ops bond_link_ops;
87814
87815 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
87816 {
87817- atomic_long_inc(&dev->tx_dropped);
87818+ atomic_long_inc_unchecked(&dev->tx_dropped);
87819 dev_kfree_skb_any(skb);
87820 }
87821
87822diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
87823index f2ae33d..c457cf0 100644
87824--- a/include/net/caif/cfctrl.h
87825+++ b/include/net/caif/cfctrl.h
87826@@ -52,7 +52,7 @@ struct cfctrl_rsp {
87827 void (*radioset_rsp)(void);
87828 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
87829 struct cflayer *client_layer);
87830-};
87831+} __no_const;
87832
87833 /* Link Setup Parameters for CAIF-Links. */
87834 struct cfctrl_link_param {
87835@@ -101,8 +101,8 @@ struct cfctrl_request_info {
87836 struct cfctrl {
87837 struct cfsrvl serv;
87838 struct cfctrl_rsp res;
87839- atomic_t req_seq_no;
87840- atomic_t rsp_seq_no;
87841+ atomic_unchecked_t req_seq_no;
87842+ atomic_unchecked_t rsp_seq_no;
87843 struct list_head list;
87844 /* Protects from simultaneous access to first_req list */
87845 spinlock_t info_list_lock;
87846diff --git a/include/net/flow.h b/include/net/flow.h
87847index 8109a15..504466d 100644
87848--- a/include/net/flow.h
87849+++ b/include/net/flow.h
87850@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
87851
87852 void flow_cache_flush(struct net *net);
87853 void flow_cache_flush_deferred(struct net *net);
87854-extern atomic_t flow_cache_genid;
87855+extern atomic_unchecked_t flow_cache_genid;
87856
87857 #endif
87858diff --git a/include/net/genetlink.h b/include/net/genetlink.h
87859index 0574abd..0f16881 100644
87860--- a/include/net/genetlink.h
87861+++ b/include/net/genetlink.h
87862@@ -130,7 +130,7 @@ struct genl_ops {
87863 u8 cmd;
87864 u8 internal_flags;
87865 u8 flags;
87866-};
87867+} __do_const;
87868
87869 int __genl_register_family(struct genl_family *family);
87870
87871diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
87872index 0f712c0..cd762c4 100644
87873--- a/include/net/gro_cells.h
87874+++ b/include/net/gro_cells.h
87875@@ -27,7 +27,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
87876 cell = this_cpu_ptr(gcells->cells);
87877
87878 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
87879- atomic_long_inc(&dev->rx_dropped);
87880+ atomic_long_inc_unchecked(&dev->rx_dropped);
87881 kfree_skb(skb);
87882 return;
87883 }
87884diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
87885index 5976bde..3a81660 100644
87886--- a/include/net/inet_connection_sock.h
87887+++ b/include/net/inet_connection_sock.h
87888@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
87889 int (*bind_conflict)(const struct sock *sk,
87890 const struct inet_bind_bucket *tb, bool relax);
87891 void (*mtu_reduced)(struct sock *sk);
87892-};
87893+} __do_const;
87894
87895 /** inet_connection_sock - INET connection oriented sock
87896 *
87897diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
87898index 80479ab..0c3f647 100644
87899--- a/include/net/inetpeer.h
87900+++ b/include/net/inetpeer.h
87901@@ -47,7 +47,7 @@ struct inet_peer {
87902 */
87903 union {
87904 struct {
87905- atomic_t rid; /* Frag reception counter */
87906+ atomic_unchecked_t rid; /* Frag reception counter */
87907 };
87908 struct rcu_head rcu;
87909 struct inet_peer *gc_next;
87910diff --git a/include/net/ip.h b/include/net/ip.h
87911index 6cc1eaf..14059b0 100644
87912--- a/include/net/ip.h
87913+++ b/include/net/ip.h
87914@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
87915 }
87916 }
87917
87918-u32 ip_idents_reserve(u32 hash, int segs);
87919+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
87920 void __ip_select_ident(struct iphdr *iph, int segs);
87921
87922 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
87923diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
87924index 5bd120e4..03fb812 100644
87925--- a/include/net/ip_fib.h
87926+++ b/include/net/ip_fib.h
87927@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
87928
87929 #define FIB_RES_SADDR(net, res) \
87930 ((FIB_RES_NH(res).nh_saddr_genid == \
87931- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
87932+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
87933 FIB_RES_NH(res).nh_saddr : \
87934 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
87935 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
87936diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
87937index 615b20b..fd4cbd8 100644
87938--- a/include/net/ip_vs.h
87939+++ b/include/net/ip_vs.h
87940@@ -534,7 +534,7 @@ struct ip_vs_conn {
87941 struct ip_vs_conn *control; /* Master control connection */
87942 atomic_t n_control; /* Number of controlled ones */
87943 struct ip_vs_dest *dest; /* real server */
87944- atomic_t in_pkts; /* incoming packet counter */
87945+ atomic_unchecked_t in_pkts; /* incoming packet counter */
87946
87947 /* Packet transmitter for different forwarding methods. If it
87948 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
87949@@ -682,7 +682,7 @@ struct ip_vs_dest {
87950 __be16 port; /* port number of the server */
87951 union nf_inet_addr addr; /* IP address of the server */
87952 volatile unsigned int flags; /* dest status flags */
87953- atomic_t conn_flags; /* flags to copy to conn */
87954+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
87955 atomic_t weight; /* server weight */
87956
87957 atomic_t refcnt; /* reference counter */
87958@@ -928,11 +928,11 @@ struct netns_ipvs {
87959 /* ip_vs_lblc */
87960 int sysctl_lblc_expiration;
87961 struct ctl_table_header *lblc_ctl_header;
87962- struct ctl_table *lblc_ctl_table;
87963+ ctl_table_no_const *lblc_ctl_table;
87964 /* ip_vs_lblcr */
87965 int sysctl_lblcr_expiration;
87966 struct ctl_table_header *lblcr_ctl_header;
87967- struct ctl_table *lblcr_ctl_table;
87968+ ctl_table_no_const *lblcr_ctl_table;
87969 /* ip_vs_est */
87970 struct list_head est_list; /* estimator list */
87971 spinlock_t est_lock;
87972diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
87973index 8d4f588..2e37ad2 100644
87974--- a/include/net/irda/ircomm_tty.h
87975+++ b/include/net/irda/ircomm_tty.h
87976@@ -33,6 +33,7 @@
87977 #include <linux/termios.h>
87978 #include <linux/timer.h>
87979 #include <linux/tty.h> /* struct tty_struct */
87980+#include <asm/local.h>
87981
87982 #include <net/irda/irias_object.h>
87983 #include <net/irda/ircomm_core.h>
87984diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
87985index 714cc9a..ea05f3e 100644
87986--- a/include/net/iucv/af_iucv.h
87987+++ b/include/net/iucv/af_iucv.h
87988@@ -149,7 +149,7 @@ struct iucv_skb_cb {
87989 struct iucv_sock_list {
87990 struct hlist_head head;
87991 rwlock_t lock;
87992- atomic_t autobind_name;
87993+ atomic_unchecked_t autobind_name;
87994 };
87995
87996 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
87997diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
87998index f3be818..bf46196 100644
87999--- a/include/net/llc_c_ac.h
88000+++ b/include/net/llc_c_ac.h
88001@@ -87,7 +87,7 @@
88002 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
88003 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
88004
88005-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88006+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88007
88008 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
88009 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
88010diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
88011index 3948cf1..83b28c4 100644
88012--- a/include/net/llc_c_ev.h
88013+++ b/include/net/llc_c_ev.h
88014@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
88015 return (struct llc_conn_state_ev *)skb->cb;
88016 }
88017
88018-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88019-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88020+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88021+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88022
88023 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
88024 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
88025diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
88026index 48f3f89..0e92c50 100644
88027--- a/include/net/llc_c_st.h
88028+++ b/include/net/llc_c_st.h
88029@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
88030 u8 next_state;
88031 const llc_conn_ev_qfyr_t *ev_qualifiers;
88032 const llc_conn_action_t *ev_actions;
88033-};
88034+} __do_const;
88035
88036 struct llc_conn_state {
88037 u8 current_state;
88038diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
88039index a61b98c..aade1eb 100644
88040--- a/include/net/llc_s_ac.h
88041+++ b/include/net/llc_s_ac.h
88042@@ -23,7 +23,7 @@
88043 #define SAP_ACT_TEST_IND 9
88044
88045 /* All action functions must look like this */
88046-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88047+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88048
88049 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
88050 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
88051diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
88052index c4359e2..76dbc4a 100644
88053--- a/include/net/llc_s_st.h
88054+++ b/include/net/llc_s_st.h
88055@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
88056 llc_sap_ev_t ev;
88057 u8 next_state;
88058 const llc_sap_action_t *ev_actions;
88059-};
88060+} __do_const;
88061
88062 struct llc_sap_state {
88063 u8 curr_state;
88064diff --git a/include/net/mac80211.h b/include/net/mac80211.h
88065index d52914b..2b13cec 100644
88066--- a/include/net/mac80211.h
88067+++ b/include/net/mac80211.h
88068@@ -4915,7 +4915,7 @@ struct rate_control_ops {
88069 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
88070
88071 u32 (*get_expected_throughput)(void *priv_sta);
88072-};
88073+} __do_const;
88074
88075 static inline int rate_supported(struct ieee80211_sta *sta,
88076 enum ieee80211_band band,
88077diff --git a/include/net/neighbour.h b/include/net/neighbour.h
88078index 76f7084..8f36e39 100644
88079--- a/include/net/neighbour.h
88080+++ b/include/net/neighbour.h
88081@@ -163,7 +163,7 @@ struct neigh_ops {
88082 void (*error_report)(struct neighbour *, struct sk_buff *);
88083 int (*output)(struct neighbour *, struct sk_buff *);
88084 int (*connected_output)(struct neighbour *, struct sk_buff *);
88085-};
88086+} __do_const;
88087
88088 struct pneigh_entry {
88089 struct pneigh_entry *next;
88090@@ -217,7 +217,7 @@ struct neigh_table {
88091 struct neigh_statistics __percpu *stats;
88092 struct neigh_hash_table __rcu *nht;
88093 struct pneigh_entry **phash_buckets;
88094-};
88095+} __randomize_layout;
88096
88097 enum {
88098 NEIGH_ARP_TABLE = 0,
88099diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
88100index 36faf49..6927638 100644
88101--- a/include/net/net_namespace.h
88102+++ b/include/net/net_namespace.h
88103@@ -131,8 +131,8 @@ struct net {
88104 struct netns_ipvs *ipvs;
88105 #endif
88106 struct sock *diag_nlsk;
88107- atomic_t fnhe_genid;
88108-};
88109+ atomic_unchecked_t fnhe_genid;
88110+} __randomize_layout;
88111
88112 #include <linux/seq_file_net.h>
88113
88114@@ -288,7 +288,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
88115 #define __net_init __init
88116 #define __net_exit __exit_refok
88117 #define __net_initdata __initdata
88118+#ifdef CONSTIFY_PLUGIN
88119 #define __net_initconst __initconst
88120+#else
88121+#define __net_initconst __initdata
88122+#endif
88123 #endif
88124
88125 int peernet2id(struct net *net, struct net *peer);
88126@@ -301,7 +305,7 @@ struct pernet_operations {
88127 void (*exit_batch)(struct list_head *net_exit_list);
88128 int *id;
88129 size_t size;
88130-};
88131+} __do_const;
88132
88133 /*
88134 * Use these carefully. If you implement a network device and it
88135@@ -349,12 +353,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
88136
88137 static inline int rt_genid_ipv4(struct net *net)
88138 {
88139- return atomic_read(&net->ipv4.rt_genid);
88140+ return atomic_read_unchecked(&net->ipv4.rt_genid);
88141 }
88142
88143 static inline void rt_genid_bump_ipv4(struct net *net)
88144 {
88145- atomic_inc(&net->ipv4.rt_genid);
88146+ atomic_inc_unchecked(&net->ipv4.rt_genid);
88147 }
88148
88149 extern void (*__fib6_flush_trees)(struct net *net);
88150@@ -381,12 +385,12 @@ static inline void rt_genid_bump_all(struct net *net)
88151
88152 static inline int fnhe_genid(struct net *net)
88153 {
88154- return atomic_read(&net->fnhe_genid);
88155+ return atomic_read_unchecked(&net->fnhe_genid);
88156 }
88157
88158 static inline void fnhe_genid_bump(struct net *net)
88159 {
88160- atomic_inc(&net->fnhe_genid);
88161+ atomic_inc_unchecked(&net->fnhe_genid);
88162 }
88163
88164 #endif /* __NET_NET_NAMESPACE_H */
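
The atomic_t to atomic_unchecked_t conversions here (and in the netns, sock, scsi and udlfb hunks below) follow from PaX's REFCOUNT hardening: ordinary atomic counters trap when they overflow, which would be a false positive for generation counters such as rt_genid and fnhe_genid that are meant to wrap. A userspace model of the split, using C11 atomics (the type and helper names mirror the patch but are reimplemented here for illustration):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { atomic_int counter; } atomic_checked_t;
typedef struct { atomic_int counter; } atomic_unchecked_t;

static void atomic_inc_checked(atomic_checked_t *v)
{
    if (atomic_fetch_add(&v->counter, 1) == INT_MAX)
        abort();                       /* refcount overflow: trap */
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    atomic_fetch_add(&v->counter, 1);  /* generation counter: wrapping ok */
}

int main(void)
{
    atomic_checked_t   ref   = { 0 };
    atomic_unchecked_t genid = { 0 };

    atomic_inc_checked(&ref);          /* object lifetime: must not wrap */
    atomic_inc_unchecked(&genid);      /* cheap "something changed" bump */
    printf("genid=%d\n", atomic_load(&genid.counter));
    return 0;
}
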
88165diff --git a/include/net/netlink.h b/include/net/netlink.h
88166index e010ee8..405b9f4 100644
88167--- a/include/net/netlink.h
88168+++ b/include/net/netlink.h
88169@@ -518,7 +518,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
88170 {
88171 if (mark) {
88172 WARN_ON((unsigned char *) mark < skb->data);
88173- skb_trim(skb, (unsigned char *) mark - skb->data);
88174+ skb_trim(skb, (const unsigned char *) mark - skb->data);
88175 }
88176 }
88177
88178diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
88179index 29d6a94..235d3d84 100644
88180--- a/include/net/netns/conntrack.h
88181+++ b/include/net/netns/conntrack.h
88182@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
88183 struct nf_proto_net {
88184 #ifdef CONFIG_SYSCTL
88185 struct ctl_table_header *ctl_table_header;
88186- struct ctl_table *ctl_table;
88187+ ctl_table_no_const *ctl_table;
88188 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
88189 struct ctl_table_header *ctl_compat_header;
88190- struct ctl_table *ctl_compat_table;
88191+ ctl_table_no_const *ctl_compat_table;
88192 #endif
88193 #endif
88194 unsigned int users;
88195@@ -60,7 +60,7 @@ struct nf_ip_net {
88196 struct nf_icmp_net icmpv6;
88197 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
88198 struct ctl_table_header *ctl_table_header;
88199- struct ctl_table *ctl_table;
88200+ ctl_table_no_const *ctl_table;
88201 #endif
88202 };
88203
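
ctl_table_no_const is the escape hatch that goes with the constify plugin: struct ctl_table becomes const everywhere, but per-netns code like this must build its sysctl tables at runtime by cloning a template and patching the data pointers. A sketch of that clone-and-fix-up pattern, with illustrative names and a reduced ctl_table:

#include <stdlib.h>
#include <string.h>

struct ctl_table {
    const char *procname;
    void *data;
};

typedef struct ctl_table ctl_table_no_const;   /* left writable */

/* Template in read-only memory... */
static const struct ctl_table template[] = {
    { .procname = "timeout", .data = 0 },
};

/* ...duplicated into a writable per-namespace copy at runtime: */
static ctl_table_no_const *clone_table(void *ns_data)
{
    ctl_table_no_const *t = malloc(sizeof(template));
    if (!t)
        return 0;
    memcpy(t, template, sizeof(template));
    t[0].data = ns_data;               /* per-netns pointer fix-up */
    return t;
}

int main(void)
{
    int value = 0;
    ctl_table_no_const *t = clone_table(&value);
    free(t);
    return 0;
}
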
88204diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
88205index dbe2254..ed0c151 100644
88206--- a/include/net/netns/ipv4.h
88207+++ b/include/net/netns/ipv4.h
88208@@ -87,7 +87,7 @@ struct netns_ipv4 {
88209
88210 struct ping_group_range ping_group_range;
88211
88212- atomic_t dev_addr_genid;
88213+ atomic_unchecked_t dev_addr_genid;
88214
88215 #ifdef CONFIG_SYSCTL
88216 unsigned long *sysctl_local_reserved_ports;
88217@@ -101,6 +101,6 @@ struct netns_ipv4 {
88218 struct fib_rules_ops *mr_rules_ops;
88219 #endif
88220 #endif
88221- atomic_t rt_genid;
88222+ atomic_unchecked_t rt_genid;
88223 };
88224 #endif
88225diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
88226index 69ae41f..4f94868 100644
88227--- a/include/net/netns/ipv6.h
88228+++ b/include/net/netns/ipv6.h
88229@@ -75,8 +75,8 @@ struct netns_ipv6 {
88230 struct fib_rules_ops *mr6_rules_ops;
88231 #endif
88232 #endif
88233- atomic_t dev_addr_genid;
88234- atomic_t fib6_sernum;
88235+ atomic_unchecked_t dev_addr_genid;
88236+ atomic_unchecked_t fib6_sernum;
88237 };
88238
88239 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
88240diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
88241index 730d82a..045f2c4 100644
88242--- a/include/net/netns/xfrm.h
88243+++ b/include/net/netns/xfrm.h
88244@@ -78,7 +78,7 @@ struct netns_xfrm {
88245
88246 /* flow cache part */
88247 struct flow_cache flow_cache_global;
88248- atomic_t flow_cache_genid;
88249+ atomic_unchecked_t flow_cache_genid;
88250 struct list_head flow_cache_gc_list;
88251 spinlock_t flow_cache_gc_lock;
88252 struct work_struct flow_cache_gc_work;
88253diff --git a/include/net/ping.h b/include/net/ping.h
88254index cc16d41..664f40b 100644
88255--- a/include/net/ping.h
88256+++ b/include/net/ping.h
88257@@ -54,7 +54,7 @@ struct ping_iter_state {
88258
88259 extern struct proto ping_prot;
88260 #if IS_ENABLED(CONFIG_IPV6)
88261-extern struct pingv6_ops pingv6_ops;
88262+extern struct pingv6_ops *pingv6_ops;
88263 #endif
88264
88265 struct pingfakehdr {
88266diff --git a/include/net/protocol.h b/include/net/protocol.h
88267index d6fcc1f..ca277058 100644
88268--- a/include/net/protocol.h
88269+++ b/include/net/protocol.h
88270@@ -49,7 +49,7 @@ struct net_protocol {
88271 * socket lookup?
88272 */
88273 icmp_strict_tag_validation:1;
88274-};
88275+} __do_const;
88276
88277 #if IS_ENABLED(CONFIG_IPV6)
88278 struct inet6_protocol {
88279@@ -62,7 +62,7 @@ struct inet6_protocol {
88280 u8 type, u8 code, int offset,
88281 __be32 info);
88282 unsigned int flags; /* INET6_PROTO_xxx */
88283-};
88284+} __do_const;
88285
88286 #define INET6_PROTO_NOPOLICY 0x1
88287 #define INET6_PROTO_FINAL 0x2
88288diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
88289index 6c6d539..af70817 100644
88290--- a/include/net/rtnetlink.h
88291+++ b/include/net/rtnetlink.h
88292@@ -95,7 +95,7 @@ struct rtnl_link_ops {
88293 const struct net_device *dev,
88294 const struct net_device *slave_dev);
88295 struct net *(*get_link_net)(const struct net_device *dev);
88296-};
88297+} __do_const;
88298
88299 int __rtnl_link_register(struct rtnl_link_ops *ops);
88300 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
88301diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
88302index 4a5b9a3..ca27d73 100644
88303--- a/include/net/sctp/checksum.h
88304+++ b/include/net/sctp/checksum.h
88305@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
88306 unsigned int offset)
88307 {
88308 struct sctphdr *sh = sctp_hdr(skb);
88309- __le32 ret, old = sh->checksum;
88310- const struct skb_checksum_ops ops = {
88311+ __le32 ret, old = sh->checksum;
88312+ static const struct skb_checksum_ops ops = {
88313 .update = sctp_csum_update,
88314 .combine = sctp_csum_combine,
88315 };
88316diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
88317index 487ef34..d457f98 100644
88318--- a/include/net/sctp/sm.h
88319+++ b/include/net/sctp/sm.h
88320@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
88321 typedef struct {
88322 sctp_state_fn_t *fn;
88323 const char *name;
88324-} sctp_sm_table_entry_t;
88325+} __do_const sctp_sm_table_entry_t;
88326
88327 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
88328 * currently in use.
88329@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
88330 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
88331
88332 /* Extern declarations for major data structures. */
88333-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88334+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88335
88336
88337 /* Get the size of a DATA chunk payload. */
88338diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
88339index 2bb2fcf..d17c291 100644
88340--- a/include/net/sctp/structs.h
88341+++ b/include/net/sctp/structs.h
88342@@ -509,7 +509,7 @@ struct sctp_pf {
88343 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
88344 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
88345 struct sctp_af *af;
88346-};
88347+} __do_const;
88348
88349
88350 /* Structure to track chunk fragments that have been acked, but peer
88351diff --git a/include/net/sock.h b/include/net/sock.h
88352index e4079c2..79c5d3a 100644
88353--- a/include/net/sock.h
88354+++ b/include/net/sock.h
88355@@ -362,7 +362,7 @@ struct sock {
88356 unsigned int sk_napi_id;
88357 unsigned int sk_ll_usec;
88358 #endif
88359- atomic_t sk_drops;
88360+ atomic_unchecked_t sk_drops;
88361 int sk_rcvbuf;
88362
88363 struct sk_filter __rcu *sk_filter;
88364@@ -1039,7 +1039,7 @@ struct proto {
88365 void (*destroy_cgroup)(struct mem_cgroup *memcg);
88366 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
88367 #endif
88368-};
88369+} __randomize_layout;
88370
88371 /*
88372 * Bits in struct cg_proto.flags
88373@@ -1212,7 +1212,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
88374 page_counter_uncharge(&prot->memory_allocated, amt);
88375 }
88376
88377-static inline long
88378+static inline long __intentional_overflow(-1)
88379 sk_memory_allocated(const struct sock *sk)
88380 {
88381 struct proto *prot = sk->sk_prot;
88382@@ -1778,7 +1778,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
88383 }
88384
88385 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
88386- struct iov_iter *from, char *to,
88387+ struct iov_iter *from, unsigned char *to,
88388 int copy, int offset)
88389 {
88390 if (skb->ip_summed == CHECKSUM_NONE) {
88391@@ -2025,7 +2025,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
88392 }
88393 }
88394
88395-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88396+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88397
88398 /**
88399 * sk_page_frag - return an appropriate page_frag
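
__randomize_layout on struct proto (and on struct net and neigh_table earlier) feeds the RANDSTRUCT plugin, which shuffles member order at build time from a per-build seed so an attacker cannot rely on fixed field offsets. The practical rule it enforces is that such structures may only be accessed by member name; a small illustration, with the attribute modeled as a no-op:

#include <stddef.h>
#include <stdio.h>

#define __randomize_layout   /* expands to the plugin attribute in-tree */

struct proto_like {
    void *owner;
    int   max_header;
    long  sysctl_mem;
} __randomize_layout;

int main(void)
{
    /* Correct: let the compiler resolve the (possibly shuffled) offset. */
    printf("max_header lives at offset %zu\n",
           offsetof(struct proto_like, max_header));
    /* Wrong under RANDSTRUCT: *(int *)((char *)p + 8), since a literal
     * offset is only valid for one particular build seed. */
    return 0;
}
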
88400diff --git a/include/net/tcp.h b/include/net/tcp.h
88401index 8d6b983..5813205 100644
88402--- a/include/net/tcp.h
88403+++ b/include/net/tcp.h
88404@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
88405 void tcp_xmit_retransmit_queue(struct sock *);
88406 void tcp_simple_retransmit(struct sock *);
88407 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
88408-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88409+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88410
88411 void tcp_send_probe0(struct sock *);
88412 void tcp_send_partial(struct sock *);
88413@@ -694,8 +694,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
88414 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
88415 */
88416 struct tcp_skb_cb {
88417- __u32 seq; /* Starting sequence number */
88418- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
88419+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
88420+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
88421 union {
88422 /* Note : tcp_tw_isn is used in input path only
88423 * (isn chosen by tcp_timewait_state_process())
88424@@ -720,7 +720,7 @@ struct tcp_skb_cb {
88425
88426 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
88427 /* 1 byte hole */
88428- __u32 ack_seq; /* Sequence number ACK'd */
88429+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
88430 union {
88431 struct inet_skb_parm h4;
88432 #if IS_ENABLED(CONFIG_IPV6)
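
The __intentional_overflow annotations exempt fields and functions from the size_overflow plugin, which otherwise instruments integer arithmetic and traps on wraparound. TCP sequence numbers are modulo-2^32 by design, so seq, end_seq and ack_seq must be allowed to wrap. A sketch of why, in the style of the kernel's before()/after() helpers:

#include <stdint.h>
#include <stdio.h>

static int seq_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;   /* intentional unsigned wraparound */
}

int main(void)
{
    uint32_t seq = 0xfffffff0u;    /* near the 32-bit wrap point */
    uint32_t end = seq + 0x20;     /* wraps past zero to 0x10 */

    printf("end=0x%x, seq before end: %d\n",
           (unsigned)end, seq_before(seq, end));
    return 0;
}

A naive overflow trap on "seq + 0x20" or "a - b" would fire constantly here, which is exactly what the annotation prevents.
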
88433diff --git a/include/net/xfrm.h b/include/net/xfrm.h
88434index dc4865e..152ee4c 100644
88435--- a/include/net/xfrm.h
88436+++ b/include/net/xfrm.h
88437@@ -285,7 +285,6 @@ struct xfrm_dst;
88438 struct xfrm_policy_afinfo {
88439 unsigned short family;
88440 struct dst_ops *dst_ops;
88441- void (*garbage_collect)(struct net *net);
88442 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
88443 const xfrm_address_t *saddr,
88444 const xfrm_address_t *daddr);
88445@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
88446 struct net_device *dev,
88447 const struct flowi *fl);
88448 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
88449-};
88450+} __do_const;
88451
88452 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
88453 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
88454@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
88455 int (*transport_finish)(struct sk_buff *skb,
88456 int async);
88457 void (*local_error)(struct sk_buff *skb, u32 mtu);
88458-};
88459+} __do_const;
88460
88461 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
88462 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
88463@@ -437,7 +436,7 @@ struct xfrm_mode {
88464 struct module *owner;
88465 unsigned int encap;
88466 int flags;
88467-};
88468+} __do_const;
88469
88470 /* Flags for xfrm_mode. */
88471 enum {
88472@@ -534,7 +533,7 @@ struct xfrm_policy {
88473 struct timer_list timer;
88474
88475 struct flow_cache_object flo;
88476- atomic_t genid;
88477+ atomic_unchecked_t genid;
88478 u32 priority;
88479 u32 index;
88480 struct xfrm_mark mark;
88481@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
88482 }
88483
88484 void xfrm_garbage_collect(struct net *net);
88485+void xfrm_garbage_collect_deferred(struct net *net);
88486
88487 #else
88488
88489@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
88490 static inline void xfrm_garbage_collect(struct net *net)
88491 {
88492 }
88493+static inline void xfrm_garbage_collect_deferred(struct net *net)
88494+{
88495+}
88496 #endif
88497
88498 static __inline__
88499diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
88500index 1017e0b..227aa4d 100644
88501--- a/include/rdma/iw_cm.h
88502+++ b/include/rdma/iw_cm.h
88503@@ -122,7 +122,7 @@ struct iw_cm_verbs {
88504 int backlog);
88505
88506 int (*destroy_listen)(struct iw_cm_id *cm_id);
88507-};
88508+} __no_const;
88509
88510 /**
88511 * iw_create_cm_id - Create an IW CM identifier.
88512diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
88513index 93d14da..734b3d8 100644
88514--- a/include/scsi/libfc.h
88515+++ b/include/scsi/libfc.h
88516@@ -771,6 +771,7 @@ struct libfc_function_template {
88517 */
88518 void (*disc_stop_final) (struct fc_lport *);
88519 };
88520+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
88521
88522 /**
88523 * struct fc_disc - Discovery context
88524@@ -875,7 +876,7 @@ struct fc_lport {
88525 struct fc_vport *vport;
88526
88527 /* Operational Information */
88528- struct libfc_function_template tt;
88529+ libfc_function_template_no_const tt;
88530 u8 link_up;
88531 u8 qfull;
88532 enum fc_lport_state state;
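
Here the patch needs the opposite of constify: struct libfc_function_template would be made read-only by the plugin, but struct fc_lport embeds a copy (tt) that drivers fill in at runtime, so a __no_const typedef carves out that one writable instance. A sketch of the pattern with illustrative names, the attribute again modeled as a no-op:

#define __no_const   /* plugin attribute; no-op in this sketch */

struct ops_template {
    int (*login)(void *port);
};
typedef struct ops_template __no_const ops_template_no_const;

struct lport {
    ops_template_no_const tt;   /* writable embedded copy */
    int link_up;
};

static int default_login(void *port) { (void)port; return 0; }

int main(void)
{
    struct lport lp = { { 0 }, 0 };
    lp.tt.login = default_login;   /* legal: this instance stays writable */
    return lp.tt.login(&lp);
}
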
88533diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
88534index a4c9336..d6f8f34 100644
88535--- a/include/scsi/scsi_device.h
88536+++ b/include/scsi/scsi_device.h
88537@@ -185,9 +185,9 @@ struct scsi_device {
88538 unsigned int max_device_blocked; /* what device_blocked counts down from */
88539 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
88540
88541- atomic_t iorequest_cnt;
88542- atomic_t iodone_cnt;
88543- atomic_t ioerr_cnt;
88544+ atomic_unchecked_t iorequest_cnt;
88545+ atomic_unchecked_t iodone_cnt;
88546+ atomic_unchecked_t ioerr_cnt;
88547
88548 struct device sdev_gendev,
88549 sdev_dev;
88550diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
88551index 007a0bc..7188db8 100644
88552--- a/include/scsi/scsi_transport_fc.h
88553+++ b/include/scsi/scsi_transport_fc.h
88554@@ -756,7 +756,8 @@ struct fc_function_template {
88555 unsigned long show_host_system_hostname:1;
88556
88557 unsigned long disable_target_scan:1;
88558-};
88559+} __do_const;
88560+typedef struct fc_function_template __no_const fc_function_template_no_const;
88561
88562
88563 /**
88564diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
88565index f48089d..73abe48 100644
88566--- a/include/sound/compress_driver.h
88567+++ b/include/sound/compress_driver.h
88568@@ -130,7 +130,7 @@ struct snd_compr_ops {
88569 struct snd_compr_caps *caps);
88570 int (*get_codec_caps) (struct snd_compr_stream *stream,
88571 struct snd_compr_codec_caps *codec);
88572-};
88573+} __no_const;
88574
88575 /**
88576 * struct snd_compr: Compressed device
88577diff --git a/include/sound/soc.h b/include/sound/soc.h
88578index 0d1ade1..34e77d3 100644
88579--- a/include/sound/soc.h
88580+++ b/include/sound/soc.h
88581@@ -856,7 +856,7 @@ struct snd_soc_codec_driver {
88582 enum snd_soc_dapm_type, int);
88583
88584 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
88585-};
88586+} __do_const;
88587
88588 /* SoC platform interface */
88589 struct snd_soc_platform_driver {
88590@@ -883,7 +883,7 @@ struct snd_soc_platform_driver {
88591 const struct snd_compr_ops *compr_ops;
88592
88593 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
88594-};
88595+} __do_const;
88596
88597 struct snd_soc_dai_link_component {
88598 const char *name;
88599diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
88600index 985ca4c..b55b54a 100644
88601--- a/include/target/target_core_base.h
88602+++ b/include/target/target_core_base.h
88603@@ -767,7 +767,7 @@ struct se_device {
88604 atomic_long_t write_bytes;
88605 /* Active commands on this virtual SE device */
88606 atomic_t simple_cmds;
88607- atomic_t dev_ordered_id;
88608+ atomic_unchecked_t dev_ordered_id;
88609 atomic_t dev_ordered_sync;
88610 atomic_t dev_qf_count;
88611 int export_count;
88612diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
88613new file mode 100644
88614index 0000000..fb634b7
88615--- /dev/null
88616+++ b/include/trace/events/fs.h
88617@@ -0,0 +1,53 @@
88618+#undef TRACE_SYSTEM
88619+#define TRACE_SYSTEM fs
88620+
88621+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
88622+#define _TRACE_FS_H
88623+
88624+#include <linux/fs.h>
88625+#include <linux/tracepoint.h>
88626+
88627+TRACE_EVENT(do_sys_open,
88628+
88629+ TP_PROTO(const char *filename, int flags, int mode),
88630+
88631+ TP_ARGS(filename, flags, mode),
88632+
88633+ TP_STRUCT__entry(
88634+ __string( filename, filename )
88635+ __field( int, flags )
88636+ __field( int, mode )
88637+ ),
88638+
88639+ TP_fast_assign(
88640+ __assign_str(filename, filename);
88641+ __entry->flags = flags;
88642+ __entry->mode = mode;
88643+ ),
88644+
88645+ TP_printk("\"%s\" %x %o",
88646+ __get_str(filename), __entry->flags, __entry->mode)
88647+);
88648+
88649+TRACE_EVENT(open_exec,
88650+
88651+ TP_PROTO(const char *filename),
88652+
88653+ TP_ARGS(filename),
88654+
88655+ TP_STRUCT__entry(
88656+ __string( filename, filename )
88657+ ),
88658+
88659+ TP_fast_assign(
88660+ __assign_str(filename, filename);
88661+ ),
88662+
88663+ TP_printk("\"%s\"",
88664+ __get_str(filename))
88665+);
88666+
88667+#endif /* _TRACE_FS_H */
88668+
88669+/* This part must be outside protection */
88670+#include <trace/define_trace.h>
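
The new header declares two tracepoints in the standard TRACE_EVENT() style. The header by itself generates no code until some .c file defines CREATE_TRACE_POINTS and calls the generated trace_* functions; it would be paired with fire sites elsewhere. A sketch of a typical fire site (the function name below is illustrative, not from this hunk):

/* In exactly one .c file, before including the header: */
#define CREATE_TRACE_POINTS
#include <trace/events/fs.h>

static long example_do_sys_open(const char *filename, int flags, int mode)
{
    trace_do_sys_open(filename, flags, mode);   /* emits "\"%s\" %x %o" */
    /* ... the actual open work would go here ... */
    return 0;
}

Once wired up, the events appear under /sys/kernel/debug/tracing/events/fs/ and can be enabled and read per-event through the usual tracefs files.
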
88671diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
88672index 3608beb..df39d8a 100644
88673--- a/include/trace/events/irq.h
88674+++ b/include/trace/events/irq.h
88675@@ -36,7 +36,7 @@ struct softirq_action;
88676 */
88677 TRACE_EVENT(irq_handler_entry,
88678
88679- TP_PROTO(int irq, struct irqaction *action),
88680+ TP_PROTO(int irq, const struct irqaction *action),
88681
88682 TP_ARGS(irq, action),
88683
88684@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
88685 */
88686 TRACE_EVENT(irq_handler_exit,
88687
88688- TP_PROTO(int irq, struct irqaction *action, int ret),
88689+ TP_PROTO(int irq, const struct irqaction *action, int ret),
88690
88691 TP_ARGS(irq, action, ret),
88692
88693diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
88694index 6eed16b..3e05750 100644
88695--- a/include/uapi/drm/i915_drm.h
88696+++ b/include/uapi/drm/i915_drm.h
88697@@ -347,6 +347,7 @@ typedef struct drm_i915_irq_wait {
88698 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
88699 #define I915_PARAM_MMAP_VERSION 30
88700 #define I915_PARAM_HAS_BSD2 31
88701+#define I915_PARAM_HAS_LEGACY_CONTEXT 35
88702
88703 typedef struct drm_i915_getparam {
88704 int param;
88705diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
88706index 7caf44c..23c6f27 100644
88707--- a/include/uapi/linux/a.out.h
88708+++ b/include/uapi/linux/a.out.h
88709@@ -39,6 +39,14 @@ enum machine_type {
88710 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
88711 };
88712
88713+/* Constants for the N_FLAGS field */
88714+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88715+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
88716+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
88717+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
88718+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88719+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88720+
88721 #if !defined (N_MAGIC)
88722 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
88723 #endif
88724diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
88725index 22b6ad3..aeba37e 100644
88726--- a/include/uapi/linux/bcache.h
88727+++ b/include/uapi/linux/bcache.h
88728@@ -5,6 +5,7 @@
88729 * Bcache on disk data structures
88730 */
88731
88732+#include <linux/compiler.h>
88733 #include <asm/types.h>
88734
88735 #define BITMASK(name, type, field, offset, size) \
88736@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
88737 /* Btree keys - all units are in sectors */
88738
88739 struct bkey {
88740- __u64 high;
88741- __u64 low;
88742+ __u64 high __intentional_overflow(-1);
88743+ __u64 low __intentional_overflow(-1);
88744 __u64 ptr[];
88745 };
88746
88747diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
88748index d876736..ccce5c0 100644
88749--- a/include/uapi/linux/byteorder/little_endian.h
88750+++ b/include/uapi/linux/byteorder/little_endian.h
88751@@ -42,51 +42,51 @@
88752
88753 static inline __le64 __cpu_to_le64p(const __u64 *p)
88754 {
88755- return (__force __le64)*p;
88756+ return (__force const __le64)*p;
88757 }
88758-static inline __u64 __le64_to_cpup(const __le64 *p)
88759+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
88760 {
88761- return (__force __u64)*p;
88762+ return (__force const __u64)*p;
88763 }
88764 static inline __le32 __cpu_to_le32p(const __u32 *p)
88765 {
88766- return (__force __le32)*p;
88767+ return (__force const __le32)*p;
88768 }
88769 static inline __u32 __le32_to_cpup(const __le32 *p)
88770 {
88771- return (__force __u32)*p;
88772+ return (__force const __u32)*p;
88773 }
88774 static inline __le16 __cpu_to_le16p(const __u16 *p)
88775 {
88776- return (__force __le16)*p;
88777+ return (__force const __le16)*p;
88778 }
88779 static inline __u16 __le16_to_cpup(const __le16 *p)
88780 {
88781- return (__force __u16)*p;
88782+ return (__force const __u16)*p;
88783 }
88784 static inline __be64 __cpu_to_be64p(const __u64 *p)
88785 {
88786- return (__force __be64)__swab64p(p);
88787+ return (__force const __be64)__swab64p(p);
88788 }
88789 static inline __u64 __be64_to_cpup(const __be64 *p)
88790 {
88791- return __swab64p((__u64 *)p);
88792+ return __swab64p((const __u64 *)p);
88793 }
88794 static inline __be32 __cpu_to_be32p(const __u32 *p)
88795 {
88796- return (__force __be32)__swab32p(p);
88797+ return (__force const __be32)__swab32p(p);
88798 }
88799-static inline __u32 __be32_to_cpup(const __be32 *p)
88800+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
88801 {
88802- return __swab32p((__u32 *)p);
88803+ return __swab32p((const __u32 *)p);
88804 }
88805 static inline __be16 __cpu_to_be16p(const __u16 *p)
88806 {
88807- return (__force __be16)__swab16p(p);
88808+ return (__force const __be16)__swab16p(p);
88809 }
88810 static inline __u16 __be16_to_cpup(const __be16 *p)
88811 {
88812- return __swab16p((__u16 *)p);
88813+ return __swab16p((const __u16 *)p);
88814 }
88815 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
88816 #define __le64_to_cpus(x) do { (void)(x); } while (0)
88817diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
88818index 71e1d0e..6cc9caf 100644
88819--- a/include/uapi/linux/elf.h
88820+++ b/include/uapi/linux/elf.h
88821@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
88822 #define PT_GNU_EH_FRAME 0x6474e550
88823
88824 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
88825+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
88826+
88827+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
88828+
88829+/* Constants for the e_flags field */
88830+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88831+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
88832+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
88833+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
88834+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88835+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88836
88837 /*
88838 * Extended Numbering
88839@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
88840 #define DT_DEBUG 21
88841 #define DT_TEXTREL 22
88842 #define DT_JMPREL 23
88843+#define DT_FLAGS 30
88844+ #define DF_TEXTREL 0x00000004
88845 #define DT_ENCODING 32
88846 #define OLD_DT_LOOS 0x60000000
88847 #define DT_LOOS 0x6000000d
88848@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
88849 #define PF_W 0x2
88850 #define PF_X 0x1
88851
88852+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
88853+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
88854+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
88855+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
88856+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
88857+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
88858+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
88859+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
88860+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
88861+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
88862+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
88863+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
88864+
88865 typedef struct elf32_phdr{
88866 Elf32_Word p_type;
88867 Elf32_Off p_offset;
88868@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
88869 #define EI_OSABI 7
88870 #define EI_PAD 8
88871
88872+#define EI_PAX 14
88873+
88874 #define ELFMAG0 0x7f /* EI_MAG */
88875 #define ELFMAG1 'E'
88876 #define ELFMAG2 'L'
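
PT_PAX_FLAGS defines a dedicated program header whose p_flags carry the per-binary enable/disable pairs listed above. A sketch of how a loader-side check might read it; the helper is hypothetical, and real enforcement lives in the kernel's ELF loader:

#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS  (PT_LOOS + 0x5041580)
#define PF_MPROTECT   (1U << 8)
#define PF_NOMPROTECT (1U << 9)
#endif

static void report_pax(const Elf64_Phdr *phdr, size_t phnum)
{
    for (size_t i = 0; i < phnum; i++) {
        if (phdr[i].p_type != PT_PAX_FLAGS)
            continue;
        printf("MPROTECT: %s\n",
               (phdr[i].p_flags & PF_MPROTECT)   ? "forced on"  :
               (phdr[i].p_flags & PF_NOMPROTECT) ? "forced off" :
                                                   "default");
    }
}

int main(void)
{
    Elf64_Phdr ph = { .p_type = PT_PAX_FLAGS, .p_flags = PF_MPROTECT };
    report_pax(&ph, 1);
    return 0;
}
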
88877diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
88878index aa169c4..6a2771d 100644
88879--- a/include/uapi/linux/personality.h
88880+++ b/include/uapi/linux/personality.h
88881@@ -30,6 +30,7 @@ enum {
88882 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
88883 ADDR_NO_RANDOMIZE | \
88884 ADDR_COMPAT_LAYOUT | \
88885+ ADDR_LIMIT_3GB | \
88886 MMAP_PAGE_ZERO)
88887
88888 /*
88889diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
88890index 7530e74..e714828 100644
88891--- a/include/uapi/linux/screen_info.h
88892+++ b/include/uapi/linux/screen_info.h
88893@@ -43,7 +43,8 @@ struct screen_info {
88894 __u16 pages; /* 0x32 */
88895 __u16 vesa_attributes; /* 0x34 */
88896 __u32 capabilities; /* 0x36 */
88897- __u8 _reserved[6]; /* 0x3a */
88898+ __u16 vesapm_size; /* 0x3a */
88899+ __u8 _reserved[4]; /* 0x3c */
88900 } __attribute__((packed));
88901
88902 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88903diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
88904index 0e011eb..82681b1 100644
88905--- a/include/uapi/linux/swab.h
88906+++ b/include/uapi/linux/swab.h
88907@@ -43,7 +43,7 @@
88908 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
88909 */
88910
88911-static inline __attribute_const__ __u16 __fswab16(__u16 val)
88912+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
88913 {
88914 #ifdef __HAVE_BUILTIN_BSWAP16__
88915 return __builtin_bswap16(val);
88916@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
88917 #endif
88918 }
88919
88920-static inline __attribute_const__ __u32 __fswab32(__u32 val)
88921+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
88922 {
88923 #ifdef __HAVE_BUILTIN_BSWAP32__
88924 return __builtin_bswap32(val);
88925@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
88926 #endif
88927 }
88928
88929-static inline __attribute_const__ __u64 __fswab64(__u64 val)
88930+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
88931 {
88932 #ifdef __HAVE_BUILTIN_BSWAP64__
88933 return __builtin_bswap64(val);
88934diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
88935index 1590c49..5eab462 100644
88936--- a/include/uapi/linux/xattr.h
88937+++ b/include/uapi/linux/xattr.h
88938@@ -73,5 +73,9 @@
88939 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
88940 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
88941
88942+/* User namespace */
88943+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
88944+#define XATTR_PAX_FLAGS_SUFFIX "flags"
88945+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
88946
88947 #endif /* _UAPI_LINUX_XATTR_H */
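
The three macros added above paste together to the literal attribute name "user.pax.flags", which stores per-file PaX flags in an extended attribute instead of the ELF header. A quick check of the concatenation (the prefix macro is copied here so the snippet stands alone):

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX      "user."
#define XATTR_PAX_PREFIX       XATTR_USER_PREFIX "pax."
#define XATTR_PAX_FLAGS_SUFFIX "flags"
#define XATTR_NAME_PAX_FLAGS   XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX

int main(void)
{
    assert(strcmp(XATTR_NAME_PAX_FLAGS, "user.pax.flags") == 0);
    printf("%s\n", XATTR_NAME_PAX_FLAGS);
    return 0;
}

Tools such as setfattr(1)/getfattr(1) can then read or set this attribute on individual binaries.
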
88948diff --git a/include/video/udlfb.h b/include/video/udlfb.h
88949index f9466fa..f4e2b81 100644
88950--- a/include/video/udlfb.h
88951+++ b/include/video/udlfb.h
88952@@ -53,10 +53,10 @@ struct dlfb_data {
88953 u32 pseudo_palette[256];
88954 int blank_mode; /*one of FB_BLANK_ */
88955 /* blit-only rendering path metrics, exposed through sysfs */
88956- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88957- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
88958- atomic_t bytes_sent; /* to usb, after compression including overhead */
88959- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
88960+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88961+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
88962+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
88963+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
88964 };
88965
88966 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
88967diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
88968index 30f5362..8ed8ac9 100644
88969--- a/include/video/uvesafb.h
88970+++ b/include/video/uvesafb.h
88971@@ -122,6 +122,7 @@ struct uvesafb_par {
88972 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
88973 u8 pmi_setpal; /* PMI for palette changes */
88974 u16 *pmi_base; /* protected mode interface location */
88975+ u8 *pmi_code; /* protected mode code location */
88976 void *pmi_start;
88977 void *pmi_pal;
88978 u8 *vbe_state_orig; /*
88979diff --git a/init/Kconfig b/init/Kconfig
88980index f5dbc6d..8259396 100644
88981--- a/init/Kconfig
88982+++ b/init/Kconfig
88983@@ -1136,6 +1136,7 @@ endif # CGROUPS
88984
88985 config CHECKPOINT_RESTORE
88986 bool "Checkpoint/restore support" if EXPERT
88987+ depends on !GRKERNSEC
88988 default n
88989 help
88990 Enables additional kernel features in a sake of checkpoint/restore.
88991@@ -1646,7 +1647,7 @@ config SLUB_DEBUG
88992
88993 config COMPAT_BRK
88994 bool "Disable heap randomization"
88995- default y
88996+ default n
88997 help
88998 Randomizing heap placement makes heap exploits harder, but it
88999 also breaks ancient binaries (including anything libc5 based).
89000@@ -1977,7 +1978,7 @@ config INIT_ALL_POSSIBLE
89001 config STOP_MACHINE
89002 bool
89003 default y
89004- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
89005+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
89006 help
89007 Need stop_machine() primitive.
89008
89009diff --git a/init/Makefile b/init/Makefile
89010index 7bc47ee..6da2dc7 100644
89011--- a/init/Makefile
89012+++ b/init/Makefile
89013@@ -2,6 +2,9 @@
89014 # Makefile for the linux kernel.
89015 #
89016
89017+ccflags-y := $(GCC_PLUGINS_CFLAGS)
89018+asflags-y := $(GCC_PLUGINS_AFLAGS)
89019+
89020 obj-y := main.o version.o mounts.o
89021 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
89022 obj-y += noinitramfs.o
89023diff --git a/init/do_mounts.c b/init/do_mounts.c
89024index eb41008..f5dbbf9 100644
89025--- a/init/do_mounts.c
89026+++ b/init/do_mounts.c
89027@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
89028 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89029 {
89030 struct super_block *s;
89031- int err = sys_mount(name, "/root", fs, flags, data);
89032+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
89033 if (err)
89034 return err;
89035
89036- sys_chdir("/root");
89037+ sys_chdir((const char __force_user *)"/root");
89038 s = current->fs->pwd.dentry->d_sb;
89039 ROOT_DEV = s->s_dev;
89040 printk(KERN_INFO
89041@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
89042 va_start(args, fmt);
89043 vsprintf(buf, fmt, args);
89044 va_end(args);
89045- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89046+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89047 if (fd >= 0) {
89048 sys_ioctl(fd, FDEJECT, 0);
89049 sys_close(fd);
89050 }
89051 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89052- fd = sys_open("/dev/console", O_RDWR, 0);
89053+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
89054 if (fd >= 0) {
89055 sys_ioctl(fd, TCGETS, (long)&termios);
89056 termios.c_lflag &= ~ICANON;
89057 sys_ioctl(fd, TCSETSF, (long)&termios);
89058- sys_read(fd, &c, 1);
89059+ sys_read(fd, (char __user *)&c, 1);
89060 termios.c_lflag |= ICANON;
89061 sys_ioctl(fd, TCSETSF, (long)&termios);
89062 sys_close(fd);
89063@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
89064 mount_root();
89065 out:
89066 devtmpfs_mount("dev");
89067- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89068- sys_chroot(".");
89069+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89070+ sys_chroot((const char __force_user *)".");
89071 }
89072
89073 static bool is_tmpfs;
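
The __force_user casts that dominate this and the following init/ hunks address address-space checking: with PaX/UDEREF in play, the sys_* entry points take __user pointers, and early boot code that passes kernel strings must annotate the crossing explicitly rather than letting it pass silently. A model of the underlying sparse annotations, in a standalone snippet:

#ifdef __CHECKER__                     /* set when running sparse */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

/* syscall-style API that expects a userland pointer */
static long sys_unlink_like(const char __user *pathname)
{
    (void)pathname;
    return 0;
}

int main(void)
{
    /* A kernel string handed to a user-pointer API: the cast makes the
     * deliberate address-space crossing visible to the checker. */
    return (int)sys_unlink_like((const char __force_user *)"/initrd.image");
}
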
89074diff --git a/init/do_mounts.h b/init/do_mounts.h
89075index f5b978a..69dbfe8 100644
89076--- a/init/do_mounts.h
89077+++ b/init/do_mounts.h
89078@@ -15,15 +15,15 @@ extern int root_mountflags;
89079
89080 static inline int create_dev(char *name, dev_t dev)
89081 {
89082- sys_unlink(name);
89083- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89084+ sys_unlink((char __force_user *)name);
89085+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89086 }
89087
89088 #if BITS_PER_LONG == 32
89089 static inline u32 bstat(char *name)
89090 {
89091 struct stat64 stat;
89092- if (sys_stat64(name, &stat) != 0)
89093+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89094 return 0;
89095 if (!S_ISBLK(stat.st_mode))
89096 return 0;
89097@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89098 static inline u32 bstat(char *name)
89099 {
89100 struct stat stat;
89101- if (sys_newstat(name, &stat) != 0)
89102+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89103 return 0;
89104 if (!S_ISBLK(stat.st_mode))
89105 return 0;
89106diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89107index 3e0878e..8a9d7a0 100644
89108--- a/init/do_mounts_initrd.c
89109+++ b/init/do_mounts_initrd.c
89110@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
89111 {
89112 sys_unshare(CLONE_FS | CLONE_FILES);
89113 /* stdin/stdout/stderr for /linuxrc */
89114- sys_open("/dev/console", O_RDWR, 0);
89115+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
89116 sys_dup(0);
89117 sys_dup(0);
89118 /* move initrd over / and chdir/chroot in initrd root */
89119- sys_chdir("/root");
89120- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89121- sys_chroot(".");
89122+ sys_chdir((const char __force_user *)"/root");
89123+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89124+ sys_chroot((const char __force_user *)".");
89125 sys_setsid();
89126 return 0;
89127 }
89128@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
89129 create_dev("/dev/root.old", Root_RAM0);
89130 /* mount initrd on rootfs' /root */
89131 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89132- sys_mkdir("/old", 0700);
89133- sys_chdir("/old");
89134+ sys_mkdir((const char __force_user *)"/old", 0700);
89135+ sys_chdir((const char __force_user *)"/old");
89136
89137 /* try loading default modules from initrd */
89138 load_default_modules();
89139@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
89140 current->flags &= ~PF_FREEZER_SKIP;
89141
89142 /* move initrd to rootfs' /old */
89143- sys_mount("..", ".", NULL, MS_MOVE, NULL);
89144+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
89145 /* switch root and cwd back to / of rootfs */
89146- sys_chroot("..");
89147+ sys_chroot((const char __force_user *)"..");
89148
89149 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89150- sys_chdir("/old");
89151+ sys_chdir((const char __force_user *)"/old");
89152 return;
89153 }
89154
89155- sys_chdir("/");
89156+ sys_chdir((const char __force_user *)"/");
89157 ROOT_DEV = new_decode_dev(real_root_dev);
89158 mount_root();
89159
89160 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89161- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89162+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89163 if (!error)
89164 printk("okay\n");
89165 else {
89166- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89167+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89168 if (error == -ENOENT)
89169 printk("/initrd does not exist. Ignored.\n");
89170 else
89171 printk("failed\n");
89172 printk(KERN_NOTICE "Unmounting old root\n");
89173- sys_umount("/old", MNT_DETACH);
89174+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89175 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89176 if (fd < 0) {
89177 error = fd;
89178@@ -127,11 +127,11 @@ int __init initrd_load(void)
89179 * mounted in the normal path.
89180 */
89181 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89182- sys_unlink("/initrd.image");
89183+ sys_unlink((const char __force_user *)"/initrd.image");
89184 handle_initrd();
89185 return 1;
89186 }
89187 }
89188- sys_unlink("/initrd.image");
89189+ sys_unlink((const char __force_user *)"/initrd.image");
89190 return 0;
89191 }
89192diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89193index 8cb6db5..d729f50 100644
89194--- a/init/do_mounts_md.c
89195+++ b/init/do_mounts_md.c
89196@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
89197 partitioned ? "_d" : "", minor,
89198 md_setup_args[ent].device_names);
89199
89200- fd = sys_open(name, 0, 0);
89201+ fd = sys_open((char __force_user *)name, 0, 0);
89202 if (fd < 0) {
89203 printk(KERN_ERR "md: open failed - cannot start "
89204 "array %s\n", name);
89205@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
89206 * array without it
89207 */
89208 sys_close(fd);
89209- fd = sys_open(name, 0, 0);
89210+ fd = sys_open((char __force_user *)name, 0, 0);
89211 sys_ioctl(fd, BLKRRPART, 0);
89212 }
89213 sys_close(fd);
89214@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
89215
89216 wait_for_device_probe();
89217
89218- fd = sys_open("/dev/md0", 0, 0);
89219+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
89220 if (fd >= 0) {
89221 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89222 sys_close(fd);
89223diff --git a/init/init_task.c b/init/init_task.c
89224index ba0a7f36..2bcf1d5 100644
89225--- a/init/init_task.c
89226+++ b/init/init_task.c
89227@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
89228 * Initial thread structure. Alignment of this is handled by a special
89229 * linker map entry.
89230 */
89231+#ifdef CONFIG_X86
89232+union thread_union init_thread_union __init_task_data;
89233+#else
89234 union thread_union init_thread_union __init_task_data =
89235 { INIT_THREAD_INFO(init_task) };
89236+#endif
89237diff --git a/init/initramfs.c b/init/initramfs.c
89238index ad1bd77..dca2c1b 100644
89239--- a/init/initramfs.c
89240+++ b/init/initramfs.c
89241@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
89242
89243 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
89244 while (count) {
89245- ssize_t rv = sys_write(fd, p, count);
89246+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
89247
89248 if (rv < 0) {
89249 if (rv == -EINTR || rv == -EAGAIN)
89250@@ -107,7 +107,7 @@ static void __init free_hash(void)
89251 }
89252 }
89253
89254-static long __init do_utime(char *filename, time_t mtime)
89255+static long __init do_utime(char __force_user *filename, time_t mtime)
89256 {
89257 struct timespec t[2];
89258
89259@@ -142,7 +142,7 @@ static void __init dir_utime(void)
89260 struct dir_entry *de, *tmp;
89261 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89262 list_del(&de->list);
89263- do_utime(de->name, de->mtime);
89264+ do_utime((char __force_user *)de->name, de->mtime);
89265 kfree(de->name);
89266 kfree(de);
89267 }
89268@@ -304,7 +304,7 @@ static int __init maybe_link(void)
89269 if (nlink >= 2) {
89270 char *old = find_link(major, minor, ino, mode, collected);
89271 if (old)
89272- return (sys_link(old, collected) < 0) ? -1 : 1;
89273+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89274 }
89275 return 0;
89276 }
89277@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
89278 {
89279 struct stat st;
89280
89281- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
89282+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
89283 if (S_ISDIR(st.st_mode))
89284- sys_rmdir(path);
89285+ sys_rmdir((char __force_user *)path);
89286 else
89287- sys_unlink(path);
89288+ sys_unlink((char __force_user *)path);
89289 }
89290 }
89291
89292@@ -338,7 +338,7 @@ static int __init do_name(void)
89293 int openflags = O_WRONLY|O_CREAT;
89294 if (ml != 1)
89295 openflags |= O_TRUNC;
89296- wfd = sys_open(collected, openflags, mode);
89297+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89298
89299 if (wfd >= 0) {
89300 sys_fchown(wfd, uid, gid);
89301@@ -350,17 +350,17 @@ static int __init do_name(void)
89302 }
89303 }
89304 } else if (S_ISDIR(mode)) {
89305- sys_mkdir(collected, mode);
89306- sys_chown(collected, uid, gid);
89307- sys_chmod(collected, mode);
89308+ sys_mkdir((char __force_user *)collected, mode);
89309+ sys_chown((char __force_user *)collected, uid, gid);
89310+ sys_chmod((char __force_user *)collected, mode);
89311 dir_add(collected, mtime);
89312 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89313 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89314 if (maybe_link() == 0) {
89315- sys_mknod(collected, mode, rdev);
89316- sys_chown(collected, uid, gid);
89317- sys_chmod(collected, mode);
89318- do_utime(collected, mtime);
89319+ sys_mknod((char __force_user *)collected, mode, rdev);
89320+ sys_chown((char __force_user *)collected, uid, gid);
89321+ sys_chmod((char __force_user *)collected, mode);
89322+ do_utime((char __force_user *)collected, mtime);
89323 }
89324 }
89325 return 0;
89326@@ -372,7 +372,7 @@ static int __init do_copy(void)
89327 if (xwrite(wfd, victim, body_len) != body_len)
89328 error("write error");
89329 sys_close(wfd);
89330- do_utime(vcollected, mtime);
89331+ do_utime((char __force_user *)vcollected, mtime);
89332 kfree(vcollected);
89333 eat(body_len);
89334 state = SkipIt;
89335@@ -390,9 +390,9 @@ static int __init do_symlink(void)
89336 {
89337 collected[N_ALIGN(name_len) + body_len] = '\0';
89338 clean_path(collected, 0);
89339- sys_symlink(collected + N_ALIGN(name_len), collected);
89340- sys_lchown(collected, uid, gid);
89341- do_utime(collected, mtime);
89342+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
89343+ sys_lchown((char __force_user *)collected, uid, gid);
89344+ do_utime((char __force_user *)collected, mtime);
89345 state = SkipIt;
89346 next_state = Reset;
89347 return 0;
89348diff --git a/init/main.c b/init/main.c
89349index 6f0f1c5f..a542824 100644
89350--- a/init/main.c
89351+++ b/init/main.c
89352@@ -96,6 +96,8 @@ extern void radix_tree_init(void);
89353 static inline void mark_rodata_ro(void) { }
89354 #endif
89355
89356+extern void grsecurity_init(void);
89357+
89358 /*
89359 * Debug helper: via this flag we know that we are in 'early bootup code'
89360 * where only the boot processor is running with IRQ disabled. This means
89361@@ -157,6 +159,85 @@ static int __init set_reset_devices(char *str)
89362
89363 __setup("reset_devices", set_reset_devices);
89364
89365+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
89366+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
89367+static int __init setup_grsec_proc_gid(char *str)
89368+{
89369+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
89370+ return 1;
89371+}
89372+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
89373+#endif
89374+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
89375+int grsec_enable_sysfs_restrict = 1;
89376+static int __init setup_grsec_sysfs_restrict(char *str)
89377+{
89378+ if (!simple_strtol(str, NULL, 0))
89379+ grsec_enable_sysfs_restrict = 0;
89380+ return 1;
89381+}
89382+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
89383+#endif
89384+
89385+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
89386+unsigned long pax_user_shadow_base __read_only;
89387+EXPORT_SYMBOL(pax_user_shadow_base);
89388+extern char pax_enter_kernel_user[];
89389+extern char pax_exit_kernel_user[];
89390+#endif
89391+
89392+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
89393+static int __init setup_pax_nouderef(char *str)
89394+{
89395+#ifdef CONFIG_X86_32
89396+ unsigned int cpu;
89397+ struct desc_struct *gdt;
89398+
89399+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
89400+ gdt = get_cpu_gdt_table(cpu);
89401+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
89402+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
89403+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
89404+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
89405+ }
89406+ loadsegment(ds, __KERNEL_DS);
89407+ loadsegment(es, __KERNEL_DS);
89408+ loadsegment(ss, __KERNEL_DS);
89409+#else
89410+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
89411+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
89412+ clone_pgd_mask = ~(pgdval_t)0UL;
89413+ pax_user_shadow_base = 0UL;
89414+ setup_clear_cpu_cap(X86_FEATURE_PCID);
89415+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
89416+#endif
89417+
89418+ return 0;
89419+}
89420+early_param("pax_nouderef", setup_pax_nouderef);
89421+
89422+#ifdef CONFIG_X86_64
89423+static int __init setup_pax_weakuderef(char *str)
89424+{
89425+ if (clone_pgd_mask != ~(pgdval_t)0UL)
89426+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
89427+ return 1;
89428+}
89429+__setup("pax_weakuderef", setup_pax_weakuderef);
89430+#endif
89431+#endif
89432+
89433+#ifdef CONFIG_PAX_SOFTMODE
89434+int pax_softmode;
89435+
89436+static int __init setup_pax_softmode(char *str)
89437+{
89438+ get_option(&str, &pax_softmode);
89439+ return 1;
89440+}
89441+__setup("pax_softmode=", setup_pax_softmode);
89442+#endif
89443+
89444 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
89445 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
89446 static const char *panic_later, *panic_param;
89447@@ -722,7 +803,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
89448 struct blacklist_entry *entry;
89449 char *fn_name;
89450
89451- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
89452+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
89453 if (!fn_name)
89454 return false;
89455
89456@@ -774,7 +855,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
89457 {
89458 int count = preempt_count();
89459 int ret;
89460- char msgbuf[64];
89461+ const char *msg1 = "", *msg2 = "";
89462
89463 if (initcall_blacklisted(fn))
89464 return -EPERM;
89465@@ -784,18 +865,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
89466 else
89467 ret = fn();
89468
89469- msgbuf[0] = 0;
89470-
89471 if (preempt_count() != count) {
89472- sprintf(msgbuf, "preemption imbalance ");
89473+ msg1 = " preemption imbalance";
89474 preempt_count_set(count);
89475 }
89476 if (irqs_disabled()) {
89477- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
89478+ msg2 = " disabled interrupts";
89479 local_irq_enable();
89480 }
89481- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
89482+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
89483
89484+ add_latent_entropy();
89485 return ret;
89486 }
89487
89488@@ -901,8 +981,8 @@ static int run_init_process(const char *init_filename)
89489 {
89490 argv_init[0] = init_filename;
89491 return do_execve(getname_kernel(init_filename),
89492- (const char __user *const __user *)argv_init,
89493- (const char __user *const __user *)envp_init);
89494+ (const char __user *const __force_user *)argv_init,
89495+ (const char __user *const __force_user *)envp_init);
89496 }
89497
89498 static int try_to_run_init_process(const char *init_filename)
89499@@ -919,6 +999,10 @@ static int try_to_run_init_process(const char *init_filename)
89500 return ret;
89501 }
89502
89503+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89504+extern int gr_init_ran;
89505+#endif
89506+
89507 static noinline void __init kernel_init_freeable(void);
89508
89509 static int __ref kernel_init(void *unused)
89510@@ -943,6 +1027,11 @@ static int __ref kernel_init(void *unused)
89511 ramdisk_execute_command, ret);
89512 }
89513
89514+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89515+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
89516+ gr_init_ran = 1;
89517+#endif
89518+
89519 /*
89520 * We try each of these until one succeeds.
89521 *
89522@@ -998,7 +1087,7 @@ static noinline void __init kernel_init_freeable(void)
89523 do_basic_setup();
89524
89525 /* Open the /dev/console on the rootfs, this should never fail */
89526- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
89527+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
89528 pr_err("Warning: unable to open an initial console.\n");
89529
89530 (void) sys_dup(0);
89531@@ -1011,11 +1100,13 @@ static noinline void __init kernel_init_freeable(void)
89532 if (!ramdisk_execute_command)
89533 ramdisk_execute_command = "/init";
89534
89535- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
89536+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
89537 ramdisk_execute_command = NULL;
89538 prepare_namespace();
89539 }
89540
89541+ grsecurity_init();
89542+
89543 /*
89544 * Ok, we have completed the initial bootup, and
89545 * we're essentially up and running. Get rid of the
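
All of the grsec_*/pax_* knobs added above follow the kernel's standard boot-parameter pattern: a __setup() handler receives the text after '=' and returns 1 once the parameter has been consumed. The shape, reduced to a minimal in-tree example with an illustrative name:

#include <linux/init.h>
#include <linux/kernel.h>

static int example_mode;

static int __init setup_example_mode(char *str)
{
    get_option(&str, &example_mode);   /* parse the integer after '=' */
    return 1;                          /* 1 = parameter consumed */
}
__setup("example_mode=", setup_example_mode);

Booting with example_mode=1 on the kernel command line would then set the variable before initcalls run, which is exactly how pax_softmode= works above.
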
89546diff --git a/ipc/compat.c b/ipc/compat.c
89547index 9b3c85f..5266b0f 100644
89548--- a/ipc/compat.c
89549+++ b/ipc/compat.c
89550@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
89551 COMPAT_SHMLBA);
89552 if (err < 0)
89553 return err;
89554- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
89555+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
89556 }
89557 case SHMDT:
89558 return sys_shmdt(compat_ptr(ptr));
89559@@ -747,7 +747,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
89560 }
89561
89562 COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
89563- unsigned, nsops,
89564+ compat_long_t, nsops,
89565 const struct compat_timespec __user *, timeout)
89566 {
89567 struct timespec __user *ts64;
89568diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
89569index 8ad93c2..efd80f8 100644
89570--- a/ipc/ipc_sysctl.c
89571+++ b/ipc/ipc_sysctl.c
89572@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
89573 static int proc_ipc_dointvec(struct ctl_table *table, int write,
89574 void __user *buffer, size_t *lenp, loff_t *ppos)
89575 {
89576- struct ctl_table ipc_table;
89577+ ctl_table_no_const ipc_table;
89578
89579 memcpy(&ipc_table, table, sizeof(ipc_table));
89580 ipc_table.data = get_ipc(table);
89581@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
89582 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
89583 void __user *buffer, size_t *lenp, loff_t *ppos)
89584 {
89585- struct ctl_table ipc_table;
89586+ ctl_table_no_const ipc_table;
89587
89588 memcpy(&ipc_table, table, sizeof(ipc_table));
89589 ipc_table.data = get_ipc(table);
89590@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
89591 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
89592 void __user *buffer, size_t *lenp, loff_t *ppos)
89593 {
89594- struct ctl_table ipc_table;
89595+ ctl_table_no_const ipc_table;
89596 memcpy(&ipc_table, table, sizeof(ipc_table));
89597 ipc_table.data = get_ipc(table);
89598
89599@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
89600 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
89601 void __user *buffer, size_t *lenp, loff_t *ppos)
89602 {
89603- struct ctl_table ipc_table;
89604+ ctl_table_no_const ipc_table;
89605 int dummy = 0;
89606
89607 memcpy(&ipc_table, table, sizeof(ipc_table));
89608diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
89609index 68d4e95..1477ded 100644
89610--- a/ipc/mq_sysctl.c
89611+++ b/ipc/mq_sysctl.c
89612@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
89613 static int proc_mq_dointvec(struct ctl_table *table, int write,
89614 void __user *buffer, size_t *lenp, loff_t *ppos)
89615 {
89616- struct ctl_table mq_table;
89617+ ctl_table_no_const mq_table;
89618 memcpy(&mq_table, table, sizeof(mq_table));
89619 mq_table.data = get_mq(table);
89620
89621@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
89622 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
89623 void __user *buffer, size_t *lenp, loff_t *ppos)
89624 {
89625- struct ctl_table mq_table;
89626+ ctl_table_no_const mq_table;
89627 memcpy(&mq_table, table, sizeof(mq_table));
89628 mq_table.data = get_mq(table);
89629
89630diff --git a/ipc/mqueue.c b/ipc/mqueue.c
89631index 7635a1c..7432cb6 100644
89632--- a/ipc/mqueue.c
89633+++ b/ipc/mqueue.c
89634@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
89635 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
89636 info->attr.mq_msgsize);
89637
89638+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
89639 spin_lock(&mq_lock);
89640 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
89641 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
89642diff --git a/ipc/sem.c b/ipc/sem.c
89643index 9284211..bca5b1b 100644
89644--- a/ipc/sem.c
89645+++ b/ipc/sem.c
89646@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
89647 }
89648
89649 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
89650- unsigned, nsops, const struct timespec __user *, timeout)
89651+ long, nsops, const struct timespec __user *, timeout)
89652 {
89653 int error = -EINVAL;
89654 struct sem_array *sma;
89655@@ -2015,7 +2015,7 @@ out_free:
89656 }
89657
89658 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
89659- unsigned, nsops)
89660+ long, nsops)
89661 {
89662 return sys_semtimedop(semid, tsops, nsops, NULL);
89663 }
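
The two SYSCALL_DEFINE changes widen nsops from unsigned to long. The apparent intent (the patch does not say so explicitly) is that a negative count from userspace arrives as a negative number and can be rejected as such, instead of being reinterpreted as a huge unsigned value whose fate depends entirely on the later upper-bound check; it also keeps the signedness consistent for the patch's size_overflow instrumentation. In miniature:

#include <stdio.h>

#define SEMOPM 500   /* per-semop operation cap, as in SysV IPC */

/* With a signed parameter, -1 is caught by the lower bound; as an
 * unsigned it would have shown up as a huge positive count instead. */
static long check_nsops(long nsops)
{
    if (nsops < 1 || nsops > SEMOPM)
        return -22;                     /* -EINVAL */
    return nsops;
}

int main(void)
{
    printf("%ld\n", check_nsops(-1));   /* -22 */
    printf("%ld\n", check_nsops(3));    /* 3 */
    return 0;
}
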
89664diff --git a/ipc/shm.c b/ipc/shm.c
89665index 19633b4..d454904 100644
89666--- a/ipc/shm.c
89667+++ b/ipc/shm.c
89668@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
89669 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
89670 #endif
89671
89672+#ifdef CONFIG_GRKERNSEC
89673+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89674+ const u64 shm_createtime, const kuid_t cuid,
89675+ const int shmid);
89676+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89677+ const u64 shm_createtime);
89678+#endif
89679+
89680 void shm_init_ns(struct ipc_namespace *ns)
89681 {
89682 ns->shm_ctlmax = SHMMAX;
89683@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
89684 shp->shm_lprid = 0;
89685 shp->shm_atim = shp->shm_dtim = 0;
89686 shp->shm_ctim = get_seconds();
89687+#ifdef CONFIG_GRKERNSEC
89688+ shp->shm_createtime = ktime_get_ns();
89689+#endif
89690 shp->shm_segsz = size;
89691 shp->shm_nattch = 0;
89692 shp->shm_file = file;
89693@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89694 f_mode = FMODE_READ | FMODE_WRITE;
89695 }
89696 if (shmflg & SHM_EXEC) {
89697+
89698+#ifdef CONFIG_PAX_MPROTECT
89699+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
89700+ goto out;
89701+#endif
89702+
89703 prot |= PROT_EXEC;
89704 acc_mode |= S_IXUGO;
89705 }
89706@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89707 if (err)
89708 goto out_unlock;
89709
89710+#ifdef CONFIG_GRKERNSEC
89711+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
89712+ shp->shm_perm.cuid, shmid) ||
89713+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
89714+ err = -EACCES;
89715+ goto out_unlock;
89716+ }
89717+#endif
89718+
89719 ipc_lock_object(&shp->shm_perm);
89720
89721 /* check if shm_destroy() is tearing down shp */
89722@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89723 path = shp->shm_file->f_path;
89724 path_get(&path);
89725 shp->shm_nattch++;
89726+#ifdef CONFIG_GRKERNSEC
89727+ shp->shm_lapid = current->pid;
89728+#endif
89729 size = i_size_read(path.dentry->d_inode);
89730 ipc_unlock_object(&shp->shm_perm);
89731 rcu_read_unlock();
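
The shm.c hunks thread extra state through SysV shared memory, a creation timestamp (shm_createtime) and the pid of the last attacher (shm_lapid), and consult two veto hooks at attach time, so policy such as "a chrooted process may not attach to a segment created outside the chroot" can deny with -EACCES; the MPROTECT hunk additionally refuses SHM_EXEC mappings for tasks running under PaX MPROTECT. The ipc/util.c hunk below adds the same deny-hook shape to the generic ipcperms() check. Reduced to its shape, with the gr_* policies (which live elsewhere in the patch) stubbed out:

#include <stdio.h>

struct seg { long cprid, lapid; unsigned long long createtime; };

/* Stub standing in for gr_handle_shmat()/gr_chroot_shmat(); the real
 * policies are defined in the grsecurity/ part of the patch. */
static int policy_allows(const struct seg *s) { (void)s; return 1; }

static int do_attach(struct seg *s, long pid)
{
    if (!policy_allows(s))
        return -13;          /* -EACCES, as in the do_shmat() hunk */
    s->lapid = pid;          /* record the last attacher for later checks */
    return 0;
}

int main(void)
{
    struct seg s = { .cprid = 100, .lapid = 0, .createtime = 123456789ULL };
    printf("attach: %d, lapid now %ld\n", do_attach(&s, 4242), s.lapid);
    return 0;
}
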
89732diff --git a/ipc/util.c b/ipc/util.c
89733index 106bed0..f851429 100644
89734--- a/ipc/util.c
89735+++ b/ipc/util.c
89736@@ -71,6 +71,8 @@ struct ipc_proc_iface {
89737 int (*show)(struct seq_file *, void *);
89738 };
89739
89740+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
89741+
89742 /**
89743 * ipc_init - initialise ipc subsystem
89744 *
89745@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
89746 granted_mode >>= 6;
89747 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
89748 granted_mode >>= 3;
89749+
89750+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
89751+ return -1;
89752+
89753 /* is there some bit set in requested_mode but not in granted_mode? */
89754 if ((requested_mode & ~granted_mode & 0007) &&
89755 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
89756diff --git a/kernel/audit.c b/kernel/audit.c
89757index 72ab759..757deba 100644
89758--- a/kernel/audit.c
89759+++ b/kernel/audit.c
89760@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
89761 3) suppressed due to audit_rate_limit
89762 4) suppressed due to audit_backlog_limit
89763 */
89764-static atomic_t audit_lost = ATOMIC_INIT(0);
89765+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
89766
89767 /* The netlink socket. */
89768 static struct sock *audit_sock;
89769@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
89770 unsigned long now;
89771 int print;
89772
89773- atomic_inc(&audit_lost);
89774+ atomic_inc_unchecked(&audit_lost);
89775
89776 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
89777
89778@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
89779 if (print) {
89780 if (printk_ratelimit())
89781 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
89782- atomic_read(&audit_lost),
89783+ atomic_read_unchecked(&audit_lost),
89784 audit_rate_limit,
89785 audit_backlog_limit);
89786 audit_panic(message);
89787@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
89788 s.pid = audit_pid;
89789 s.rate_limit = audit_rate_limit;
89790 s.backlog_limit = audit_backlog_limit;
89791- s.lost = atomic_read(&audit_lost);
89792+ s.lost = atomic_read_unchecked(&audit_lost);
89793 s.backlog = skb_queue_len(&audit_skb_queue);
89794 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
89795 s.backlog_wait_time = audit_backlog_wait_time;
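
atomic_unchecked_t is the escape hatch for PaX REFCOUNT: with that feature enabled, ordinary atomic_t arithmetic detects overflow (to stop reference-count overflow exploits), so counters that are pure statistics, audit_lost here, session_id in auditsc.c, the kgdb tasklet and IRQ threads_handled counters further down, are moved to the unchecked variant where wrapping is harmless and must not trip the detector. A userspace model of the split, illustrative only, since the kernel implements the check inside the atomic ops themselves:

#include <limits.h>
#include <stdio.h>

typedef struct { int v; } atomic_t;
typedef struct { int v; } atomic_unchecked_t;

/* Checked flavour: refuse to wrap, as PAX_REFCOUNT would (the kernel
 * raises a refcount-overflow event; here we just saturate and report). */
static void atomic_inc(atomic_t *a)
{
    if (a->v == INT_MAX) {
        fprintf(stderr, "refcount overflow caught\n");
        return;
    }
    a->v++;
}

/* Unchecked flavour: wraps via unsigned arithmetic, tolerated. */
static void atomic_inc_unchecked(atomic_unchecked_t *a)
{
    a->v = (int)((unsigned int)a->v + 1u);
}

int main(void)
{
    atomic_t ref = { INT_MAX };
    atomic_unchecked_t lost = { INT_MAX };
    atomic_inc(&ref);              /* caught, value stays INT_MAX */
    atomic_inc_unchecked(&lost);   /* wraps, by design */
    printf("ref=%d lost=%d\n", ref.v, lost.v);
    return 0;
}
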
89796diff --git a/kernel/auditsc.c b/kernel/auditsc.c
89797index dc4ae70..2a2bddc 100644
89798--- a/kernel/auditsc.c
89799+++ b/kernel/auditsc.c
89800@@ -1955,7 +1955,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
89801 }
89802
89803 /* global counter which is incremented every time something logs in */
89804-static atomic_t session_id = ATOMIC_INIT(0);
89805+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
89806
89807 static int audit_set_loginuid_perm(kuid_t loginuid)
89808 {
89809@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
89810
89811 /* are we setting or clearing? */
89812 if (uid_valid(loginuid))
89813- sessionid = (unsigned int)atomic_inc_return(&session_id);
89814+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
89815
89816 task->sessionid = sessionid;
89817 task->loginuid = loginuid;
89818diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
89819index 0c5796e..a9414e2 100644
89820--- a/kernel/bpf/core.c
89821+++ b/kernel/bpf/core.c
89822@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
89823 * random section of illegal instructions.
89824 */
89825 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
89826- hdr = module_alloc(size);
89827+ hdr = module_alloc_exec(size);
89828 if (hdr == NULL)
89829 return NULL;
89830
89831 /* Fill space with illegal/arch-dep instructions. */
89832 bpf_fill_ill_insns(hdr, size);
89833
89834+ pax_open_kernel();
89835 hdr->pages = size / PAGE_SIZE;
89836+ pax_close_kernel();
89837+
89838 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
89839 PAGE_SIZE - sizeof(*hdr));
89840 start = (prandom_u32() % hole) & ~(alignment - 1);
89841@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
89842
89843 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
89844 {
89845- module_memfree(hdr);
89846+ module_memfree_exec(hdr);
89847 }
89848 #endif /* CONFIG_BPF_JIT */
89849
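
Under the patch's KERNEXEC model, executable kernel memory is split from writable memory, so the BPF JIT buffer must come from the executable pool (module_alloc_exec/module_memfree_exec) and is not writable through normal kernel stores once mapped; pax_open_kernel()/pax_close_kernel() open a short write window around the single store to hdr->pages. The same RX/RW split is why kdb's lsmod hunk later prints two module base addresses and gcov switches to within_module_core_rw(). A userspace stand-in for the open/write/close idiom, using mprotect():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    unsigned char *buf = mmap(NULL, (size_t)pagesz, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    /* bpf_fill_ill_insns() analogue: pre-fill with trapping bytes
     * (0xcc is x86 int3) so stray jumps into the slack don't run far. */
    memset(buf, 0xcc, (size_t)pagesz);
    mprotect(buf, (size_t)pagesz, PROT_READ);               /* "closed" */

    mprotect(buf, (size_t)pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
    buf[0] = 1;                                             /* the one store */
    mprotect(buf, (size_t)pagesz, PROT_READ);               /* pax_close_kernel() */

    printf("buf[0]=%u\n", buf[0]);
    return munmap(buf, (size_t)pagesz);
}
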
89850diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
89851index 536edc2..d28c85d 100644
89852--- a/kernel/bpf/syscall.c
89853+++ b/kernel/bpf/syscall.c
89854@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
89855 int err;
89856
89857 /* the syscall is limited to root temporarily. This restriction will be
89858- * lifted when security audit is clean. Note that eBPF+tracing must have
89859- * this restriction, since it may pass kernel data to user space
89860+ * lifted by upstream when a half-assed security audit is clean. Note
89861+ * that eBPF+tracing must have this restriction, since it may pass
89862+ * kernel data to user space
89863 */
89864 if (!capable(CAP_SYS_ADMIN))
89865 return -EPERM;
89866+#ifdef CONFIG_GRKERNSEC
89867+ return -EPERM;
89868+#endif
89869
89870 if (!access_ok(VERIFY_READ, uattr, 1))
89871 return -EFAULT;
89872diff --git a/kernel/capability.c b/kernel/capability.c
89873index 989f5bf..d317ca0 100644
89874--- a/kernel/capability.c
89875+++ b/kernel/capability.c
89876@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
89877 * before modification is attempted and the application
89878 * fails.
89879 */
89880+ if (tocopy > ARRAY_SIZE(kdata))
89881+ return -EFAULT;
89882+
89883 if (copy_to_user(dataptr, kdata, tocopy
89884 * sizeof(struct __user_cap_data_struct))) {
89885 return -EFAULT;
89886@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
89887 int ret;
89888
89889 rcu_read_lock();
89890- ret = security_capable(__task_cred(t), ns, cap);
89891+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
89892+ gr_task_is_capable(t, __task_cred(t), cap);
89893 rcu_read_unlock();
89894
89895- return (ret == 0);
89896+ return ret;
89897 }
89898
89899 /**
89900@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
89901 int ret;
89902
89903 rcu_read_lock();
89904- ret = security_capable_noaudit(__task_cred(t), ns, cap);
89905+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
89906 rcu_read_unlock();
89907
89908- return (ret == 0);
89909+ return ret;
89910 }
89911
89912 /**
89913@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
89914 BUG();
89915 }
89916
89917- if (security_capable(current_cred(), ns, cap) == 0) {
89918+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
89919 current->flags |= PF_SUPERPRIV;
89920 return true;
89921 }
89922@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
89923 }
89924 EXPORT_SYMBOL(ns_capable);
89925
89926+bool ns_capable_nolog(struct user_namespace *ns, int cap)
89927+{
89928+ if (unlikely(!cap_valid(cap))) {
89929+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
89930+ BUG();
89931+ }
89932+
89933+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
89934+ current->flags |= PF_SUPERPRIV;
89935+ return true;
89936+ }
89937+ return false;
89938+}
89939+EXPORT_SYMBOL(ns_capable_nolog);
89940+
89941 /**
89942 * file_ns_capable - Determine if the file's opener had a capability in effect
89943 * @file: The file we want to check
89944@@ -427,6 +446,12 @@ bool capable(int cap)
89945 }
89946 EXPORT_SYMBOL(capable);
89947
89948+bool capable_nolog(int cap)
89949+{
89950+ return ns_capable_nolog(&init_user_ns, cap);
89951+}
89952+EXPORT_SYMBOL(capable_nolog);
89953+
89954 /**
89955 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
89956 * @inode: The inode in question
89957@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
89958 kgid_has_mapping(ns, inode->i_gid);
89959 }
89960 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
89961+
89962+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
89963+{
89964+ struct user_namespace *ns = current_user_ns();
89965+
89966+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
89967+ kgid_has_mapping(ns, inode->i_gid);
89968+}
89969+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
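
Two distinct things happen in capability.c. First, capget() gains a bounds check: tocopy is derived from the userspace-chosen header version, and clamping it to ARRAY_SIZE(kdata) keeps the multiplied copy length from walking off the end of the kernel buffer. Second, every capability query grows a gr_* conjunct, so a grsecurity ACL can deny a capability the commoncap/LSM path would have granted, plus _nolog variants for checks that must not generate audit noise. The bounds check in isolation:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

/* Shape of the hardened copy-out in capget(): never copy more elements
 * than the kernel-side buffer holds, whatever the header claimed. */
static int copy_caps_out(struct cap_data *dst, const struct cap_data *kdata,
                         size_t nkdata, unsigned tocopy)
{
    if (tocopy > nkdata)
        return -14;                       /* -EFAULT, as in the hunk */
    memcpy(dst, kdata, tocopy * sizeof(*kdata));
    return 0;
}

int main(void)
{
    struct cap_data kdata[2] = { { 1, 1, 0 }, { 0, 0, 0 } }, out[2];
    printf("%d\n", copy_caps_out(out, kdata, ARRAY_SIZE(kdata), 2)); /* 0 */
    printf("%d\n", copy_caps_out(out, kdata, ARRAY_SIZE(kdata), 9)); /* -14 */
    return 0;
}
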
89970diff --git a/kernel/cgroup.c b/kernel/cgroup.c
89971index 29a7b2c..a64e30a 100644
89972--- a/kernel/cgroup.c
89973+++ b/kernel/cgroup.c
89974@@ -5347,6 +5347,9 @@ static void cgroup_release_agent(struct work_struct *work)
89975 if (!pathbuf || !agentbuf)
89976 goto out;
89977
89978+ if (agentbuf[0] == '\0')
89979+ goto out;
89980+
89981 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
89982 if (!path)
89983 goto out;
89984@@ -5532,7 +5535,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
89985 struct task_struct *task;
89986 int count = 0;
89987
89988- seq_printf(seq, "css_set %p\n", cset);
89989+ seq_printf(seq, "css_set %pK\n", cset);
89990
89991 list_for_each_entry(task, &cset->tasks, cg_list) {
89992 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
89993diff --git a/kernel/compat.c b/kernel/compat.c
89994index 24f0061..ea80802 100644
89995--- a/kernel/compat.c
89996+++ b/kernel/compat.c
89997@@ -13,6 +13,7 @@
89998
89999 #include <linux/linkage.h>
90000 #include <linux/compat.h>
90001+#include <linux/module.h>
90002 #include <linux/errno.h>
90003 #include <linux/time.h>
90004 #include <linux/signal.h>
90005@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90006 mm_segment_t oldfs;
90007 long ret;
90008
90009- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90010+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90011 oldfs = get_fs();
90012 set_fs(KERNEL_DS);
90013 ret = hrtimer_nanosleep_restart(restart);
90014@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
90015 oldfs = get_fs();
90016 set_fs(KERNEL_DS);
90017 ret = hrtimer_nanosleep(&tu,
90018- rmtp ? (struct timespec __user *)&rmt : NULL,
90019+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90020 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90021 set_fs(oldfs);
90022
90023@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
90024 mm_segment_t old_fs = get_fs();
90025
90026 set_fs(KERNEL_DS);
90027- ret = sys_sigpending((old_sigset_t __user *) &s);
90028+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90029 set_fs(old_fs);
90030 if (ret == 0)
90031 ret = put_user(s, set);
90032@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
90033 mm_segment_t old_fs = get_fs();
90034
90035 set_fs(KERNEL_DS);
90036- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
90037+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90038 set_fs(old_fs);
90039
90040 if (!ret) {
90041@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
90042 set_fs (KERNEL_DS);
90043 ret = sys_wait4(pid,
90044 (stat_addr ?
90045- (unsigned int __user *) &status : NULL),
90046- options, (struct rusage __user *) &r);
90047+ (unsigned int __force_user *) &status : NULL),
90048+ options, (struct rusage __force_user *) &r);
90049 set_fs (old_fs);
90050
90051 if (ret > 0) {
90052@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
90053 memset(&info, 0, sizeof(info));
90054
90055 set_fs(KERNEL_DS);
90056- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90057- uru ? (struct rusage __user *)&ru : NULL);
90058+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90059+ uru ? (struct rusage __force_user *)&ru : NULL);
90060 set_fs(old_fs);
90061
90062 if ((ret < 0) || (info.si_signo == 0))
90063@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
90064 oldfs = get_fs();
90065 set_fs(KERNEL_DS);
90066 err = sys_timer_settime(timer_id, flags,
90067- (struct itimerspec __user *) &newts,
90068- (struct itimerspec __user *) &oldts);
90069+ (struct itimerspec __force_user *) &newts,
90070+ (struct itimerspec __force_user *) &oldts);
90071 set_fs(oldfs);
90072 if (!err && old && put_compat_itimerspec(old, &oldts))
90073 return -EFAULT;
90074@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
90075 oldfs = get_fs();
90076 set_fs(KERNEL_DS);
90077 err = sys_timer_gettime(timer_id,
90078- (struct itimerspec __user *) &ts);
90079+ (struct itimerspec __force_user *) &ts);
90080 set_fs(oldfs);
90081 if (!err && put_compat_itimerspec(setting, &ts))
90082 return -EFAULT;
90083@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
90084 oldfs = get_fs();
90085 set_fs(KERNEL_DS);
90086 err = sys_clock_settime(which_clock,
90087- (struct timespec __user *) &ts);
90088+ (struct timespec __force_user *) &ts);
90089 set_fs(oldfs);
90090 return err;
90091 }
90092@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
90093 oldfs = get_fs();
90094 set_fs(KERNEL_DS);
90095 err = sys_clock_gettime(which_clock,
90096- (struct timespec __user *) &ts);
90097+ (struct timespec __force_user *) &ts);
90098 set_fs(oldfs);
90099 if (!err && compat_put_timespec(&ts, tp))
90100 return -EFAULT;
90101@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
90102
90103 oldfs = get_fs();
90104 set_fs(KERNEL_DS);
90105- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
90106+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
90107 set_fs(oldfs);
90108
90109 err = compat_put_timex(utp, &txc);
90110@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
90111 oldfs = get_fs();
90112 set_fs(KERNEL_DS);
90113 err = sys_clock_getres(which_clock,
90114- (struct timespec __user *) &ts);
90115+ (struct timespec __force_user *) &ts);
90116 set_fs(oldfs);
90117 if (!err && tp && compat_put_timespec(&ts, tp))
90118 return -EFAULT;
90119@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90120 struct timespec tu;
90121 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90122
90123- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90124+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90125 oldfs = get_fs();
90126 set_fs(KERNEL_DS);
90127 err = clock_nanosleep_restart(restart);
90128@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
90129 oldfs = get_fs();
90130 set_fs(KERNEL_DS);
90131 err = sys_clock_nanosleep(which_clock, flags,
90132- (struct timespec __user *) &in,
90133- (struct timespec __user *) &out);
90134+ (struct timespec __force_user *) &in,
90135+ (struct timespec __force_user *) &out);
90136 set_fs(oldfs);
90137
90138 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90139@@ -1145,7 +1146,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
90140 mm_segment_t old_fs = get_fs();
90141
90142 set_fs(KERNEL_DS);
90143- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
90144+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
90145 set_fs(old_fs);
90146 if (compat_put_timespec(&t, interval))
90147 return -EFAULT;
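
Every compat.c hunk is the same mechanical change: inside a set_fs(KERNEL_DS) window the code hands kernel stack variables to syscalls whose prototypes take __user pointers, and __force_user marks each cast as a deliberate address-space crossing so sparse and the patch's checker plugins flag only the unaudited ones. A self-contained sketch of how such annotations behave; it builds with plain gcc, where the attributes compile away, and only sparse would see them:

#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

struct timespec_like { long sec, nsec; };

/* Stub syscall with a user-pointer prototype. */
static long sys_like(struct timespec_like __user *p) { (void)p; return 0; }

/* The compat pattern: a kernel-side temporary passed through an audited,
 * explicitly-annotated cast (set_fs(KERNEL_DS) is what makes the access
 * legal at runtime in the kernel proper). */
static long compat_wrapper(void)
{
    struct timespec_like ts = { 0, 0 };
    return sys_like((struct timespec_like __force_user *)&ts);
}

int main(void) { return (int)compat_wrapper(); }
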
90148diff --git a/kernel/configs.c b/kernel/configs.c
90149index c18b1f1..b9a0132 100644
90150--- a/kernel/configs.c
90151+++ b/kernel/configs.c
90152@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
90153 struct proc_dir_entry *entry;
90154
90155 /* create the current config file */
90156+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90157+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90158+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90159+ &ikconfig_file_ops);
90160+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90161+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90162+ &ikconfig_file_ops);
90163+#endif
90164+#else
90165 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90166 &ikconfig_file_ops);
90167+#endif
90168+
90169 if (!entry)
90170 return -ENOMEM;
90171
90172diff --git a/kernel/cred.c b/kernel/cred.c
90173index e0573a4..26c0fd3 100644
90174--- a/kernel/cred.c
90175+++ b/kernel/cred.c
90176@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
90177 validate_creds(cred);
90178 alter_cred_subscribers(cred, -1);
90179 put_cred(cred);
90180+
90181+#ifdef CONFIG_GRKERNSEC_SETXID
90182+ cred = (struct cred *) tsk->delayed_cred;
90183+ if (cred != NULL) {
90184+ tsk->delayed_cred = NULL;
90185+ validate_creds(cred);
90186+ alter_cred_subscribers(cred, -1);
90187+ put_cred(cred);
90188+ }
90189+#endif
90190 }
90191
90192 /**
90193@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
90194 * Always returns 0 thus allowing this function to be tail-called at the end
90195 * of, say, sys_setgid().
90196 */
90197-int commit_creds(struct cred *new)
90198+static int __commit_creds(struct cred *new)
90199 {
90200 struct task_struct *task = current;
90201 const struct cred *old = task->real_cred;
90202@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
90203
90204 get_cred(new); /* we will require a ref for the subj creds too */
90205
90206+ gr_set_role_label(task, new->uid, new->gid);
90207+
90208 /* dumpability changes */
90209 if (!uid_eq(old->euid, new->euid) ||
90210 !gid_eq(old->egid, new->egid) ||
90211@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
90212 put_cred(old);
90213 return 0;
90214 }
90215+#ifdef CONFIG_GRKERNSEC_SETXID
90216+extern int set_user(struct cred *new);
90217+
90218+void gr_delayed_cred_worker(void)
90219+{
90220+ const struct cred *new = current->delayed_cred;
90221+ struct cred *ncred;
90222+
90223+ current->delayed_cred = NULL;
90224+
90225+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
90226+ // from doing get_cred on it when queueing this
90227+ put_cred(new);
90228+ return;
90229+ } else if (new == NULL)
90230+ return;
90231+
90232+ ncred = prepare_creds();
90233+ if (!ncred)
90234+ goto die;
90235+ // uids
90236+ ncred->uid = new->uid;
90237+ ncred->euid = new->euid;
90238+ ncred->suid = new->suid;
90239+ ncred->fsuid = new->fsuid;
90240+ // gids
90241+ ncred->gid = new->gid;
90242+ ncred->egid = new->egid;
90243+ ncred->sgid = new->sgid;
90244+ ncred->fsgid = new->fsgid;
90245+ // groups
90246+ set_groups(ncred, new->group_info);
90247+ // caps
90248+ ncred->securebits = new->securebits;
90249+ ncred->cap_inheritable = new->cap_inheritable;
90250+ ncred->cap_permitted = new->cap_permitted;
90251+ ncred->cap_effective = new->cap_effective;
90252+ ncred->cap_bset = new->cap_bset;
90253+
90254+ if (set_user(ncred)) {
90255+ abort_creds(ncred);
90256+ goto die;
90257+ }
90258+
90259+ // from doing get_cred on it when queueing this
90260+ put_cred(new);
90261+
90262+ __commit_creds(ncred);
90263+ return;
90264+die:
90265+ // from doing get_cred on it when queueing this
90266+ put_cred(new);
90267+ do_group_exit(SIGKILL);
90268+}
90269+#endif
90270+
90271+int commit_creds(struct cred *new)
90272+{
90273+#ifdef CONFIG_GRKERNSEC_SETXID
90274+ int ret;
90275+ int schedule_it = 0;
90276+ struct task_struct *t;
90277+ unsigned oldsecurebits = current_cred()->securebits;
90278+
90279+ /* we won't get called with tasklist_lock held for writing
90280+ and interrupts disabled as the cred struct in that case is
90281+ init_cred
90282+ */
90283+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90284+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
90285+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
90286+ schedule_it = 1;
90287+ }
90288+ ret = __commit_creds(new);
90289+ if (schedule_it) {
90290+ rcu_read_lock();
90291+ read_lock(&tasklist_lock);
90292+ for (t = next_thread(current); t != current;
90293+ t = next_thread(t)) {
90294+ /* we'll check if the thread has uid 0 in
90295+ * the delayed worker routine
90296+ */
90297+ if (task_securebits(t) == oldsecurebits &&
90298+ t->delayed_cred == NULL) {
90299+ t->delayed_cred = get_cred(new);
90300+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
90301+ set_tsk_need_resched(t);
90302+ }
90303+ }
90304+ read_unlock(&tasklist_lock);
90305+ rcu_read_unlock();
90306+ }
90307+
90308+ return ret;
90309+#else
90310+ return __commit_creds(new);
90311+#endif
90312+}
90313+
90314 EXPORT_SYMBOL(commit_creds);
90315
90316 /**
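
The SETXID machinery above addresses a long-standing multithreaded setuid gap: commit_creds() only changes the calling thread, so when one thread of a root process drops privileges its siblings keep uid 0 until they individually call setuid. The wrapper queues the new cred on every sibling (delayed_cred plus the TIF_GRSEC_SETXID flag) and forces a reschedule; gr_delayed_cred_worker() then applies it from each thread's own context, and kills the whole group with SIGKILL if applying fails rather than leave a half-deprivileged process. A toy model of the queue-then-apply handoff, with none of the real code's locking or get_cred()/put_cred() refcounting:

#include <stdio.h>

struct task { int uid; const int *delayed_uid; };

/* Caller side: change self, stamp the pending uid on every sibling. */
static void commit_uid(struct task *t, int n, int self, const int *new_uid)
{
    t[self].uid = *new_uid;
    for (int i = 0; i < n; i++)
        if (i != self && t[i].delayed_uid == NULL)
            t[i].delayed_uid = new_uid;
}

/* Sibling side: apply the pending uid the next time the thread runs. */
static void delayed_worker(struct task *t)
{
    if (t->delayed_uid) {
        t->uid = *t->delayed_uid;
        t->delayed_uid = NULL;
    }
}

int main(void)
{
    static const int unpriv = 1000;
    struct task th[3] = { { 0, NULL }, { 0, NULL }, { 0, NULL } };

    commit_uid(th, 3, 0, &unpriv);
    for (int i = 0; i < 3; i++)
        delayed_worker(&th[i]);
    for (int i = 0; i < 3; i++)
        printf("thread %d uid=%d\n", i, th[i].uid);   /* all 1000 */
    return 0;
}
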
90317diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
90318index 0874e2e..5b32cc9 100644
90319--- a/kernel/debug/debug_core.c
90320+++ b/kernel/debug/debug_core.c
90321@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
90322 */
90323 static atomic_t masters_in_kgdb;
90324 static atomic_t slaves_in_kgdb;
90325-static atomic_t kgdb_break_tasklet_var;
90326+static atomic_unchecked_t kgdb_break_tasklet_var;
90327 atomic_t kgdb_setting_breakpoint;
90328
90329 struct task_struct *kgdb_usethread;
90330@@ -137,7 +137,7 @@ int kgdb_single_step;
90331 static pid_t kgdb_sstep_pid;
90332
90333 /* to keep track of the CPU which is doing the single stepping*/
90334-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90335+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90336
90337 /*
90338 * If you are debugging a problem where roundup (the collection of
90339@@ -552,7 +552,7 @@ return_normal:
90340 * kernel will only try for the value of sstep_tries before
90341 * giving up and continuing on.
90342 */
90343- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
90344+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
90345 (kgdb_info[cpu].task &&
90346 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
90347 atomic_set(&kgdb_active, -1);
90348@@ -654,8 +654,8 @@ cpu_master_loop:
90349 }
90350
90351 kgdb_restore:
90352- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
90353- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
90354+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
90355+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
90356 if (kgdb_info[sstep_cpu].task)
90357 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
90358 else
90359@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
90360 static void kgdb_tasklet_bpt(unsigned long ing)
90361 {
90362 kgdb_breakpoint();
90363- atomic_set(&kgdb_break_tasklet_var, 0);
90364+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
90365 }
90366
90367 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
90368
90369 void kgdb_schedule_breakpoint(void)
90370 {
90371- if (atomic_read(&kgdb_break_tasklet_var) ||
90372+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
90373 atomic_read(&kgdb_active) != -1 ||
90374 atomic_read(&kgdb_setting_breakpoint))
90375 return;
90376- atomic_inc(&kgdb_break_tasklet_var);
90377+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
90378 tasklet_schedule(&kgdb_tasklet_breakpoint);
90379 }
90380 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
90381diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
90382index 4121345..861e178 100644
90383--- a/kernel/debug/kdb/kdb_main.c
90384+++ b/kernel/debug/kdb/kdb_main.c
90385@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
90386 continue;
90387
90388 kdb_printf("%-20s%8u 0x%p ", mod->name,
90389- mod->core_size, (void *)mod);
90390+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
90391 #ifdef CONFIG_MODULE_UNLOAD
90392 kdb_printf("%4d ", module_refcount(mod));
90393 #endif
90394@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
90395 kdb_printf(" (Loading)");
90396 else
90397 kdb_printf(" (Live)");
90398- kdb_printf(" 0x%p", mod->module_core);
90399+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
90400
90401 #ifdef CONFIG_MODULE_UNLOAD
90402 {
90403diff --git a/kernel/events/core.c b/kernel/events/core.c
90404index 2fabc06..79cceec 100644
90405--- a/kernel/events/core.c
90406+++ b/kernel/events/core.c
90407@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
90408 * 0 - disallow raw tracepoint access for unpriv
90409 * 1 - disallow cpu events for unpriv
90410 * 2 - disallow kernel profiling for unpriv
90411+ * 3 - disallow all unpriv perf event use
90412 */
90413-int sysctl_perf_event_paranoid __read_mostly = 1;
90414+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90415+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
90416+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
90417+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
90418+#else
90419+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
90420+#endif
90421
90422 /* Minimum for 512 kiB + 1 user control page */
90423 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
90424@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
90425
90426 tmp *= sysctl_perf_cpu_time_max_percent;
90427 do_div(tmp, 100);
90428- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
90429+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
90430 }
90431
90432 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
90433@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
90434 }
90435 }
90436
90437-static atomic64_t perf_event_id;
90438+static atomic64_unchecked_t perf_event_id;
90439
90440 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
90441 enum event_type_t event_type);
90442@@ -3220,7 +3227,7 @@ static void __perf_event_read(void *info)
90443
90444 static inline u64 perf_event_count(struct perf_event *event)
90445 {
90446- return local64_read(&event->count) + atomic64_read(&event->child_count);
90447+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
90448 }
90449
90450 static u64 perf_event_read(struct perf_event *event)
90451@@ -3656,9 +3663,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
90452 mutex_lock(&event->child_mutex);
90453 total += perf_event_read(event);
90454 *enabled += event->total_time_enabled +
90455- atomic64_read(&event->child_total_time_enabled);
90456+ atomic64_read_unchecked(&event->child_total_time_enabled);
90457 *running += event->total_time_running +
90458- atomic64_read(&event->child_total_time_running);
90459+ atomic64_read_unchecked(&event->child_total_time_running);
90460
90461 list_for_each_entry(child, &event->child_list, child_list) {
90462 total += perf_event_read(child);
90463@@ -4147,10 +4154,10 @@ void perf_event_update_userpage(struct perf_event *event)
90464 userpg->offset -= local64_read(&event->hw.prev_count);
90465
90466 userpg->time_enabled = enabled +
90467- atomic64_read(&event->child_total_time_enabled);
90468+ atomic64_read_unchecked(&event->child_total_time_enabled);
90469
90470 userpg->time_running = running +
90471- atomic64_read(&event->child_total_time_running);
90472+ atomic64_read_unchecked(&event->child_total_time_running);
90473
90474 arch_perf_update_userpage(event, userpg, now);
90475
90476@@ -4740,7 +4747,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
90477
90478 /* Data. */
90479 sp = perf_user_stack_pointer(regs);
90480- rem = __output_copy_user(handle, (void *) sp, dump_size);
90481+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
90482 dyn_size = dump_size - rem;
90483
90484 perf_output_skip(handle, rem);
90485@@ -4831,11 +4838,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
90486 values[n++] = perf_event_count(event);
90487 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
90488 values[n++] = enabled +
90489- atomic64_read(&event->child_total_time_enabled);
90490+ atomic64_read_unchecked(&event->child_total_time_enabled);
90491 }
90492 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
90493 values[n++] = running +
90494- atomic64_read(&event->child_total_time_running);
90495+ atomic64_read_unchecked(&event->child_total_time_running);
90496 }
90497 if (read_format & PERF_FORMAT_ID)
90498 values[n++] = primary_event_id(event);
90499@@ -7180,7 +7187,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
90500 event->parent = parent_event;
90501
90502 event->ns = get_pid_ns(task_active_pid_ns(current));
90503- event->id = atomic64_inc_return(&perf_event_id);
90504+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
90505
90506 event->state = PERF_EVENT_STATE_INACTIVE;
90507
90508@@ -7470,6 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
90509 if (flags & ~PERF_FLAG_ALL)
90510 return -EINVAL;
90511
90512+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90513+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
90514+ return -EACCES;
90515+#endif
90516+
90517 err = perf_copy_attr(attr_uptr, &attr);
90518 if (err)
90519 return err;
90520@@ -7892,10 +7904,10 @@ static void sync_child_event(struct perf_event *child_event,
90521 /*
90522 * Add back the child's count to the parent's count:
90523 */
90524- atomic64_add(child_val, &parent_event->child_count);
90525- atomic64_add(child_event->total_time_enabled,
90526+ atomic64_add_unchecked(child_val, &parent_event->child_count);
90527+ atomic64_add_unchecked(child_event->total_time_enabled,
90528 &parent_event->child_total_time_enabled);
90529- atomic64_add(child_event->total_time_running,
90530+ atomic64_add_unchecked(child_event->total_time_running,
90531 &parent_event->child_total_time_running);
90532
90533 /*
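
PERF_HARDEN adds paranoia level 3 ("disallow all unpriv perf event use") on top of the stock 0-2 ladder and defaults to it, renaming the knob to sysctl_perf_event_legitimately_concerned; perf_event_open() then fails with -EACCES for callers without CAP_SYS_ADMIN before attr validation even starts. perf_paranoid_any() is not defined in this section; the sketch below assumes it means "level >= 3", which is how the open-path hunk reads:

#include <stdbool.h>
#include <stdio.h>

static int perf_event_paranoid = 3;   /* PERF_HARDEN default, per the hunk */

/* Assumed meaning of the helper; only the >2 case is load-bearing here. */
static bool perf_paranoid_any(void)  { return perf_event_paranoid > 2; }

static int perf_event_open_sketch(bool cap_sys_admin)
{
    if (perf_paranoid_any() && !cap_sys_admin)
        return -13;                    /* -EACCES, before attr validation */
    return 0;                          /* would go on to copy/validate attr */
}

int main(void)
{
    printf("unpriv: %d\n", perf_event_open_sketch(false));  /* -13 */
    printf("admin:  %d\n", perf_event_open_sketch(true));   /* 0 */
    return 0;
}
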
90534diff --git a/kernel/events/internal.h b/kernel/events/internal.h
90535index 569b2187..19940d9 100644
90536--- a/kernel/events/internal.h
90537+++ b/kernel/events/internal.h
90538@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
90539 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
90540 }
90541
90542-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
90543+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
90544 static inline unsigned long \
90545 func_name(struct perf_output_handle *handle, \
90546- const void *buf, unsigned long len) \
90547+ const void user *buf, unsigned long len) \
90548 { \
90549 unsigned long size, written; \
90550 \
90551@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
90552 return 0;
90553 }
90554
90555-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
90556+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
90557
90558 static inline unsigned long
90559 memcpy_skip(void *dst, const void *src, unsigned long n)
90560@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
90561 return 0;
90562 }
90563
90564-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
90565+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
90566
90567 #ifndef arch_perf_out_copy_user
90568 #define arch_perf_out_copy_user arch_perf_out_copy_user
90569@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
90570 }
90571 #endif
90572
90573-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
90574+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
90575
90576 /* Callchain handling */
90577 extern struct perf_callchain_entry *
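
The DEFINE_OUTPUT_COPY change is a sparse-annotation trick: the macro gains a third parameter that lands in the source pointer's type position, so kernel-to-kernel instantiations pass it empty while __output_copy_user passes __user, and each generated function gets the correct address-space type. The same trick in miniature, compilable with a plain compiler where __user is empty:

#include <stdio.h>
#include <string.h>

#define __user   /* empty for a plain compiler; sparse would define it */

/* Third macro argument becomes the pointer qualifier, exactly as in the
 * hunk above: empty for kernel sources, __user for user sources. */
#define DEFINE_OUTPUT_COPY(name, copy_fn, user)                     \
    static unsigned long name(void *dst, const void user *src,     \
                              unsigned long len)                    \
    { return copy_fn(dst, (const void *)src, len); }

static unsigned long memcpy_common(void *d, const void *s, unsigned long n)
{
    memcpy(d, s, n);
    return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )            /* kernel src */
DEFINE_OUTPUT_COPY(__output_copy_user, memcpy_common, __user) /* user src */

int main(void)
{
    char a[4] = "abc", b[4] = "";
    __output_copy(b, a, 4);
    __output_copy_user(b, a, 4);
    puts(b);
    return 0;
}
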
90578diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
90579index cb346f2..e4dc317 100644
90580--- a/kernel/events/uprobes.c
90581+++ b/kernel/events/uprobes.c
90582@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
90583 {
90584 struct page *page;
90585 uprobe_opcode_t opcode;
90586- int result;
90587+ long result;
90588
90589 pagefault_disable();
90590 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
90591diff --git a/kernel/exit.c b/kernel/exit.c
90592index feff10b..f623dd5 100644
90593--- a/kernel/exit.c
90594+++ b/kernel/exit.c
90595@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
90596 struct task_struct *leader;
90597 int zap_leader;
90598 repeat:
90599+#ifdef CONFIG_NET
90600+ gr_del_task_from_ip_table(p);
90601+#endif
90602+
90603 /* don't need to get the RCU readlock here - the process is dead and
90604 * can't be modifying its own credentials. But shut RCU-lockdep up */
90605 rcu_read_lock();
90606@@ -656,6 +660,8 @@ void do_exit(long code)
90607 int group_dead;
90608 TASKS_RCU(int tasks_rcu_i);
90609
90610+ set_fs(USER_DS);
90611+
90612 profile_task_exit(tsk);
90613
90614 WARN_ON(blk_needs_flush_plug(tsk));
90615@@ -672,7 +678,6 @@ void do_exit(long code)
90616 * mm_release()->clear_child_tid() from writing to a user-controlled
90617 * kernel address.
90618 */
90619- set_fs(USER_DS);
90620
90621 ptrace_event(PTRACE_EVENT_EXIT, code);
90622
90623@@ -730,6 +735,9 @@ void do_exit(long code)
90624 tsk->exit_code = code;
90625 taskstats_exit(tsk, group_dead);
90626
90627+ gr_acl_handle_psacct(tsk, code);
90628+ gr_acl_handle_exit();
90629+
90630 exit_mm(tsk);
90631
90632 if (group_dead)
90633@@ -849,7 +857,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
90634 * Take down every thread in the group. This is called by fatal signals
90635 * as well as by sys_exit_group (below).
90636 */
90637-void
90638+__noreturn void
90639 do_group_exit(int exit_code)
90640 {
90641 struct signal_struct *sig = current->signal;
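
Moving set_fs(USER_DS) to the top of do_exit() (it previously ran just before the ptrace exit event) closes a window in which an exiting task could still carry an elevated addr_limit, say after oopsing inside a set_fs(KERNEL_DS) section, so every uaccess later in the exit path is bounds-checked against userspace again; the __noreturn on do_group_exit() is just a compiler annotation matching its behaviour. A toy addr_limit model, using 64-bit example addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t addr_limit;
#define USER_DS_LIMIT   ((uintptr_t)0x00007fffffffffffULL)
#define KERNEL_DS_LIMIT ((uintptr_t)-1)

/* access_ok() shape: a "user" range must fit under the current limit. */
static bool access_ok_sketch(uintptr_t p, size_t n)
{
    return p <= addr_limit && n <= addr_limit - p;
}

static void do_exit_sketch(void)
{
    addr_limit = USER_DS_LIMIT;   /* first thing, as in the hunk above */
    printf("kernel addr passes? %d\n",
           access_ok_sketch((uintptr_t)0xffff880000000000ULL, 8));
}

int main(void)
{
    addr_limit = KERNEL_DS_LIMIT; /* pretend we died inside set_fs(KERNEL_DS) */
    do_exit_sketch();             /* prints 0: the window is closed */
    return 0;
}
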
90642diff --git a/kernel/fork.c b/kernel/fork.c
90643index cf65139..704476e 100644
90644--- a/kernel/fork.c
90645+++ b/kernel/fork.c
90646@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
90647 void thread_info_cache_init(void)
90648 {
90649 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
90650- THREAD_SIZE, 0, NULL);
90651+ THREAD_SIZE, SLAB_USERCOPY, NULL);
90652 BUG_ON(thread_info_cache == NULL);
90653 }
90654 # endif
90655 #endif
90656
90657+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90658+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90659+ int node, void **lowmem_stack)
90660+{
90661+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
90662+ void *ret = NULL;
90663+ unsigned int i;
90664+
90665+ *lowmem_stack = alloc_thread_info_node(tsk, node);
90666+ if (*lowmem_stack == NULL)
90667+ goto out;
90668+
90669+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
90670+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
90671+
90672+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
90673+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
90674+ if (ret == NULL) {
90675+ free_thread_info(*lowmem_stack);
90676+ *lowmem_stack = NULL;
90677+ }
90678+
90679+out:
90680+ return ret;
90681+}
90682+
90683+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90684+{
90685+ unmap_process_stacks(tsk);
90686+}
90687+#else
90688+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90689+ int node, void **lowmem_stack)
90690+{
90691+ return alloc_thread_info_node(tsk, node);
90692+}
90693+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90694+{
90695+ free_thread_info(ti);
90696+}
90697+#endif
90698+
90699 /* SLAB cache for signal_struct structures (tsk->signal) */
90700 static struct kmem_cache *signal_cachep;
90701
90702@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
90703 /* SLAB cache for mm_struct structures (tsk->mm) */
90704 static struct kmem_cache *mm_cachep;
90705
90706-static void account_kernel_stack(struct thread_info *ti, int account)
90707+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
90708 {
90709+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90710+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
90711+#else
90712 struct zone *zone = page_zone(virt_to_page(ti));
90713+#endif
90714
90715 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
90716 }
90717
90718 void free_task(struct task_struct *tsk)
90719 {
90720- account_kernel_stack(tsk->stack, -1);
90721+ account_kernel_stack(tsk, tsk->stack, -1);
90722 arch_release_thread_info(tsk->stack);
90723- free_thread_info(tsk->stack);
90724+ gr_free_thread_info(tsk, tsk->stack);
90725 rt_mutex_debug_task_free(tsk);
90726 ftrace_graph_exit_task(tsk);
90727 put_seccomp_filter(tsk);
90728@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90729 {
90730 struct task_struct *tsk;
90731 struct thread_info *ti;
90732+ void *lowmem_stack;
90733 int node = tsk_fork_get_node(orig);
90734 int err;
90735
90736@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90737 if (!tsk)
90738 return NULL;
90739
90740- ti = alloc_thread_info_node(tsk, node);
90741+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
90742 if (!ti)
90743 goto free_tsk;
90744
90745@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90746 goto free_ti;
90747
90748 tsk->stack = ti;
90749+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90750+ tsk->lowmem_stack = lowmem_stack;
90751+#endif
90752 #ifdef CONFIG_SECCOMP
90753 /*
90754 * We must handle setting up seccomp filters once we're under
90755@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90756 set_task_stack_end_magic(tsk);
90757
90758 #ifdef CONFIG_CC_STACKPROTECTOR
90759- tsk->stack_canary = get_random_int();
90760+ tsk->stack_canary = pax_get_random_long();
90761 #endif
90762
90763 /*
90764@@ -352,24 +402,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90765 tsk->splice_pipe = NULL;
90766 tsk->task_frag.page = NULL;
90767
90768- account_kernel_stack(ti, 1);
90769+ account_kernel_stack(tsk, ti, 1);
90770
90771 return tsk;
90772
90773 free_ti:
90774- free_thread_info(ti);
90775+ gr_free_thread_info(tsk, ti);
90776 free_tsk:
90777 free_task_struct(tsk);
90778 return NULL;
90779 }
90780
90781 #ifdef CONFIG_MMU
90782-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90783+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
90784+{
90785+ struct vm_area_struct *tmp;
90786+ unsigned long charge;
90787+ struct file *file;
90788+ int retval;
90789+
90790+ charge = 0;
90791+ if (mpnt->vm_flags & VM_ACCOUNT) {
90792+ unsigned long len = vma_pages(mpnt);
90793+
90794+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90795+ goto fail_nomem;
90796+ charge = len;
90797+ }
90798+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90799+ if (!tmp)
90800+ goto fail_nomem;
90801+ *tmp = *mpnt;
90802+ tmp->vm_mm = mm;
90803+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
90804+ retval = vma_dup_policy(mpnt, tmp);
90805+ if (retval)
90806+ goto fail_nomem_policy;
90807+ if (anon_vma_fork(tmp, mpnt))
90808+ goto fail_nomem_anon_vma_fork;
90809+ tmp->vm_flags &= ~VM_LOCKED;
90810+ tmp->vm_next = tmp->vm_prev = NULL;
90811+ tmp->vm_mirror = NULL;
90812+ file = tmp->vm_file;
90813+ if (file) {
90814+ struct inode *inode = file_inode(file);
90815+ struct address_space *mapping = file->f_mapping;
90816+
90817+ get_file(file);
90818+ if (tmp->vm_flags & VM_DENYWRITE)
90819+ atomic_dec(&inode->i_writecount);
90820+ i_mmap_lock_write(mapping);
90821+ if (tmp->vm_flags & VM_SHARED)
90822+ atomic_inc(&mapping->i_mmap_writable);
90823+ flush_dcache_mmap_lock(mapping);
90824+ /* insert tmp into the share list, just after mpnt */
90825+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
90826+ flush_dcache_mmap_unlock(mapping);
90827+ i_mmap_unlock_write(mapping);
90828+ }
90829+
90830+ /*
90831+ * Clear hugetlb-related page reserves for children. This only
90832+ * affects MAP_PRIVATE mappings. Faults generated by the child
90833+ * are not guaranteed to succeed, even if read-only
90834+ */
90835+ if (is_vm_hugetlb_page(tmp))
90836+ reset_vma_resv_huge_pages(tmp);
90837+
90838+ return tmp;
90839+
90840+fail_nomem_anon_vma_fork:
90841+ mpol_put(vma_policy(tmp));
90842+fail_nomem_policy:
90843+ kmem_cache_free(vm_area_cachep, tmp);
90844+fail_nomem:
90845+ vm_unacct_memory(charge);
90846+ return NULL;
90847+}
90848+
90849+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90850 {
90851 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
90852 struct rb_node **rb_link, *rb_parent;
90853 int retval;
90854- unsigned long charge;
90855
90856 uprobe_start_dup_mmap();
90857 down_write(&oldmm->mmap_sem);
90858@@ -397,51 +512,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90859
90860 prev = NULL;
90861 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
90862- struct file *file;
90863-
90864 if (mpnt->vm_flags & VM_DONTCOPY) {
90865 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
90866 -vma_pages(mpnt));
90867 continue;
90868 }
90869- charge = 0;
90870- if (mpnt->vm_flags & VM_ACCOUNT) {
90871- unsigned long len = vma_pages(mpnt);
90872-
90873- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90874- goto fail_nomem;
90875- charge = len;
90876- }
90877- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90878- if (!tmp)
90879- goto fail_nomem;
90880- *tmp = *mpnt;
90881- INIT_LIST_HEAD(&tmp->anon_vma_chain);
90882- retval = vma_dup_policy(mpnt, tmp);
90883- if (retval)
90884- goto fail_nomem_policy;
90885- tmp->vm_mm = mm;
90886- if (anon_vma_fork(tmp, mpnt))
90887- goto fail_nomem_anon_vma_fork;
90888- tmp->vm_flags &= ~VM_LOCKED;
90889- tmp->vm_next = tmp->vm_prev = NULL;
90890- file = tmp->vm_file;
90891- if (file) {
90892- struct inode *inode = file_inode(file);
90893- struct address_space *mapping = file->f_mapping;
90894-
90895- get_file(file);
90896- if (tmp->vm_flags & VM_DENYWRITE)
90897- atomic_dec(&inode->i_writecount);
90898- i_mmap_lock_write(mapping);
90899- if (tmp->vm_flags & VM_SHARED)
90900- atomic_inc(&mapping->i_mmap_writable);
90901- flush_dcache_mmap_lock(mapping);
90902- /* insert tmp into the share list, just after mpnt */
90903- vma_interval_tree_insert_after(tmp, mpnt,
90904- &mapping->i_mmap);
90905- flush_dcache_mmap_unlock(mapping);
90906- i_mmap_unlock_write(mapping);
90907+ tmp = dup_vma(mm, oldmm, mpnt);
90908+ if (!tmp) {
90909+ retval = -ENOMEM;
90910+ goto out;
90911 }
90912
90913 /*
90914@@ -473,6 +552,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90915 if (retval)
90916 goto out;
90917 }
90918+
90919+#ifdef CONFIG_PAX_SEGMEXEC
90920+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
90921+ struct vm_area_struct *mpnt_m;
90922+
90923+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
90924+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
90925+
90926+ if (!mpnt->vm_mirror)
90927+ continue;
90928+
90929+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
90930+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
90931+ mpnt->vm_mirror = mpnt_m;
90932+ } else {
90933+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
90934+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
90935+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
90936+ mpnt->vm_mirror->vm_mirror = mpnt;
90937+ }
90938+ }
90939+ BUG_ON(mpnt_m);
90940+ }
90941+#endif
90942+
90943 /* a new mm has just been created */
90944 arch_dup_mmap(oldmm, mm);
90945 retval = 0;
90946@@ -482,14 +586,6 @@ out:
90947 up_write(&oldmm->mmap_sem);
90948 uprobe_end_dup_mmap();
90949 return retval;
90950-fail_nomem_anon_vma_fork:
90951- mpol_put(vma_policy(tmp));
90952-fail_nomem_policy:
90953- kmem_cache_free(vm_area_cachep, tmp);
90954-fail_nomem:
90955- retval = -ENOMEM;
90956- vm_unacct_memory(charge);
90957- goto out;
90958 }
90959
90960 static inline int mm_alloc_pgd(struct mm_struct *mm)
90961@@ -739,8 +835,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
90962 return ERR_PTR(err);
90963
90964 mm = get_task_mm(task);
90965- if (mm && mm != current->mm &&
90966- !ptrace_may_access(task, mode)) {
90967+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
90968+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
90969 mmput(mm);
90970 mm = ERR_PTR(-EACCES);
90971 }
90972@@ -943,13 +1039,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
90973 spin_unlock(&fs->lock);
90974 return -EAGAIN;
90975 }
90976- fs->users++;
90977+ atomic_inc(&fs->users);
90978 spin_unlock(&fs->lock);
90979 return 0;
90980 }
90981 tsk->fs = copy_fs_struct(fs);
90982 if (!tsk->fs)
90983 return -ENOMEM;
90984+ /* Carry through gr_chroot_dentry and is_chrooted instead
90985+ of recomputing it here. Already copied when the task struct
90986+ is duplicated. This allows pivot_root to not be treated as
90987+ a chroot
90988+ */
90989+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
90990+
90991 return 0;
90992 }
90993
90994@@ -1187,7 +1290,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
90995 * parts of the process environment (as per the clone
90996 * flags). The actual kick-off is left to the caller.
90997 */
90998-static struct task_struct *copy_process(unsigned long clone_flags,
90999+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
91000 unsigned long stack_start,
91001 unsigned long stack_size,
91002 int __user *child_tidptr,
91003@@ -1258,6 +1361,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91004 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91005 #endif
91006 retval = -EAGAIN;
91007+
91008+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91009+
91010 if (atomic_read(&p->real_cred->user->processes) >=
91011 task_rlimit(p, RLIMIT_NPROC)) {
91012 if (p->real_cred->user != INIT_USER &&
91013@@ -1507,6 +1613,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91014 goto bad_fork_free_pid;
91015 }
91016
91017+ /* synchronizes with gr_set_acls()
91018+ we need to call this past the point of no return for fork()
91019+ */
91020+ gr_copy_label(p);
91021+
91022 if (likely(p->pid)) {
91023 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
91024
91025@@ -1597,6 +1708,8 @@ bad_fork_cleanup_count:
91026 bad_fork_free:
91027 free_task(p);
91028 fork_out:
91029+ gr_log_forkfail(retval);
91030+
91031 return ERR_PTR(retval);
91032 }
91033
91034@@ -1658,6 +1771,7 @@ long do_fork(unsigned long clone_flags,
91035
91036 p = copy_process(clone_flags, stack_start, stack_size,
91037 child_tidptr, NULL, trace);
91038+ add_latent_entropy();
91039 /*
91040 * Do this prior waking up the new thread - the thread pointer
91041 * might get invalid after that point, if the thread exits quickly.
91042@@ -1674,6 +1788,8 @@ long do_fork(unsigned long clone_flags,
91043 if (clone_flags & CLONE_PARENT_SETTID)
91044 put_user(nr, parent_tidptr);
91045
91046+ gr_handle_brute_check();
91047+
91048 if (clone_flags & CLONE_VFORK) {
91049 p->vfork_done = &vfork;
91050 init_completion(&vfork);
91051@@ -1792,7 +1908,7 @@ void __init proc_caches_init(void)
91052 mm_cachep = kmem_cache_create("mm_struct",
91053 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
91054 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
91055- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
91056+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
91057 mmap_init();
91058 nsproxy_cache_init();
91059 }
91060@@ -1832,7 +1948,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91061 return 0;
91062
91063 /* don't need lock here; in the worst case we'll do useless copy */
91064- if (fs->users == 1)
91065+ if (atomic_read(&fs->users) == 1)
91066 return 0;
91067
91068 *new_fsp = copy_fs_struct(fs);
91069@@ -1944,7 +2060,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91070 fs = current->fs;
91071 spin_lock(&fs->lock);
91072 current->fs = new_fs;
91073- if (--fs->users)
91074+ gr_set_chroot_entries(current, &current->fs->root);
91075+ if (atomic_dec_return(&fs->users))
91076 new_fs = NULL;
91077 else
91078 new_fs = fs;
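
Three separate changes share the fork.c diff. KSTACKOVERFLOW double-books the thread stack: the physically-contiguous lowmem allocation is kept (tsk->lowmem_stack, still what the zone accounting sees), but the stack actually used is a vmap() of those same pages, with VM_IOREMAP borrowed only for its THREAD_SIZE alignment per the comment, so the guard gaps between vmalloc areas turn a stack overrun into an immediate fault instead of silent corruption of a neighbouring allocation. The dup_mmap() loop body is factored out into a dup_vma() helper, behaviour-preserving apart from initialising the SEGMEXEC vm_mirror field it then fixes up. And fs->users becomes an atomic_t so grsecurity's chroot bookkeeping can consult it without the spinlock. The guard-gap benefit, modelled in userspace:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    size_t stack_sz = 4 * (size_t)pg;

    unsigned char *area = mmap(NULL, stack_sz + (size_t)pg,
                               PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (area == MAP_FAILED)
        return 1;

    /* The guard: one inaccessible page just past the stack, like the
     * gap a vmap'd stack gets for free between vmalloc areas. */
    mprotect(area + stack_sz, (size_t)pg, PROT_NONE);

    area[stack_sz - 1] = 0xaa;            /* last valid stack byte: fine */
    printf("guard page at %p; writing one byte past the stack would\n"
           "SIGSEGV here instead of corrupting a neighbour\n",
           (void *)(area + stack_sz));
    return munmap(area, stack_sz + (size_t)pg);
}
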
91079diff --git a/kernel/futex.c b/kernel/futex.c
91080index 2a5e383..878bac6 100644
91081--- a/kernel/futex.c
91082+++ b/kernel/futex.c
91083@@ -201,7 +201,7 @@ struct futex_pi_state {
91084 atomic_t refcount;
91085
91086 union futex_key key;
91087-};
91088+} __randomize_layout;
91089
91090 /**
91091 * struct futex_q - The hashed futex queue entry, one per waiting task
91092@@ -235,7 +235,7 @@ struct futex_q {
91093 struct rt_mutex_waiter *rt_waiter;
91094 union futex_key *requeue_pi_key;
91095 u32 bitset;
91096-};
91097+} __randomize_layout;
91098
91099 static const struct futex_q futex_q_init = {
91100 /* list gets initialized in queue_me()*/
91101@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91102 struct page *page, *page_head;
91103 int err, ro = 0;
91104
91105+#ifdef CONFIG_PAX_SEGMEXEC
91106+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91107+ return -EFAULT;
91108+#endif
91109+
91110 /*
91111 * The futex address must be "naturally" aligned.
91112 */
91113@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
91114
91115 static int get_futex_value_locked(u32 *dest, u32 __user *from)
91116 {
91117- int ret;
91118+ unsigned long ret;
91119
91120 pagefault_disable();
91121 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
91122@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
91123 {
91124 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
91125 u32 curval;
91126+ mm_segment_t oldfs;
91127
91128 /*
91129 * This will fail and we want it. Some arch implementations do
91130@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
91131 * implementation, the non-functional ones will return
91132 * -ENOSYS.
91133 */
91134+ oldfs = get_fs();
91135+ set_fs(USER_DS);
91136 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
91137 futex_cmpxchg_enabled = 1;
91138+ set_fs(oldfs);
91139 #endif
91140 }
91141
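
The futex.c changes are mostly defensive (a SEGMEXEC address-range reject in get_futex_key(), a widened return type in get_futex_value_locked() for the overflow instrumentation), but the futex_detect_cmpxchg() hunk is a correctness fix the hardening itself requires: the boot-time probe deliberately faults on a NULL "user" address to learn whether the arch implements the atomic op, and under strict user/kernel separation that fault is only classified as a user fault if the probe runs under set_fs(USER_DS). The save/probe/restore idiom, with stand-in types since the real mm_segment_t machinery is arch code:

#include <stdio.h>

typedef int mm_segment_t;
enum { USER_DS_SEG, KERNEL_DS_SEG };

static mm_segment_t cur_fs = KERNEL_DS_SEG;
static mm_segment_t get_fs(void)   { return cur_fs; }
static void set_fs(mm_segment_t s) { cur_fs = s; }

/* Stand-in for cmpxchg_futex_value_locked(..., NULL, 0, 0): the probe
 * only produces the expected -EFAULT when NULL counts as a user address. */
static int probe_null_cmpxchg(void) { return cur_fs == USER_DS_SEG ? -14 : 0; }

static int futex_cmpxchg_enabled;

static void futex_detect_cmpxchg_sketch(void)
{
    mm_segment_t oldfs = get_fs();       /* save */
    set_fs(USER_DS_SEG);
    if (probe_null_cmpxchg() == -14)     /* -EFAULT means "implemented" */
        futex_cmpxchg_enabled = 1;
    set_fs(oldfs);                       /* restore */
}

int main(void)
{
    futex_detect_cmpxchg_sketch();
    printf("futex_cmpxchg_enabled=%d\n", futex_cmpxchg_enabled);
    return 0;
}
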
91142diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91143index 55c8c93..9ba7ad6 100644
91144--- a/kernel/futex_compat.c
91145+++ b/kernel/futex_compat.c
91146@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
91147 return 0;
91148 }
91149
91150-static void __user *futex_uaddr(struct robust_list __user *entry,
91151+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
91152 compat_long_t futex_offset)
91153 {
91154 compat_uptr_t base = ptr_to_compat(entry);
91155diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91156index b358a80..fc25240 100644
91157--- a/kernel/gcov/base.c
91158+++ b/kernel/gcov/base.c
91159@@ -114,11 +114,6 @@ void gcov_enable_events(void)
91160 }
91161
91162 #ifdef CONFIG_MODULES
91163-static inline int within(void *addr, void *start, unsigned long size)
91164-{
91165- return ((addr >= start) && (addr < start + size));
91166-}
91167-
91168 /* Update list and generate events when modules are unloaded. */
91169 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91170 void *data)
91171@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91172
91173 /* Remove entries located in module from linked list. */
91174 while ((info = gcov_info_next(info))) {
91175- if (within(info, mod->module_core, mod->core_size)) {
91176+ if (within_module_core_rw((unsigned long)info, mod)) {
91177 gcov_info_unlink(prev, info);
91178 if (gcov_events_enabled)
91179 gcov_event(GCOV_REMOVE, info);
91180diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
91181index 886d09e..c7ff4e5 100644
91182--- a/kernel/irq/manage.c
91183+++ b/kernel/irq/manage.c
91184@@ -874,7 +874,7 @@ static int irq_thread(void *data)
91185
91186 action_ret = handler_fn(desc, action);
91187 if (action_ret == IRQ_HANDLED)
91188- atomic_inc(&desc->threads_handled);
91189+ atomic_inc_unchecked(&desc->threads_handled);
91190
91191 wake_threads_waitq(desc);
91192 }
91193diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
91194index e2514b0..de3dfe0 100644
91195--- a/kernel/irq/spurious.c
91196+++ b/kernel/irq/spurious.c
91197@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
91198 * count. We just care about the count being
91199 * different than the one we saw before.
91200 */
91201- handled = atomic_read(&desc->threads_handled);
91202+ handled = atomic_read_unchecked(&desc->threads_handled);
91203 handled |= SPURIOUS_DEFERRED;
91204 if (handled != desc->threads_handled_last) {
91205 action_ret = IRQ_HANDLED;
91206diff --git a/kernel/jump_label.c b/kernel/jump_label.c
91207index 9019f15..9a3c42e 100644
91208--- a/kernel/jump_label.c
91209+++ b/kernel/jump_label.c
91210@@ -14,6 +14,7 @@
91211 #include <linux/err.h>
91212 #include <linux/static_key.h>
91213 #include <linux/jump_label_ratelimit.h>
91214+#include <linux/mm.h>
91215
91216 #ifdef HAVE_JUMP_LABEL
91217
91218@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
91219
91220 size = (((unsigned long)stop - (unsigned long)start)
91221 / sizeof(struct jump_entry));
91222+ pax_open_kernel();
91223 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
91224+ pax_close_kernel();
91225 }
91226
91227 static void jump_label_update(struct static_key *key, int enable);
91228@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
91229 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
91230 struct jump_entry *iter;
91231
91232+ pax_open_kernel();
91233 for (iter = iter_start; iter < iter_stop; iter++) {
91234 if (within_module_init(iter->code, mod))
91235 iter->code = 0;
91236 }
91237+ pax_close_kernel();
91238 }
91239
91240 static int
91241diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91242index 5c5987f..bc502b0 100644
91243--- a/kernel/kallsyms.c
91244+++ b/kernel/kallsyms.c
91245@@ -11,6 +11,9 @@
91246 * Changed the compression method from stem compression to "table lookup"
91247 * compression (see scripts/kallsyms.c for a more complete description)
91248 */
91249+#ifdef CONFIG_GRKERNSEC_HIDESYM
91250+#define __INCLUDED_BY_HIDESYM 1
91251+#endif
91252 #include <linux/kallsyms.h>
91253 #include <linux/module.h>
91254 #include <linux/init.h>
91255@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
91256
91257 static inline int is_kernel_inittext(unsigned long addr)
91258 {
91259+ if (system_state != SYSTEM_BOOTING)
91260+ return 0;
91261+
91262 if (addr >= (unsigned long)_sinittext
91263 && addr <= (unsigned long)_einittext)
91264 return 1;
91265 return 0;
91266 }
91267
91268+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91269+#ifdef CONFIG_MODULES
91270+static inline int is_module_text(unsigned long addr)
91271+{
91272+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91273+ return 1;
91274+
91275+ addr = ktla_ktva(addr);
91276+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91277+}
91278+#else
91279+static inline int is_module_text(unsigned long addr)
91280+{
91281+ return 0;
91282+}
91283+#endif
91284+#endif
91285+
91286 static inline int is_kernel_text(unsigned long addr)
91287 {
91288 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91289@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
91290
91291 static inline int is_kernel(unsigned long addr)
91292 {
91293+
91294+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91295+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91296+ return 1;
91297+
91298+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91299+#else
91300 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91301+#endif
91302+
91303 return 1;
91304 return in_gate_area_no_mm(addr);
91305 }
91306
91307 static int is_ksym_addr(unsigned long addr)
91308 {
91309+
91310+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91311+ if (is_module_text(addr))
91312+ return 0;
91313+#endif
91314+
91315 if (all_var)
91316 return is_kernel(addr);
91317
91318@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91319
91320 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91321 {
91322- iter->name[0] = '\0';
91323 iter->nameoff = get_symbol_offset(new_pos);
91324 iter->pos = new_pos;
91325 }
91326@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
91327 {
91328 struct kallsym_iter *iter = m->private;
91329
91330+#ifdef CONFIG_GRKERNSEC_HIDESYM
91331+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
91332+ return 0;
91333+#endif
91334+
91335 /* Some debugging symbols have no name. Ignore them. */
91336 if (!iter->name[0])
91337 return 0;
91338@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
91339 */
91340 type = iter->exported ? toupper(iter->type) :
91341 tolower(iter->type);
91342+
91343 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
91344 type, iter->name, iter->module_name);
91345 } else
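
In the kallsyms hunks above, CONFIG_GRKERNSEC_HIDESYM makes s_show() return 0 for non-root readers: returning success while printing nothing means /proc/kallsyms simply reads as empty instead of failing. The same gate, modelled with hypothetical symbol data:

    #include <stdio.h>

    struct sym { unsigned long long value; char type; const char *name; };

    static int show_model(const struct sym *s, int viewer_is_root)
    {
        if (!viewer_is_root)
            return 0;                  /* emit nothing, but report success */
        printf("%016llx %c %s\n", s->value, s->type, s->name);
        return 0;
    }

    int main(void)
    {
        struct sym tab[] = {
            { 0xffffffff81000000ULL, 'T', "_stext" },
            { 0xffffffff81001234ULL, 't', "helper" },
        };
        for (int i = 0; i < 2; i++)
            show_model(&tab[i], 0);    /* unprivileged reader: empty output */
        return 0;
    }
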
91346diff --git a/kernel/kcmp.c b/kernel/kcmp.c
91347index 0aa69ea..a7fcafb 100644
91348--- a/kernel/kcmp.c
91349+++ b/kernel/kcmp.c
91350@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
91351 struct task_struct *task1, *task2;
91352 int ret;
91353
91354+#ifdef CONFIG_GRKERNSEC
91355+ return -ENOSYS;
91356+#endif
91357+
91358 rcu_read_lock();
91359
91360 /*
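
The kcmp hunk compiles the syscall body down to an unconditional -ENOSYS under CONFIG_GRKERNSEC, since comparing kernel object identity across processes can leak information about kernel pointers. A quick userspace probe (assuming <linux/kcmp.h> and SYS_kcmp are available on the build host) sees the same error as on a kernel without the syscall:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/kcmp.h>

    int main(void)
    {
        pid_t me = getpid();
        /* compare fd 0 of this process with itself */
        long r = syscall(SYS_kcmp, me, me, KCMP_FILE, 0, 0);

        if (r < 0)
            perror("kcmp");   /* "Function not implemented" when disabled */
        else
            printf("kcmp: fds identical? %s\n", r == 0 ? "yes" : "no");
        return 0;
    }
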
91361diff --git a/kernel/kexec.c b/kernel/kexec.c
91362index 38c25b1..12b3f69 100644
91363--- a/kernel/kexec.c
91364+++ b/kernel/kexec.c
91365@@ -1348,7 +1348,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
91366 compat_ulong_t, flags)
91367 {
91368 struct compat_kexec_segment in;
91369- struct kexec_segment out, __user *ksegments;
91370+ struct kexec_segment out;
91371+ struct kexec_segment __user *ksegments;
91372 unsigned long i, result;
91373
91374 /* Don't allow clients that don't understand the native
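
The kexec change is purely an annotation cleanup: in the combined declaration the __user qualifier binds only to the pointer declarator, which is easy to misread, so the patch splits it into two declarations. The same per-declarator binding shows up with ordinary C qualifiers:

    #include <stdio.h>

    int main(void)
    {
        int a = 2, *p = &a;        /* a is int; p is pointer-to-int */
        const int b = 1, *q = &b;  /* b is const int; q points to const int,
                                      but q itself is NOT const */
        q = &a;                    /* legal: only *q is read-only */
        (void)p;
        (void)b;
        printf("%d\n", *q);        /* 2 */
        return 0;
    }
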
91375diff --git a/kernel/kmod.c b/kernel/kmod.c
91376index 2777f40..a689506 100644
91377--- a/kernel/kmod.c
91378+++ b/kernel/kmod.c
91379@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
91380 kfree(info->argv);
91381 }
91382
91383-static int call_modprobe(char *module_name, int wait)
91384+static int call_modprobe(char *module_name, char *module_param, int wait)
91385 {
91386 struct subprocess_info *info;
91387 static char *envp[] = {
91388@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
91389 NULL
91390 };
91391
91392- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
91393+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
91394 if (!argv)
91395 goto out;
91396
91397@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
91398 argv[1] = "-q";
91399 argv[2] = "--";
91400 argv[3] = module_name; /* check free_modprobe_argv() */
91401- argv[4] = NULL;
91402+ argv[4] = module_param;
91403+ argv[5] = NULL;
91404
91405 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
91406 NULL, free_modprobe_argv, NULL);
91407@@ -122,9 +123,8 @@ out:
91408 * If module auto-loading support is disabled then this function
91409 * becomes a no-operation.
91410 */
91411-int __request_module(bool wait, const char *fmt, ...)
91412+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91413 {
91414- va_list args;
91415 char module_name[MODULE_NAME_LEN];
91416 unsigned int max_modprobes;
91417 int ret;
91418@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
91419 if (!modprobe_path[0])
91420 return 0;
91421
91422- va_start(args, fmt);
91423- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91424- va_end(args);
91425+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91426 if (ret >= MODULE_NAME_LEN)
91427 return -ENAMETOOLONG;
91428
91429@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
91430 if (ret)
91431 return ret;
91432
91433+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91434+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91435+ /* hack to work around consolekit/udisks stupidity */
91436+ read_lock(&tasklist_lock);
91437+ if (!strcmp(current->comm, "mount") &&
91438+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91439+ read_unlock(&tasklist_lock);
91440+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91441+ return -EPERM;
91442+ }
91443+ read_unlock(&tasklist_lock);
91444+ }
91445+#endif
91446+
91447 /* If modprobe needs a service that is in a module, we get a recursive
91448 * loop. Limit the number of running kmod threads to max_threads/2 or
91449 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91450@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
91451
91452 trace_module_request(module_name, wait, _RET_IP_);
91453
91454- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91455+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91456
91457 atomic_dec(&kmod_concurrent);
91458 return ret;
91459 }
91460+
91461+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91462+{
91463+ va_list args;
91464+ int ret;
91465+
91466+ va_start(args, fmt);
91467+ ret = ____request_module(wait, module_param, fmt, args);
91468+ va_end(args);
91469+
91470+ return ret;
91471+}
91472+
91473+int __request_module(bool wait, const char *fmt, ...)
91474+{
91475+ va_list args;
91476+ int ret;
91477+
91478+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91479+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91480+ char module_param[MODULE_NAME_LEN];
91481+
91482+ memset(module_param, 0, sizeof(module_param));
91483+
91484+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
91485+
91486+ va_start(args, fmt);
91487+ ret = ____request_module(wait, module_param, fmt, args);
91488+ va_end(args);
91489+
91490+ return ret;
91491+ }
91492+#endif
91493+
91494+ va_start(args, fmt);
91495+ ret = ____request_module(wait, NULL, fmt, args);
91496+ va_end(args);
91497+
91498+ return ret;
91499+}
91500+
91501 EXPORT_SYMBOL(__request_module);
91502 #endif /* CONFIG_MODULES */
91503
91504 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
91505 {
91506+#ifdef CONFIG_GRKERNSEC
91507+ kfree(info->path);
91508+ info->path = info->origpath;
91509+#endif
91510 if (info->cleanup)
91511 (*info->cleanup)(info);
91512 kfree(info);
91513@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
91514 */
91515 set_user_nice(current, 0);
91516
91517+#ifdef CONFIG_GRKERNSEC
91518+ /* this is race-free as far as userland is concerned as we copied
91519+ out the path to be used prior to this point and are now operating
91520+ on that copy
91521+ */
91522+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
91523+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
91524+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
91525+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
91526+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
91527+ retval = -EPERM;
91528+ goto out;
91529+ }
91530+#endif
91531+
91532 retval = -ENOMEM;
91533 new = prepare_kernel_cred(current);
91534 if (!new)
91535@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
91536 commit_creds(new);
91537
91538 retval = do_execve(getname_kernel(sub_info->path),
91539- (const char __user *const __user *)sub_info->argv,
91540- (const char __user *const __user *)sub_info->envp);
91541+ (const char __user *const __force_user *)sub_info->argv,
91542+ (const char __user *const __force_user *)sub_info->envp);
91543 out:
91544 sub_info->retval = retval;
91545 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
91546@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
91547 *
91548 * Thus the __user pointer cast is valid here.
91549 */
91550- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91551+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91552
91553 /*
91554 * If ret is 0, either ____call_usermodehelper failed and the
91555@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
91556 goto out;
91557
91558 INIT_WORK(&sub_info->work, __call_usermodehelper);
91559+#ifdef CONFIG_GRKERNSEC
91560+ sub_info->origpath = path;
91561+ sub_info->path = kstrdup(path, gfp_mask);
91562+#else
91563 sub_info->path = path;
91564+#endif
91565 sub_info->argv = argv;
91566 sub_info->envp = envp;
91567
91568@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
91569 static int proc_cap_handler(struct ctl_table *table, int write,
91570 void __user *buffer, size_t *lenp, loff_t *ppos)
91571 {
91572- struct ctl_table t;
91573+ ctl_table_no_const t;
91574 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
91575 kernel_cap_t new_cap;
91576 int err, i;
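
The kmod changes above thread an extra module_param argument through to modprobe. Under CONFIG_GRKERNSEC_MODHARDEN, a non-root caller of request_module() gets a synthetic marker of the form grsec_modharden_normal<uid>_ appended to the command line; load_module() later finds the marker in mod->args and rejects the load, logging the uid. A sketch of the argv that call_modprobe() now builds (paths and names illustrative):

    #include <stdio.h>

    int main(void)
    {
        const char *module_name = "dummy";
        unsigned uid = 1000;                 /* pretend non-root caller */
        char module_param[64];

        snprintf(module_param, sizeof(module_param),
                 "grsec_modharden_normal%u_", uid);

        const char *argv[6] = {
            "/sbin/modprobe", "-q", "--", module_name,
            module_param,                    /* argv[4]: the new slot */
            NULL
        };
        for (int i = 0; argv[i]; i++)
            printf("argv[%d] = %s\n", i, argv[i]);
        return 0;
    }
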
91577diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91578index c90e417..e6c515d 100644
91579--- a/kernel/kprobes.c
91580+++ b/kernel/kprobes.c
91581@@ -31,6 +31,9 @@
91582 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
91583 * <prasanna@in.ibm.com> added function-return probes.
91584 */
91585+#ifdef CONFIG_GRKERNSEC_HIDESYM
91586+#define __INCLUDED_BY_HIDESYM 1
91587+#endif
91588 #include <linux/kprobes.h>
91589 #include <linux/hash.h>
91590 #include <linux/init.h>
91591@@ -122,12 +125,12 @@ enum kprobe_slot_state {
91592
91593 static void *alloc_insn_page(void)
91594 {
91595- return module_alloc(PAGE_SIZE);
91596+ return module_alloc_exec(PAGE_SIZE);
91597 }
91598
91599 static void free_insn_page(void *page)
91600 {
91601- module_memfree(page);
91602+ module_memfree_exec(page);
91603 }
91604
91605 struct kprobe_insn_cache kprobe_insn_slots = {
91606@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
91607 kprobe_type = "k";
91608
91609 if (sym)
91610- seq_printf(pi, "%p %s %s+0x%x %s ",
91611+ seq_printf(pi, "%pK %s %s+0x%x %s ",
91612 p->addr, kprobe_type, sym, offset,
91613 (modname ? modname : " "));
91614 else
91615- seq_printf(pi, "%p %s %p ",
91616+ seq_printf(pi, "%pK %s %pK ",
91617 p->addr, kprobe_type, p->addr);
91618
91619 if (!pp)
91620diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
91621index 6683cce..daf8999 100644
91622--- a/kernel/ksysfs.c
91623+++ b/kernel/ksysfs.c
91624@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
91625 {
91626 if (count+1 > UEVENT_HELPER_PATH_LEN)
91627 return -ENOENT;
91628+ if (!capable(CAP_SYS_ADMIN))
91629+ return -EPERM;
91630 memcpy(uevent_helper, buf, count);
91631 uevent_helper[count] = '\0';
91632 if (count && uevent_helper[count-1] == '\n')
91633@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
91634 return count;
91635 }
91636
91637-static struct bin_attribute notes_attr = {
91638+static bin_attribute_no_const notes_attr __read_only = {
91639 .attr = {
91640 .name = "notes",
91641 .mode = S_IRUGO,
91642diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
91643index ba77ab5..d6a3e20 100644
91644--- a/kernel/locking/lockdep.c
91645+++ b/kernel/locking/lockdep.c
91646@@ -599,6 +599,10 @@ static int static_obj(void *obj)
91647 end = (unsigned long) &_end,
91648 addr = (unsigned long) obj;
91649
91650+#ifdef CONFIG_PAX_KERNEXEC
91651+ start = ktla_ktva(start);
91652+#endif
91653+
91654 /*
91655 * static variable?
91656 */
91657@@ -743,6 +747,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91658 if (!static_obj(lock->key)) {
91659 debug_locks_off();
91660 printk("INFO: trying to register non-static key.\n");
91661+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91662 printk("the code is fine but needs lockdep annotation.\n");
91663 printk("turning off the locking correctness validator.\n");
91664 dump_stack();
91665@@ -3088,7 +3093,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91666 if (!class)
91667 return 0;
91668 }
91669- atomic_inc((atomic_t *)&class->ops);
91670+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
91671 if (very_verbose(class)) {
91672 printk("\nacquire class [%p] %s", class->key, class->name);
91673 if (class->name_version > 1)
91674diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
91675index ef43ac4..2720dfa 100644
91676--- a/kernel/locking/lockdep_proc.c
91677+++ b/kernel/locking/lockdep_proc.c
91678@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
91679 return 0;
91680 }
91681
91682- seq_printf(m, "%p", class->key);
91683+ seq_printf(m, "%pK", class->key);
91684 #ifdef CONFIG_DEBUG_LOCKDEP
91685 seq_printf(m, " OPS:%8ld", class->ops);
91686 #endif
91687@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
91688
91689 list_for_each_entry(entry, &class->locks_after, entry) {
91690 if (entry->distance == 1) {
91691- seq_printf(m, " -> [%p] ", entry->class->key);
91692+ seq_printf(m, " -> [%pK] ", entry->class->key);
91693 print_name(m, entry->class);
91694 seq_puts(m, "\n");
91695 }
91696@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
91697 if (!class->key)
91698 continue;
91699
91700- seq_printf(m, "[%p] ", class->key);
91701+ seq_printf(m, "[%pK] ", class->key);
91702 print_name(m, class);
91703 seq_puts(m, "\n");
91704 }
91705@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91706 if (!i)
91707 seq_line(m, '-', 40-namelen, namelen);
91708
91709- snprintf(ip, sizeof(ip), "[<%p>]",
91710+ snprintf(ip, sizeof(ip), "[<%pK>]",
91711 (void *)class->contention_point[i]);
91712 seq_printf(m, "%40s %14lu %29s %pS\n",
91713 name, stats->contention_point[i],
91714@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91715 if (!i)
91716 seq_line(m, '-', 40-namelen, namelen);
91717
91718- snprintf(ip, sizeof(ip), "[<%p>]",
91719+ snprintf(ip, sizeof(ip), "[<%pK>]",
91720 (void *)class->contending_point[i]);
91721 seq_printf(m, "%40s %14lu %29s %pS\n",
91722 name, stats->contending_point[i],
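
The lockdep_proc hunks switch lock-class keys from %p to %pK, so the printed addresses honor kptr_restrict: unprivileged readers of the lockdep proc files see zeros. A simplified model of the decision %pK makes (the real logic also caches the reader's CAP_SYSLOG at open time):

    #include <stdio.h>

    static void print_pK_model(const void *p, int kptr_restrict, int cap_syslog)
    {
        if (kptr_restrict == 0 || (kptr_restrict == 1 && cap_syslog))
            printf("%p\n", p);
        else
            printf("0000000000000000\n"); /* hidden from this reader */
    }

    int main(void)
    {
        int x;
        print_pK_model(&x, 1, 0); /* unprivileged: masked */
        print_pK_model(&x, 1, 1); /* privileged: real address */
        return 0;
    }
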
91723diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
91724index d1fe2ba..180cd65e 100644
91725--- a/kernel/locking/mcs_spinlock.h
91726+++ b/kernel/locking/mcs_spinlock.h
91727@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
91728 */
91729 return;
91730 }
91731- ACCESS_ONCE(prev->next) = node;
91732+ ACCESS_ONCE_RW(prev->next) = node;
91733
91734 /* Wait until the lock holder passes the lock down. */
91735 arch_mcs_spin_lock_contended(&node->locked);
91736diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
91737index 3ef3736..9c951fa 100644
91738--- a/kernel/locking/mutex-debug.c
91739+++ b/kernel/locking/mutex-debug.c
91740@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
91741 }
91742
91743 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91744- struct thread_info *ti)
91745+ struct task_struct *task)
91746 {
91747 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
91748
91749 /* Mark the current thread as blocked on the lock: */
91750- ti->task->blocked_on = waiter;
91751+ task->blocked_on = waiter;
91752 }
91753
91754 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91755- struct thread_info *ti)
91756+ struct task_struct *task)
91757 {
91758 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
91759- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
91760- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
91761- ti->task->blocked_on = NULL;
91762+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
91763+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
91764+ task->blocked_on = NULL;
91765
91766 list_del_init(&waiter->list);
91767 waiter->task = NULL;
91768diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
91769index 0799fd3..d06ae3b 100644
91770--- a/kernel/locking/mutex-debug.h
91771+++ b/kernel/locking/mutex-debug.h
91772@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
91773 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
91774 extern void debug_mutex_add_waiter(struct mutex *lock,
91775 struct mutex_waiter *waiter,
91776- struct thread_info *ti);
91777+ struct task_struct *task);
91778 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91779- struct thread_info *ti);
91780+ struct task_struct *task);
91781 extern void debug_mutex_unlock(struct mutex *lock);
91782 extern void debug_mutex_init(struct mutex *lock, const char *name,
91783 struct lock_class_key *key);
91784diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
91785index 94674e5..de4966f 100644
91786--- a/kernel/locking/mutex.c
91787+++ b/kernel/locking/mutex.c
91788@@ -542,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
91789 goto skip_wait;
91790
91791 debug_mutex_lock_common(lock, &waiter);
91792- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
91793+ debug_mutex_add_waiter(lock, &waiter, task);
91794
91795 /* add waiting tasks to the end of the waitqueue (FIFO): */
91796 list_add_tail(&waiter.list, &lock->wait_list);
91797@@ -589,7 +589,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
91798 }
91799 __set_task_state(task, TASK_RUNNING);
91800
91801- mutex_remove_waiter(lock, &waiter, current_thread_info());
91802+ mutex_remove_waiter(lock, &waiter, task);
91803 /* set it to 0 if there are no waiters left: */
91804 if (likely(list_empty(&lock->wait_list)))
91805 atomic_set(&lock->count, 0);
91806@@ -610,7 +610,7 @@ skip_wait:
91807 return 0;
91808
91809 err:
91810- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
91811+ mutex_remove_waiter(lock, &waiter, task);
91812 spin_unlock_mutex(&lock->wait_lock, flags);
91813 debug_mutex_free_waiter(&waiter);
91814 mutex_release(&lock->dep_map, 1, ip);
91815diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
91816index c112d00..1946ad9 100644
91817--- a/kernel/locking/osq_lock.c
91818+++ b/kernel/locking/osq_lock.c
91819@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
91820
91821 prev = decode_cpu(old);
91822 node->prev = prev;
91823- ACCESS_ONCE(prev->next) = node;
91824+ ACCESS_ONCE_RW(prev->next) = node;
91825
91826 /*
91827 * Normally @prev is untouchable after the above store; because at that
91828@@ -170,8 +170,8 @@ unqueue:
91829 * it will wait in Step-A.
91830 */
91831
91832- ACCESS_ONCE(next->prev) = prev;
91833- ACCESS_ONCE(prev->next) = next;
91834+ ACCESS_ONCE_RW(next->prev) = prev;
91835+ ACCESS_ONCE_RW(prev->next) = next;
91836
91837 return false;
91838 }
91839@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
91840 node = this_cpu_ptr(&osq_node);
91841 next = xchg(&node->next, NULL);
91842 if (next) {
91843- ACCESS_ONCE(next->locked) = 1;
91844+ ACCESS_ONCE_RW(next->locked) = 1;
91845 return;
91846 }
91847
91848 next = osq_wait_next(lock, node, NULL);
91849 if (next)
91850- ACCESS_ONCE(next->locked) = 1;
91851+ ACCESS_ONCE_RW(next->locked) = 1;
91852 }
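
The mcs_spinlock and osq_lock hunks above rename write-side ACCESS_ONCE() uses to ACCESS_ONCE_RW(), a grsecurity spelling that stays writable when the constify plugin has made the plain macro's target const. Both expand to the volatile-cast idiom the stock kernels of this era used; in this model the two are identical, since the difference only exists under the plugin:

    #include <stdio.h>

    #define ACCESS_ONCE_MODEL(x)    (*(volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW_MODEL(x) (*(volatile __typeof__(x) *)&(x))

    struct node { struct node *next; int locked; };

    int main(void)
    {
        struct node a = { 0, 0 }, b = { 0, 0 };

        ACCESS_ONCE_RW_MODEL(a.next) = &b;   /* single, untorn store */
        ACCESS_ONCE_RW_MODEL(b.locked) = 1;
        printf("%p %d\n", (void *)ACCESS_ONCE_MODEL(a.next), b.locked);
        return 0;
    }
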
91853diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
91854index 1d96dd0..994ff19 100644
91855--- a/kernel/locking/rtmutex-tester.c
91856+++ b/kernel/locking/rtmutex-tester.c
91857@@ -22,7 +22,7 @@
91858 #define MAX_RT_TEST_MUTEXES 8
91859
91860 static spinlock_t rttest_lock;
91861-static atomic_t rttest_event;
91862+static atomic_unchecked_t rttest_event;
91863
91864 struct test_thread_data {
91865 int opcode;
91866@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91867
91868 case RTTEST_LOCKCONT:
91869 td->mutexes[td->opdata] = 1;
91870- td->event = atomic_add_return(1, &rttest_event);
91871+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91872 return 0;
91873
91874 case RTTEST_RESET:
91875@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91876 return 0;
91877
91878 case RTTEST_RESETEVENT:
91879- atomic_set(&rttest_event, 0);
91880+ atomic_set_unchecked(&rttest_event, 0);
91881 return 0;
91882
91883 default:
91884@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91885 return ret;
91886
91887 td->mutexes[id] = 1;
91888- td->event = atomic_add_return(1, &rttest_event);
91889+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91890 rt_mutex_lock(&mutexes[id]);
91891- td->event = atomic_add_return(1, &rttest_event);
91892+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91893 td->mutexes[id] = 4;
91894 return 0;
91895
91896@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91897 return ret;
91898
91899 td->mutexes[id] = 1;
91900- td->event = atomic_add_return(1, &rttest_event);
91901+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91902 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
91903- td->event = atomic_add_return(1, &rttest_event);
91904+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91905 td->mutexes[id] = ret ? 0 : 4;
91906 return ret ? -EINTR : 0;
91907
91908@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91909 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
91910 return ret;
91911
91912- td->event = atomic_add_return(1, &rttest_event);
91913+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91914 rt_mutex_unlock(&mutexes[id]);
91915- td->event = atomic_add_return(1, &rttest_event);
91916+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91917 td->mutexes[id] = 0;
91918 return 0;
91919
91920@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91921 break;
91922
91923 td->mutexes[dat] = 2;
91924- td->event = atomic_add_return(1, &rttest_event);
91925+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91926 break;
91927
91928 default:
91929@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91930 return;
91931
91932 td->mutexes[dat] = 3;
91933- td->event = atomic_add_return(1, &rttest_event);
91934+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91935 break;
91936
91937 case RTTEST_LOCKNOWAIT:
91938@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91939 return;
91940
91941 td->mutexes[dat] = 1;
91942- td->event = atomic_add_return(1, &rttest_event);
91943+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91944 return;
91945
91946 default:
91947diff --git a/kernel/module.c b/kernel/module.c
91948index ec53f59..67d9655 100644
91949--- a/kernel/module.c
91950+++ b/kernel/module.c
91951@@ -59,6 +59,7 @@
91952 #include <linux/jump_label.h>
91953 #include <linux/pfn.h>
91954 #include <linux/bsearch.h>
91955+#include <linux/grsecurity.h>
91956 #include <uapi/linux/module.h>
91957 #include "module-internal.h"
91958
91959@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91960
91961 /* Bounds of module allocation, for speeding __module_address.
91962 * Protected by module_mutex. */
91963-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91964+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91965+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91966
91967 int register_module_notifier(struct notifier_block *nb)
91968 {
91969@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91970 return true;
91971
91972 list_for_each_entry_rcu(mod, &modules, list) {
91973- struct symsearch arr[] = {
91974+ struct symsearch modarr[] = {
91975 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91976 NOT_GPL_ONLY, false },
91977 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91978@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91979 if (mod->state == MODULE_STATE_UNFORMED)
91980 continue;
91981
91982- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91983+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91984 return true;
91985 }
91986 return false;
91987@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
91988 if (!pcpusec->sh_size)
91989 return 0;
91990
91991- if (align > PAGE_SIZE) {
91992+ if (align-1 >= PAGE_SIZE) {
91993 pr_warn("%s: per-cpu alignment %li > %li\n",
91994 mod->name, align, PAGE_SIZE);
91995 align = PAGE_SIZE;
91996@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
91997 static ssize_t show_coresize(struct module_attribute *mattr,
91998 struct module_kobject *mk, char *buffer)
91999 {
92000- return sprintf(buffer, "%u\n", mk->mod->core_size);
92001+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
92002 }
92003
92004 static struct module_attribute modinfo_coresize =
92005@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
92006 static ssize_t show_initsize(struct module_attribute *mattr,
92007 struct module_kobject *mk, char *buffer)
92008 {
92009- return sprintf(buffer, "%u\n", mk->mod->init_size);
92010+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
92011 }
92012
92013 static struct module_attribute modinfo_initsize =
92014@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
92015 goto bad_version;
92016 }
92017
92018+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92019+ /*
92020+ * avoid potentially printing gibberish on attempted load
92021+ * of a module randomized with a different seed
92022+ */
92023+ pr_warn("no symbol version for %s\n", symname);
92024+#else
92025 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
92026+#endif
92027 return 0;
92028
92029 bad_version:
92030+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92031+ /*
92032+ * avoid potentially printing jibberish on attempted load
92033+ * of a module randomized with a different seed
92034+ */
92035+ pr_warn("attempted module disagrees about version of symbol %s\n",
92036+ symname);
92037+#else
92038 pr_warn("%s: disagrees about version of symbol %s\n",
92039 mod->name, symname);
92040+#endif
92041 return 0;
92042 }
92043
92044@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
92045 */
92046 #ifdef CONFIG_SYSFS
92047
92048-#ifdef CONFIG_KALLSYMS
92049+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
92050 static inline bool sect_empty(const Elf_Shdr *sect)
92051 {
92052 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
92053@@ -1419,7 +1438,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
92054 {
92055 unsigned int notes, loaded, i;
92056 struct module_notes_attrs *notes_attrs;
92057- struct bin_attribute *nattr;
92058+ bin_attribute_no_const *nattr;
92059
92060 /* failed to create section attributes, so can't create notes */
92061 if (!mod->sect_attrs)
92062@@ -1531,7 +1550,7 @@ static void del_usage_links(struct module *mod)
92063 static int module_add_modinfo_attrs(struct module *mod)
92064 {
92065 struct module_attribute *attr;
92066- struct module_attribute *temp_attr;
92067+ module_attribute_no_const *temp_attr;
92068 int error = 0;
92069 int i;
92070
92071@@ -1741,21 +1760,21 @@ static void set_section_ro_nx(void *base,
92072
92073 static void unset_module_core_ro_nx(struct module *mod)
92074 {
92075- set_page_attributes(mod->module_core + mod->core_text_size,
92076- mod->module_core + mod->core_size,
92077+ set_page_attributes(mod->module_core_rw,
92078+ mod->module_core_rw + mod->core_size_rw,
92079 set_memory_x);
92080- set_page_attributes(mod->module_core,
92081- mod->module_core + mod->core_ro_size,
92082+ set_page_attributes(mod->module_core_rx,
92083+ mod->module_core_rx + mod->core_size_rx,
92084 set_memory_rw);
92085 }
92086
92087 static void unset_module_init_ro_nx(struct module *mod)
92088 {
92089- set_page_attributes(mod->module_init + mod->init_text_size,
92090- mod->module_init + mod->init_size,
92091+ set_page_attributes(mod->module_init_rw,
92092+ mod->module_init_rw + mod->init_size_rw,
92093 set_memory_x);
92094- set_page_attributes(mod->module_init,
92095- mod->module_init + mod->init_ro_size,
92096+ set_page_attributes(mod->module_init_rx,
92097+ mod->module_init_rx + mod->init_size_rx,
92098 set_memory_rw);
92099 }
92100
92101@@ -1768,14 +1787,14 @@ void set_all_modules_text_rw(void)
92102 list_for_each_entry_rcu(mod, &modules, list) {
92103 if (mod->state == MODULE_STATE_UNFORMED)
92104 continue;
92105- if ((mod->module_core) && (mod->core_text_size)) {
92106- set_page_attributes(mod->module_core,
92107- mod->module_core + mod->core_text_size,
92108+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92109+ set_page_attributes(mod->module_core_rx,
92110+ mod->module_core_rx + mod->core_size_rx,
92111 set_memory_rw);
92112 }
92113- if ((mod->module_init) && (mod->init_text_size)) {
92114- set_page_attributes(mod->module_init,
92115- mod->module_init + mod->init_text_size,
92116+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92117+ set_page_attributes(mod->module_init_rx,
92118+ mod->module_init_rx + mod->init_size_rx,
92119 set_memory_rw);
92120 }
92121 }
92122@@ -1791,14 +1810,14 @@ void set_all_modules_text_ro(void)
92123 list_for_each_entry_rcu(mod, &modules, list) {
92124 if (mod->state == MODULE_STATE_UNFORMED)
92125 continue;
92126- if ((mod->module_core) && (mod->core_text_size)) {
92127- set_page_attributes(mod->module_core,
92128- mod->module_core + mod->core_text_size,
92129+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92130+ set_page_attributes(mod->module_core_rx,
92131+ mod->module_core_rx + mod->core_size_rx,
92132 set_memory_ro);
92133 }
92134- if ((mod->module_init) && (mod->init_text_size)) {
92135- set_page_attributes(mod->module_init,
92136- mod->module_init + mod->init_text_size,
92137+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92138+ set_page_attributes(mod->module_init_rx,
92139+ mod->module_init_rx + mod->init_size_rx,
92140 set_memory_ro);
92141 }
92142 }
92143@@ -1807,7 +1826,15 @@ void set_all_modules_text_ro(void)
92144 #else
92145 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
92146 static void unset_module_core_ro_nx(struct module *mod) { }
92147-static void unset_module_init_ro_nx(struct module *mod) { }
92148+static void unset_module_init_ro_nx(struct module *mod)
92149+{
92150+
92151+#ifdef CONFIG_PAX_KERNEXEC
92152+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
92153+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
92154+#endif
92155+
92156+}
92157 #endif
92158
92159 void __weak module_memfree(void *module_region)
92160@@ -1861,16 +1888,19 @@ static void free_module(struct module *mod)
92161 /* This may be NULL, but that's OK */
92162 unset_module_init_ro_nx(mod);
92163 module_arch_freeing_init(mod);
92164- module_memfree(mod->module_init);
92165+ module_memfree(mod->module_init_rw);
92166+ module_memfree_exec(mod->module_init_rx);
92167 kfree(mod->args);
92168 percpu_modfree(mod);
92169
92170 /* Free lock-classes; relies on the preceding sync_rcu(). */
92171- lockdep_free_key_range(mod->module_core, mod->core_size);
92172+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92173+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92174
92175 /* Finally, free the core (containing the module structure) */
92176 unset_module_core_ro_nx(mod);
92177- module_memfree(mod->module_core);
92178+ module_memfree_exec(mod->module_core_rx);
92179+ module_memfree(mod->module_core_rw);
92180
92181 #ifdef CONFIG_MPU
92182 update_protections(current->mm);
92183@@ -1939,9 +1969,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92184 int ret = 0;
92185 const struct kernel_symbol *ksym;
92186
92187+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92188+ int is_fs_load = 0;
92189+ int register_filesystem_found = 0;
92190+ char *p;
92191+
92192+ p = strstr(mod->args, "grsec_modharden_fs");
92193+ if (p) {
92194+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
92195+ /* copy \0 as well */
92196+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92197+ is_fs_load = 1;
92198+ }
92199+#endif
92200+
92201 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
92202 const char *name = info->strtab + sym[i].st_name;
92203
92204+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92205+ /* it's a real shame this will never get ripped and copied
92206+ upstream! ;(
92207+ */
92208+ if (is_fs_load && !strcmp(name, "register_filesystem"))
92209+ register_filesystem_found = 1;
92210+#endif
92211+
92212 switch (sym[i].st_shndx) {
92213 case SHN_COMMON:
92214 /* Ignore common symbols */
92215@@ -1966,7 +2018,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92216 ksym = resolve_symbol_wait(mod, info, name);
92217 /* Ok if resolved. */
92218 if (ksym && !IS_ERR(ksym)) {
92219+ pax_open_kernel();
92220 sym[i].st_value = ksym->value;
92221+ pax_close_kernel();
92222 break;
92223 }
92224
92225@@ -1985,11 +2039,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92226 secbase = (unsigned long)mod_percpu(mod);
92227 else
92228 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
92229+ pax_open_kernel();
92230 sym[i].st_value += secbase;
92231+ pax_close_kernel();
92232 break;
92233 }
92234 }
92235
92236+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92237+ if (is_fs_load && !register_filesystem_found) {
92238+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92239+ ret = -EPERM;
92240+ }
92241+#endif
92242+
92243 return ret;
92244 }
92245
92246@@ -2073,22 +2136,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
92247 || s->sh_entsize != ~0UL
92248 || strstarts(sname, ".init"))
92249 continue;
92250- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92251+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92252+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92253+ else
92254+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92255 pr_debug("\t%s\n", sname);
92256 }
92257- switch (m) {
92258- case 0: /* executable */
92259- mod->core_size = debug_align(mod->core_size);
92260- mod->core_text_size = mod->core_size;
92261- break;
92262- case 1: /* RO: text and ro-data */
92263- mod->core_size = debug_align(mod->core_size);
92264- mod->core_ro_size = mod->core_size;
92265- break;
92266- case 3: /* whole core */
92267- mod->core_size = debug_align(mod->core_size);
92268- break;
92269- }
92270 }
92271
92272 pr_debug("Init section allocation order:\n");
92273@@ -2102,23 +2155,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
92274 || s->sh_entsize != ~0UL
92275 || !strstarts(sname, ".init"))
92276 continue;
92277- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92278- | INIT_OFFSET_MASK);
92279+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92280+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92281+ else
92282+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92283+ s->sh_entsize |= INIT_OFFSET_MASK;
92284 pr_debug("\t%s\n", sname);
92285 }
92286- switch (m) {
92287- case 0: /* executable */
92288- mod->init_size = debug_align(mod->init_size);
92289- mod->init_text_size = mod->init_size;
92290- break;
92291- case 1: /* RO: text and ro-data */
92292- mod->init_size = debug_align(mod->init_size);
92293- mod->init_ro_size = mod->init_size;
92294- break;
92295- case 3: /* whole init */
92296- mod->init_size = debug_align(mod->init_size);
92297- break;
92298- }
92299 }
92300 }
92301
92302@@ -2291,7 +2334,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92303
92304 /* Put symbol section at end of init part of module. */
92305 symsect->sh_flags |= SHF_ALLOC;
92306- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92307+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92308 info->index.sym) | INIT_OFFSET_MASK;
92309 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
92310
92311@@ -2308,16 +2351,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92312 }
92313
92314 /* Append room for core symbols at end of core part. */
92315- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92316- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
92317- mod->core_size += strtab_size;
92318- mod->core_size = debug_align(mod->core_size);
92319+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92320+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
92321+ mod->core_size_rx += strtab_size;
92322+ mod->core_size_rx = debug_align(mod->core_size_rx);
92323
92324 /* Put string table section at end of init part of module. */
92325 strsect->sh_flags |= SHF_ALLOC;
92326- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92327+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92328 info->index.str) | INIT_OFFSET_MASK;
92329- mod->init_size = debug_align(mod->init_size);
92330+ mod->init_size_rx = debug_align(mod->init_size_rx);
92331 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
92332 }
92333
92334@@ -2334,12 +2377,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92335 /* Make sure we get permanent strtab: don't use info->strtab. */
92336 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
92337
92338+ pax_open_kernel();
92339+
92340 /* Set types up while we still have access to sections. */
92341 for (i = 0; i < mod->num_symtab; i++)
92342 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
92343
92344- mod->core_symtab = dst = mod->module_core + info->symoffs;
92345- mod->core_strtab = s = mod->module_core + info->stroffs;
92346+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
92347+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
92348 src = mod->symtab;
92349 for (ndst = i = 0; i < mod->num_symtab; i++) {
92350 if (i == 0 ||
92351@@ -2351,6 +2396,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92352 }
92353 }
92354 mod->core_num_syms = ndst;
92355+
92356+ pax_close_kernel();
92357 }
92358 #else
92359 static inline void layout_symtab(struct module *mod, struct load_info *info)
92360@@ -2384,17 +2431,33 @@ void * __weak module_alloc(unsigned long size)
92361 return vmalloc_exec(size);
92362 }
92363
92364-static void *module_alloc_update_bounds(unsigned long size)
92365+static void *module_alloc_update_bounds_rw(unsigned long size)
92366 {
92367 void *ret = module_alloc(size);
92368
92369 if (ret) {
92370 mutex_lock(&module_mutex);
92371 /* Update module bounds. */
92372- if ((unsigned long)ret < module_addr_min)
92373- module_addr_min = (unsigned long)ret;
92374- if ((unsigned long)ret + size > module_addr_max)
92375- module_addr_max = (unsigned long)ret + size;
92376+ if ((unsigned long)ret < module_addr_min_rw)
92377+ module_addr_min_rw = (unsigned long)ret;
92378+ if ((unsigned long)ret + size > module_addr_max_rw)
92379+ module_addr_max_rw = (unsigned long)ret + size;
92380+ mutex_unlock(&module_mutex);
92381+ }
92382+ return ret;
92383+}
92384+
92385+static void *module_alloc_update_bounds_rx(unsigned long size)
92386+{
92387+ void *ret = module_alloc_exec(size);
92388+
92389+ if (ret) {
92390+ mutex_lock(&module_mutex);
92391+ /* Update module bounds. */
92392+ if ((unsigned long)ret < module_addr_min_rx)
92393+ module_addr_min_rx = (unsigned long)ret;
92394+ if ((unsigned long)ret + size > module_addr_max_rx)
92395+ module_addr_max_rx = (unsigned long)ret + size;
92396 mutex_unlock(&module_mutex);
92397 }
92398 return ret;
92399@@ -2665,7 +2728,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92400 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
92401
92402 if (info->index.sym == 0) {
92403+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92404+ /*
92405+ * avoid potentially printing gibberish on attempted load
92406+ * of a module randomized with a different seed
92407+ */
92408+ pr_warn("module has no symbols (stripped?)\n");
92409+#else
92410 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
92411+#endif
92412 return ERR_PTR(-ENOEXEC);
92413 }
92414
92415@@ -2681,8 +2752,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92416 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92417 {
92418 const char *modmagic = get_modinfo(info, "vermagic");
92419+ const char *license = get_modinfo(info, "license");
92420 int err;
92421
92422+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92423+ if (!license || !license_is_gpl_compatible(license))
92424+ return -ENOEXEC;
92425+#endif
92426+
92427 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
92428 modmagic = NULL;
92429
92430@@ -2707,7 +2784,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92431 }
92432
92433 /* Set up license info based on the info section */
92434- set_license(mod, get_modinfo(info, "license"));
92435+ set_license(mod, license);
92436
92437 return 0;
92438 }
92439@@ -2801,7 +2878,7 @@ static int move_module(struct module *mod, struct load_info *info)
92440 void *ptr;
92441
92442 /* Do the allocs. */
92443- ptr = module_alloc_update_bounds(mod->core_size);
92444+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92445 /*
92446 * The pointer to this block is stored in the module structure
92447 * which is inside the block. Just mark it as not being a
92448@@ -2811,11 +2888,11 @@ static int move_module(struct module *mod, struct load_info *info)
92449 if (!ptr)
92450 return -ENOMEM;
92451
92452- memset(ptr, 0, mod->core_size);
92453- mod->module_core = ptr;
92454+ memset(ptr, 0, mod->core_size_rw);
92455+ mod->module_core_rw = ptr;
92456
92457- if (mod->init_size) {
92458- ptr = module_alloc_update_bounds(mod->init_size);
92459+ if (mod->init_size_rw) {
92460+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92461 /*
92462 * The pointer to this block is stored in the module structure
92463 * which is inside the block. This block doesn't need to be
92464@@ -2824,13 +2901,45 @@ static int move_module(struct module *mod, struct load_info *info)
92465 */
92466 kmemleak_ignore(ptr);
92467 if (!ptr) {
92468- module_memfree(mod->module_core);
92469+ module_memfree(mod->module_core_rw);
92470 return -ENOMEM;
92471 }
92472- memset(ptr, 0, mod->init_size);
92473- mod->module_init = ptr;
92474+ memset(ptr, 0, mod->init_size_rw);
92475+ mod->module_init_rw = ptr;
92476 } else
92477- mod->module_init = NULL;
92478+ mod->module_init_rw = NULL;
92479+
92480+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92481+ kmemleak_not_leak(ptr);
92482+ if (!ptr) {
92483+ if (mod->module_init_rw)
92484+ module_memfree(mod->module_init_rw);
92485+ module_memfree(mod->module_core_rw);
92486+ return -ENOMEM;
92487+ }
92488+
92489+ pax_open_kernel();
92490+ memset(ptr, 0, mod->core_size_rx);
92491+ pax_close_kernel();
92492+ mod->module_core_rx = ptr;
92493+
92494+ if (mod->init_size_rx) {
92495+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92496+ kmemleak_ignore(ptr);
92497+ if (!ptr && mod->init_size_rx) {
92498+ module_memfree_exec(mod->module_core_rx);
92499+ if (mod->module_init_rw)
92500+ module_memfree(mod->module_init_rw);
92501+ module_memfree(mod->module_core_rw);
92502+ return -ENOMEM;
92503+ }
92504+
92505+ pax_open_kernel();
92506+ memset(ptr, 0, mod->init_size_rx);
92507+ pax_close_kernel();
92508+ mod->module_init_rx = ptr;
92509+ } else
92510+ mod->module_init_rx = NULL;
92511
92512 /* Transfer each section which specifies SHF_ALLOC */
92513 pr_debug("final section addresses:\n");
92514@@ -2841,16 +2950,45 @@ static int move_module(struct module *mod, struct load_info *info)
92515 if (!(shdr->sh_flags & SHF_ALLOC))
92516 continue;
92517
92518- if (shdr->sh_entsize & INIT_OFFSET_MASK)
92519- dest = mod->module_init
92520- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92521- else
92522- dest = mod->module_core + shdr->sh_entsize;
92523+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
92524+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92525+ dest = mod->module_init_rw
92526+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92527+ else
92528+ dest = mod->module_init_rx
92529+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92530+ } else {
92531+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92532+ dest = mod->module_core_rw + shdr->sh_entsize;
92533+ else
92534+ dest = mod->module_core_rx + shdr->sh_entsize;
92535+ }
92536+
92537+ if (shdr->sh_type != SHT_NOBITS) {
92538+
92539+#ifdef CONFIG_PAX_KERNEXEC
92540+#ifdef CONFIG_X86_64
92541+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
92542+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92543+#endif
92544+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
92545+ pax_open_kernel();
92546+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92547+ pax_close_kernel();
92548+ } else
92549+#endif
92550
92551- if (shdr->sh_type != SHT_NOBITS)
92552 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92553+ }
92554 /* Update sh_addr to point to copy in image. */
92555- shdr->sh_addr = (unsigned long)dest;
92556+
92557+#ifdef CONFIG_PAX_KERNEXEC
92558+ if (shdr->sh_flags & SHF_EXECINSTR)
92559+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
92560+ else
92561+#endif
92562+
92563+ shdr->sh_addr = (unsigned long)dest;
92564 pr_debug("\t0x%lx %s\n",
92565 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
92566 }
92567@@ -2907,12 +3045,12 @@ static void flush_module_icache(const struct module *mod)
92568 * Do it before processing of module parameters, so the module
92569 * can provide parameter accessor functions of its own.
92570 */
92571- if (mod->module_init)
92572- flush_icache_range((unsigned long)mod->module_init,
92573- (unsigned long)mod->module_init
92574- + mod->init_size);
92575- flush_icache_range((unsigned long)mod->module_core,
92576- (unsigned long)mod->module_core + mod->core_size);
92577+ if (mod->module_init_rx)
92578+ flush_icache_range((unsigned long)mod->module_init_rx,
92579+ (unsigned long)mod->module_init_rx
92580+ + mod->init_size_rx);
92581+ flush_icache_range((unsigned long)mod->module_core_rx,
92582+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92583
92584 set_fs(old_fs);
92585 }
92586@@ -2970,8 +3108,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
92587 {
92588 percpu_modfree(mod);
92589 module_arch_freeing_init(mod);
92590- module_memfree(mod->module_init);
92591- module_memfree(mod->module_core);
92592+ module_memfree_exec(mod->module_init_rx);
92593+ module_memfree_exec(mod->module_core_rx);
92594+ module_memfree(mod->module_init_rw);
92595+ module_memfree(mod->module_core_rw);
92596 }
92597
92598 int __weak module_finalize(const Elf_Ehdr *hdr,
92599@@ -2984,7 +3124,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
92600 static int post_relocation(struct module *mod, const struct load_info *info)
92601 {
92602 /* Sort exception table now relocations are done. */
92603+ pax_open_kernel();
92604 sort_extable(mod->extable, mod->extable + mod->num_exentries);
92605+ pax_close_kernel();
92606
92607 /* Copy relocated percpu area over. */
92608 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
92609@@ -3032,13 +3174,15 @@ static void do_mod_ctors(struct module *mod)
92610 /* For freeing module_init on success, in case kallsyms traversing */
92611 struct mod_initfree {
92612 struct rcu_head rcu;
92613- void *module_init;
92614+ void *module_init_rw;
92615+ void *module_init_rx;
92616 };
92617
92618 static void do_free_init(struct rcu_head *head)
92619 {
92620 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
92621- module_memfree(m->module_init);
92622+ module_memfree(m->module_init_rw);
92623+ module_memfree_exec(m->module_init_rx);
92624 kfree(m);
92625 }
92626
92627@@ -3058,7 +3202,8 @@ static noinline int do_init_module(struct module *mod)
92628 ret = -ENOMEM;
92629 goto fail;
92630 }
92631- freeinit->module_init = mod->module_init;
92632+ freeinit->module_init_rw = mod->module_init_rw;
92633+ freeinit->module_init_rx = mod->module_init_rx;
92634
92635 /*
92636 * We want to find out whether @mod uses async during init. Clear
92637@@ -3117,10 +3262,10 @@ static noinline int do_init_module(struct module *mod)
92638 #endif
92639 unset_module_init_ro_nx(mod);
92640 module_arch_freeing_init(mod);
92641- mod->module_init = NULL;
92642- mod->init_size = 0;
92643- mod->init_ro_size = 0;
92644- mod->init_text_size = 0;
92645+ mod->module_init_rw = NULL;
92646+ mod->module_init_rx = NULL;
92647+ mod->init_size_rw = 0;
92648+ mod->init_size_rx = 0;
92649 /*
92650 * We want to free module_init, but be aware that kallsyms may be
92651 * walking this with preempt disabled. In all the failure paths,
92652@@ -3208,16 +3353,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
92653 module_bug_finalize(info->hdr, info->sechdrs, mod);
92654
92655 /* Set RO and NX regions for core */
92656- set_section_ro_nx(mod->module_core,
92657- mod->core_text_size,
92658- mod->core_ro_size,
92659- mod->core_size);
92660+ set_section_ro_nx(mod->module_core_rx,
92661+ mod->core_size_rx,
92662+ mod->core_size_rx,
92663+ mod->core_size_rx);
92664
92665 /* Set RO and NX regions for init */
92666- set_section_ro_nx(mod->module_init,
92667- mod->init_text_size,
92668- mod->init_ro_size,
92669- mod->init_size);
92670+ set_section_ro_nx(mod->module_init_rx,
92671+ mod->init_size_rx,
92672+ mod->init_size_rx,
92673+ mod->init_size_rx);
92674
92675 /* Mark state as coming so strong_try_module_get() ignores us,
92676 * but kallsyms etc. can see us. */
92677@@ -3301,9 +3446,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
92678 if (err)
92679 goto free_unload;
92680
92681+ /* Now copy in args */
92682+ mod->args = strndup_user(uargs, ~0UL >> 1);
92683+ if (IS_ERR(mod->args)) {
92684+ err = PTR_ERR(mod->args);
92685+ goto free_unload;
92686+ }
92687+
92688 /* Set up MODINFO_ATTR fields */
92689 setup_modinfo(mod, info);
92690
92691+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92692+ {
92693+ char *p, *p2;
92694+
92695+ if (strstr(mod->args, "grsec_modharden_netdev")) {
92696+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
92697+ err = -EPERM;
92698+ goto free_modinfo;
92699+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92700+ p += sizeof("grsec_modharden_normal") - 1;
92701+ p2 = strstr(p, "_");
92702+ if (p2) {
92703+ *p2 = '\0';
92704+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92705+ *p2 = '_';
92706+ }
92707+ err = -EPERM;
92708+ goto free_modinfo;
92709+ }
92710+ }
92711+#endif
92712+
92713 /* Fix up syms, so that st_value is a pointer to location. */
92714 err = simplify_symbols(mod, info);
92715 if (err < 0)
92716@@ -3319,13 +3493,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
92717
92718 flush_module_icache(mod);
92719
92720- /* Now copy in args */
92721- mod->args = strndup_user(uargs, ~0UL >> 1);
92722- if (IS_ERR(mod->args)) {
92723- err = PTR_ERR(mod->args);
92724- goto free_arch_cleanup;
92725- }
92726-
92727 dynamic_debug_setup(info->debug, info->num_debug);
92728
92729 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
92730@@ -3373,11 +3540,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
92731 ddebug_cleanup:
92732 dynamic_debug_remove(info->debug);
92733 synchronize_sched();
92734- kfree(mod->args);
92735- free_arch_cleanup:
92736 module_arch_cleanup(mod);
92737 free_modinfo:
92738 free_modinfo(mod);
92739+ kfree(mod->args);
92740 free_unload:
92741 module_unload_free(mod);
92742 unlink_mod:
92743@@ -3390,7 +3556,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
92744 mutex_unlock(&module_mutex);
92745 free_module:
92746 /* Free lock-classes; relies on the preceding sync_rcu() */
92747- lockdep_free_key_range(mod->module_core, mod->core_size);
92748+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92749+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92750
92751 module_deallocate(mod, info);
92752 free_copy:
92753@@ -3467,10 +3634,16 @@ static const char *get_ksymbol(struct module *mod,
92754 unsigned long nextval;
92755
92756 /* At worse, next value is at end of module */
92757- if (within_module_init(addr, mod))
92758- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92759+ if (within_module_init_rx(addr, mod))
92760+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92761+ else if (within_module_init_rw(addr, mod))
92762+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92763+ else if (within_module_core_rx(addr, mod))
92764+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92765+ else if (within_module_core_rw(addr, mod))
92766+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92767 else
92768- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92769+ return NULL;
92770
92771 /* Scan for closest preceding symbol, and next symbol. (ELF
92772 starts real symbols at 1). */
92773@@ -3718,7 +3891,7 @@ static int m_show(struct seq_file *m, void *p)
92774 return 0;
92775
92776 seq_printf(m, "%s %u",
92777- mod->name, mod->init_size + mod->core_size);
92778+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92779 print_unload_info(m, mod);
92780
92781 /* Informative for users. */
92782@@ -3727,7 +3900,7 @@ static int m_show(struct seq_file *m, void *p)
92783 mod->state == MODULE_STATE_COMING ? "Loading" :
92784 "Live");
92785 /* Used by oprofile and other similar tools. */
92786- seq_printf(m, " 0x%pK", mod->module_core);
92787+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
92788
92789 /* Taints info */
92790 if (mod->taints)
92791@@ -3763,7 +3936,17 @@ static const struct file_operations proc_modules_operations = {
92792
92793 static int __init proc_modules_init(void)
92794 {
92795+#ifndef CONFIG_GRKERNSEC_HIDESYM
92796+#ifdef CONFIG_GRKERNSEC_PROC_USER
92797+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92798+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92799+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92800+#else
92801 proc_create("modules", 0, NULL, &proc_modules_operations);
92802+#endif
92803+#else
92804+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92805+#endif
92806 return 0;
92807 }
92808 module_init(proc_modules_init);
92809@@ -3824,7 +4007,8 @@ struct module *__module_address(unsigned long addr)
92810 {
92811 struct module *mod;
92812
92813- if (addr < module_addr_min || addr > module_addr_max)
92814+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92815+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92816 return NULL;
92817
92818 list_for_each_entry_rcu(mod, &modules, list) {
92819@@ -3865,11 +4049,20 @@ bool is_module_text_address(unsigned long addr)
92820 */
92821 struct module *__module_text_address(unsigned long addr)
92822 {
92823- struct module *mod = __module_address(addr);
92824+ struct module *mod;
92825+
92826+#ifdef CONFIG_X86_32
92827+ addr = ktla_ktva(addr);
92828+#endif
92829+
92830+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92831+ return NULL;
92832+
92833+ mod = __module_address(addr);
92834+
92835 if (mod) {
92836 /* Make sure it's within the text section. */
92837- if (!within(addr, mod->module_init, mod->init_text_size)
92838- && !within(addr, mod->module_core, mod->core_text_size))
92839+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92840 mod = NULL;
92841 }
92842 return mod;
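
Editor's note: the kernel/module.c hunks above all follow from PaX's split of every module into separate RX (code) and RW (data) mappings. Each single module_core/module_init pointer-plus-size pair becomes an _rx/_rw pair, so every address-range test has to consider both mappings, and a text-address test only checks the RX side. A minimal compilable sketch of that bookkeeping, with field names mirroring the patch (the real definitions live in this patch's include/linux/module.h changes, not shown here):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of the split layout; field names mirror the patch. */
struct module_layout {
	uintptr_t module_core_rx;	/* executable, read-only mapping */
	size_t    core_size_rx;
	uintptr_t module_core_rw;	/* writable, non-executable mapping */
	size_t    core_size_rw;
};

static bool within(uintptr_t addr, uintptr_t start, size_t size)
{
	return addr >= start && addr < start + size;
}

/* A generic address check must now test both ranges, as in
 * __module_address(); a text check tests only the RX range. */
static bool within_module_core(uintptr_t addr, const struct module_layout *m)
{
	return within(addr, m->module_core_rx, m->core_size_rx) ||
	       within(addr, m->module_core_rw, m->core_size_rw);
}
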
92843diff --git a/kernel/notifier.c b/kernel/notifier.c
92844index ae9fc7c..5085fbf 100644
92845--- a/kernel/notifier.c
92846+++ b/kernel/notifier.c
92847@@ -5,6 +5,7 @@
92848 #include <linux/rcupdate.h>
92849 #include <linux/vmalloc.h>
92850 #include <linux/reboot.h>
92851+#include <linux/mm.h>
92852
92853 /*
92854 * Notifier list for kernel code which wants to be called
92855@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
92856 while ((*nl) != NULL) {
92857 if (n->priority > (*nl)->priority)
92858 break;
92859- nl = &((*nl)->next);
92860+ nl = (struct notifier_block **)&((*nl)->next);
92861 }
92862- n->next = *nl;
92863+ pax_open_kernel();
92864+ *(const void **)&n->next = *nl;
92865 rcu_assign_pointer(*nl, n);
92866+ pax_close_kernel();
92867 return 0;
92868 }
92869
92870@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
92871 return 0;
92872 if (n->priority > (*nl)->priority)
92873 break;
92874- nl = &((*nl)->next);
92875+ nl = (struct notifier_block **)&((*nl)->next);
92876 }
92877- n->next = *nl;
92878+ pax_open_kernel();
92879+ *(const void **)&n->next = *nl;
92880 rcu_assign_pointer(*nl, n);
92881+ pax_close_kernel();
92882 return 0;
92883 }
92884
92885@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
92886 {
92887 while ((*nl) != NULL) {
92888 if ((*nl) == n) {
92889+ pax_open_kernel();
92890 rcu_assign_pointer(*nl, n->next);
92891+ pax_close_kernel();
92892 return 0;
92893 }
92894- nl = &((*nl)->next);
92895+ nl = (struct notifier_block **)&((*nl)->next);
92896 }
92897 return -ENOENT;
92898 }
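
Editor's note: notifier chains are constified under PaX, so linking a new notifier_block requires briefly lifting kernel write protection; pax_open_kernel()/pax_close_kernel() bracket exactly the stores into read-only memory. The primitives are arch-specific (on x86 they roughly amount to toggling CR0.WP) and are not shown in this hunk. A sketch of the insertion pattern, with no-op stand-ins so it compiles in userspace:

/* Userspace stand-ins so the sketch compiles; the real primitives
 * are arch-specific write-protection toggles. */
#define pax_open_kernel()  do { } while (0)
#define pax_close_kernel() do { } while (0)

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

/* Insertion mirrors notifier_chain_register(): the chain lives in
 * read-only memory, so the link stores are bracketed by open/close. */
static void chain_insert(struct notifier_block **nl, struct notifier_block *n)
{
	while (*nl && (*nl)->priority >= n->priority)
		nl = &(*nl)->next;
	pax_open_kernel();
	n->next = *nl;
	*nl = n;	/* rcu_assign_pointer() in the real code */
	pax_close_kernel();
}
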
92899diff --git a/kernel/padata.c b/kernel/padata.c
92900index b38bea9..91acfbe 100644
92901--- a/kernel/padata.c
92902+++ b/kernel/padata.c
92903@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
92904 * seq_nr mod. number of cpus in use.
92905 */
92906
92907- seq_nr = atomic_inc_return(&pd->seq_nr);
92908+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
92909 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
92910
92911 return padata_index_to_cpu(pd, cpu_index);
92912@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
92913 padata_init_pqueues(pd);
92914 padata_init_squeues(pd);
92915 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
92916- atomic_set(&pd->seq_nr, -1);
92917+ atomic_set_unchecked(&pd->seq_nr, -1);
92918 atomic_set(&pd->reorder_objects, 0);
92919 atomic_set(&pd->refcnt, 0);
92920 pd->pinst = pinst;
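
Editor's note: PaX's REFCOUNT feature makes the ordinary atomic_t trap on signed overflow, which stops reference-count-overflow exploits. Counters where wraparound is harmless, such as padata's sequence number here, are switched to atomic_unchecked_t plus the matching *_unchecked operations to opt out of that check. A single-threaded compilable analogue of the split (the real operations are LOCK-prefixed SMP-safe instructions; the trap is reduced to an assertion):

#include <assert.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* Checked flavour: overflowing a refcount is treated as a bug. */
static int atomic_inc_return(atomic_t *v)
{
	assert(v->counter != INT_MAX);	/* PaX would trap here */
	return ++v->counter;
}

/* Unchecked flavour: wraparound is expected and benign. */
static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return ++v->counter;	/* may wrap from INT_MAX to INT_MIN */
}
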
92921diff --git a/kernel/panic.c b/kernel/panic.c
92922index 8136ad7..15c857b 100644
92923--- a/kernel/panic.c
92924+++ b/kernel/panic.c
92925@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
92926 /*
92927 * Stop ourself in panic -- architecture code may override this
92928 */
92929-void __weak panic_smp_self_stop(void)
92930+void __weak __noreturn panic_smp_self_stop(void)
92931 {
92932 while (1)
92933 cpu_relax();
92934@@ -425,7 +425,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
92935 disable_trace_on_warning();
92936
92937 pr_warn("------------[ cut here ]------------\n");
92938- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
92939+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
92940 raw_smp_processor_id(), current->pid, file, line, caller);
92941
92942 if (args)
92943@@ -490,7 +490,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92944 */
92945 __visible void __stack_chk_fail(void)
92946 {
92947- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92948+ dump_stack();
92949+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92950 __builtin_return_address(0));
92951 }
92952 EXPORT_SYMBOL(__stack_chk_fail);
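
Editor's note: two small panic.c tweaks. panic_smp_self_stop() gains __noreturn so the compiler knows the spin loop never exits, and a stack-smash report now dumps the stack before panicking; %pA is a grsecurity-specific printk format extension introduced elsewhere in this patch, replacing %p/%pS at these call sites. A sketch of the attribute, which compiles with GCC/Clang:

/* __noreturn tells the compiler control never leaves this function,
 * improving code generation and diagnostics at call sites. */
#define __noreturn __attribute__((noreturn))

static __noreturn void self_stop(void)
{
	for (;;)
		;	/* cpu_relax() loop in the kernel version */
}
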
92953diff --git a/kernel/pid.c b/kernel/pid.c
92954index cd36a5e..11f185d 100644
92955--- a/kernel/pid.c
92956+++ b/kernel/pid.c
92957@@ -33,6 +33,7 @@
92958 #include <linux/rculist.h>
92959 #include <linux/bootmem.h>
92960 #include <linux/hash.h>
92961+#include <linux/security.h>
92962 #include <linux/pid_namespace.h>
92963 #include <linux/init_task.h>
92964 #include <linux/syscalls.h>
92965@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92966
92967 int pid_max = PID_MAX_DEFAULT;
92968
92969-#define RESERVED_PIDS 300
92970+#define RESERVED_PIDS 500
92971
92972 int pid_max_min = RESERVED_PIDS + 1;
92973 int pid_max_max = PID_MAX_LIMIT;
92974@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
92975 */
92976 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92977 {
92978+ struct task_struct *task;
92979+
92980 rcu_lockdep_assert(rcu_read_lock_held(),
92981 "find_task_by_pid_ns() needs rcu_read_lock()"
92982 " protection");
92983- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92984+
92985+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92986+
92987+ if (gr_pid_is_chrooted(task))
92988+ return NULL;
92989+
92990+ return task;
92991 }
92992
92993 struct task_struct *find_task_by_vpid(pid_t vnr)
92994@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
92995 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
92996 }
92997
92998+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
92999+{
93000+ rcu_lockdep_assert(rcu_read_lock_held(),
93001+ "find_task_by_pid_ns() needs rcu_read_lock()"
93002+ " protection");
93003+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
93004+}
93005+
93006 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93007 {
93008 struct pid *pid;
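
Editor's note: find_task_by_pid_ns() now refuses to return tasks outside the caller's chroot (gr_pid_is_chrooted()), hiding host processes from chrooted ones, while the new find_task_by_vpid_unrestricted() preserves an unfiltered lookup for internal callers that must still see every task. A sketch of the filter-wrapper shape, with stand-in types and a stub policy predicate:

#include <stdbool.h>
#include <stddef.h>

struct task { int pid; int chrooted; };

/* Stand-ins for the real lookup and the grsecurity policy hook. */
static struct task tasks[8];
static struct task *raw_lookup(int pid)
{
	return (pid >= 0 && pid < 8) ? &tasks[pid] : NULL;
}
static bool gr_pid_is_chrooted(struct task *t) { return t && t->chrooted; }

/* Filtered lookup: the common entry point applies the policy... */
static struct task *find_task(int pid)
{
	struct task *t = raw_lookup(pid);

	if (gr_pid_is_chrooted(t))
		return NULL;	/* invisible from inside the chroot */
	return t;
}

/* ...while trusted internal callers use the unrestricted variant. */
static struct task *find_task_unrestricted(int pid)
{
	return raw_lookup(pid);
}
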
93009diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
93010index a65ba13..f600dbb 100644
93011--- a/kernel/pid_namespace.c
93012+++ b/kernel/pid_namespace.c
93013@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
93014 void __user *buffer, size_t *lenp, loff_t *ppos)
93015 {
93016 struct pid_namespace *pid_ns = task_active_pid_ns(current);
93017- struct ctl_table tmp = *table;
93018+ ctl_table_no_const tmp = *table;
93019
93020 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
93021 return -EPERM;
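
Editor's note: with structure constification, file-scope struct ctl_table instances are effectively const; handlers that need a mutable scratch copy declare it as ctl_table_no_const, an alias added elsewhere in this patch that names the same layout without the const qualifier, so `tmp = *table` keeps working. A compilable sketch of the pattern (the alias shown is illustrative, not the patch's exact definition):

struct ctl_table {
	const char *procname;
	void *data;
};

/* Under constification the named type is const-qualified; the
 * _no_const alias restores a writable spelling for stack copies. */
typedef struct ctl_table ctl_table_no_const;

static int handler(const struct ctl_table *table)
{
	ctl_table_no_const tmp = *table;	/* mutable scratch copy */

	tmp.data = 0;	/* local adjustments before use */
	return 0;
}
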
93022diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
93023index 7e01f78..f5da19d 100644
93024--- a/kernel/power/Kconfig
93025+++ b/kernel/power/Kconfig
93026@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
93027 config HIBERNATION
93028 bool "Hibernation (aka 'suspend to disk')"
93029 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
93030+ depends on !GRKERNSEC_KMEM
93031+ depends on !PAX_MEMORY_SANITIZE
93032 select HIBERNATE_CALLBACKS
93033 select LZO_COMPRESS
93034 select LZO_DECOMPRESS
93035diff --git a/kernel/power/process.c b/kernel/power/process.c
93036index 564f786..361a18e 100644
93037--- a/kernel/power/process.c
93038+++ b/kernel/power/process.c
93039@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
93040 unsigned int elapsed_msecs;
93041 bool wakeup = false;
93042 int sleep_usecs = USEC_PER_MSEC;
93043+ bool timedout = false;
93044
93045 do_gettimeofday(&start);
93046
93047@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
93048
93049 while (true) {
93050 todo = 0;
93051+ if (time_after(jiffies, end_time))
93052+ timedout = true;
93053 read_lock(&tasklist_lock);
93054 for_each_process_thread(g, p) {
93055 if (p == current || !freeze_task(p))
93056 continue;
93057
93058- if (!freezer_should_skip(p))
93059+ if (!freezer_should_skip(p)) {
93060 todo++;
93061+ if (timedout) {
93062+ printk(KERN_ERR "Task refusing to freeze:\n");
93063+ sched_show_task(p);
93064+ }
93065+ }
93066 }
93067 read_unlock(&tasklist_lock);
93068
93069@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
93070 todo += wq_busy;
93071 }
93072
93073- if (!todo || time_after(jiffies, end_time))
93074+ if (!todo || timedout)
93075 break;
93076
93077 if (pm_wakeup_pending()) {
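
Editor's note: the try_to_freeze_tasks() change is diagnostic. The deadline is now sampled into timedout before each scan, so on the pass that observes the timeout every still-unfrozen task is reported via sched_show_task() before the loop breaks, instead of the loop exiting silently on a bottom-of-loop deadline check. The reshaped loop, schematically (stubs stand in for the kernel helpers):

#include <stdbool.h>

static bool deadline_passed(void) { return true; }		/* stub */
static int count_unfrozen(bool report) { (void)report; return 0; }	/* stub */

static int freeze_all(void)
{
	for (;;) {
		/* Sample the deadline before the scan so the scan that
		 * sees the timeout can also name the stragglers. */
		bool timedout = deadline_passed();
		int todo = count_unfrozen(timedout);	/* logs each task if timedout */

		if (todo == 0 || timedout)
			return todo ? -1 : 0;	/* -EBUSY vs. success in the kernel */
	}
}
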
93078diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
93079index bb0635b..9aff9f3 100644
93080--- a/kernel/printk/printk.c
93081+++ b/kernel/printk/printk.c
93082@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
93083 if (from_file && type != SYSLOG_ACTION_OPEN)
93084 return 0;
93085
93086+#ifdef CONFIG_GRKERNSEC_DMESG
93087+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
93088+ return -EPERM;
93089+#endif
93090+
93091 if (syslog_action_restricted(type)) {
93092 if (capable(CAP_SYSLOG))
93093 return 0;
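
Editor's note: with CONFIG_GRKERNSEC_DMESG enabled and the grsec_enable_dmesg toggle set, check_syslog_permissions() rejects the caller up front unless it holds CAP_SYSLOG or CAP_SYS_ADMIN (the latter tested via capable_nolog() to avoid flooding the audit log), before the stock per-action checks run. The ordering, as a pure function:

#include <stdbool.h>

/* Order of checks, as in the patched check_syslog_permissions():
 * the grsecurity gate runs before the per-action logic. */
static int syslog_permitted(bool restrict_dmesg, bool cap_syslog, bool cap_sys_admin)
{
	if (restrict_dmesg && !cap_syslog && !cap_sys_admin)
		return -1;	/* -EPERM: dmesg hidden from unprivileged users */
	/* ... stock syslog_action_restricted() checks follow ... */
	return 0;
}
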
93094diff --git a/kernel/profile.c b/kernel/profile.c
93095index a7bcd28..5b368fa 100644
93096--- a/kernel/profile.c
93097+++ b/kernel/profile.c
93098@@ -37,7 +37,7 @@ struct profile_hit {
93099 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
93100 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
93101
93102-static atomic_t *prof_buffer;
93103+static atomic_unchecked_t *prof_buffer;
93104 static unsigned long prof_len, prof_shift;
93105
93106 int prof_on __read_mostly;
93107@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
93108 hits[i].pc = 0;
93109 continue;
93110 }
93111- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93112+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93113 hits[i].hits = hits[i].pc = 0;
93114 }
93115 }
93116@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93117 * Add the current hit(s) and flush the write-queue out
93118 * to the global buffer:
93119 */
93120- atomic_add(nr_hits, &prof_buffer[pc]);
93121+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93122 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93123- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93124+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93125 hits[i].pc = hits[i].hits = 0;
93126 }
93127 out:
93128@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93129 {
93130 unsigned long pc;
93131 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93132- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93133+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93134 }
93135 #endif /* !CONFIG_SMP */
93136
93137@@ -489,7 +489,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93138 return -EFAULT;
93139 buf++; p++; count--; read++;
93140 }
93141- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93142+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93143 if (copy_to_user(buf, (void *)pnt, count))
93144 return -EFAULT;
93145 read += count;
93146@@ -520,7 +520,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93147 }
93148 #endif
93149 profile_discard_flip_buffers();
93150- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93151+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93152 return count;
93153 }
93154
93155diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93156index 9a34bd8..38d90e5 100644
93157--- a/kernel/ptrace.c
93158+++ b/kernel/ptrace.c
93159@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
93160 if (seize)
93161 flags |= PT_SEIZED;
93162 rcu_read_lock();
93163- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93164+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93165 flags |= PT_PTRACE_CAP;
93166 rcu_read_unlock();
93167 task->ptrace = flags;
93168@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93169 break;
93170 return -EIO;
93171 }
93172- if (copy_to_user(dst, buf, retval))
93173+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
93174 return -EFAULT;
93175 copied += retval;
93176 src += retval;
93177@@ -803,7 +803,7 @@ int ptrace_request(struct task_struct *child, long request,
93178 bool seized = child->ptrace & PT_SEIZED;
93179 int ret = -EIO;
93180 siginfo_t siginfo, *si;
93181- void __user *datavp = (void __user *) data;
93182+ void __user *datavp = (__force void __user *) data;
93183 unsigned long __user *datalp = datavp;
93184 unsigned long flags;
93185
93186@@ -1049,14 +1049,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
93187 goto out;
93188 }
93189
93190+ if (gr_handle_ptrace(child, request)) {
93191+ ret = -EPERM;
93192+ goto out_put_task_struct;
93193+ }
93194+
93195 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93196 ret = ptrace_attach(child, request, addr, data);
93197 /*
93198 * Some architectures need to do book-keeping after
93199 * a ptrace attach.
93200 */
93201- if (!ret)
93202+ if (!ret) {
93203 arch_ptrace_attach(child);
93204+ gr_audit_ptrace(child);
93205+ }
93206 goto out_put_task_struct;
93207 }
93208
93209@@ -1084,7 +1091,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
93210 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93211 if (copied != sizeof(tmp))
93212 return -EIO;
93213- return put_user(tmp, (unsigned long __user *)data);
93214+ return put_user(tmp, (__force unsigned long __user *)data);
93215 }
93216
93217 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
93218@@ -1177,7 +1184,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93219 }
93220
93221 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93222- compat_long_t, addr, compat_long_t, data)
93223+ compat_ulong_t, addr, compat_ulong_t, data)
93224 {
93225 struct task_struct *child;
93226 long ret;
93227@@ -1193,14 +1200,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93228 goto out;
93229 }
93230
93231+ if (gr_handle_ptrace(child, request)) {
93232+ ret = -EPERM;
93233+ goto out_put_task_struct;
93234+ }
93235+
93236 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93237 ret = ptrace_attach(child, request, addr, data);
93238 /*
93239 * Some architectures need to do book-keeping after
93240 * a ptrace attach.
93241 */
93242- if (!ret)
93243+ if (!ret) {
93244 arch_ptrace_attach(child);
93245+ gr_audit_ptrace(child);
93246+ }
93247 goto out_put_task_struct;
93248 }
93249
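
Editor's note: the ptrace_readdata() tweak is defensive hardening. retval comes from access_process_vm() and should never exceed sizeof(buf), but the copy to userspace now verifies that invariant itself rather than trusting the caller's arithmetic, turning a hypothetical kernel-stack over-read into a clean -EFAULT. The same file also widens the compat ptrace syscall's addr/data parameters from compat_long_t to compat_ulong_t so they are zero- rather than sign-extended. The shape of the bounds check, sketched with a stand-in for copy_to_user():

#include <errno.h>
#include <string.h>

/* Stand-in for copy_to_user(); returns bytes NOT copied. */
static unsigned long copy_out(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static int read_chunk(void *dst, const void *src_window, long retval)
{
	char buf[128];

	/* Never copy more than the local buffer actually holds. */
	if (retval < 0 || (unsigned long)retval > sizeof(buf))
		return -EFAULT;
	memcpy(buf, src_window, (size_t)retval);
	if (copy_out(dst, buf, (unsigned long)retval))
		return -EFAULT;
	return 0;
}
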
93250diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
93251index 30d42aa..cac5d66 100644
93252--- a/kernel/rcu/rcutorture.c
93253+++ b/kernel/rcu/rcutorture.c
93254@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93255 rcu_torture_count) = { 0 };
93256 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93257 rcu_torture_batch) = { 0 };
93258-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93259-static atomic_t n_rcu_torture_alloc;
93260-static atomic_t n_rcu_torture_alloc_fail;
93261-static atomic_t n_rcu_torture_free;
93262-static atomic_t n_rcu_torture_mberror;
93263-static atomic_t n_rcu_torture_error;
93264+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93265+static atomic_unchecked_t n_rcu_torture_alloc;
93266+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93267+static atomic_unchecked_t n_rcu_torture_free;
93268+static atomic_unchecked_t n_rcu_torture_mberror;
93269+static atomic_unchecked_t n_rcu_torture_error;
93270 static long n_rcu_torture_barrier_error;
93271 static long n_rcu_torture_boost_ktrerror;
93272 static long n_rcu_torture_boost_rterror;
93273@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
93274 static long n_rcu_torture_timers;
93275 static long n_barrier_attempts;
93276 static long n_barrier_successes;
93277-static atomic_long_t n_cbfloods;
93278+static atomic_long_unchecked_t n_cbfloods;
93279 static struct list_head rcu_torture_removed;
93280
93281 static int rcu_torture_writer_state;
93282@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
93283
93284 spin_lock_bh(&rcu_torture_lock);
93285 if (list_empty(&rcu_torture_freelist)) {
93286- atomic_inc(&n_rcu_torture_alloc_fail);
93287+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93288 spin_unlock_bh(&rcu_torture_lock);
93289 return NULL;
93290 }
93291- atomic_inc(&n_rcu_torture_alloc);
93292+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93293 p = rcu_torture_freelist.next;
93294 list_del_init(p);
93295 spin_unlock_bh(&rcu_torture_lock);
93296@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
93297 static void
93298 rcu_torture_free(struct rcu_torture *p)
93299 {
93300- atomic_inc(&n_rcu_torture_free);
93301+ atomic_inc_unchecked(&n_rcu_torture_free);
93302 spin_lock_bh(&rcu_torture_lock);
93303 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93304 spin_unlock_bh(&rcu_torture_lock);
93305@@ -308,7 +308,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
93306 i = rp->rtort_pipe_count;
93307 if (i > RCU_TORTURE_PIPE_LEN)
93308 i = RCU_TORTURE_PIPE_LEN;
93309- atomic_inc(&rcu_torture_wcount[i]);
93310+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93311 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93312 rp->rtort_mbtest = 0;
93313 return true;
93314@@ -796,7 +796,7 @@ rcu_torture_cbflood(void *arg)
93315 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
93316 do {
93317 schedule_timeout_interruptible(cbflood_inter_holdoff);
93318- atomic_long_inc(&n_cbfloods);
93319+ atomic_long_inc_unchecked(&n_cbfloods);
93320 WARN_ON(signal_pending(current));
93321 for (i = 0; i < cbflood_n_burst; i++) {
93322 for (j = 0; j < cbflood_n_per_burst; j++) {
93323@@ -915,7 +915,7 @@ rcu_torture_writer(void *arg)
93324 i = old_rp->rtort_pipe_count;
93325 if (i > RCU_TORTURE_PIPE_LEN)
93326 i = RCU_TORTURE_PIPE_LEN;
93327- atomic_inc(&rcu_torture_wcount[i]);
93328+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93329 old_rp->rtort_pipe_count++;
93330 switch (synctype[torture_random(&rand) % nsynctypes]) {
93331 case RTWS_DEF_FREE:
93332@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
93333 return;
93334 }
93335 if (p->rtort_mbtest == 0)
93336- atomic_inc(&n_rcu_torture_mberror);
93337+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93338 spin_lock(&rand_lock);
93339 cur_ops->read_delay(&rand);
93340 n_rcu_torture_timers++;
93341@@ -1111,7 +1111,7 @@ rcu_torture_reader(void *arg)
93342 continue;
93343 }
93344 if (p->rtort_mbtest == 0)
93345- atomic_inc(&n_rcu_torture_mberror);
93346+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93347 cur_ops->read_delay(&rand);
93348 preempt_disable();
93349 pipe_count = p->rtort_pipe_count;
93350@@ -1180,11 +1180,11 @@ rcu_torture_stats_print(void)
93351 rcu_torture_current,
93352 rcu_torture_current_version,
93353 list_empty(&rcu_torture_freelist),
93354- atomic_read(&n_rcu_torture_alloc),
93355- atomic_read(&n_rcu_torture_alloc_fail),
93356- atomic_read(&n_rcu_torture_free));
93357+ atomic_read_unchecked(&n_rcu_torture_alloc),
93358+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93359+ atomic_read_unchecked(&n_rcu_torture_free));
93360 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
93361- atomic_read(&n_rcu_torture_mberror),
93362+ atomic_read_unchecked(&n_rcu_torture_mberror),
93363 n_rcu_torture_boost_ktrerror,
93364 n_rcu_torture_boost_rterror);
93365 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
93366@@ -1196,17 +1196,17 @@ rcu_torture_stats_print(void)
93367 n_barrier_successes,
93368 n_barrier_attempts,
93369 n_rcu_torture_barrier_error);
93370- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
93371+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
93372
93373 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
93374- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
93375+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
93376 n_rcu_torture_barrier_error != 0 ||
93377 n_rcu_torture_boost_ktrerror != 0 ||
93378 n_rcu_torture_boost_rterror != 0 ||
93379 n_rcu_torture_boost_failure != 0 ||
93380 i > 1) {
93381 pr_cont("%s", "!!! ");
93382- atomic_inc(&n_rcu_torture_error);
93383+ atomic_inc_unchecked(&n_rcu_torture_error);
93384 WARN_ON_ONCE(1);
93385 }
93386 pr_cont("Reader Pipe: ");
93387@@ -1223,7 +1223,7 @@ rcu_torture_stats_print(void)
93388 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
93389 pr_cont("Free-Block Circulation: ");
93390 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93391- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
93392+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
93393 }
93394 pr_cont("\n");
93395
93396@@ -1570,7 +1570,7 @@ rcu_torture_cleanup(void)
93397
93398 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
93399
93400- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93401+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93402 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
93403 else if (torture_onoff_failures())
93404 rcu_torture_print_module_parms(cur_ops,
93405@@ -1695,18 +1695,18 @@ rcu_torture_init(void)
93406
93407 rcu_torture_current = NULL;
93408 rcu_torture_current_version = 0;
93409- atomic_set(&n_rcu_torture_alloc, 0);
93410- atomic_set(&n_rcu_torture_alloc_fail, 0);
93411- atomic_set(&n_rcu_torture_free, 0);
93412- atomic_set(&n_rcu_torture_mberror, 0);
93413- atomic_set(&n_rcu_torture_error, 0);
93414+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93415+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93416+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93417+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93418+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93419 n_rcu_torture_barrier_error = 0;
93420 n_rcu_torture_boost_ktrerror = 0;
93421 n_rcu_torture_boost_rterror = 0;
93422 n_rcu_torture_boost_failure = 0;
93423 n_rcu_torture_boosts = 0;
93424 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93425- atomic_set(&rcu_torture_wcount[i], 0);
93426+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93427 for_each_possible_cpu(cpu) {
93428 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93429 per_cpu(rcu_torture_count, cpu)[i] = 0;
93430diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
93431index cc9ceca..ce075a6 100644
93432--- a/kernel/rcu/tiny.c
93433+++ b/kernel/rcu/tiny.c
93434@@ -42,7 +42,7 @@
93435 /* Forward declarations for tiny_plugin.h. */
93436 struct rcu_ctrlblk;
93437 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
93438-static void rcu_process_callbacks(struct softirq_action *unused);
93439+static void rcu_process_callbacks(void);
93440 static void __call_rcu(struct rcu_head *head,
93441 void (*func)(struct rcu_head *rcu),
93442 struct rcu_ctrlblk *rcp);
93443@@ -210,7 +210,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
93444 false));
93445 }
93446
93447-static void rcu_process_callbacks(struct softirq_action *unused)
93448+static __latent_entropy void rcu_process_callbacks(void)
93449 {
93450 __rcu_process_callbacks(&rcu_sched_ctrlblk);
93451 __rcu_process_callbacks(&rcu_bh_ctrlblk);
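
Editor's note: rcu_process_callbacks() loses its never-read struct softirq_action * parameter; the matching open_softirq() prototype change appears elsewhere in this patch, so all softirq handlers are converted in lockstep. The __latent_entropy marker comes from the PaX latent-entropy GCC plugin, which instruments marked functions to mix their execution history into the kernel's entropy pool. The type-level effect of dropping the parameter, sketched:

/* Before: every handler carried an argument nobody used. */
struct softirq_action;
typedef void (*softirq_handler_old)(struct softirq_action *unused);

/* After: the argument is dropped from the handler type itself,
 * so the compiler rejects any handler that still expects it. */
typedef void (*softirq_handler)(void);

static void rcu_callbacks(void) { /* ... process callback lists ... */ }

static softirq_handler registered = rcu_callbacks;	/* type-checked */
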
93452diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
93453index f94e209..d2985bd 100644
93454--- a/kernel/rcu/tiny_plugin.h
93455+++ b/kernel/rcu/tiny_plugin.h
93456@@ -150,10 +150,10 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
93457 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
93458 jiffies - rcp->gp_start, rcp->qlen);
93459 dump_stack();
93460- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
93461+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
93462 3 * rcu_jiffies_till_stall_check() + 3;
93463 } else if (ULONG_CMP_GE(j, js)) {
93464- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93465+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93466 }
93467 }
93468
93469@@ -161,7 +161,7 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
93470 {
93471 rcp->ticks_this_gp = 0;
93472 rcp->gp_start = jiffies;
93473- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93474+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93475 }
93476
93477 static void check_cpu_stalls(void)
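
Editor's note: the bulk of the RCU churn below is mechanical. Under PaX/constify, ACCESS_ONCE() is redefined with a const-qualified cast so it can only read, and every site that used it as the left-hand side of an assignment must switch to ACCESS_ONCE_RW(), which keeps the writable cast. Roughly (the real definitions are in this patch's include/linux/compiler.h changes):

/* Read-only flavour: the const in the cast makes stores a compile error. */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
/* Writable flavour for intentional lockless stores. */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

So `ACCESS_ONCE_RW(rcp->jiffies_stall) = ...` generates the same store as before; the rename exists purely so that accidental writes through the read-only macro are caught at build time.
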
93478diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
93479index 48d640c..9401d30 100644
93480--- a/kernel/rcu/tree.c
93481+++ b/kernel/rcu/tree.c
93482@@ -268,7 +268,7 @@ static void rcu_momentary_dyntick_idle(void)
93483 */
93484 rdtp = this_cpu_ptr(&rcu_dynticks);
93485 smp_mb__before_atomic(); /* Earlier stuff before QS. */
93486- atomic_add(2, &rdtp->dynticks); /* QS. */
93487+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
93488 smp_mb__after_atomic(); /* Later stuff after QS. */
93489 break;
93490 }
93491@@ -580,9 +580,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
93492 rcu_prepare_for_idle();
93493 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93494 smp_mb__before_atomic(); /* See above. */
93495- atomic_inc(&rdtp->dynticks);
93496+ atomic_inc_unchecked(&rdtp->dynticks);
93497 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
93498- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93499+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93500 rcu_dynticks_task_enter();
93501
93502 /*
93503@@ -703,10 +703,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
93504
93505 rcu_dynticks_task_exit();
93506 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
93507- atomic_inc(&rdtp->dynticks);
93508+ atomic_inc_unchecked(&rdtp->dynticks);
93509 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93510 smp_mb__after_atomic(); /* See above. */
93511- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93512+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93513 rcu_cleanup_after_idle();
93514 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
93515 if (!user && !is_idle_task(current)) {
93516@@ -840,12 +840,12 @@ void rcu_nmi_enter(void)
93517 * to be in the outermost NMI handler that interrupted an RCU-idle
93518 * period (observation due to Andy Lutomirski).
93519 */
93520- if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
93521+ if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
93522 smp_mb__before_atomic(); /* Force delay from prior write. */
93523- atomic_inc(&rdtp->dynticks);
93524+ atomic_inc_unchecked(&rdtp->dynticks);
93525 /* atomic_inc() before later RCU read-side crit sects */
93526 smp_mb__after_atomic(); /* See above. */
93527- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93528+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93529 incby = 1;
93530 }
93531 rdtp->dynticks_nmi_nesting += incby;
93532@@ -870,7 +870,7 @@ void rcu_nmi_exit(void)
93533 * to us!)
93534 */
93535 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
93536- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93537+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93538
93539 /*
93540 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
93541@@ -885,9 +885,9 @@ void rcu_nmi_exit(void)
93542 rdtp->dynticks_nmi_nesting = 0;
93543 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93544 smp_mb__before_atomic(); /* See above. */
93545- atomic_inc(&rdtp->dynticks);
93546+ atomic_inc_unchecked(&rdtp->dynticks);
93547 smp_mb__after_atomic(); /* Force delay to next write. */
93548- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93549+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93550 }
93551
93552 /**
93553@@ -900,7 +900,7 @@ void rcu_nmi_exit(void)
93554 */
93555 bool notrace __rcu_is_watching(void)
93556 {
93557- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93558+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93559 }
93560
93561 /**
93562@@ -983,7 +983,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
93563 static int dyntick_save_progress_counter(struct rcu_data *rdp,
93564 bool *isidle, unsigned long *maxj)
93565 {
93566- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
93567+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93568 rcu_sysidle_check_cpu(rdp, isidle, maxj);
93569 if ((rdp->dynticks_snap & 0x1) == 0) {
93570 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
93571@@ -991,7 +991,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
93572 } else {
93573 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
93574 rdp->mynode->gpnum))
93575- ACCESS_ONCE(rdp->gpwrap) = true;
93576+ ACCESS_ONCE_RW(rdp->gpwrap) = true;
93577 return 0;
93578 }
93579 }
93580@@ -1009,7 +1009,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93581 int *rcrmp;
93582 unsigned int snap;
93583
93584- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
93585+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93586 snap = (unsigned int)rdp->dynticks_snap;
93587
93588 /*
93589@@ -1072,10 +1072,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93590 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
93591 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
93592 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
93593- ACCESS_ONCE(rdp->cond_resched_completed) =
93594+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
93595 ACCESS_ONCE(rdp->mynode->completed);
93596 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
93597- ACCESS_ONCE(*rcrmp) =
93598+ ACCESS_ONCE_RW(*rcrmp) =
93599 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
93600 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
93601 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
93602@@ -1097,7 +1097,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
93603 rsp->gp_start = j;
93604 smp_wmb(); /* Record start time before stall time. */
93605 j1 = rcu_jiffies_till_stall_check();
93606- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
93607+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
93608 rsp->jiffies_resched = j + j1 / 2;
93609 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
93610 }
93611@@ -1156,7 +1156,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
93612 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93613 return;
93614 }
93615- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93616+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93617 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93618
93619 /*
93620@@ -1240,7 +1240,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
93621
93622 raw_spin_lock_irqsave(&rnp->lock, flags);
93623 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
93624- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
93625+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
93626 3 * rcu_jiffies_till_stall_check() + 3;
93627 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93628
93629@@ -1324,7 +1324,7 @@ void rcu_cpu_stall_reset(void)
93630 struct rcu_state *rsp;
93631
93632 for_each_rcu_flavor(rsp)
93633- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93634+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93635 }
93636
93637 /*
93638@@ -1671,7 +1671,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
93639 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
93640 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
93641 zero_cpu_stall_ticks(rdp);
93642- ACCESS_ONCE(rdp->gpwrap) = false;
93643+ ACCESS_ONCE_RW(rdp->gpwrap) = false;
93644 }
93645 return ret;
93646 }
93647@@ -1706,7 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93648 struct rcu_data *rdp;
93649 struct rcu_node *rnp = rcu_get_root(rsp);
93650
93651- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93652+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93653 rcu_bind_gp_kthread();
93654 raw_spin_lock_irq(&rnp->lock);
93655 smp_mb__after_unlock_lock();
93656@@ -1715,7 +1715,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93657 raw_spin_unlock_irq(&rnp->lock);
93658 return 0;
93659 }
93660- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93661+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93662
93663 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
93664 /*
93665@@ -1756,9 +1756,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
93666 rdp = this_cpu_ptr(rsp->rda);
93667 rcu_preempt_check_blocked_tasks(rnp);
93668 rnp->qsmask = rnp->qsmaskinit;
93669- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
93670+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
93671 WARN_ON_ONCE(rnp->completed != rsp->completed);
93672- ACCESS_ONCE(rnp->completed) = rsp->completed;
93673+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
93674 if (rnp == rdp->mynode)
93675 (void)__note_gp_changes(rsp, rnp, rdp);
93676 rcu_preempt_boost_start_gp(rnp);
93677@@ -1767,7 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93678 rnp->grphi, rnp->qsmask);
93679 raw_spin_unlock_irq(&rnp->lock);
93680 cond_resched_rcu_qs();
93681- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93682+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93683 }
93684
93685 mutex_unlock(&rsp->onoff_mutex);
93686@@ -1784,7 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
93687 unsigned long maxj;
93688 struct rcu_node *rnp = rcu_get_root(rsp);
93689
93690- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93691+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93692 rsp->n_force_qs++;
93693 if (fqs_state == RCU_SAVE_DYNTICK) {
93694 /* Collect dyntick-idle snapshots. */
93695@@ -1805,7 +1805,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
93696 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
93697 raw_spin_lock_irq(&rnp->lock);
93698 smp_mb__after_unlock_lock();
93699- ACCESS_ONCE(rsp->gp_flags) =
93700+ ACCESS_ONCE_RW(rsp->gp_flags) =
93701 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
93702 raw_spin_unlock_irq(&rnp->lock);
93703 }
93704@@ -1823,7 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93705 struct rcu_data *rdp;
93706 struct rcu_node *rnp = rcu_get_root(rsp);
93707
93708- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93709+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93710 raw_spin_lock_irq(&rnp->lock);
93711 smp_mb__after_unlock_lock();
93712 gp_duration = jiffies - rsp->gp_start;
93713@@ -1852,7 +1852,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93714 rcu_for_each_node_breadth_first(rsp, rnp) {
93715 raw_spin_lock_irq(&rnp->lock);
93716 smp_mb__after_unlock_lock();
93717- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
93718+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
93719 rdp = this_cpu_ptr(rsp->rda);
93720 if (rnp == rdp->mynode)
93721 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
93722@@ -1860,7 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93723 nocb += rcu_future_gp_cleanup(rsp, rnp);
93724 raw_spin_unlock_irq(&rnp->lock);
93725 cond_resched_rcu_qs();
93726- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93727+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93728 }
93729 rnp = rcu_get_root(rsp);
93730 raw_spin_lock_irq(&rnp->lock);
93731@@ -1868,14 +1868,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93732 rcu_nocb_gp_set(rnp, nocb);
93733
93734 /* Declare grace period done. */
93735- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
93736+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
93737 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
93738 rsp->fqs_state = RCU_GP_IDLE;
93739 rdp = this_cpu_ptr(rsp->rda);
93740 /* Advance CBs to reduce false positives below. */
93741 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
93742 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
93743- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93744+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93745 trace_rcu_grace_period(rsp->name,
93746 ACCESS_ONCE(rsp->gpnum),
93747 TPS("newreq"));
93748@@ -1910,7 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
93749 if (rcu_gp_init(rsp))
93750 break;
93751 cond_resched_rcu_qs();
93752- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93753+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93754 WARN_ON(signal_pending(current));
93755 trace_rcu_grace_period(rsp->name,
93756 ACCESS_ONCE(rsp->gpnum),
93757@@ -1954,11 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
93758 ACCESS_ONCE(rsp->gpnum),
93759 TPS("fqsend"));
93760 cond_resched_rcu_qs();
93761- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93762+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93763 } else {
93764 /* Deal with stray signal. */
93765 cond_resched_rcu_qs();
93766- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93767+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93768 WARN_ON(signal_pending(current));
93769 trace_rcu_grace_period(rsp->name,
93770 ACCESS_ONCE(rsp->gpnum),
93771@@ -2003,7 +2003,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
93772 */
93773 return false;
93774 }
93775- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93776+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93777 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
93778 TPS("newreq"));
93779
93780@@ -2228,7 +2228,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
93781 rsp->qlen += rdp->qlen;
93782 rdp->n_cbs_orphaned += rdp->qlen;
93783 rdp->qlen_lazy = 0;
93784- ACCESS_ONCE(rdp->qlen) = 0;
93785+ ACCESS_ONCE_RW(rdp->qlen) = 0;
93786 }
93787
93788 /*
93789@@ -2490,7 +2490,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
93790 }
93791 smp_mb(); /* List handling before counting for rcu_barrier(). */
93792 rdp->qlen_lazy -= count_lazy;
93793- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
93794+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
93795 rdp->n_cbs_invoked += count;
93796
93797 /* Reinstate batch limit if we have worked down the excess. */
93798@@ -2647,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
93799 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93800 return; /* Someone beat us to it. */
93801 }
93802- ACCESS_ONCE(rsp->gp_flags) =
93803+ ACCESS_ONCE_RW(rsp->gp_flags) =
93804 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
93805 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93806 rcu_gp_kthread_wake(rsp);
93807@@ -2693,7 +2693,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
93808 /*
93809 * Do RCU core processing for the current CPU.
93810 */
93811-static void rcu_process_callbacks(struct softirq_action *unused)
93812+static void rcu_process_callbacks(void)
93813 {
93814 struct rcu_state *rsp;
93815
93816@@ -2805,7 +2805,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93817 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
93818 if (debug_rcu_head_queue(head)) {
93819 /* Probable double call_rcu(), so leak the callback. */
93820- ACCESS_ONCE(head->func) = rcu_leak_callback;
93821+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
93822 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
93823 return;
93824 }
93825@@ -2833,7 +2833,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93826 local_irq_restore(flags);
93827 return;
93828 }
93829- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
93830+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
93831 if (lazy)
93832 rdp->qlen_lazy++;
93833 else
93834@@ -3106,11 +3106,11 @@ void synchronize_sched_expedited(void)
93835 * counter wrap on a 32-bit system. Quite a few more CPUs would of
93836 * course be required on a 64-bit system.
93837 */
93838- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
93839+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
93840 (ulong)atomic_long_read(&rsp->expedited_done) +
93841 ULONG_MAX / 8)) {
93842 synchronize_sched();
93843- atomic_long_inc(&rsp->expedited_wrap);
93844+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
93845 return;
93846 }
93847
93848@@ -3118,12 +3118,12 @@ void synchronize_sched_expedited(void)
93849 * Take a ticket. Note that atomic_inc_return() implies a
93850 * full memory barrier.
93851 */
93852- snap = atomic_long_inc_return(&rsp->expedited_start);
93853+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
93854 firstsnap = snap;
93855 if (!try_get_online_cpus()) {
93856 /* CPU hotplug operation in flight, fall back to normal GP. */
93857 wait_rcu_gp(call_rcu_sched);
93858- atomic_long_inc(&rsp->expedited_normal);
93859+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93860 return;
93861 }
93862 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
93863@@ -3136,7 +3136,7 @@ void synchronize_sched_expedited(void)
93864 for_each_cpu(cpu, cm) {
93865 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
93866
93867- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
93868+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
93869 cpumask_clear_cpu(cpu, cm);
93870 }
93871 if (cpumask_weight(cm) == 0)
93872@@ -3151,14 +3151,14 @@ void synchronize_sched_expedited(void)
93873 synchronize_sched_expedited_cpu_stop,
93874 NULL) == -EAGAIN) {
93875 put_online_cpus();
93876- atomic_long_inc(&rsp->expedited_tryfail);
93877+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
93878
93879 /* Check to see if someone else did our work for us. */
93880 s = atomic_long_read(&rsp->expedited_done);
93881 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93882 /* ensure test happens before caller kfree */
93883 smp_mb__before_atomic(); /* ^^^ */
93884- atomic_long_inc(&rsp->expedited_workdone1);
93885+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
93886 free_cpumask_var(cm);
93887 return;
93888 }
93889@@ -3168,7 +3168,7 @@ void synchronize_sched_expedited(void)
93890 udelay(trycount * num_online_cpus());
93891 } else {
93892 wait_rcu_gp(call_rcu_sched);
93893- atomic_long_inc(&rsp->expedited_normal);
93894+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93895 free_cpumask_var(cm);
93896 return;
93897 }
93898@@ -3178,7 +3178,7 @@ void synchronize_sched_expedited(void)
93899 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93900 /* ensure test happens before caller kfree */
93901 smp_mb__before_atomic(); /* ^^^ */
93902- atomic_long_inc(&rsp->expedited_workdone2);
93903+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
93904 free_cpumask_var(cm);
93905 return;
93906 }
93907@@ -3193,14 +3193,14 @@ void synchronize_sched_expedited(void)
93908 if (!try_get_online_cpus()) {
93909 /* CPU hotplug operation in flight, use normal GP. */
93910 wait_rcu_gp(call_rcu_sched);
93911- atomic_long_inc(&rsp->expedited_normal);
93912+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93913 free_cpumask_var(cm);
93914 return;
93915 }
93916- snap = atomic_long_read(&rsp->expedited_start);
93917+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
93918 smp_mb(); /* ensure read is before try_stop_cpus(). */
93919 }
93920- atomic_long_inc(&rsp->expedited_stoppedcpus);
93921+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
93922
93923 all_cpus_idle:
93924 free_cpumask_var(cm);
93925@@ -3212,16 +3212,16 @@ all_cpus_idle:
93926 * than we did already did their update.
93927 */
93928 do {
93929- atomic_long_inc(&rsp->expedited_done_tries);
93930+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
93931 s = atomic_long_read(&rsp->expedited_done);
93932 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
93933 /* ensure test happens before caller kfree */
93934 smp_mb__before_atomic(); /* ^^^ */
93935- atomic_long_inc(&rsp->expedited_done_lost);
93936+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
93937 break;
93938 }
93939 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
93940- atomic_long_inc(&rsp->expedited_done_exit);
93941+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
93942
93943 put_online_cpus();
93944 }
93945@@ -3431,7 +3431,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93946 * ACCESS_ONCE() to prevent the compiler from speculating
93947 * the increment to precede the early-exit check.
93948 */
93949- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93950+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93951 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
93952 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
93953 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
93954@@ -3487,7 +3487,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93955
93956 /* Increment ->n_barrier_done to prevent duplicate work. */
93957 smp_mb(); /* Keep increment after above mechanism. */
93958- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93959+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93960 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
93961 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
93962 smp_mb(); /* Keep increment before caller's subsequent code. */
93963@@ -3532,7 +3532,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
93964 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
93965 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
93966 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
93967- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
93968+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
93969 rdp->cpu = cpu;
93970 rdp->rsp = rsp;
93971 rcu_boot_init_nocb_percpu_data(rdp);
93972@@ -3565,8 +3565,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
93973 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
93974 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
93975 rcu_sysidle_init_percpu_data(rdp->dynticks);
93976- atomic_set(&rdp->dynticks->dynticks,
93977- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
93978+ atomic_set_unchecked(&rdp->dynticks->dynticks,
93979+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
93980 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
93981
93982 /* Add CPU to rcu_node bitmasks. */
93983diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
93984index 119de39..f07d31a 100644
93985--- a/kernel/rcu/tree.h
93986+++ b/kernel/rcu/tree.h
93987@@ -86,11 +86,11 @@ struct rcu_dynticks {
93988 long long dynticks_nesting; /* Track irq/process nesting level. */
93989 /* Process level is worth LLONG_MAX/2. */
93990 int dynticks_nmi_nesting; /* Track NMI nesting level. */
93991- atomic_t dynticks; /* Even value for idle, else odd. */
93992+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
93993 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
93994 long long dynticks_idle_nesting;
93995 /* irq/process nesting level from idle. */
93996- atomic_t dynticks_idle; /* Even value for idle, else odd. */
93997+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
93998 /* "Idle" excludes userspace execution. */
93999 unsigned long dynticks_idle_jiffies;
94000 /* End of last non-NMI non-idle period. */
94001@@ -457,17 +457,17 @@ struct rcu_state {
94002 /* _rcu_barrier(). */
94003 /* End of fields guarded by barrier_mutex. */
94004
94005- atomic_long_t expedited_start; /* Starting ticket. */
94006- atomic_long_t expedited_done; /* Done ticket. */
94007- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
94008- atomic_long_t expedited_tryfail; /* # acquisition failures. */
94009- atomic_long_t expedited_workdone1; /* # done by others #1. */
94010- atomic_long_t expedited_workdone2; /* # done by others #2. */
94011- atomic_long_t expedited_normal; /* # fallbacks to normal. */
94012- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
94013- atomic_long_t expedited_done_tries; /* # tries to update _done. */
94014- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
94015- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
94016+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
94017+ atomic_long_t expedited_done; /* Done ticket. */
94018+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
94019+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
94020+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
94021+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
94022+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
94023+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
94024+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
94025+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
94026+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
94027
94028 unsigned long jiffies_force_qs; /* Time at which to invoke */
94029 /* force_quiescent_state(). */
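
Editor's note: the tree.h hunk converts the per-CPU dynticks counters to atomic_unchecked_t. These count entries to and exits from extended quiescent states, and only their parity matters (even while idle, odd while not), so overflow checking buys nothing. Note also the one field deliberately left checked: expedited_done, which participates in the atomic_long_cmpxchg() ticket protocol in synchronize_sched_expedited() rather than being a mere statistic. The parity protocol, sketched single-threaded:

/* dynticks protocol: the counter is even while the CPU is idle and
 * odd while it is not; each transition is a single increment. */
static int dynticks;	/* atomic_unchecked_t in the patch */

static void eqs_exit(void)  { ++dynticks; /* now odd: RCU is watching */ }
static void eqs_enter(void) { ++dynticks; /* now even: CPU is idle    */ }
static int  rcu_is_watching(void) { return dynticks & 0x1; }
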
94030diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
94031index 0a571e9..fbfd611 100644
94032--- a/kernel/rcu/tree_plugin.h
94033+++ b/kernel/rcu/tree_plugin.h
94034@@ -619,7 +619,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
94035 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
94036 {
94037 return !rcu_preempted_readers_exp(rnp) &&
94038- ACCESS_ONCE(rnp->expmask) == 0;
94039+ ACCESS_ONCE_RW(rnp->expmask) == 0;
94040 }
94041
94042 /*
94043@@ -780,7 +780,7 @@ void synchronize_rcu_expedited(void)
94044
94045 /* Clean up and exit. */
94046 smp_mb(); /* ensure expedited GP seen before counter increment. */
94047- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
94048+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
94049 sync_rcu_preempt_exp_count + 1;
94050 unlock_mb_ret:
94051 mutex_unlock(&sync_rcu_preempt_exp_mutex);
94052@@ -1290,7 +1290,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
94053 free_cpumask_var(cm);
94054 }
94055
94056-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
94057+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
94058 .store = &rcu_cpu_kthread_task,
94059 .thread_should_run = rcu_cpu_kthread_should_run,
94060 .thread_fn = rcu_cpu_kthread,
94061@@ -1761,7 +1761,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
94062 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
94063 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
94064 cpu, ticks_value, ticks_title,
94065- atomic_read(&rdtp->dynticks) & 0xfff,
94066+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
94067 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
94068 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
94069 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
94070@@ -1906,7 +1906,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
94071 return;
94072 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
94073 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
94074- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
94075+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
94076 wake_up(&rdp_leader->nocb_wq);
94077 }
94078 }
94079@@ -1978,7 +1978,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
94080 atomic_long_add(rhcount, &rdp->nocb_q_count);
94081 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
94082 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
94083- ACCESS_ONCE(*old_rhpp) = rhp;
94084+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
94085 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
94086 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
94087
94088@@ -2167,7 +2167,7 @@ wait_again:
94089 continue; /* No CBs here, try next follower. */
94090
94091 /* Move callbacks to wait-for-GP list, which is empty. */
94092- ACCESS_ONCE(rdp->nocb_head) = NULL;
94093+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
94094 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
94095 gotcbs = true;
94096 }
94097@@ -2288,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
94098 list = ACCESS_ONCE(rdp->nocb_follower_head);
94099 BUG_ON(!list);
94100 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
94101- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
94102+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
94103 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
94104
94105 /* Each pass through the following loop invokes a callback. */
94106@@ -2338,7 +2338,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
94107 if (!rcu_nocb_need_deferred_wakeup(rdp))
94108 return;
94109 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
94110- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
94111+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
94112 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
94113 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
94114 }
94115@@ -2461,7 +2461,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
94116 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
94117 "rcuo%c/%d", rsp->abbr, cpu);
94118 BUG_ON(IS_ERR(t));
94119- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
94120+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
94121 }
94122
94123 /*
94124@@ -2666,11 +2666,11 @@ static void rcu_sysidle_enter(int irq)
94125
94126 /* Record start of fully idle period. */
94127 j = jiffies;
94128- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
94129+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
94130 smp_mb__before_atomic();
94131- atomic_inc(&rdtp->dynticks_idle);
94132+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94133 smp_mb__after_atomic();
94134- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
94135+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
94136 }
94137
94138 /*
94139@@ -2741,9 +2741,9 @@ static void rcu_sysidle_exit(int irq)
94140
94141 /* Record end of idle period. */
94142 smp_mb__before_atomic();
94143- atomic_inc(&rdtp->dynticks_idle);
94144+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94145 smp_mb__after_atomic();
94146- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
94147+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
94148
94149 /*
94150 * If we are the timekeeping CPU, we are permitted to be non-idle
94151@@ -2788,7 +2788,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
94152 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
94153
94154 /* Pick up current idle and NMI-nesting counter and check. */
94155- cur = atomic_read(&rdtp->dynticks_idle);
94156+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
94157 if (cur & 0x1) {
94158 *isidle = false; /* We are not idle! */
94159 return;
94160@@ -2837,7 +2837,7 @@ static void rcu_sysidle(unsigned long j)
94161 case RCU_SYSIDLE_NOT:
94162
94163 /* First time all are idle, so note a short idle period. */
94164- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94165+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94166 break;
94167
94168 case RCU_SYSIDLE_SHORT:
94169@@ -2875,7 +2875,7 @@ static void rcu_sysidle_cancel(void)
94170 {
94171 smp_mb();
94172 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
94173- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
94174+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
94175 }
94176
94177 /*
94178@@ -2927,7 +2927,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
94179 smp_mb(); /* grace period precedes setting inuse. */
94180
94181 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
94182- ACCESS_ONCE(rshp->inuse) = 0;
94183+ ACCESS_ONCE_RW(rshp->inuse) = 0;
94184 }
94185
94186 /*
94187@@ -3080,7 +3080,7 @@ static void rcu_bind_gp_kthread(void)
94188 static void rcu_dynticks_task_enter(void)
94189 {
94190 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
94191- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
94192+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
94193 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
94194 }
94195
94196@@ -3088,6 +3088,6 @@ static void rcu_dynticks_task_enter(void)
94197 static void rcu_dynticks_task_exit(void)
94198 {
94199 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
94200- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
94201+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
94202 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
94203 }
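
/*
 * Illustrative sketch, not part of the patch: the ACCESS_ONCE_RW()
 * conversions above reflect how PaX/grsecurity is assumed to split the
 * stock ACCESS_ONCE() macro into a read-only variant (const cast, so the
 * constify/KERNEXEC instrumentation can reject stray writes) and an
 * explicitly writable variant for sites that legitimately store:
 */
#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))
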
94204diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
94205index fbb6240..f6c5097 100644
94206--- a/kernel/rcu/tree_trace.c
94207+++ b/kernel/rcu/tree_trace.c
94208@@ -125,7 +125,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
94209 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
94210 rdp->qs_pending);
94211 seq_printf(m, " dt=%d/%llx/%d df=%lu",
94212- atomic_read(&rdp->dynticks->dynticks),
94213+ atomic_read_unchecked(&rdp->dynticks->dynticks),
94214 rdp->dynticks->dynticks_nesting,
94215 rdp->dynticks->dynticks_nmi_nesting,
94216 rdp->dynticks_fqs);
94217@@ -186,17 +186,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
94218 struct rcu_state *rsp = (struct rcu_state *)m->private;
94219
94220 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
94221- atomic_long_read(&rsp->expedited_start),
94222+ atomic_long_read_unchecked(&rsp->expedited_start),
94223 atomic_long_read(&rsp->expedited_done),
94224- atomic_long_read(&rsp->expedited_wrap),
94225- atomic_long_read(&rsp->expedited_tryfail),
94226- atomic_long_read(&rsp->expedited_workdone1),
94227- atomic_long_read(&rsp->expedited_workdone2),
94228- atomic_long_read(&rsp->expedited_normal),
94229- atomic_long_read(&rsp->expedited_stoppedcpus),
94230- atomic_long_read(&rsp->expedited_done_tries),
94231- atomic_long_read(&rsp->expedited_done_lost),
94232- atomic_long_read(&rsp->expedited_done_exit));
94233+ atomic_long_read_unchecked(&rsp->expedited_wrap),
94234+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
94235+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
94236+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
94237+ atomic_long_read_unchecked(&rsp->expedited_normal),
94238+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
94239+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
94240+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
94241+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
94242 return 0;
94243 }
94244
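
/*
 * Illustrative sketch, not part of the patch: under the PaX REFCOUNT
 * hardening, the regular atomic ops are assumed to trap on overflow, so
 * pure statistics counters (like the expedited counters above, where
 * wraparound is harmless) move to an *_unchecked type that keeps the old,
 * non-trapping semantics.  Simplified model:
 */
typedef struct { long counter; } atomic_long_unchecked_t;

static inline long atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
{
	return (*(const volatile long *)&v->counter);	/* no overflow check */
}

static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);		/* wrap is allowed */
}
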
94245diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
94246index e0d31a3..f4dafe3 100644
94247--- a/kernel/rcu/update.c
94248+++ b/kernel/rcu/update.c
94249@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
94250 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
94251 */
94252 if (till_stall_check < 3) {
94253- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
94254+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
94255 till_stall_check = 3;
94256 } else if (till_stall_check > 300) {
94257- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
94258+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
94259 till_stall_check = 300;
94260 }
94261 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
94262@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
94263 !ACCESS_ONCE(t->on_rq) ||
94264 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
94265 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
94266- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
94267+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
94268 list_del_init(&t->rcu_tasks_holdout_list);
94269 put_task_struct(t);
94270 return;
94271@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
94272 !is_idle_task(t)) {
94273 get_task_struct(t);
94274 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
94275- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
94276+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
94277 list_add(&t->rcu_tasks_holdout_list,
94278 &rcu_tasks_holdouts);
94279 }
94280@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
94281 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
94282 BUG_ON(IS_ERR(t));
94283 smp_mb(); /* Ensure others see full kthread. */
94284- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
94285+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
94286 mutex_unlock(&rcu_tasks_kthread_mutex);
94287 }
94288
94289diff --git a/kernel/resource.c b/kernel/resource.c
94290index 19f2357..ebe7f35 100644
94291--- a/kernel/resource.c
94292+++ b/kernel/resource.c
94293@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
94294
94295 static int __init ioresources_init(void)
94296 {
94297+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94298+#ifdef CONFIG_GRKERNSEC_PROC_USER
94299+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
94300+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
94301+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94302+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
94303+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
94304+#endif
94305+#else
94306 proc_create("ioports", 0, NULL, &proc_ioports_operations);
94307 proc_create("iomem", 0, NULL, &proc_iomem_operations);
94308+#endif
94309 return 0;
94310 }
94311 __initcall(ioresources_init);
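
/*
 * For reference (standard VFS mode bits, not new code): the proc entry
 * permissions selected above work out as follows; procfs is assumed to
 * widen a mode of 0 to world-readable for regular files:
 *
 *	S_IRUSR			0400	root may read (GRKERNSEC_PROC_USER)
 *	S_IRUSR | S_IRGRP	0440	root plus the proc group may read
 *	0			0444	default: everyone may read
 */
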
94312diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
94313index eae160d..c9aa22e 100644
94314--- a/kernel/sched/auto_group.c
94315+++ b/kernel/sched/auto_group.c
94316@@ -11,7 +11,7 @@
94317
94318 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
94319 static struct autogroup autogroup_default;
94320-static atomic_t autogroup_seq_nr;
94321+static atomic_unchecked_t autogroup_seq_nr;
94322
94323 void __init autogroup_init(struct task_struct *init_task)
94324 {
94325@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
94326
94327 kref_init(&ag->kref);
94328 init_rwsem(&ag->lock);
94329- ag->id = atomic_inc_return(&autogroup_seq_nr);
94330+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
94331 ag->tg = tg;
94332 #ifdef CONFIG_RT_GROUP_SCHED
94333 /*
94334diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
94335index 8d0f35d..c16360d 100644
94336--- a/kernel/sched/completion.c
94337+++ b/kernel/sched/completion.c
94338@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
94339 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94340 * or number of jiffies left till timeout) if completed.
94341 */
94342-long __sched
94343+long __sched __intentional_overflow(-1)
94344 wait_for_completion_interruptible_timeout(struct completion *x,
94345 unsigned long timeout)
94346 {
94347@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
94348 *
94349 * Return: -ERESTARTSYS if interrupted, 0 if completed.
94350 */
94351-int __sched wait_for_completion_killable(struct completion *x)
94352+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
94353 {
94354 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
94355 if (t == -ERESTARTSYS)
94356@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
94357 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94358 * or number of jiffies left till timeout) if completed.
94359 */
94360-long __sched
94361+long __sched __intentional_overflow(-1)
94362 wait_for_completion_killable_timeout(struct completion *x,
94363 unsigned long timeout)
94364 {
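
/*
 * Illustrative sketch, not part of the patch: __intentional_overflow() is
 * assumed to expand to an attribute recognized by the size_overflow gcc
 * plugin and to nothing otherwise; the -1 argument marks the return value
 * as allowed to wrap, so timeout arithmetic that returns negative error
 * codes (as in the completion waiters above) is not instrumented:
 */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...)	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif
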
94365diff --git a/kernel/sched/core.c b/kernel/sched/core.c
94366index 3d5f6f6..a94298f 100644
94367--- a/kernel/sched/core.c
94368+++ b/kernel/sched/core.c
94369@@ -1862,7 +1862,7 @@ void set_numabalancing_state(bool enabled)
94370 int sysctl_numa_balancing(struct ctl_table *table, int write,
94371 void __user *buffer, size_t *lenp, loff_t *ppos)
94372 {
94373- struct ctl_table t;
94374+ ctl_table_no_const t;
94375 int err;
94376 int state = numabalancing_enabled;
94377
94378@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
94379 next->active_mm = oldmm;
94380 atomic_inc(&oldmm->mm_count);
94381 enter_lazy_tlb(oldmm, next);
94382- } else
94383+ } else {
94384 switch_mm(oldmm, mm, next);
94385+ populate_stack();
94386+ }
94387
94388 if (!prev->mm) {
94389 prev->active_mm = NULL;
94390@@ -3124,6 +3126,8 @@ int can_nice(const struct task_struct *p, const int nice)
94391 /* convert nice value [19,-20] to rlimit style value [1,40] */
94392 int nice_rlim = nice_to_rlimit(nice);
94393
94394+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94395+
94396 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
94397 capable(CAP_SYS_NICE));
94398 }
94399@@ -3150,7 +3154,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94400 nice = task_nice(current) + increment;
94401
94402 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
94403- if (increment < 0 && !can_nice(current, nice))
94404+ if (increment < 0 && (!can_nice(current, nice) ||
94405+ gr_handle_chroot_nice()))
94406 return -EPERM;
94407
94408 retval = security_task_setnice(current, nice);
94409@@ -3459,6 +3464,7 @@ recheck:
94410 if (policy != p->policy && !rlim_rtprio)
94411 return -EPERM;
94412
94413+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
94414 /* can't increase priority */
94415 if (attr->sched_priority > p->rt_priority &&
94416 attr->sched_priority > rlim_rtprio)
94417@@ -4946,6 +4952,7 @@ void idle_task_exit(void)
94418
94419 if (mm != &init_mm) {
94420 switch_mm(mm, &init_mm, current);
94421+ populate_stack();
94422 finish_arch_post_lock_switch();
94423 }
94424 mmdrop(mm);
94425@@ -5041,7 +5048,7 @@ static void migrate_tasks(unsigned int dead_cpu)
94426
94427 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
94428
94429-static struct ctl_table sd_ctl_dir[] = {
94430+static ctl_table_no_const sd_ctl_dir[] __read_only = {
94431 {
94432 .procname = "sched_domain",
94433 .mode = 0555,
94434@@ -5058,17 +5065,17 @@ static struct ctl_table sd_ctl_root[] = {
94435 {}
94436 };
94437
94438-static struct ctl_table *sd_alloc_ctl_entry(int n)
94439+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
94440 {
94441- struct ctl_table *entry =
94442+ ctl_table_no_const *entry =
94443 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
94444
94445 return entry;
94446 }
94447
94448-static void sd_free_ctl_entry(struct ctl_table **tablep)
94449+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
94450 {
94451- struct ctl_table *entry;
94452+ ctl_table_no_const *entry;
94453
94454 /*
94455 * In the intermediate directories, both the child directory and
94456@@ -5076,22 +5083,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
94457 * will always be set. In the lowest directory the names are
94458 * static strings and all have proc handlers.
94459 */
94460- for (entry = *tablep; entry->mode; entry++) {
94461- if (entry->child)
94462- sd_free_ctl_entry(&entry->child);
94463+ for (entry = tablep; entry->mode; entry++) {
94464+ if (entry->child) {
94465+ sd_free_ctl_entry(entry->child);
94466+ pax_open_kernel();
94467+ entry->child = NULL;
94468+ pax_close_kernel();
94469+ }
94470 if (entry->proc_handler == NULL)
94471 kfree(entry->procname);
94472 }
94473
94474- kfree(*tablep);
94475- *tablep = NULL;
94476+ kfree(tablep);
94477 }
94478
94479 static int min_load_idx = 0;
94480 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
94481
94482 static void
94483-set_table_entry(struct ctl_table *entry,
94484+set_table_entry(ctl_table_no_const *entry,
94485 const char *procname, void *data, int maxlen,
94486 umode_t mode, proc_handler *proc_handler,
94487 bool load_idx)
94488@@ -5111,7 +5121,7 @@ set_table_entry(struct ctl_table *entry,
94489 static struct ctl_table *
94490 sd_alloc_ctl_domain_table(struct sched_domain *sd)
94491 {
94492- struct ctl_table *table = sd_alloc_ctl_entry(14);
94493+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
94494
94495 if (table == NULL)
94496 return NULL;
94497@@ -5149,9 +5159,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
94498 return table;
94499 }
94500
94501-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
94502+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
94503 {
94504- struct ctl_table *entry, *table;
94505+ ctl_table_no_const *entry, *table;
94506 struct sched_domain *sd;
94507 int domain_num = 0, i;
94508 char buf[32];
94509@@ -5178,11 +5188,13 @@ static struct ctl_table_header *sd_sysctl_header;
94510 static void register_sched_domain_sysctl(void)
94511 {
94512 int i, cpu_num = num_possible_cpus();
94513- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
94514+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
94515 char buf[32];
94516
94517 WARN_ON(sd_ctl_dir[0].child);
94518+ pax_open_kernel();
94519 sd_ctl_dir[0].child = entry;
94520+ pax_close_kernel();
94521
94522 if (entry == NULL)
94523 return;
94524@@ -5205,8 +5217,12 @@ static void unregister_sched_domain_sysctl(void)
94525 if (sd_sysctl_header)
94526 unregister_sysctl_table(sd_sysctl_header);
94527 sd_sysctl_header = NULL;
94528- if (sd_ctl_dir[0].child)
94529- sd_free_ctl_entry(&sd_ctl_dir[0].child);
94530+ if (sd_ctl_dir[0].child) {
94531+ sd_free_ctl_entry(sd_ctl_dir[0].child);
94532+ pax_open_kernel();
94533+ sd_ctl_dir[0].child = NULL;
94534+ pax_close_kernel();
94535+ }
94536 }
94537 #else
94538 static void register_sched_domain_sysctl(void)
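
/*
 * Illustrative sketch, not part of the patch: the sched_domain sysctl
 * rework above follows the usual constification pattern.  A ctl_table
 * that must stay writable is declared through a plugin-exempt typedef,
 * roughly:
 *
 *	typedef struct ctl_table __no_const ctl_table_no_const;
 *
 * and any write to data living in read-only memory is bracketed by
 * pax_open_kernel()/pax_close_kernel(), which briefly lift write
 * protection.  Assumed x86-style model (the real code also disables
 * preemption and fences around the CR0 writes):
 */
static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* allow stores to read-only pages */
	return cr0;
}

static inline unsigned long pax_close_kernel(void)
{
	unsigned long cr0 = read_cr0() | X86_CR0_WP;

	write_cr0(cr0);			/* re-arm write protection */
	return cr0;
}
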
94539diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
94540index 241213b..6a64c91 100644
94541--- a/kernel/sched/fair.c
94542+++ b/kernel/sched/fair.c
94543@@ -2092,7 +2092,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
94544
94545 static void reset_ptenuma_scan(struct task_struct *p)
94546 {
94547- ACCESS_ONCE(p->mm->numa_scan_seq)++;
94548+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
94549 p->mm->numa_scan_offset = 0;
94550 }
94551
94552@@ -7656,7 +7656,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
94553 * run_rebalance_domains is triggered when needed from the scheduler tick.
94554 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
94555 */
94556-static void run_rebalance_domains(struct softirq_action *h)
94557+static __latent_entropy void run_rebalance_domains(void)
94558 {
94559 struct rq *this_rq = this_rq();
94560 enum cpu_idle_type idle = this_rq->idle_balance ?
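
/*
 * Illustrative sketch, not part of the patch: __latent_entropy hands the
 * function to the latent_entropy gcc plugin, which is assumed to mix
 * build-time random constants into a global accumulator as the function
 * runs.  Hand-written approximation of an instrumented handler:
 */
extern u64 latent_entropy;

static void example_instrumented_handler(void)
{
	u64 le = latent_entropy ^ 0x9e3779b97f4a7c15ULL; /* build-time random */

	/* ... original handler body; more mixing on each branch ... */

	latent_entropy = le * 0xff51afd7ed558ccdULL;	 /* fold back in */
}
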
94561diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
94562index dc0f435..ae2e085 100644
94563--- a/kernel/sched/sched.h
94564+++ b/kernel/sched/sched.h
94565@@ -1200,7 +1200,7 @@ struct sched_class {
94566 #ifdef CONFIG_FAIR_GROUP_SCHED
94567 void (*task_move_group) (struct task_struct *p, int on_rq);
94568 #endif
94569-};
94570+} __do_const;
94571
94572 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
94573 {
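
/*
 * Illustrative sketch, not part of the patch: __do_const is assumed to be
 * a constify-plugin attribute that forces every instance of an ops-style
 * structure (such as sched_class above) to be treated as const and placed
 * in read-only data, so a kernel write primitive cannot retarget its
 * function pointers:
 */
#ifdef CONSTIFY_PLUGIN
#define __do_const	__attribute__((do_const))
#else
#define __do_const
#endif
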
94574diff --git a/kernel/signal.c b/kernel/signal.c
94575index a390499..ebe9a21 100644
94576--- a/kernel/signal.c
94577+++ b/kernel/signal.c
94578@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
94579
94580 int print_fatal_signals __read_mostly;
94581
94582-static void __user *sig_handler(struct task_struct *t, int sig)
94583+static __sighandler_t sig_handler(struct task_struct *t, int sig)
94584 {
94585 return t->sighand->action[sig - 1].sa.sa_handler;
94586 }
94587
94588-static int sig_handler_ignored(void __user *handler, int sig)
94589+static int sig_handler_ignored(__sighandler_t handler, int sig)
94590 {
94591 /* Is it explicitly or implicitly ignored? */
94592 return handler == SIG_IGN ||
94593@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94594
94595 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
94596 {
94597- void __user *handler;
94598+ __sighandler_t handler;
94599
94600 handler = sig_handler(t, sig);
94601
94602@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
94603 atomic_inc(&user->sigpending);
94604 rcu_read_unlock();
94605
94606+ if (!override_rlimit)
94607+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94608+
94609 if (override_rlimit ||
94610 atomic_read(&user->sigpending) <=
94611 task_rlimit(t, RLIMIT_SIGPENDING)) {
94612@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94613
94614 int unhandled_signal(struct task_struct *tsk, int sig)
94615 {
94616- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94617+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94618 if (is_global_init(tsk))
94619 return 1;
94620 if (handler != SIG_IGN && handler != SIG_DFL)
94621@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94622 }
94623 }
94624
94625+ /* allow glibc communication via tgkill to other threads in our
94626+ thread group */
94627+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94628+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94629+ && gr_handle_signal(t, sig))
94630+ return -EPERM;
94631+
94632 return security_task_kill(t, info, sig, 0);
94633 }
94634
94635@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94636 return send_signal(sig, info, p, 1);
94637 }
94638
94639-static int
94640+int
94641 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94642 {
94643 return send_signal(sig, info, t, 0);
94644@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94645 unsigned long int flags;
94646 int ret, blocked, ignored;
94647 struct k_sigaction *action;
94648+ int is_unhandled = 0;
94649
94650 spin_lock_irqsave(&t->sighand->siglock, flags);
94651 action = &t->sighand->action[sig-1];
94652@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94653 }
94654 if (action->sa.sa_handler == SIG_DFL)
94655 t->signal->flags &= ~SIGNAL_UNKILLABLE;
94656+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
94657+ is_unhandled = 1;
94658 ret = specific_send_sig_info(sig, info, t);
94659 spin_unlock_irqrestore(&t->sighand->siglock, flags);
94660
94661+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
94662+	   normal operation */
94663+ if (is_unhandled) {
94664+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
94665+ gr_handle_crash(t, sig);
94666+ }
94667+
94668 return ret;
94669 }
94670
94671@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94672 ret = check_kill_permission(sig, info, p);
94673 rcu_read_unlock();
94674
94675- if (!ret && sig)
94676+ if (!ret && sig) {
94677 ret = do_send_sig_info(sig, info, p, true);
94678+ if (!ret)
94679+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
94680+ }
94681
94682 return ret;
94683 }
94684@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
94685 int error = -ESRCH;
94686
94687 rcu_read_lock();
94688- p = find_task_by_vpid(pid);
94689+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
94690+ /* allow glibc communication via tgkill to other threads in our
94691+ thread group */
94692+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
94693+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
94694+ p = find_task_by_vpid_unrestricted(pid);
94695+ else
94696+#endif
94697+ p = find_task_by_vpid(pid);
94698 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
94699 error = check_kill_permission(sig, info, p);
94700 /*
94701@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
94702 }
94703 seg = get_fs();
94704 set_fs(KERNEL_DS);
94705- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
94706- (stack_t __force __user *) &uoss,
94707+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
94708+ (stack_t __force_user *) &uoss,
94709 compat_user_stack_pointer());
94710 set_fs(seg);
94711 if (ret >= 0 && uoss_ptr) {
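
/*
 * For context (standard uapi definitions, simplified): sa_handler really
 * is a function pointer, so typing the helpers above as __sighandler_t
 * instead of void __user * matches the stored object and keeps the
 * SIG_IGN/SIG_DFL comparisons honest:
 */
typedef void (*__sighandler_t)(int);

#define SIG_DFL	((__sighandler_t)0)	/* default signal handling */
#define SIG_IGN	((__sighandler_t)1)	/* ignore signal */
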
94712diff --git a/kernel/smpboot.c b/kernel/smpboot.c
94713index 40190f2..8861d40 100644
94714--- a/kernel/smpboot.c
94715+++ b/kernel/smpboot.c
94716@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
94717 }
94718 smpboot_unpark_thread(plug_thread, cpu);
94719 }
94720- list_add(&plug_thread->list, &hotplug_threads);
94721+ pax_list_add(&plug_thread->list, &hotplug_threads);
94722 out:
94723 mutex_unlock(&smpboot_threads_lock);
94724 put_online_cpus();
94725@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
94726 {
94727 get_online_cpus();
94728 mutex_lock(&smpboot_threads_lock);
94729- list_del(&plug_thread->list);
94730+ pax_list_del(&plug_thread->list);
94731 smpboot_destroy_threads(plug_thread);
94732 mutex_unlock(&smpboot_threads_lock);
94733 put_online_cpus();
94734diff --git a/kernel/softirq.c b/kernel/softirq.c
94735index 479e443..66d845e1 100644
94736--- a/kernel/softirq.c
94737+++ b/kernel/softirq.c
94738@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
94739 EXPORT_SYMBOL(irq_stat);
94740 #endif
94741
94742-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
94743+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
94744
94745 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
94746
94747@@ -270,7 +270,7 @@ restart:
94748 kstat_incr_softirqs_this_cpu(vec_nr);
94749
94750 trace_softirq_entry(vec_nr);
94751- h->action(h);
94752+ h->action();
94753 trace_softirq_exit(vec_nr);
94754 if (unlikely(prev_count != preempt_count())) {
94755 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
94756@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
94757 or_softirq_pending(1UL << nr);
94758 }
94759
94760-void open_softirq(int nr, void (*action)(struct softirq_action *))
94761+void __init open_softirq(int nr, void (*action)(void))
94762 {
94763 softirq_vec[nr].action = action;
94764 }
94765@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
94766 }
94767 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
94768
94769-static void tasklet_action(struct softirq_action *a)
94770+static void tasklet_action(void)
94771 {
94772 struct tasklet_struct *list;
94773
94774@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
94775 }
94776 }
94777
94778-static void tasklet_hi_action(struct softirq_action *a)
94779+static __latent_entropy void tasklet_hi_action(void)
94780 {
94781 struct tasklet_struct *list;
94782
94783@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
94784 .notifier_call = cpu_callback
94785 };
94786
94787-static struct smp_hotplug_thread softirq_threads = {
94788+static struct smp_hotplug_thread softirq_threads __read_only = {
94789 .store = &ksoftirqd,
94790 .thread_should_run = ksoftirqd_should_run,
94791 .thread_fn = run_ksoftirqd,
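
/*
 * Summary sketch of the softirq changes above: dropping the unused
 * struct softirq_action * argument gives every handler a uniform
 * void (*)(void) type, and since all open_softirq() calls happen at boot,
 * the vector itself can be made read-only afterwards:
 */
struct softirq_action {
	void (*action)(void);	/* was: void (*action)(struct softirq_action *) */
};

void __init open_softirq(int nr, void (*action)(void));	/* boot-time only */
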
94792diff --git a/kernel/sys.c b/kernel/sys.c
94793index a03d9cd..55dbe9c 100644
94794--- a/kernel/sys.c
94795+++ b/kernel/sys.c
94796@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
94797 error = -EACCES;
94798 goto out;
94799 }
94800+
94801+ if (gr_handle_chroot_setpriority(p, niceval)) {
94802+ error = -EACCES;
94803+ goto out;
94804+ }
94805+
94806 no_nice = security_task_setnice(p, niceval);
94807 if (no_nice) {
94808 error = no_nice;
94809@@ -365,6 +371,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
94810 goto error;
94811 }
94812
94813+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
94814+ goto error;
94815+
94816+ if (!gid_eq(new->gid, old->gid)) {
94817+		/* make sure we generate a learn log for what will
94818+		   end up being a role transition after a full-learning
94819+		   policy is generated.
94820+		   CAP_SETGID is required to perform a transition;
94821+		   we may not log a CAP_SETGID check above, e.g.
94822+		   in the case where the new rgid equals the old egid.
94823+		*/
94824+ gr_learn_cap(current, new, CAP_SETGID);
94825+ }
94826+
94827 if (rgid != (gid_t) -1 ||
94828 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
94829 new->sgid = new->egid;
94830@@ -400,6 +420,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
94831 old = current_cred();
94832
94833 retval = -EPERM;
94834+
94835+ if (gr_check_group_change(kgid, kgid, kgid))
94836+ goto error;
94837+
94838 if (ns_capable(old->user_ns, CAP_SETGID))
94839 new->gid = new->egid = new->sgid = new->fsgid = kgid;
94840 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
94841@@ -417,7 +441,7 @@ error:
94842 /*
94843 * change the user struct in a credentials set to match the new UID
94844 */
94845-static int set_user(struct cred *new)
94846+int set_user(struct cred *new)
94847 {
94848 struct user_struct *new_user;
94849
94850@@ -497,7 +521,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94851 goto error;
94852 }
94853
94854+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
94855+ goto error;
94856+
94857 if (!uid_eq(new->uid, old->uid)) {
94858+		/* make sure we generate a learn log for what will
94859+		   end up being a role transition after a full-learning
94860+		   policy is generated.
94861+		   CAP_SETUID is required to perform a transition;
94862+		   we may not log a CAP_SETUID check above, e.g.
94863+		   in the case where the new ruid equals the old euid.
94864+		*/
94865+ gr_learn_cap(current, new, CAP_SETUID);
94866 retval = set_user(new);
94867 if (retval < 0)
94868 goto error;
94869@@ -547,6 +582,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94870 old = current_cred();
94871
94872 retval = -EPERM;
94873+
94874+ if (gr_check_crash_uid(kuid))
94875+ goto error;
94876+ if (gr_check_user_change(kuid, kuid, kuid))
94877+ goto error;
94878+
94879 if (ns_capable(old->user_ns, CAP_SETUID)) {
94880 new->suid = new->uid = kuid;
94881 if (!uid_eq(kuid, old->uid)) {
94882@@ -616,6 +657,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94883 goto error;
94884 }
94885
94886+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
94887+ goto error;
94888+
94889 if (ruid != (uid_t) -1) {
94890 new->uid = kruid;
94891 if (!uid_eq(kruid, old->uid)) {
94892@@ -700,6 +744,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94893 goto error;
94894 }
94895
94896+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
94897+ goto error;
94898+
94899 if (rgid != (gid_t) -1)
94900 new->gid = krgid;
94901 if (egid != (gid_t) -1)
94902@@ -764,12 +811,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94903 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
94904 ns_capable(old->user_ns, CAP_SETUID)) {
94905 if (!uid_eq(kuid, old->fsuid)) {
94906+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
94907+ goto error;
94908+
94909 new->fsuid = kuid;
94910 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
94911 goto change_okay;
94912 }
94913 }
94914
94915+error:
94916 abort_creds(new);
94917 return old_fsuid;
94918
94919@@ -802,12 +853,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94920 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
94921 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
94922 ns_capable(old->user_ns, CAP_SETGID)) {
94923+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
94924+ goto error;
94925+
94926 if (!gid_eq(kgid, old->fsgid)) {
94927 new->fsgid = kgid;
94928 goto change_okay;
94929 }
94930 }
94931
94932+error:
94933 abort_creds(new);
94934 return old_fsgid;
94935
94936@@ -1185,19 +1240,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
94937 return -EFAULT;
94938
94939 down_read(&uts_sem);
94940- error = __copy_to_user(&name->sysname, &utsname()->sysname,
94941+ error = __copy_to_user(name->sysname, &utsname()->sysname,
94942 __OLD_UTS_LEN);
94943 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
94944- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
94945+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
94946 __OLD_UTS_LEN);
94947 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
94948- error |= __copy_to_user(&name->release, &utsname()->release,
94949+ error |= __copy_to_user(name->release, &utsname()->release,
94950 __OLD_UTS_LEN);
94951 error |= __put_user(0, name->release + __OLD_UTS_LEN);
94952- error |= __copy_to_user(&name->version, &utsname()->version,
94953+ error |= __copy_to_user(name->version, &utsname()->version,
94954 __OLD_UTS_LEN);
94955 error |= __put_user(0, name->version + __OLD_UTS_LEN);
94956- error |= __copy_to_user(&name->machine, &utsname()->machine,
94957+ error |= __copy_to_user(name->machine, &utsname()->machine,
94958 __OLD_UTS_LEN);
94959 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
94960 up_read(&uts_sem);
94961@@ -1398,6 +1453,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
94962 */
94963 new_rlim->rlim_cur = 1;
94964 }
94965+	/* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
94966+	   is changed to a lower value.  Since tasks can be created by the same
94967+	   user in between this limit change and an execve by this task, force
94968+	   a recheck only for this task by setting PF_NPROC_EXCEEDED.
94969+	*/
94970+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
94971+ tsk->flags |= PF_NPROC_EXCEEDED;
94972 }
94973 if (!retval) {
94974 if (old_rlim)
94975diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94976index ce410bb..cd276f0 100644
94977--- a/kernel/sysctl.c
94978+++ b/kernel/sysctl.c
94979@@ -94,7 +94,6 @@
94980
94981
94982 #if defined(CONFIG_SYSCTL)
94983-
94984 /* External variables not in a header file. */
94985 extern int max_threads;
94986 extern int suid_dumpable;
94987@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
94988
94989 /* Constants used for minimum and maximum */
94990 #ifdef CONFIG_LOCKUP_DETECTOR
94991-static int sixty = 60;
94992+static int sixty __read_only = 60;
94993 #endif
94994
94995-static int __maybe_unused neg_one = -1;
94996+static int __maybe_unused neg_one __read_only = -1;
94997
94998-static int zero;
94999-static int __maybe_unused one = 1;
95000-static int __maybe_unused two = 2;
95001-static int __maybe_unused four = 4;
95002-static unsigned long one_ul = 1;
95003-static int one_hundred = 100;
95004+static int zero __read_only = 0;
95005+static int __maybe_unused one __read_only = 1;
95006+static int __maybe_unused two __read_only = 2;
95007+static int __maybe_unused three __read_only = 3;
95008+static int __maybe_unused four __read_only = 4;
95009+static unsigned long one_ul __read_only = 1;
95010+static int one_hundred __read_only = 100;
95011 #ifdef CONFIG_PRINTK
95012-static int ten_thousand = 10000;
95013+static int ten_thousand __read_only = 10000;
95014 #endif
95015
95016 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
95017@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
95018 void __user *buffer, size_t *lenp, loff_t *ppos);
95019 #endif
95020
95021-#ifdef CONFIG_PRINTK
95022 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95023 void __user *buffer, size_t *lenp, loff_t *ppos);
95024-#endif
95025
95026 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
95027 void __user *buffer, size_t *lenp, loff_t *ppos);
95028@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
95029
95030 #endif
95031
95032+extern struct ctl_table grsecurity_table[];
95033+
95034 static struct ctl_table kern_table[];
95035 static struct ctl_table vm_table[];
95036 static struct ctl_table fs_table[];
95037@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
95038 int sysctl_legacy_va_layout;
95039 #endif
95040
95041+#ifdef CONFIG_PAX_SOFTMODE
95042+static struct ctl_table pax_table[] = {
95043+ {
95044+ .procname = "softmode",
95045+ .data = &pax_softmode,
95046+ .maxlen = sizeof(unsigned int),
95047+ .mode = 0600,
95048+ .proc_handler = &proc_dointvec,
95049+ },
95050+
95051+ { }
95052+};
95053+#endif
95054+
95055 /* The default sysctl tables: */
95056
95057 static struct ctl_table sysctl_base_table[] = {
95058@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
95059 #endif
95060
95061 static struct ctl_table kern_table[] = {
95062+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
95063+ {
95064+ .procname = "grsecurity",
95065+ .mode = 0500,
95066+ .child = grsecurity_table,
95067+ },
95068+#endif
95069+
95070+#ifdef CONFIG_PAX_SOFTMODE
95071+ {
95072+ .procname = "pax",
95073+ .mode = 0500,
95074+ .child = pax_table,
95075+ },
95076+#endif
95077+
95078 {
95079 .procname = "sched_child_runs_first",
95080 .data = &sysctl_sched_child_runs_first,
95081@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
95082 .data = &modprobe_path,
95083 .maxlen = KMOD_PATH_LEN,
95084 .mode = 0644,
95085- .proc_handler = proc_dostring,
95086+ .proc_handler = proc_dostring_modpriv,
95087 },
95088 {
95089 .procname = "modules_disabled",
95090@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
95091 .extra1 = &zero,
95092 .extra2 = &one,
95093 },
95094+#endif
95095 {
95096 .procname = "kptr_restrict",
95097 .data = &kptr_restrict,
95098 .maxlen = sizeof(int),
95099 .mode = 0644,
95100 .proc_handler = proc_dointvec_minmax_sysadmin,
95101+#ifdef CONFIG_GRKERNSEC_HIDESYM
95102+ .extra1 = &two,
95103+#else
95104 .extra1 = &zero,
95105+#endif
95106 .extra2 = &two,
95107 },
95108-#endif
95109 {
95110 .procname = "ngroups_max",
95111 .data = &ngroups_max,
95112@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
95113 */
95114 {
95115 .procname = "perf_event_paranoid",
95116- .data = &sysctl_perf_event_paranoid,
95117- .maxlen = sizeof(sysctl_perf_event_paranoid),
95118+ .data = &sysctl_perf_event_legitimately_concerned,
95119+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
95120 .mode = 0644,
95121- .proc_handler = proc_dointvec,
95122+ /* go ahead, be a hero */
95123+ .proc_handler = proc_dointvec_minmax_sysadmin,
95124+ .extra1 = &neg_one,
95125+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
95126+ .extra2 = &three,
95127+#else
95128+ .extra2 = &two,
95129+#endif
95130 },
95131 {
95132 .procname = "perf_event_mlock_kb",
95133@@ -1348,6 +1389,13 @@ static struct ctl_table vm_table[] = {
95134 .proc_handler = proc_dointvec_minmax,
95135 .extra1 = &zero,
95136 },
95137+ {
95138+ .procname = "heap_stack_gap",
95139+ .data = &sysctl_heap_stack_gap,
95140+ .maxlen = sizeof(sysctl_heap_stack_gap),
95141+ .mode = 0644,
95142+ .proc_handler = proc_doulongvec_minmax,
95143+ },
95144 #else
95145 {
95146 .procname = "nr_trim_pages",
95147@@ -1830,6 +1878,16 @@ int proc_dostring(struct ctl_table *table, int write,
95148 (char __user *)buffer, lenp, ppos);
95149 }
95150
95151+int proc_dostring_modpriv(struct ctl_table *table, int write,
95152+ void __user *buffer, size_t *lenp, loff_t *ppos)
95153+{
95154+ if (write && !capable(CAP_SYS_MODULE))
95155+ return -EPERM;
95156+
95157+ return _proc_do_string(table->data, table->maxlen, write,
95158+ buffer, lenp, ppos);
95159+}
95160+
95161 static size_t proc_skip_spaces(char **buf)
95162 {
95163 size_t ret;
95164@@ -1935,6 +1993,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
95165 len = strlen(tmp);
95166 if (len > *size)
95167 len = *size;
95168+ if (len > sizeof(tmp))
95169+ len = sizeof(tmp);
95170 if (copy_to_user(*buf, tmp, len))
95171 return -EFAULT;
95172 *size -= len;
95173@@ -2112,7 +2172,7 @@ int proc_dointvec(struct ctl_table *table, int write,
95174 static int proc_taint(struct ctl_table *table, int write,
95175 void __user *buffer, size_t *lenp, loff_t *ppos)
95176 {
95177- struct ctl_table t;
95178+ ctl_table_no_const t;
95179 unsigned long tmptaint = get_taint();
95180 int err;
95181
95182@@ -2140,7 +2200,6 @@ static int proc_taint(struct ctl_table *table, int write,
95183 return err;
95184 }
95185
95186-#ifdef CONFIG_PRINTK
95187 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95188 void __user *buffer, size_t *lenp, loff_t *ppos)
95189 {
95190@@ -2149,7 +2208,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95191
95192 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
95193 }
95194-#endif
95195
95196 struct do_proc_dointvec_minmax_conv_param {
95197 int *min;
95198@@ -2709,6 +2767,12 @@ int proc_dostring(struct ctl_table *table, int write,
95199 return -ENOSYS;
95200 }
95201
95202+int proc_dostring_modpriv(struct ctl_table *table, int write,
95203+ void __user *buffer, size_t *lenp, loff_t *ppos)
95204+{
95205+ return -ENOSYS;
95206+}
95207+
95208 int proc_dointvec(struct ctl_table *table, int write,
95209 void __user *buffer, size_t *lenp, loff_t *ppos)
95210 {
95211@@ -2765,5 +2829,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
95212 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
95213 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
95214 EXPORT_SYMBOL(proc_dostring);
95215+EXPORT_SYMBOL(proc_dostring_modpriv);
95216 EXPORT_SYMBOL(proc_doulongvec_minmax);
95217 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
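
/*
 * Illustrative sketch, not part of the patch: the sysctl min/max
 * sentinels above bound the clamping done by proc_dointvec_minmax();
 * __read_only is assumed to pin them in a post-boot write-protected
 * section so an arbitrary-write primitive cannot widen the permitted
 * range, roughly:
 */
#define __read_only	__attribute__((__section__(".data..read_only")))

static int zero __read_only = 0;	/* e.g. lower bound for kptr_restrict */
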
95218diff --git a/kernel/taskstats.c b/kernel/taskstats.c
95219index 21f82c2..c1984e5 100644
95220--- a/kernel/taskstats.c
95221+++ b/kernel/taskstats.c
95222@@ -28,9 +28,12 @@
95223 #include <linux/fs.h>
95224 #include <linux/file.h>
95225 #include <linux/pid_namespace.h>
95226+#include <linux/grsecurity.h>
95227 #include <net/genetlink.h>
95228 #include <linux/atomic.h>
95229
95230+extern int gr_is_taskstats_denied(int pid);
95231+
95232 /*
95233 * Maximum length of a cpumask that can be specified in
95234 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
95235@@ -567,6 +570,9 @@ err:
95236
95237 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
95238 {
95239+ if (gr_is_taskstats_denied(current->pid))
95240+ return -EACCES;
95241+
95242 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
95243 return cmd_attr_register_cpumask(info);
95244 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
95245diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
95246index 1b001ed..55ef9e4 100644
95247--- a/kernel/time/alarmtimer.c
95248+++ b/kernel/time/alarmtimer.c
95249@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
95250 struct platform_device *pdev;
95251 int error = 0;
95252 int i;
95253- struct k_clock alarm_clock = {
95254+ static struct k_clock alarm_clock = {
95255 .clock_getres = alarm_clock_getres,
95256 .clock_get = alarm_clock_get,
95257 .timer_create = alarm_timer_create,
95258diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
95259index bee0c1f..a23fe2d 100644
95260--- a/kernel/time/hrtimer.c
95261+++ b/kernel/time/hrtimer.c
95262@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
95263 local_irq_restore(flags);
95264 }
95265
95266-static void run_hrtimer_softirq(struct softirq_action *h)
95267+static __latent_entropy void run_hrtimer_softirq(void)
95268 {
95269 hrtimer_peek_ahead_timers();
95270 }
95271diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
95272index 0075da7..63cc872 100644
95273--- a/kernel/time/posix-cpu-timers.c
95274+++ b/kernel/time/posix-cpu-timers.c
95275@@ -1449,14 +1449,14 @@ struct k_clock clock_posix_cpu = {
95276
95277 static __init int init_posix_cpu_timers(void)
95278 {
95279- struct k_clock process = {
95280+ static struct k_clock process = {
95281 .clock_getres = process_cpu_clock_getres,
95282 .clock_get = process_cpu_clock_get,
95283 .timer_create = process_cpu_timer_create,
95284 .nsleep = process_cpu_nsleep,
95285 .nsleep_restart = process_cpu_nsleep_restart,
95286 };
95287- struct k_clock thread = {
95288+ static struct k_clock thread = {
95289 .clock_getres = thread_cpu_clock_getres,
95290 .clock_get = thread_cpu_clock_get,
95291 .timer_create = thread_cpu_timer_create,
95292diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
95293index 31ea01f..7fc61ef 100644
95294--- a/kernel/time/posix-timers.c
95295+++ b/kernel/time/posix-timers.c
95296@@ -43,6 +43,7 @@
95297 #include <linux/hash.h>
95298 #include <linux/posix-clock.h>
95299 #include <linux/posix-timers.h>
95300+#include <linux/grsecurity.h>
95301 #include <linux/syscalls.h>
95302 #include <linux/wait.h>
95303 #include <linux/workqueue.h>
95304@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
95305 * which we beg off on and pass to do_sys_settimeofday().
95306 */
95307
95308-static struct k_clock posix_clocks[MAX_CLOCKS];
95309+static struct k_clock *posix_clocks[MAX_CLOCKS];
95310
95311 /*
95312 * These ones are defined below.
95313@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
95314 */
95315 static __init int init_posix_timers(void)
95316 {
95317- struct k_clock clock_realtime = {
95318+ static struct k_clock clock_realtime = {
95319 .clock_getres = hrtimer_get_res,
95320 .clock_get = posix_clock_realtime_get,
95321 .clock_set = posix_clock_realtime_set,
95322@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
95323 .timer_get = common_timer_get,
95324 .timer_del = common_timer_del,
95325 };
95326- struct k_clock clock_monotonic = {
95327+ static struct k_clock clock_monotonic = {
95328 .clock_getres = hrtimer_get_res,
95329 .clock_get = posix_ktime_get_ts,
95330 .nsleep = common_nsleep,
95331@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
95332 .timer_get = common_timer_get,
95333 .timer_del = common_timer_del,
95334 };
95335- struct k_clock clock_monotonic_raw = {
95336+ static struct k_clock clock_monotonic_raw = {
95337 .clock_getres = hrtimer_get_res,
95338 .clock_get = posix_get_monotonic_raw,
95339 };
95340- struct k_clock clock_realtime_coarse = {
95341+ static struct k_clock clock_realtime_coarse = {
95342 .clock_getres = posix_get_coarse_res,
95343 .clock_get = posix_get_realtime_coarse,
95344 };
95345- struct k_clock clock_monotonic_coarse = {
95346+ static struct k_clock clock_monotonic_coarse = {
95347 .clock_getres = posix_get_coarse_res,
95348 .clock_get = posix_get_monotonic_coarse,
95349 };
95350- struct k_clock clock_tai = {
95351+ static struct k_clock clock_tai = {
95352 .clock_getres = hrtimer_get_res,
95353 .clock_get = posix_get_tai,
95354 .nsleep = common_nsleep,
95355@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
95356 .timer_get = common_timer_get,
95357 .timer_del = common_timer_del,
95358 };
95359- struct k_clock clock_boottime = {
95360+ static struct k_clock clock_boottime = {
95361 .clock_getres = hrtimer_get_res,
95362 .clock_get = posix_get_boottime,
95363 .nsleep = common_nsleep,
95364@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
95365 return;
95366 }
95367
95368- posix_clocks[clock_id] = *new_clock;
95369+ posix_clocks[clock_id] = new_clock;
95370 }
95371 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
95372
95373@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
95374 return (id & CLOCKFD_MASK) == CLOCKFD ?
95375 &clock_posix_dynamic : &clock_posix_cpu;
95376
95377- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
95378+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
95379 return NULL;
95380- return &posix_clocks[id];
95381+ return posix_clocks[id];
95382 }
95383
95384 static int common_timer_create(struct k_itimer *new_timer)
95385@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
95386 struct k_clock *kc = clockid_to_kclock(which_clock);
95387 struct k_itimer *new_timer;
95388 int error, new_timer_id;
95389- sigevent_t event;
95390+ sigevent_t event = { };
95391 int it_id_set = IT_ID_NOT_SET;
95392
95393 if (!kc)
95394@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
95395 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
95396 return -EFAULT;
95397
95398+	/* only the CLOCK_REALTIME clock can be set; all other clocks
95399+	   have their clock_set fptr set to a nosettime dummy function.
95400+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
95401+	   call common_clock_set, which calls do_sys_settimeofday, which
95402+	   we hook.
95403+	*/
95404+
95405 return kc->clock_set(which_clock, &new_tp);
95406 }
95407
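
/*
 * Side-by-side view of the posix_clocks change above (sketch): storing
 * pointers instead of by-value copies means every registered k_clock can
 * be a static object, which in turn lets it be constified:
 */
static struct k_clock posix_clocks_old[MAX_CLOCKS];	/* before: writable copies */
static struct k_clock *posix_clocks_new[MAX_CLOCKS];	/* after: pointers only */

/* before: posix_clocks_old[clock_id] = *new_clock;  (struct copy)    */
/* after:  posix_clocks_new[clock_id] = new_clock;   (pointer store)  */
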
95408diff --git a/kernel/time/time.c b/kernel/time/time.c
95409index 2c85b77..6530536 100644
95410--- a/kernel/time/time.c
95411+++ b/kernel/time/time.c
95412@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
95413 return error;
95414
95415 if (tz) {
95416+		/* do_settimeofday(), called below when tv is set, already
95417+		   logs, so don't log twice */
95418+ if (!tv)
95419+ gr_log_timechange();
95420+
95421 sys_tz = *tz;
95422 update_vsyscall_tz();
95423 if (firsttime) {
95424diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
95425index 91db941..a371671 100644
95426--- a/kernel/time/timekeeping.c
95427+++ b/kernel/time/timekeeping.c
95428@@ -15,6 +15,7 @@
95429 #include <linux/init.h>
95430 #include <linux/mm.h>
95431 #include <linux/sched.h>
95432+#include <linux/grsecurity.h>
95433 #include <linux/syscore_ops.h>
95434 #include <linux/clocksource.h>
95435 #include <linux/jiffies.h>
95436@@ -802,6 +803,8 @@ int do_settimeofday64(const struct timespec64 *ts)
95437 if (!timespec64_valid_strict(ts))
95438 return -EINVAL;
95439
95440+ gr_log_timechange();
95441+
95442 raw_spin_lock_irqsave(&timekeeper_lock, flags);
95443 write_seqcount_begin(&tk_core.seq);
95444
95445diff --git a/kernel/time/timer.c b/kernel/time/timer.c
95446index 2d3f5c5..7ed7dc5 100644
95447--- a/kernel/time/timer.c
95448+++ b/kernel/time/timer.c
95449@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
95450 /*
95451 * This function runs timers and the timer-tq in bottom half context.
95452 */
95453-static void run_timer_softirq(struct softirq_action *h)
95454+static __latent_entropy void run_timer_softirq(void)
95455 {
95456 struct tvec_base *base = __this_cpu_read(tvec_bases);
95457
95458@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
95459 *
95460 * In all cases the return value is guaranteed to be non-negative.
95461 */
95462-signed long __sched schedule_timeout(signed long timeout)
95463+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
95464 {
95465 struct timer_list timer;
95466 unsigned long expire;
95467diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
95468index 61ed862..3b52c65 100644
95469--- a/kernel/time/timer_list.c
95470+++ b/kernel/time/timer_list.c
95471@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
95472
95473 static void print_name_offset(struct seq_file *m, void *sym)
95474 {
95475+#ifdef CONFIG_GRKERNSEC_HIDESYM
95476+ SEQ_printf(m, "<%p>", NULL);
95477+#else
95478 char symname[KSYM_NAME_LEN];
95479
95480 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
95481 SEQ_printf(m, "<%pK>", sym);
95482 else
95483 SEQ_printf(m, "%s", symname);
95484+#endif
95485 }
95486
95487 static void
95488@@ -119,7 +123,11 @@ next_one:
95489 static void
95490 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
95491 {
95492+#ifdef CONFIG_GRKERNSEC_HIDESYM
95493+ SEQ_printf(m, " .base: %p\n", NULL);
95494+#else
95495 SEQ_printf(m, " .base: %pK\n", base);
95496+#endif
95497 SEQ_printf(m, " .index: %d\n",
95498 base->index);
95499 SEQ_printf(m, " .resolution: %Lu nsecs\n",
95500@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
95501 {
95502 struct proc_dir_entry *pe;
95503
95504+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95505+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
95506+#else
95507 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
95508+#endif
95509 if (!pe)
95510 return -ENOMEM;
95511 return 0;
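
/*
 * Illustrative contrast, not part of the patch: %pK already censors the
 * address according to kptr_restrict, but GRKERNSEC_HIDESYM (here and in
 * timer_stats.c below) goes further and never lets the real pointer reach
 * the formatter at all:
 */
static void example_print_base(struct seq_file *m, void *base)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	SEQ_printf(m, " .base: %p\n", NULL);	/* real address never printed */
#else
	SEQ_printf(m, " .base: %pK\n", base);	/* zeroed unless kptr_restrict permits */
#endif
}
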
95512diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
95513index 1fb08f2..ca4bb1e 100644
95514--- a/kernel/time/timer_stats.c
95515+++ b/kernel/time/timer_stats.c
95516@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
95517 static unsigned long nr_entries;
95518 static struct entry entries[MAX_ENTRIES];
95519
95520-static atomic_t overflow_count;
95521+static atomic_unchecked_t overflow_count;
95522
95523 /*
95524 * The entries are in a hash-table, for fast lookup:
95525@@ -140,7 +140,7 @@ static void reset_entries(void)
95526 nr_entries = 0;
95527 memset(entries, 0, sizeof(entries));
95528 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
95529- atomic_set(&overflow_count, 0);
95530+ atomic_set_unchecked(&overflow_count, 0);
95531 }
95532
95533 static struct entry *alloc_entry(void)
95534@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95535 if (likely(entry))
95536 entry->count++;
95537 else
95538- atomic_inc(&overflow_count);
95539+ atomic_inc_unchecked(&overflow_count);
95540
95541 out_unlock:
95542 raw_spin_unlock_irqrestore(lock, flags);
95543@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95544
95545 static void print_name_offset(struct seq_file *m, unsigned long addr)
95546 {
95547+#ifdef CONFIG_GRKERNSEC_HIDESYM
95548+ seq_printf(m, "<%p>", NULL);
95549+#else
95550 char symname[KSYM_NAME_LEN];
95551
95552 if (lookup_symbol_name(addr, symname) < 0)
95553- seq_printf(m, "<%p>", (void *)addr);
95554+ seq_printf(m, "<%pK>", (void *)addr);
95555 else
95556 seq_printf(m, "%s", symname);
95557+#endif
95558 }
95559
95560 static int tstats_show(struct seq_file *m, void *v)
95561@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
95562
95563 seq_puts(m, "Timer Stats Version: v0.3\n");
95564 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
95565- if (atomic_read(&overflow_count))
95566- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
95567+ if (atomic_read_unchecked(&overflow_count))
95568+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
95569 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
95570
95571 for (i = 0; i < nr_entries; i++) {
95572@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
95573 {
95574 struct proc_dir_entry *pe;
95575
95576+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95577+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
95578+#else
95579 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
95580+#endif
95581 if (!pe)
95582 return -ENOMEM;
95583 return 0;
95584diff --git a/kernel/torture.c b/kernel/torture.c
95585index dd70993..0bf694b 100644
95586--- a/kernel/torture.c
95587+++ b/kernel/torture.c
95588@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
95589 mutex_lock(&fullstop_mutex);
95590 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
95591 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
95592- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
95593+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
95594 } else {
95595 pr_warn("Concurrent rmmod and shutdown illegal!\n");
95596 }
95597@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
95598 if (!torture_must_stop()) {
95599 if (stutter > 1) {
95600 schedule_timeout_interruptible(stutter - 1);
95601- ACCESS_ONCE(stutter_pause_test) = 2;
95602+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
95603 }
95604 schedule_timeout_interruptible(1);
95605- ACCESS_ONCE(stutter_pause_test) = 1;
95606+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
95607 }
95608 if (!torture_must_stop())
95609 schedule_timeout_interruptible(stutter);
95610- ACCESS_ONCE(stutter_pause_test) = 0;
95611+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
95612 torture_shutdown_absorb("torture_stutter");
95613 } while (!torture_must_stop());
95614 torture_kthread_stopping("torture_stutter");
95615@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
95616 schedule_timeout_uninterruptible(10);
95617 return true;
95618 }
95619- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
95620+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
95621 mutex_unlock(&fullstop_mutex);
95622 torture_shutdown_cleanup();
95623 torture_shuffle_cleanup();
95624diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
95625index 483cecf..ac46091 100644
95626--- a/kernel/trace/blktrace.c
95627+++ b/kernel/trace/blktrace.c
95628@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
95629 struct blk_trace *bt = filp->private_data;
95630 char buf[16];
95631
95632- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
95633+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
95634
95635 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
95636 }
95637@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
95638 return 1;
95639
95640 bt = buf->chan->private_data;
95641- atomic_inc(&bt->dropped);
95642+ atomic_inc_unchecked(&bt->dropped);
95643 return 0;
95644 }
95645
95646@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
95647
95648 bt->dir = dir;
95649 bt->dev = dev;
95650- atomic_set(&bt->dropped, 0);
95651+ atomic_set_unchecked(&bt->dropped, 0);
95652 INIT_LIST_HEAD(&bt->running_list);
95653
95654 ret = -EIO;
95655diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
95656index 4f22802..bd268b1 100644
95657--- a/kernel/trace/ftrace.c
95658+++ b/kernel/trace/ftrace.c
95659@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
95660 if (unlikely(ftrace_disabled))
95661 return 0;
95662
95663+ ret = ftrace_arch_code_modify_prepare();
95664+ FTRACE_WARN_ON(ret);
95665+ if (ret)
95666+ return 0;
95667+
95668 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95669+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95670 if (ret) {
95671 ftrace_bug(ret, rec);
95672- return 0;
95673 }
95674- return 1;
95675+ return ret ? 0 : 1;
95676 }
95677
95678 /*
95679@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
95680 if (!count)
95681 return 0;
95682
95683+ pax_open_kernel();
95684 sort(start, count, sizeof(*start),
95685 ftrace_cmp_ips, ftrace_swap_ips);
95686+ pax_close_kernel();
95687
95688 start_pg = ftrace_allocate_pages(count);
95689 if (!start_pg)
95690@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
95691
95692 if (t->ret_stack == NULL) {
95693 atomic_set(&t->tracing_graph_pause, 0);
95694- atomic_set(&t->trace_overrun, 0);
95695+ atomic_set_unchecked(&t->trace_overrun, 0);
95696 t->curr_ret_stack = -1;
95697 /* Make sure the tasks see the -1 first: */
95698 smp_wmb();
95699@@ -5876,7 +5883,7 @@ static void
95700 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
95701 {
95702 atomic_set(&t->tracing_graph_pause, 0);
95703- atomic_set(&t->trace_overrun, 0);
95704+ atomic_set_unchecked(&t->trace_overrun, 0);
95705 t->ftrace_timestamp = 0;
95706 /* make curr_ret_stack visible before we add the ret_stack */
95707 smp_wmb();
95708diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95709index 922048a..bb71a55 100644
95710--- a/kernel/trace/ring_buffer.c
95711+++ b/kernel/trace/ring_buffer.c
95712@@ -348,9 +348,9 @@ struct buffer_data_page {
95713 */
95714 struct buffer_page {
95715 struct list_head list; /* list of buffer pages */
95716- local_t write; /* index for next write */
95717+ local_unchecked_t write; /* index for next write */
95718 unsigned read; /* index for next read */
95719- local_t entries; /* entries on this page */
95720+ local_unchecked_t entries; /* entries on this page */
95721 unsigned long real_end; /* real end of data */
95722 struct buffer_data_page *page; /* Actual data page */
95723 };
95724@@ -471,11 +471,11 @@ struct ring_buffer_per_cpu {
95725 unsigned long last_overrun;
95726 local_t entries_bytes;
95727 local_t entries;
95728- local_t overrun;
95729- local_t commit_overrun;
95730- local_t dropped_events;
95731+ local_unchecked_t overrun;
95732+ local_unchecked_t commit_overrun;
95733+ local_unchecked_t dropped_events;
95734 local_t committing;
95735- local_t commits;
95736+ local_unchecked_t commits;
95737 unsigned long read;
95738 unsigned long read_bytes;
95739 u64 write_stamp;
95740@@ -1045,8 +1045,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95741 *
95742 * We add a counter to the write field to denote this.
95743 */
95744- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
95745- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
95746+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
95747+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
95748
95749 /*
95750 * Just make sure we have seen our old_write and synchronize
95751@@ -1074,8 +1074,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95752 * cmpxchg to only update if an interrupt did not already
95753 * do it for us. If the cmpxchg fails, we don't care.
95754 */
95755- (void)local_cmpxchg(&next_page->write, old_write, val);
95756- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
95757+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
95758+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
95759
95760 /*
95761 * No need to worry about races with clearing out the commit.
95762@@ -1443,12 +1443,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
95763
95764 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
95765 {
95766- return local_read(&bpage->entries) & RB_WRITE_MASK;
95767+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
95768 }
95769
95770 static inline unsigned long rb_page_write(struct buffer_page *bpage)
95771 {
95772- return local_read(&bpage->write) & RB_WRITE_MASK;
95773+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
95774 }
95775
95776 static int
95777@@ -1543,7 +1543,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
95778 * bytes consumed in ring buffer from here.
95779 * Increment overrun to account for the lost events.
95780 */
95781- local_add(page_entries, &cpu_buffer->overrun);
95782+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
95783 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95784 }
95785
95786@@ -2105,7 +2105,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95787 * it is our responsibility to update
95788 * the counters.
95789 */
95790- local_add(entries, &cpu_buffer->overrun);
95791+ local_add_unchecked(entries, &cpu_buffer->overrun);
95792 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95793
95794 /*
95795@@ -2255,7 +2255,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95796 if (tail == BUF_PAGE_SIZE)
95797 tail_page->real_end = 0;
95798
95799- local_sub(length, &tail_page->write);
95800+ local_sub_unchecked(length, &tail_page->write);
95801 return;
95802 }
95803
95804@@ -2290,7 +2290,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95805 rb_event_set_padding(event);
95806
95807 /* Set the write back to the previous setting */
95808- local_sub(length, &tail_page->write);
95809+ local_sub_unchecked(length, &tail_page->write);
95810 return;
95811 }
95812
95813@@ -2302,7 +2302,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95814
95815 /* Set write to end of buffer */
95816 length = (tail + length) - BUF_PAGE_SIZE;
95817- local_sub(length, &tail_page->write);
95818+ local_sub_unchecked(length, &tail_page->write);
95819 }
95820
95821 /*
95822@@ -2328,7 +2328,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95823 * about it.
95824 */
95825 if (unlikely(next_page == commit_page)) {
95826- local_inc(&cpu_buffer->commit_overrun);
95827+ local_inc_unchecked(&cpu_buffer->commit_overrun);
95828 goto out_reset;
95829 }
95830
95831@@ -2358,7 +2358,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95832 * this is easy, just stop here.
95833 */
95834 if (!(buffer->flags & RB_FL_OVERWRITE)) {
95835- local_inc(&cpu_buffer->dropped_events);
95836+ local_inc_unchecked(&cpu_buffer->dropped_events);
95837 goto out_reset;
95838 }
95839
95840@@ -2384,7 +2384,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95841 cpu_buffer->tail_page) &&
95842 (cpu_buffer->commit_page ==
95843 cpu_buffer->reader_page))) {
95844- local_inc(&cpu_buffer->commit_overrun);
95845+ local_inc_unchecked(&cpu_buffer->commit_overrun);
95846 goto out_reset;
95847 }
95848 }
95849@@ -2432,7 +2432,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
95850 length += RB_LEN_TIME_EXTEND;
95851
95852 tail_page = cpu_buffer->tail_page;
95853- write = local_add_return(length, &tail_page->write);
95854+ write = local_add_return_unchecked(length, &tail_page->write);
95855
95856 /* set write to only the index of the write */
95857 write &= RB_WRITE_MASK;
95858@@ -2456,7 +2456,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
95859 kmemcheck_annotate_bitfield(event, bitfield);
95860 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
95861
95862- local_inc(&tail_page->entries);
95863+ local_inc_unchecked(&tail_page->entries);
95864
95865 /*
95866 * If this is the first commit on the page, then update
95867@@ -2489,7 +2489,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95868
95869 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
95870 unsigned long write_mask =
95871- local_read(&bpage->write) & ~RB_WRITE_MASK;
95872+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
95873 unsigned long event_length = rb_event_length(event);
95874 /*
95875 * This is on the tail page. It is possible that
95876@@ -2499,7 +2499,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95877 */
95878 old_index += write_mask;
95879 new_index += write_mask;
95880- index = local_cmpxchg(&bpage->write, old_index, new_index);
95881+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
95882 if (index == old_index) {
95883 /* update counters */
95884 local_sub(event_length, &cpu_buffer->entries_bytes);
95885@@ -2514,7 +2514,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95886 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
95887 {
95888 local_inc(&cpu_buffer->committing);
95889- local_inc(&cpu_buffer->commits);
95890+ local_inc_unchecked(&cpu_buffer->commits);
95891 }
95892
95893 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
95894@@ -2526,7 +2526,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
95895 return;
95896
95897 again:
95898- commits = local_read(&cpu_buffer->commits);
95899+ commits = local_read_unchecked(&cpu_buffer->commits);
95900 /* synchronize with interrupts */
95901 barrier();
95902 if (local_read(&cpu_buffer->committing) == 1)
95903@@ -2542,7 +2542,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
95904 * updating of the commit page and the clearing of the
95905 * committing counter.
95906 */
95907- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
95908+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
95909 !local_read(&cpu_buffer->committing)) {
95910 local_inc(&cpu_buffer->committing);
95911 goto again;
95912@@ -2572,7 +2572,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
95913 barrier();
95914 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
95915 local_dec(&cpu_buffer->committing);
95916- local_dec(&cpu_buffer->commits);
95917+ local_dec_unchecked(&cpu_buffer->commits);
95918 return NULL;
95919 }
95920 #endif
95921@@ -2901,7 +2901,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95922
95923 /* Do the likely case first */
95924 if (likely(bpage->page == (void *)addr)) {
95925- local_dec(&bpage->entries);
95926+ local_dec_unchecked(&bpage->entries);
95927 return;
95928 }
95929
95930@@ -2913,7 +2913,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95931 start = bpage;
95932 do {
95933 if (bpage->page == (void *)addr) {
95934- local_dec(&bpage->entries);
95935+ local_dec_unchecked(&bpage->entries);
95936 return;
95937 }
95938 rb_inc_page(cpu_buffer, &bpage);
95939@@ -3197,7 +3197,7 @@ static inline unsigned long
95940 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
95941 {
95942 return local_read(&cpu_buffer->entries) -
95943- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
95944+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
95945 }
95946
95947 /**
95948@@ -3286,7 +3286,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
95949 return 0;
95950
95951 cpu_buffer = buffer->buffers[cpu];
95952- ret = local_read(&cpu_buffer->overrun);
95953+ ret = local_read_unchecked(&cpu_buffer->overrun);
95954
95955 return ret;
95956 }
95957@@ -3309,7 +3309,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
95958 return 0;
95959
95960 cpu_buffer = buffer->buffers[cpu];
95961- ret = local_read(&cpu_buffer->commit_overrun);
95962+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
95963
95964 return ret;
95965 }
95966@@ -3331,7 +3331,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
95967 return 0;
95968
95969 cpu_buffer = buffer->buffers[cpu];
95970- ret = local_read(&cpu_buffer->dropped_events);
95971+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
95972
95973 return ret;
95974 }
95975@@ -3394,7 +3394,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
95976 /* if you care about this being correct, lock the buffer */
95977 for_each_buffer_cpu(buffer, cpu) {
95978 cpu_buffer = buffer->buffers[cpu];
95979- overruns += local_read(&cpu_buffer->overrun);
95980+ overruns += local_read_unchecked(&cpu_buffer->overrun);
95981 }
95982
95983 return overruns;
95984@@ -3565,8 +3565,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95985 /*
95986 * Reset the reader page to size zero.
95987 */
95988- local_set(&cpu_buffer->reader_page->write, 0);
95989- local_set(&cpu_buffer->reader_page->entries, 0);
95990+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
95991+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
95992 local_set(&cpu_buffer->reader_page->page->commit, 0);
95993 cpu_buffer->reader_page->real_end = 0;
95994
95995@@ -3600,7 +3600,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95996 * want to compare with the last_overrun.
95997 */
95998 smp_mb();
95999- overwrite = local_read(&(cpu_buffer->overrun));
96000+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
96001
96002 /*
96003 * Here's the tricky part.
96004@@ -4172,8 +4172,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96005
96006 cpu_buffer->head_page
96007 = list_entry(cpu_buffer->pages, struct buffer_page, list);
96008- local_set(&cpu_buffer->head_page->write, 0);
96009- local_set(&cpu_buffer->head_page->entries, 0);
96010+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
96011+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
96012 local_set(&cpu_buffer->head_page->page->commit, 0);
96013
96014 cpu_buffer->head_page->read = 0;
96015@@ -4183,18 +4183,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96016
96017 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
96018 INIT_LIST_HEAD(&cpu_buffer->new_pages);
96019- local_set(&cpu_buffer->reader_page->write, 0);
96020- local_set(&cpu_buffer->reader_page->entries, 0);
96021+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96022+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96023 local_set(&cpu_buffer->reader_page->page->commit, 0);
96024 cpu_buffer->reader_page->read = 0;
96025
96026 local_set(&cpu_buffer->entries_bytes, 0);
96027- local_set(&cpu_buffer->overrun, 0);
96028- local_set(&cpu_buffer->commit_overrun, 0);
96029- local_set(&cpu_buffer->dropped_events, 0);
96030+ local_set_unchecked(&cpu_buffer->overrun, 0);
96031+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
96032+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
96033 local_set(&cpu_buffer->entries, 0);
96034 local_set(&cpu_buffer->committing, 0);
96035- local_set(&cpu_buffer->commits, 0);
96036+ local_set_unchecked(&cpu_buffer->commits, 0);
96037 cpu_buffer->read = 0;
96038 cpu_buffer->read_bytes = 0;
96039
96040@@ -4595,8 +4595,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
96041 rb_init_page(bpage);
96042 bpage = reader->page;
96043 reader->page = *data_page;
96044- local_set(&reader->write, 0);
96045- local_set(&reader->entries, 0);
96046+ local_set_unchecked(&reader->write, 0);
96047+ local_set_unchecked(&reader->entries, 0);
96048 reader->read = 0;
96049 *data_page = bpage;
96050
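Every conversion in the ring_buffer.c hunk follows one rule: counters that may legitimately wrap (the masked write/entries indices and the overrun, commit_overrun, dropped_events and commits statistics) move to local_unchecked_t, while committing, which behaves like a reference count, keeps the checked type. On a kernel without the REFCOUNT feature the unchecked API is assumed to collapse back onto plain local_t, roughly:

#ifndef CONFIG_PAX_REFCOUNT
typedef local_t local_unchecked_t;
#define local_read_unchecked(l)			local_read(l)
#define local_set_unchecked(l, i)		local_set((l), (i))
#define local_inc_unchecked(l)			local_inc(l)
#define local_dec_unchecked(l)			local_dec(l)
#define local_add_unchecked(i, l)		local_add((i), (l))
#define local_sub_unchecked(i, l)		local_sub((i), (l))
#define local_add_return_unchecked(i, l)	local_add_return((i), (l))
#define local_cmpxchg_unchecked(l, o, n)	local_cmpxchg((l), (o), (n))
#endif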
96051diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
96052index 62c6506..5c25989 100644
96053--- a/kernel/trace/trace.c
96054+++ b/kernel/trace/trace.c
96055@@ -3500,7 +3500,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
96056 return 0;
96057 }
96058
96059-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
96060+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
96061 {
96062 /* do nothing if flag is already set */
96063 if (!!(trace_flags & mask) == !!enabled)
96064diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
96065index dd8205a..1aae87a 100644
96066--- a/kernel/trace/trace.h
96067+++ b/kernel/trace/trace.h
96068@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
96069 void trace_printk_init_buffers(void);
96070 void trace_printk_start_comm(void);
96071 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
96072-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
96073+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
96074
96075 /*
96076 * Normal trace_printk() and friends allocates special buffers
96077diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
96078index 57b67b1..66082a9 100644
96079--- a/kernel/trace/trace_clock.c
96080+++ b/kernel/trace/trace_clock.c
96081@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
96082 return now;
96083 }
96084
96085-static atomic64_t trace_counter;
96086+static atomic64_unchecked_t trace_counter;
96087
96088 /*
96089 * trace_clock_counter(): simply an atomic counter.
96090@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
96091 */
96092 u64 notrace trace_clock_counter(void)
96093 {
96094- return atomic64_add_return(1, &trace_counter);
96095+ return atomic64_inc_return_unchecked(&trace_counter);
96096 }
96097diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
96098index a9c10a3..1864f6b 100644
96099--- a/kernel/trace/trace_events.c
96100+++ b/kernel/trace/trace_events.c
96101@@ -1762,7 +1762,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
96102 return 0;
96103 }
96104
96105-struct ftrace_module_file_ops;
96106 static void __add_event_to_tracers(struct ftrace_event_call *call);
96107
96108 /* Add an additional event_call dynamically */
96109diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
96110index b6fce36..d9f11a3 100644
96111--- a/kernel/trace/trace_functions_graph.c
96112+++ b/kernel/trace/trace_functions_graph.c
96113@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
96114
96115 /* The return trace stack is full */
96116 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
96117- atomic_inc(&current->trace_overrun);
96118+ atomic_inc_unchecked(&current->trace_overrun);
96119 return -EBUSY;
96120 }
96121
96122@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
96123 *ret = current->ret_stack[index].ret;
96124 trace->func = current->ret_stack[index].func;
96125 trace->calltime = current->ret_stack[index].calltime;
96126- trace->overrun = atomic_read(&current->trace_overrun);
96127+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
96128 trace->depth = index;
96129 }
96130
96131diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
96132index 7a9ba62..2e0e4a1 100644
96133--- a/kernel/trace/trace_mmiotrace.c
96134+++ b/kernel/trace/trace_mmiotrace.c
96135@@ -24,7 +24,7 @@ struct header_iter {
96136 static struct trace_array *mmio_trace_array;
96137 static bool overrun_detected;
96138 static unsigned long prev_overruns;
96139-static atomic_t dropped_count;
96140+static atomic_unchecked_t dropped_count;
96141
96142 static void mmio_reset_data(struct trace_array *tr)
96143 {
96144@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
96145
96146 static unsigned long count_overruns(struct trace_iterator *iter)
96147 {
96148- unsigned long cnt = atomic_xchg(&dropped_count, 0);
96149+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
96150 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
96151
96152 if (over > prev_overruns)
96153@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
96154 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
96155 sizeof(*entry), 0, pc);
96156 if (!event) {
96157- atomic_inc(&dropped_count);
96158+ atomic_inc_unchecked(&dropped_count);
96159 return;
96160 }
96161 entry = ring_buffer_event_data(event);
96162@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
96163 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
96164 sizeof(*entry), 0, pc);
96165 if (!event) {
96166- atomic_inc(&dropped_count);
96167+ atomic_inc_unchecked(&dropped_count);
96168 return;
96169 }
96170 entry = ring_buffer_event_data(event);
96171diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
96172index 692bf71..6d9a9cd 100644
96173--- a/kernel/trace/trace_output.c
96174+++ b/kernel/trace/trace_output.c
96175@@ -751,14 +751,16 @@ int register_ftrace_event(struct trace_event *event)
96176 goto out;
96177 }
96178
96179+ pax_open_kernel();
96180 if (event->funcs->trace == NULL)
96181- event->funcs->trace = trace_nop_print;
96182+ *(void **)&event->funcs->trace = trace_nop_print;
96183 if (event->funcs->raw == NULL)
96184- event->funcs->raw = trace_nop_print;
96185+ *(void **)&event->funcs->raw = trace_nop_print;
96186 if (event->funcs->hex == NULL)
96187- event->funcs->hex = trace_nop_print;
96188+ *(void **)&event->funcs->hex = trace_nop_print;
96189 if (event->funcs->binary == NULL)
96190- event->funcs->binary = trace_nop_print;
96191+ *(void **)&event->funcs->binary = trace_nop_print;
96192+ pax_close_kernel();
96193
96194 key = event->type & (EVENT_HASHSIZE - 1);
96195
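The register_ftrace_event() hunk shows the pattern used for constified ops tables: once the constify plugin makes event->funcs effectively read-only, a direct assignment to a function-pointer member no longer compiles, so the store goes through a cast of the member's address inside an open/close write window. An illustrative reduction (demo_fill_default is not a kernel function; trace_event_functions and trace_print_func are the real types):

static void demo_fill_default(struct trace_event_functions *funcs,
			      trace_print_func fallback)
{
	/* with the constify plugin, funcs' members are read-only */
	pax_open_kernel();
	if (funcs->trace == NULL)
		*(void **)&funcs->trace = fallback;	/* bypass const */
	pax_close_kernel();
}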
96196diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
96197index e694c9f..6775a38 100644
96198--- a/kernel/trace/trace_seq.c
96199+++ b/kernel/trace/trace_seq.c
96200@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
96201 return 0;
96202 }
96203
96204- seq_buf_path(&s->seq, path, "\n");
96205+ seq_buf_path(&s->seq, path, "\n\\");
96206
96207 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
96208 s->seq.len = save_len;
96209diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
96210index c3e4fcf..ef6cc43 100644
96211--- a/kernel/trace/trace_stack.c
96212+++ b/kernel/trace/trace_stack.c
96213@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
96214 return;
96215
96216 /* we do not handle interrupt stacks yet */
96217- if (!object_is_on_stack(stack))
96218+ if (!object_starts_on_stack(stack))
96219 return;
96220
96221 local_irq_save(flags);
96222diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
96223index f97f6e3..d367b48 100644
96224--- a/kernel/trace/trace_syscalls.c
96225+++ b/kernel/trace/trace_syscalls.c
96226@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
96227 int num;
96228
96229 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96230+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96231+ return -EINVAL;
96232
96233 mutex_lock(&syscall_trace_lock);
96234 if (!sys_perf_refcount_enter)
96235@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
96236 int num;
96237
96238 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96239+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96240+ return;
96241
96242 mutex_lock(&syscall_trace_lock);
96243 sys_perf_refcount_enter--;
96244@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
96245 int num;
96246
96247 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96248+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96249+ return -EINVAL;
96250
96251 mutex_lock(&syscall_trace_lock);
96252 if (!sys_perf_refcount_exit)
96253@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
96254 int num;
96255
96256 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96257+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96258+ return;
96259
96260 mutex_lock(&syscall_trace_lock);
96261 sys_perf_refcount_exit--;
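All four hunks above add the same guard: syscall_nr comes from per-event metadata, and a negative or out-of-range value would otherwise index the enabled/refcount bitmaps out of bounds before it is ever validated. A reduced sketch of the pattern (demo_enabled and demo_perf_enable are illustrative names):

static DECLARE_BITMAP(demo_enabled, NR_syscalls);

static int demo_perf_enable(int num)
{
	/* reject a corrupt index before it reaches set_bit() */
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -EINVAL;
	set_bit(num, demo_enabled);
	return 0;
}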
96262diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
96263index 4109f83..fe1f830 100644
96264--- a/kernel/user_namespace.c
96265+++ b/kernel/user_namespace.c
96266@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
96267 !kgid_has_mapping(parent_ns, group))
96268 return -EPERM;
96269
96270+#ifdef CONFIG_GRKERNSEC
96271+ /*
96272+ * This doesn't really inspire confidence:
96273+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
96274+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
96275+ * Increases kernel attack surface in areas developers
96276+ * previously cared little about ("low importance due
96277+ * to requiring 'root' capability")
96278+ * To be removed when this code receives *proper* review
96279+ */
96280+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
96281+ !capable(CAP_SETGID))
96282+ return -EPERM;
96283+#endif
96284+
96285 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
96286 if (!ns)
96287 return -ENOMEM;
96288@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
96289 if (atomic_read(&current->mm->mm_users) > 1)
96290 return -EINVAL;
96291
96292- if (current->fs->users != 1)
96293+ if (atomic_read(&current->fs->users) != 1)
96294 return -EINVAL;
96295
96296 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
96297diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
96298index c8eac43..4b5f08f 100644
96299--- a/kernel/utsname_sysctl.c
96300+++ b/kernel/utsname_sysctl.c
96301@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
96302 static int proc_do_uts_string(struct ctl_table *table, int write,
96303 void __user *buffer, size_t *lenp, loff_t *ppos)
96304 {
96305- struct ctl_table uts_table;
96306+ ctl_table_no_const uts_table;
96307 int r;
96308 memcpy(&uts_table, table, sizeof(uts_table));
96309 uts_table.data = get_uts(table, write);
96310diff --git a/kernel/watchdog.c b/kernel/watchdog.c
96311index 3174bf8..3553520 100644
96312--- a/kernel/watchdog.c
96313+++ b/kernel/watchdog.c
96314@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
96315 static void watchdog_nmi_disable(unsigned int cpu) { return; }
96316 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
96317
96318-static struct smp_hotplug_thread watchdog_threads = {
96319+static struct smp_hotplug_thread watchdog_threads __read_only = {
96320 .store = &softlockup_watchdog,
96321 .thread_should_run = watchdog_should_run,
96322 .thread_fn = watchdog,
96323diff --git a/kernel/workqueue.c b/kernel/workqueue.c
96324index 41ff75b..5ad683a 100644
96325--- a/kernel/workqueue.c
96326+++ b/kernel/workqueue.c
96327@@ -4564,7 +4564,7 @@ static void rebind_workers(struct worker_pool *pool)
96328 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
96329 worker_flags |= WORKER_REBOUND;
96330 worker_flags &= ~WORKER_UNBOUND;
96331- ACCESS_ONCE(worker->flags) = worker_flags;
96332+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
96333 }
96334
96335 spin_unlock_irq(&pool->lock);
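ACCESS_ONCE_RW marks a volatile store to a field that the constify-aware ACCESS_ONCE would treat as const (the same substitution appears in lib/average.c below). The split is assumed to look roughly like this, with the plain macro gaining a const qualifier and the _RW form keeping write access:

#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))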
96336diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
96337index c5cefb3..a4241e3 100644
96338--- a/lib/Kconfig.debug
96339+++ b/lib/Kconfig.debug
96340@@ -923,7 +923,7 @@ config DEBUG_MUTEXES
96341
96342 config DEBUG_WW_MUTEX_SLOWPATH
96343 bool "Wait/wound mutex debugging: Slowpath testing"
96344- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96345+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96346 select DEBUG_LOCK_ALLOC
96347 select DEBUG_SPINLOCK
96348 select DEBUG_MUTEXES
96349@@ -940,7 +940,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
96350
96351 config DEBUG_LOCK_ALLOC
96352 bool "Lock debugging: detect incorrect freeing of live locks"
96353- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96354+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96355 select DEBUG_SPINLOCK
96356 select DEBUG_MUTEXES
96357 select LOCKDEP
96358@@ -954,7 +954,7 @@ config DEBUG_LOCK_ALLOC
96359
96360 config PROVE_LOCKING
96361 bool "Lock debugging: prove locking correctness"
96362- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96363+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96364 select LOCKDEP
96365 select DEBUG_SPINLOCK
96366 select DEBUG_MUTEXES
96367@@ -1005,7 +1005,7 @@ config LOCKDEP
96368
96369 config LOCK_STAT
96370 bool "Lock usage statistics"
96371- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96372+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96373 select LOCKDEP
96374 select DEBUG_SPINLOCK
96375 select DEBUG_MUTEXES
96376@@ -1467,6 +1467,7 @@ config LATENCYTOP
96377 depends on DEBUG_KERNEL
96378 depends on STACKTRACE_SUPPORT
96379 depends on PROC_FS
96380+ depends on !GRKERNSEC_HIDESYM
96381 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
96382 select KALLSYMS
96383 select KALLSYMS_ALL
96384@@ -1483,7 +1484,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96385 config DEBUG_STRICT_USER_COPY_CHECKS
96386 bool "Strict user copy size checks"
96387 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96388- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
96389+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
96390 help
96391 Enabling this option turns a certain set of sanity checks for user
96392 copy operations into compile time failures.
96393@@ -1614,7 +1615,7 @@ endmenu # runtime tests
96394
96395 config PROVIDE_OHCI1394_DMA_INIT
96396 bool "Remote debugging over FireWire early on boot"
96397- depends on PCI && X86
96398+ depends on PCI && X86 && !GRKERNSEC
96399 help
96400 If you want to debug problems which hang or crash the kernel early
96401 on boot and the crashing machine has a FireWire port, you can use
96402diff --git a/lib/Makefile b/lib/Makefile
96403index 58f74d2..08e011f 100644
96404--- a/lib/Makefile
96405+++ b/lib/Makefile
96406@@ -59,7 +59,7 @@ obj-$(CONFIG_BTREE) += btree.o
96407 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
96408 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
96409 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
96410-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
96411+obj-y += list_debug.o
96412 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
96413
96414 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
96415diff --git a/lib/average.c b/lib/average.c
96416index 114d1be..ab0350c 100644
96417--- a/lib/average.c
96418+++ b/lib/average.c
96419@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
96420 {
96421 unsigned long internal = ACCESS_ONCE(avg->internal);
96422
96423- ACCESS_ONCE(avg->internal) = internal ?
96424+ ACCESS_ONCE_RW(avg->internal) = internal ?
96425 (((internal << avg->weight) - internal) +
96426 (val << avg->factor)) >> avg->weight :
96427 (val << avg->factor);
96428diff --git a/lib/bitmap.c b/lib/bitmap.c
96429index d456f4c1..29a0308 100644
96430--- a/lib/bitmap.c
96431+++ b/lib/bitmap.c
96432@@ -264,7 +264,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
96433 }
96434 EXPORT_SYMBOL(__bitmap_subset);
96435
96436-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
96437+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
96438 {
96439 unsigned int k, lim = bits/BITS_PER_LONG;
96440 int w = 0;
96441@@ -391,7 +391,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
96442 {
96443 int c, old_c, totaldigits, ndigits, nchunks, nbits;
96444 u32 chunk;
96445- const char __user __force *ubuf = (const char __user __force *)buf;
96446+ const char __user *ubuf = (const char __force_user *)buf;
96447
96448 bitmap_zero(maskp, nmaskbits);
96449
96450@@ -476,7 +476,7 @@ int bitmap_parse_user(const char __user *ubuf,
96451 {
96452 if (!access_ok(VERIFY_READ, ubuf, ulen))
96453 return -EFAULT;
96454- return __bitmap_parse((const char __force *)ubuf,
96455+ return __bitmap_parse((const char __force_kernel *)ubuf,
96456 ulen, 1, maskp, nmaskbits);
96457
96458 }
96459@@ -535,7 +535,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
96460 {
96461 unsigned a, b;
96462 int c, old_c, totaldigits;
96463- const char __user __force *ubuf = (const char __user __force *)buf;
96464+ const char __user *ubuf = (const char __force_user *)buf;
96465 int exp_digit, in_range;
96466
96467 totaldigits = c = 0;
96468@@ -630,7 +630,7 @@ int bitmap_parselist_user(const char __user *ubuf,
96469 {
96470 if (!access_ok(VERIFY_READ, ubuf, ulen))
96471 return -EFAULT;
96472- return __bitmap_parselist((const char __force *)ubuf,
96473+ return __bitmap_parselist((const char __force_kernel *)ubuf,
96474 ulen, 1, maskp, nmaskbits);
96475 }
96476 EXPORT_SYMBOL(bitmap_parselist_user);
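The bitmap.c edits are sparse address-space fixes rather than behavioural changes: the buffer really is in kernel memory but is parsed through a __user-typed pointer, so the cast has to add the address space and silence the checker in one step. The combined qualifiers are assumed to be defined along these lines:

#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif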
96477diff --git a/lib/bug.c b/lib/bug.c
96478index 0c3bd95..5a615a1 100644
96479--- a/lib/bug.c
96480+++ b/lib/bug.c
96481@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
96482 return BUG_TRAP_TYPE_NONE;
96483
96484 bug = find_bug(bugaddr);
96485+ if (!bug)
96486+ return BUG_TRAP_TYPE_NONE;
96487
96488 file = NULL;
96489 line = 0;
96490diff --git a/lib/debugobjects.c b/lib/debugobjects.c
96491index 547f7f9..a6d4ba0 100644
96492--- a/lib/debugobjects.c
96493+++ b/lib/debugobjects.c
96494@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
96495 if (limit > 4)
96496 return;
96497
96498- is_on_stack = object_is_on_stack(addr);
96499+ is_on_stack = object_starts_on_stack(addr);
96500 if (is_on_stack == onstack)
96501 return;
96502
96503diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
96504index 6dd0335..1e9c239 100644
96505--- a/lib/decompress_bunzip2.c
96506+++ b/lib/decompress_bunzip2.c
96507@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
96508
96509 /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
96510 uncompressed data. Allocate intermediate buffer for block. */
96511- bd->dbufSize = 100000*(i-BZh0);
96512+ i -= BZh0;
96513+ bd->dbufSize = 100000 * i;
96514
96515 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
96516 if (!bd->dbuf)
96517diff --git a/lib/div64.c b/lib/div64.c
96518index 4382ad7..08aa558 100644
96519--- a/lib/div64.c
96520+++ b/lib/div64.c
96521@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
96522 EXPORT_SYMBOL(__div64_32);
96523
96524 #ifndef div_s64_rem
96525-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96526+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96527 {
96528 u64 quotient;
96529
96530@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
96531 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
96532 */
96533 #ifndef div64_u64
96534-u64 div64_u64(u64 dividend, u64 divisor)
96535+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
96536 {
96537 u32 high = divisor >> 32;
96538 u64 quot;
96539diff --git a/lib/dma-debug.c b/lib/dma-debug.c
96540index 9722bd2..0d826f4 100644
96541--- a/lib/dma-debug.c
96542+++ b/lib/dma-debug.c
96543@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
96544
96545 void dma_debug_add_bus(struct bus_type *bus)
96546 {
96547- struct notifier_block *nb;
96548+ notifier_block_no_const *nb;
96549
96550 if (dma_debug_disabled())
96551 return;
96552@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
96553
96554 static void check_for_stack(struct device *dev, void *addr)
96555 {
96556- if (object_is_on_stack(addr))
96557+ if (object_starts_on_stack(addr))
96558 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
96559 "stack [addr=%p]\n", addr);
96560 }
96561diff --git a/lib/inflate.c b/lib/inflate.c
96562index 013a761..c28f3fc 100644
96563--- a/lib/inflate.c
96564+++ b/lib/inflate.c
96565@@ -269,7 +269,7 @@ static void free(void *where)
96566 malloc_ptr = free_mem_ptr;
96567 }
96568 #else
96569-#define malloc(a) kmalloc(a, GFP_KERNEL)
96570+#define malloc(a) kmalloc((a), GFP_KERNEL)
96571 #define free(a) kfree(a)
96572 #endif
96573
96574diff --git a/lib/ioremap.c b/lib/ioremap.c
96575index 0c9216c..863bd89 100644
96576--- a/lib/ioremap.c
96577+++ b/lib/ioremap.c
96578@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
96579 unsigned long next;
96580
96581 phys_addr -= addr;
96582- pmd = pmd_alloc(&init_mm, pud, addr);
96583+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
96584 if (!pmd)
96585 return -ENOMEM;
96586 do {
96587@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
96588 unsigned long next;
96589
96590 phys_addr -= addr;
96591- pud = pud_alloc(&init_mm, pgd, addr);
96592+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
96593 if (!pud)
96594 return -ENOMEM;
96595 do {
96596diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
96597index bd2bea9..6b3c95e 100644
96598--- a/lib/is_single_threaded.c
96599+++ b/lib/is_single_threaded.c
96600@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
96601 struct task_struct *p, *t;
96602 bool ret;
96603
96604+ if (!mm)
96605+ return true;
96606+
96607 if (atomic_read(&task->signal->live) != 1)
96608 return false;
96609
96610diff --git a/lib/kobject.c b/lib/kobject.c
96611index 03d4ab3..46f6374 100644
96612--- a/lib/kobject.c
96613+++ b/lib/kobject.c
96614@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
96615
96616
96617 static DEFINE_SPINLOCK(kobj_ns_type_lock);
96618-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
96619+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
96620
96621-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96622+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96623 {
96624 enum kobj_ns_type type = ops->type;
96625 int error;
96626diff --git a/lib/list_debug.c b/lib/list_debug.c
96627index c24c2f7..f0296f4 100644
96628--- a/lib/list_debug.c
96629+++ b/lib/list_debug.c
96630@@ -11,7 +11,9 @@
96631 #include <linux/bug.h>
96632 #include <linux/kernel.h>
96633 #include <linux/rculist.h>
96634+#include <linux/mm.h>
96635
96636+#ifdef CONFIG_DEBUG_LIST
96637 /*
96638 * Insert a new entry between two known consecutive entries.
96639 *
96640@@ -19,21 +21,40 @@
96641 * the prev/next entries already!
96642 */
96643
96644+static bool __list_add_debug(struct list_head *new,
96645+ struct list_head *prev,
96646+ struct list_head *next)
96647+{
96648+ if (unlikely(next->prev != prev)) {
96649+ printk(KERN_ERR "list_add corruption. next->prev should be "
96650+ "prev (%p), but was %p. (next=%p).\n",
96651+ prev, next->prev, next);
96652+ BUG();
96653+ return false;
96654+ }
96655+ if (unlikely(prev->next != next)) {
96656+ printk(KERN_ERR "list_add corruption. prev->next should be "
96657+ "next (%p), but was %p. (prev=%p).\n",
96658+ next, prev->next, prev);
96659+ BUG();
96660+ return false;
96661+ }
96662+ if (unlikely(new == prev || new == next)) {
96663+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
96664+ new, prev, next);
96665+ BUG();
96666+ return false;
96667+ }
96668+ return true;
96669+}
96670+
96671 void __list_add(struct list_head *new,
96672- struct list_head *prev,
96673- struct list_head *next)
96674+ struct list_head *prev,
96675+ struct list_head *next)
96676 {
96677- WARN(next->prev != prev,
96678- "list_add corruption. next->prev should be "
96679- "prev (%p), but was %p. (next=%p).\n",
96680- prev, next->prev, next);
96681- WARN(prev->next != next,
96682- "list_add corruption. prev->next should be "
96683- "next (%p), but was %p. (prev=%p).\n",
96684- next, prev->next, prev);
96685- WARN(new == prev || new == next,
96686- "list_add double add: new=%p, prev=%p, next=%p.\n",
96687- new, prev, next);
96688+ if (!__list_add_debug(new, prev, next))
96689+ return;
96690+
96691 next->prev = new;
96692 new->next = next;
96693 new->prev = prev;
96694@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
96695 }
96696 EXPORT_SYMBOL(__list_add);
96697
96698-void __list_del_entry(struct list_head *entry)
96699+static bool __list_del_entry_debug(struct list_head *entry)
96700 {
96701 struct list_head *prev, *next;
96702
96703 prev = entry->prev;
96704 next = entry->next;
96705
96706- if (WARN(next == LIST_POISON1,
96707- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96708- entry, LIST_POISON1) ||
96709- WARN(prev == LIST_POISON2,
96710- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96711- entry, LIST_POISON2) ||
96712- WARN(prev->next != entry,
96713- "list_del corruption. prev->next should be %p, "
96714- "but was %p\n", entry, prev->next) ||
96715- WARN(next->prev != entry,
96716- "list_del corruption. next->prev should be %p, "
96717- "but was %p\n", entry, next->prev))
96718+ if (unlikely(next == LIST_POISON1)) {
96719+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96720+ entry, LIST_POISON1);
96721+ BUG();
96722+ return false;
96723+ }
96724+ if (unlikely(prev == LIST_POISON2)) {
96725+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96726+ entry, LIST_POISON2);
96727+ BUG();
96728+ return false;
96729+ }
96730+ if (unlikely(entry->prev->next != entry)) {
96731+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
96732+ "but was %p\n", entry, prev->next);
96733+ BUG();
96734+ return false;
96735+ }
96736+ if (unlikely(entry->next->prev != entry)) {
96737+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
96738+ "but was %p\n", entry, next->prev);
96739+ BUG();
96740+ return false;
96741+ }
96742+ return true;
96743+}
96744+
96745+void __list_del_entry(struct list_head *entry)
96746+{
96747+ if (!__list_del_entry_debug(entry))
96748 return;
96749
96750- __list_del(prev, next);
96751+ __list_del(entry->prev, entry->next);
96752 }
96753 EXPORT_SYMBOL(__list_del_entry);
96754
96755@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
96756 void __list_add_rcu(struct list_head *new,
96757 struct list_head *prev, struct list_head *next)
96758 {
96759- WARN(next->prev != prev,
96760- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
96761- prev, next->prev, next);
96762- WARN(prev->next != next,
96763- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
96764- next, prev->next, prev);
96765+ if (!__list_add_debug(new, prev, next))
96766+ return;
96767+
96768 new->next = next;
96769 new->prev = prev;
96770 rcu_assign_pointer(list_next_rcu(prev), new);
96771 next->prev = new;
96772 }
96773 EXPORT_SYMBOL(__list_add_rcu);
96774+#endif
96775+
96776+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
96777+{
96778+#ifdef CONFIG_DEBUG_LIST
96779+ if (!__list_add_debug(new, prev, next))
96780+ return;
96781+#endif
96782+
96783+ pax_open_kernel();
96784+ next->prev = new;
96785+ new->next = next;
96786+ new->prev = prev;
96787+ prev->next = new;
96788+ pax_close_kernel();
96789+}
96790+EXPORT_SYMBOL(__pax_list_add);
96791+
96792+void pax_list_del(struct list_head *entry)
96793+{
96794+#ifdef CONFIG_DEBUG_LIST
96795+ if (!__list_del_entry_debug(entry))
96796+ return;
96797+#endif
96798+
96799+ pax_open_kernel();
96800+ __list_del(entry->prev, entry->next);
96801+ entry->next = LIST_POISON1;
96802+ entry->prev = LIST_POISON2;
96803+ pax_close_kernel();
96804+}
96805+EXPORT_SYMBOL(pax_list_del);
96806+
96807+void pax_list_del_init(struct list_head *entry)
96808+{
96809+ pax_open_kernel();
96810+ __list_del(entry->prev, entry->next);
96811+ INIT_LIST_HEAD(entry);
96812+ pax_close_kernel();
96813+}
96814+EXPORT_SYMBOL(pax_list_del_init);
96815+
96816+void __pax_list_add_rcu(struct list_head *new,
96817+ struct list_head *prev, struct list_head *next)
96818+{
96819+#ifdef CONFIG_DEBUG_LIST
96820+ if (!__list_add_debug(new, prev, next))
96821+ return;
96822+#endif
96823+
96824+ pax_open_kernel();
96825+ new->next = next;
96826+ new->prev = prev;
96827+ rcu_assign_pointer(list_next_rcu(prev), new);
96828+ next->prev = new;
96829+ pax_close_kernel();
96830+}
96831+EXPORT_SYMBOL(__pax_list_add_rcu);
96832+
96833+void pax_list_del_rcu(struct list_head *entry)
96834+{
96835+#ifdef CONFIG_DEBUG_LIST
96836+ if (!__list_del_entry_debug(entry))
96837+ return;
96838+#endif
96839+
96840+ pax_open_kernel();
96841+ __list_del(entry->prev, entry->next);
96842+ entry->next = LIST_POISON1;
96843+ entry->prev = LIST_POISON2;
96844+ pax_close_kernel();
96845+}
96846+EXPORT_SYMBOL(pax_list_del_rcu);
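Besides turning the WARN()-and-continue checks into hard BUG()s, the rewrite exports a pax_list_* family for list heads embedded in read-only (constified) objects: each helper re-runs the same corruption checks and then performs the pointer updates inside a pax_open_kernel() window. A hypothetical call site, mirroring list_del() followed by list_add():

static void demo_move_to_head(struct list_head *node, struct list_head *head)
{
	pax_list_del(node);			/* poisons next/prev */
	__pax_list_add(node, head, head->next);	/* reinsert at head */
}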
96847diff --git a/lib/lockref.c b/lib/lockref.c
96848index ecb9a66..a044fc5 100644
96849--- a/lib/lockref.c
96850+++ b/lib/lockref.c
96851@@ -48,13 +48,13 @@
96852 void lockref_get(struct lockref *lockref)
96853 {
96854 CMPXCHG_LOOP(
96855- new.count++;
96856+ __lockref_inc(&new);
96857 ,
96858 return;
96859 );
96860
96861 spin_lock(&lockref->lock);
96862- lockref->count++;
96863+ __lockref_inc(lockref);
96864 spin_unlock(&lockref->lock);
96865 }
96866 EXPORT_SYMBOL(lockref_get);
96867@@ -69,8 +69,8 @@ int lockref_get_not_zero(struct lockref *lockref)
96868 int retval;
96869
96870 CMPXCHG_LOOP(
96871- new.count++;
96872- if (old.count <= 0)
96873+ __lockref_inc(&new);
96874+ if (__lockref_read(&old) <= 0)
96875 return 0;
96876 ,
96877 return 1;
96878@@ -78,8 +78,8 @@ int lockref_get_not_zero(struct lockref *lockref)
96879
96880 spin_lock(&lockref->lock);
96881 retval = 0;
96882- if (lockref->count > 0) {
96883- lockref->count++;
96884+ if (__lockref_read(lockref) > 0) {
96885+ __lockref_inc(lockref);
96886 retval = 1;
96887 }
96888 spin_unlock(&lockref->lock);
96889@@ -96,17 +96,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
96890 int lockref_get_or_lock(struct lockref *lockref)
96891 {
96892 CMPXCHG_LOOP(
96893- new.count++;
96894- if (old.count <= 0)
96895+ __lockref_inc(&new);
96896+ if (__lockref_read(&old) <= 0)
96897 break;
96898 ,
96899 return 1;
96900 );
96901
96902 spin_lock(&lockref->lock);
96903- if (lockref->count <= 0)
96904+ if (__lockref_read(lockref) <= 0)
96905 return 0;
96906- lockref->count++;
96907+ __lockref_inc(lockref);
96908 spin_unlock(&lockref->lock);
96909 return 1;
96910 }
96911@@ -122,11 +122,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
96912 int lockref_put_return(struct lockref *lockref)
96913 {
96914 CMPXCHG_LOOP(
96915- new.count--;
96916- if (old.count <= 0)
96917+ __lockref_dec(&new);
96918+ if (__lockref_read(&old) <= 0)
96919 return -1;
96920 ,
96921- return new.count;
96922+ return __lockref_read(&new);
96923 );
96924 return -1;
96925 }
96926@@ -140,17 +140,17 @@ EXPORT_SYMBOL(lockref_put_return);
96927 int lockref_put_or_lock(struct lockref *lockref)
96928 {
96929 CMPXCHG_LOOP(
96930- new.count--;
96931- if (old.count <= 1)
96932+ __lockref_dec(&new);
96933+ if (__lockref_read(&old) <= 1)
96934 break;
96935 ,
96936 return 1;
96937 );
96938
96939 spin_lock(&lockref->lock);
96940- if (lockref->count <= 1)
96941+ if (__lockref_read(lockref) <= 1)
96942 return 0;
96943- lockref->count--;
96944+ __lockref_dec(lockref);
96945 spin_unlock(&lockref->lock);
96946 return 1;
96947 }
96948@@ -163,7 +163,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
96949 void lockref_mark_dead(struct lockref *lockref)
96950 {
96951 assert_spin_locked(&lockref->lock);
96952- lockref->count = -128;
96953+ __lockref_set(lockref, -128);
96954 }
96955 EXPORT_SYMBOL(lockref_mark_dead);
96956
96957@@ -177,8 +177,8 @@ int lockref_get_not_dead(struct lockref *lockref)
96958 int retval;
96959
96960 CMPXCHG_LOOP(
96961- new.count++;
96962- if (old.count < 0)
96963+ __lockref_inc(&new);
96964+ if (__lockref_read(&old) < 0)
96965 return 0;
96966 ,
96967 return 1;
96968@@ -186,8 +186,8 @@ int lockref_get_not_dead(struct lockref *lockref)
96969
96970 spin_lock(&lockref->lock);
96971 retval = 0;
96972- if (lockref->count >= 0) {
96973- lockref->count++;
96974+ if (__lockref_read(lockref) >= 0) {
96975+ __lockref_inc(lockref);
96976 retval = 1;
96977 }
96978 spin_unlock(&lockref->lock);
96979diff --git a/lib/nlattr.c b/lib/nlattr.c
96980index f5907d2..36072be 100644
96981--- a/lib/nlattr.c
96982+++ b/lib/nlattr.c
96983@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
96984 {
96985 int minlen = min_t(int, count, nla_len(src));
96986
96987+ BUG_ON(minlen < 0);
96988+
96989 memcpy(dest, nla_data(src), minlen);
96990 if (count > minlen)
96991 memset(dest + minlen, 0, count - minlen);
96992diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
96993index 6111bcb..02e816b 100644
96994--- a/lib/percpu-refcount.c
96995+++ b/lib/percpu-refcount.c
96996@@ -31,7 +31,7 @@
96997 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
96998 */
96999
97000-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
97001+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
97002
97003 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
97004
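Halving PERCPU_COUNT_BIAS keeps the biased atomic_long_t out of the sign bit: the original bias makes the count appear negative to a signed-overflow checker such as REFCOUNT, while 1 << (BITS_PER_LONG - 2) still exceeds any plausible reference total. On a 64-bit kernel, as an illustration:

/* BITS_PER_LONG == 64 */
#define OLD_BIAS	(1LU << 63)	/* 0x8000000000000000: sign bit set */
#define NEW_BIAS	(1LU << 62)	/* 0x4000000000000000: stays positive */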
97005diff --git a/lib/radix-tree.c b/lib/radix-tree.c
97006index 3d2aa27..a472f20 100644
97007--- a/lib/radix-tree.c
97008+++ b/lib/radix-tree.c
97009@@ -67,7 +67,7 @@ struct radix_tree_preload {
97010 int nr;
97011 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
97012 };
97013-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
97014+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
97015
97016 static inline void *ptr_to_indirect(void *ptr)
97017 {
97018diff --git a/lib/random32.c b/lib/random32.c
97019index 0bee183..526f12f 100644
97020--- a/lib/random32.c
97021+++ b/lib/random32.c
97022@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
97023 }
97024 #endif
97025
97026-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
97027+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
97028
97029 /**
97030 * prandom_u32_state - seeded pseudo-random number generator.
97031diff --git a/lib/rbtree.c b/lib/rbtree.c
97032index c16c81a..4dcbda1 100644
97033--- a/lib/rbtree.c
97034+++ b/lib/rbtree.c
97035@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
97036 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
97037
97038 static const struct rb_augment_callbacks dummy_callbacks = {
97039- dummy_propagate, dummy_copy, dummy_rotate
97040+ .propagate = dummy_propagate,
97041+ .copy = dummy_copy,
97042+ .rotate = dummy_rotate
97043 };
97044
97045 void rb_insert_color(struct rb_node *node, struct rb_root *root)
97046diff --git a/lib/show_mem.c b/lib/show_mem.c
97047index adc98e18..0ce83c2 100644
97048--- a/lib/show_mem.c
97049+++ b/lib/show_mem.c
97050@@ -49,6 +49,6 @@ void show_mem(unsigned int filter)
97051 quicklist_total_size());
97052 #endif
97053 #ifdef CONFIG_MEMORY_FAILURE
97054- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
97055+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
97056 #endif
97057 }
97058diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
97059index e0af6ff..fcc9f15 100644
97060--- a/lib/strncpy_from_user.c
97061+++ b/lib/strncpy_from_user.c
97062@@ -22,7 +22,7 @@
97063 */
97064 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
97065 {
97066- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97067+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97068 long res = 0;
97069
97070 /*
97071diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
97072index a28df52..3d55877 100644
97073--- a/lib/strnlen_user.c
97074+++ b/lib/strnlen_user.c
97075@@ -26,7 +26,7 @@
97076 */
97077 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
97078 {
97079- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97080+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97081 long align, res = 0;
97082 unsigned long c;
97083
97084diff --git a/lib/swiotlb.c b/lib/swiotlb.c
97085index 4abda07..b9d3765 100644
97086--- a/lib/swiotlb.c
97087+++ b/lib/swiotlb.c
97088@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
97089
97090 void
97091 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
97092- dma_addr_t dev_addr)
97093+ dma_addr_t dev_addr, struct dma_attrs *attrs)
97094 {
97095 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
97096
97097diff --git a/lib/usercopy.c b/lib/usercopy.c
97098index 4f5b1dd..7cab418 100644
97099--- a/lib/usercopy.c
97100+++ b/lib/usercopy.c
97101@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
97102 WARN(1, "Buffer overflow detected!\n");
97103 }
97104 EXPORT_SYMBOL(copy_from_user_overflow);
97105+
97106+void copy_to_user_overflow(void)
97107+{
97108+ WARN(1, "Buffer overflow detected!\n");
97109+}
97110+EXPORT_SYMBOL(copy_to_user_overflow);
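copy_to_user_overflow() mirrors the existing copy_from_user_overflow() as the report hook for the kernel-to-user direction; the arch uaccess wrappers are expected to call it when a compile-time object size proves a copy too large. A hedged sketch of such a wrapper (demo_copy_to_user is illustrative; the real checks are emitted per-arch):

static inline unsigned long
demo_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	size_t sz = __compiletime_object_size(from);

	if (unlikely(sz != (size_t)-1 && sz < n)) {
		copy_to_user_overflow();	/* WARNs, defined above */
		return n;			/* report nothing copied */
	}
	return __copy_to_user(to, from, n);
}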
97111diff --git a/lib/vsprintf.c b/lib/vsprintf.c
97112index b235c96..343ffc1 100644
97113--- a/lib/vsprintf.c
97114+++ b/lib/vsprintf.c
97115@@ -16,6 +16,9 @@
97116 * - scnprintf and vscnprintf
97117 */
97118
97119+#ifdef CONFIG_GRKERNSEC_HIDESYM
97120+#define __INCLUDED_BY_HIDESYM 1
97121+#endif
97122 #include <stdarg.h>
97123 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
97124 #include <linux/types.h>
97125@@ -626,7 +629,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
97126 #ifdef CONFIG_KALLSYMS
97127 if (*fmt == 'B')
97128 sprint_backtrace(sym, value);
97129- else if (*fmt != 'f' && *fmt != 's')
97130+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
97131 sprint_symbol(sym, value);
97132 else
97133 sprint_symbol_no_offset(sym, value);
97134@@ -1322,7 +1325,11 @@ char *address_val(char *buf, char *end, const void *addr,
97135 return number(buf, end, num, spec);
97136 }
97137
97138+#ifdef CONFIG_GRKERNSEC_HIDESYM
97139+int kptr_restrict __read_mostly = 2;
97140+#else
97141 int kptr_restrict __read_mostly;
97142+#endif
97143
97144 /*
97145 * Show a '%p' thing. A kernel extension is that the '%p' is followed
97146@@ -1333,8 +1340,10 @@ int kptr_restrict __read_mostly;
97147 *
97148 * - 'F' For symbolic function descriptor pointers with offset
97149 * - 'f' For simple symbolic function names without offset
96150+ * - 'X' For simple symbolic function names without offset, approved for use with GRKERNSEC_HIDESYM
97151 * - 'S' For symbolic direct pointers with offset
97152 * - 's' For symbolic direct pointers without offset
96153+ * - 'A' For symbolic direct pointers with offset, approved for use with GRKERNSEC_HIDESYM
97154 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
97155 * - 'B' For backtraced symbolic direct pointers with offset
97156 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
97157@@ -1417,12 +1426,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97158
97159 if (!ptr && *fmt != 'K') {
97160 /*
97161- * Print (null) with the same width as a pointer so it makes
97162+ * Print (nil) with the same width as a pointer so it makes
97163 * tabular output look nice.
97164 */
97165 if (spec.field_width == -1)
97166 spec.field_width = default_width;
97167- return string(buf, end, "(null)", spec);
97168+ return string(buf, end, "(nil)", spec);
97169 }
97170
97171 switch (*fmt) {
97172@@ -1432,6 +1441,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97173 /* Fallthrough */
97174 case 'S':
97175 case 's':
97176+#ifdef CONFIG_GRKERNSEC_HIDESYM
97177+ break;
97178+#else
97179+ return symbol_string(buf, end, ptr, spec, fmt);
97180+#endif
97181+ case 'X':
97182+ ptr = dereference_function_descriptor(ptr);
97183+ case 'A':
97184 case 'B':
97185 return symbol_string(buf, end, ptr, spec, fmt);
97186 case 'R':
97187@@ -1496,6 +1513,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97188 va_end(va);
97189 return buf;
97190 }
97191+ case 'P':
97192+ break;
97193 case 'K':
97194 /*
97195 * %pK cannot be used in IRQ context because its test
97196@@ -1553,6 +1572,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97197 ((const struct file *)ptr)->f_path.dentry,
97198 spec, fmt);
97199 }
97200+
97201+#ifdef CONFIG_GRKERNSEC_HIDESYM
97202+ /* 'P' = approved pointers that may be copied to userland,
97203+ as in the /proc/kallsyms case: we make it display nothing
97204+ for non-root users and the real contents for root users.
97205+ 'X' = approved simple symbols.
97206+ Also ignore 'K' pointers, since their NULLing for non-root
97207+ users is already forced above.
97208+ */
97209+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
97210+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
97211+ dump_stack();
97212+ ptr = NULL;
97213+ }
97214+#endif
97215+
97216 spec.flags |= SMALL;
97217 if (spec.field_width == -1) {
97218 spec.field_width = default_width;
97219@@ -2254,11 +2289,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
97220 typeof(type) value; \
97221 if (sizeof(type) == 8) { \
97222 args = PTR_ALIGN(args, sizeof(u32)); \
97223- *(u32 *)&value = *(u32 *)args; \
97224- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
97225+ *(u32 *)&value = *(const u32 *)args; \
97226+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
97227 } else { \
97228 args = PTR_ALIGN(args, sizeof(type)); \
97229- value = *(typeof(type) *)args; \
97230+ value = *(const typeof(type) *)args; \
97231 } \
97232 args += sizeof(type); \
97233 value; \
97234@@ -2321,7 +2356,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
97235 case FORMAT_TYPE_STR: {
97236 const char *str_arg = args;
97237 args += strlen(str_arg) + 1;
97238- str = string(str, end, (char *)str_arg, spec);
97239+ str = string(str, end, str_arg, spec);
97240 break;
97241 }
97242
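Taken together, the vsprintf.c changes make HIDESYM censor %pS/%ps symbol output and raw %p values while leaving three opt-in escape hatches: %pP for pointers explicitly approved for userland, %pK with its existing NULLing logic (kptr_restrict now defaults to 2), and the new %pX/%pA specifiers for approved symbol names. A hypothetical call site:

static void demo_report(void *handler, void *ret_addr)
{
	/* 'X' prints a plain symbol name (function descriptors are
	 * dereferenced first); 'A' prints a symbol plus offset.
	 * Both remain readable with GRKERNSEC_HIDESYM enabled. */
	printk(KERN_INFO "handler %pX called from %pA\n",
	       handler, ret_addr);
}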
97243diff --git a/localversion-grsec b/localversion-grsec
97244new file mode 100644
97245index 0000000..7cd6065
97246--- /dev/null
97247+++ b/localversion-grsec
97248@@ -0,0 +1 @@
97249+-grsec
97250diff --git a/mm/Kconfig b/mm/Kconfig
97251index a03131b..1b1bafb 100644
97252--- a/mm/Kconfig
97253+++ b/mm/Kconfig
97254@@ -342,10 +342,11 @@ config KSM
97255 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
97256
97257 config DEFAULT_MMAP_MIN_ADDR
97258- int "Low address space to protect from user allocation"
97259+ int "Low address space to protect from user allocation"
97260 depends on MMU
97261- default 4096
97262- help
97263+ default 32768 if ALPHA || ARM || PARISC || SPARC32
97264+ default 65536
97265+ help
97266 This is the portion of low virtual memory which should be protected
97267 from userspace allocation. Keeping a user from writing to low pages
97268 can help reduce the impact of kernel NULL pointer bugs.
97269@@ -376,7 +377,7 @@ config MEMORY_FAILURE
97270
97271 config HWPOISON_INJECT
97272 tristate "HWPoison pages injector"
97273- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
97274+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
97275 select PROC_PAGE_MONITOR
97276
97277 config NOMMU_INITIAL_TRIM_EXCESS
97278diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
97279index 957d3da..1d34e20 100644
97280--- a/mm/Kconfig.debug
97281+++ b/mm/Kconfig.debug
97282@@ -10,6 +10,7 @@ config PAGE_EXTENSION
97283 config DEBUG_PAGEALLOC
97284 bool "Debug page memory allocations"
97285 depends on DEBUG_KERNEL
97286+ depends on !PAX_MEMORY_SANITIZE
97287 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
97288 depends on !KMEMCHECK
97289 select PAGE_EXTENSION
97290diff --git a/mm/backing-dev.c b/mm/backing-dev.c
97291index 6dc4580..e031ec1 100644
97292--- a/mm/backing-dev.c
97293+++ b/mm/backing-dev.c
97294@@ -12,7 +12,7 @@
97295 #include <linux/device.h>
97296 #include <trace/events/writeback.h>
97297
97298-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
97299+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
97300
97301 struct backing_dev_info noop_backing_dev_info = {
97302 .name = "noop",
97303@@ -474,7 +474,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
97304 return err;
97305
97306 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
97307- atomic_long_inc_return(&bdi_seq));
97308+ atomic_long_inc_return_unchecked(&bdi_seq));
97309 if (err) {
97310 bdi_destroy(bdi);
97311 return err;
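
Editor's note: bdi_seq only generates unique device names, so wrap-around is harmless; converting it to atomic_long_unchecked_t exempts it from PaX's REFCOUNT overflow instrumentation, which would otherwise treat a wrap as a refcount bug. The idiom, sketched (the _unchecked type and helpers are PaX-specific and only exist on a grsec tree):

static atomic_long_unchecked_t seq = ATOMIC_LONG_INIT(0);

static long next_id(void)
{
	/* a wrap here only recycles a cosmetic identifier, so the
	 * overflow-checked atomic ops are deliberately not used */
	return atomic_long_inc_return_unchecked(&seq);
}
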
97312diff --git a/mm/filemap.c b/mm/filemap.c
97313index ad72420..0a20ef2 100644
97314--- a/mm/filemap.c
97315+++ b/mm/filemap.c
97316@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
97317 struct address_space *mapping = file->f_mapping;
97318
97319 if (!mapping->a_ops->readpage)
97320- return -ENOEXEC;
97321+ return -ENODEV;
97322 file_accessed(file);
97323 vma->vm_ops = &generic_file_vm_ops;
97324 return 0;
97325@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
97326 *pos = i_size_read(inode);
97327
97328 if (limit != RLIM_INFINITY) {
97329+	gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
97330 if (*pos >= limit) {
97331 send_sig(SIGXFSZ, current, 0);
97332 return -EFBIG;
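
Editor's note: the added gr_learn_resource() call feeds the requested file size into grsec's RBAC learning mode before the stock RLIMIT_FSIZE enforcement runs. That enforcement path (SIGXFSZ plus -EFBIG once *pos is at or past the limit) is observable from userspace with a small, self-contained demo:

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	char byte = 0;

	signal(SIGXFSZ, SIG_IGN);	/* observe EFBIG instead of dying */
	setrlimit(RLIMIT_FSIZE, &rl);

	int fd = open("/tmp/fsize-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	lseek(fd, 8192, SEEK_SET);	/* position past the 4 KiB limit */
	if (write(fd, &byte, 1) < 0)
		printf("write denied: %s\n", strerror(errno));	/* EFBIG */
	close(fd);
	return 0;
}
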
97333diff --git a/mm/gup.c b/mm/gup.c
97334index a6e24e2..72dd2cf 100644
97335--- a/mm/gup.c
97336+++ b/mm/gup.c
97337@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
97338 unsigned int fault_flags = 0;
97339 int ret;
97340
97341- /* For mlock, just skip the stack guard page. */
97342- if ((*flags & FOLL_MLOCK) &&
97343- (stack_guard_page_start(vma, address) ||
97344- stack_guard_page_end(vma, address + PAGE_SIZE)))
97345- return -ENOENT;
97346 if (*flags & FOLL_WRITE)
97347 fault_flags |= FAULT_FLAG_WRITE;
97348 if (nonblocking)
97349@@ -435,14 +430,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
97350 if (!(gup_flags & FOLL_FORCE))
97351 gup_flags |= FOLL_NUMA;
97352
97353- do {
97354+ while (nr_pages) {
97355 struct page *page;
97356 unsigned int foll_flags = gup_flags;
97357 unsigned int page_increm;
97358
97359 /* first iteration or cross vma bound */
97360 if (!vma || start >= vma->vm_end) {
97361- vma = find_extend_vma(mm, start);
97362+ vma = find_vma(mm, start);
97363 if (!vma && in_gate_area(mm, start)) {
97364 int ret;
97365 ret = get_gate_page(mm, start & PAGE_MASK,
97366@@ -454,7 +449,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
97367 goto next_page;
97368 }
97369
97370- if (!vma || check_vma_flags(vma, gup_flags))
97371+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
97372 return i ? : -EFAULT;
97373 if (is_vm_hugetlb_page(vma)) {
97374 i = follow_hugetlb_page(mm, vma, pages, vmas,
97375@@ -509,7 +504,7 @@ next_page:
97376 i += page_increm;
97377 start += page_increm * PAGE_SIZE;
97378 nr_pages -= page_increm;
97379- } while (nr_pages);
97380+ }
97381 return i;
97382 }
97383 EXPORT_SYMBOL(__get_user_pages);
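
Editor's note: two policy changes in gup here. The FOLL_MLOCK guard-page special case goes away, and the fault loop switches from find_extend_vma() to find_vma(), so a remote access can no longer grow a stack VMA as a side effect. The effective lookup contract after this hunk, restated as a sketch:

vma = find_vma(mm, start);		/* nearest vma with vm_end > start */
if (!vma || start < vma->vm_start)	/* address sits in a hole: no     */
	return i ? i : -EFAULT;		/* implicit stack expansion here  */
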
97384diff --git a/mm/highmem.c b/mm/highmem.c
97385index 123bcd3..0de52ba 100644
97386--- a/mm/highmem.c
97387+++ b/mm/highmem.c
97388@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
97389 * So no dangers, even with speculative execution.
97390 */
97391 page = pte_page(pkmap_page_table[i]);
97392+ pax_open_kernel();
97393 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
97394-
97395+ pax_close_kernel();
97396 set_page_address(page, NULL);
97397 need_flush = 1;
97398 }
97399@@ -259,9 +260,11 @@ start:
97400 }
97401 }
97402 vaddr = PKMAP_ADDR(last_pkmap_nr);
97403+
97404+ pax_open_kernel();
97405 set_pte_at(&init_mm, vaddr,
97406 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
97407-
97408+ pax_close_kernel();
97409 pkmap_count[last_pkmap_nr] = 1;
97410 set_page_address(page, (void *)vaddr);
97411
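
Editor's note: under PaX KERNEXEC the kernel page tables are mapped read-only, so the two pkmap PTE updates above must be bracketed by pax_open_kernel()/pax_close_kernel(), which open the shortest possible write window (on x86 this is commonly done by toggling CR0.WP — an assumption about the arch implementation, not shown in this hunk). A helper-shaped sketch of the pattern:

static void write_protected_pte(pte_t *ptep, pte_t pte, unsigned long vaddr)
{
	pax_open_kernel();			/* allow stores to RO mappings */
	set_pte_at(&init_mm, vaddr, ptep, pte);	/* the one protected write */
	pax_close_kernel();			/* re-protect immediately */
}
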
97412diff --git a/mm/hugetlb.c b/mm/hugetlb.c
97413index caad3c5..4f68807 100644
97414--- a/mm/hugetlb.c
97415+++ b/mm/hugetlb.c
97416@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
97417 struct ctl_table *table, int write,
97418 void __user *buffer, size_t *length, loff_t *ppos)
97419 {
97420+ ctl_table_no_const t;
97421 struct hstate *h = &default_hstate;
97422 unsigned long tmp = h->max_huge_pages;
97423 int ret;
97424@@ -2267,9 +2268,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
97425 if (!hugepages_supported())
97426 return -ENOTSUPP;
97427
97428- table->data = &tmp;
97429- table->maxlen = sizeof(unsigned long);
97430- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
97431+ t = *table;
97432+ t.data = &tmp;
97433+ t.maxlen = sizeof(unsigned long);
97434+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
97435 if (ret)
97436 goto out;
97437
97438@@ -2304,6 +2306,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
97439 struct hstate *h = &default_hstate;
97440 unsigned long tmp;
97441 int ret;
97442+ ctl_table_no_const hugetlb_table;
97443
97444 if (!hugepages_supported())
97445 return -ENOTSUPP;
97446@@ -2313,9 +2316,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
97447 if (write && hstate_is_gigantic(h))
97448 return -EINVAL;
97449
97450- table->data = &tmp;
97451- table->maxlen = sizeof(unsigned long);
97452- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
97453+ hugetlb_table = *table;
97454+ hugetlb_table.data = &tmp;
97455+ hugetlb_table.maxlen = sizeof(unsigned long);
97456+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
97457 if (ret)
97458 goto out;
97459
97460@@ -2800,6 +2804,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
97461 i_mmap_unlock_write(mapping);
97462 }
97463
97464+#ifdef CONFIG_PAX_SEGMEXEC
97465+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
97466+{
97467+ struct mm_struct *mm = vma->vm_mm;
97468+ struct vm_area_struct *vma_m;
97469+ unsigned long address_m;
97470+ pte_t *ptep_m;
97471+
97472+ vma_m = pax_find_mirror_vma(vma);
97473+ if (!vma_m)
97474+ return;
97475+
97476+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97477+ address_m = address + SEGMEXEC_TASK_SIZE;
97478+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
97479+ get_page(page_m);
97480+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
97481+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
97482+}
97483+#endif
97484+
97485 /*
97486 * Hugetlb_cow() should be called with page lock of the original hugepage held.
97487 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
97488@@ -2912,6 +2937,11 @@ retry_avoidcopy:
97489 make_huge_pte(vma, new_page, 1));
97490 page_remove_rmap(old_page);
97491 hugepage_add_new_anon_rmap(new_page, vma, address);
97492+
97493+#ifdef CONFIG_PAX_SEGMEXEC
97494+ pax_mirror_huge_pte(vma, address, new_page);
97495+#endif
97496+
97497 /* Make the old page be freed below */
97498 new_page = old_page;
97499 }
97500@@ -3072,6 +3102,10 @@ retry:
97501 && (vma->vm_flags & VM_SHARED)));
97502 set_huge_pte_at(mm, address, ptep, new_pte);
97503
97504+#ifdef CONFIG_PAX_SEGMEXEC
97505+ pax_mirror_huge_pte(vma, address, page);
97506+#endif
97507+
97508 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
97509 /* Optimization, do the COW without a second fault */
97510 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
97511@@ -3139,6 +3173,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97512 struct address_space *mapping;
97513 int need_wait_lock = 0;
97514
97515+#ifdef CONFIG_PAX_SEGMEXEC
97516+ struct vm_area_struct *vma_m;
97517+#endif
97518+
97519 address &= huge_page_mask(h);
97520
97521 ptep = huge_pte_offset(mm, address);
97522@@ -3152,6 +3190,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97523 VM_FAULT_SET_HINDEX(hstate_index(h));
97524 }
97525
97526+#ifdef CONFIG_PAX_SEGMEXEC
97527+ vma_m = pax_find_mirror_vma(vma);
97528+ if (vma_m) {
97529+ unsigned long address_m;
97530+
97531+ if (vma->vm_start > vma_m->vm_start) {
97532+ address_m = address;
97533+ address -= SEGMEXEC_TASK_SIZE;
97534+ vma = vma_m;
97535+ h = hstate_vma(vma);
97536+ } else
97537+ address_m = address + SEGMEXEC_TASK_SIZE;
97538+
97539+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
97540+ return VM_FAULT_OOM;
97541+ address_m &= HPAGE_MASK;
97542+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
97543+ }
97544+#endif
97545+
97546 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
97547 if (!ptep)
97548 return VM_FAULT_OOM;
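
Editor's note: both hugetlb sysctl handlers adopt the constification idiom. grsec makes registered ctl_table entries const, so instead of writing through table->data the handler takes a writable stack copy (ctl_table_no_const, a grsec typedef) and redirects it at a local. The idiom in isolation, as a sketch:

static int demo_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const t = *table;	/* writable copy of the const entry */
	unsigned long tmp = 0;

	t.data = &tmp;			/* point the copy at local storage */
	t.maxlen = sizeof(tmp);
	return proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
}
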
97549diff --git a/mm/internal.h b/mm/internal.h
97550index a96da5b..42ebd54 100644
97551--- a/mm/internal.h
97552+++ b/mm/internal.h
97553@@ -156,6 +156,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
97554
97555 extern int __isolate_free_page(struct page *page, unsigned int order);
97556 extern void __free_pages_bootmem(struct page *page, unsigned int order);
97557+extern void free_compound_page(struct page *page);
97558 extern void prep_compound_page(struct page *page, unsigned long order);
97559 #ifdef CONFIG_MEMORY_FAILURE
97560 extern bool is_free_buddy_page(struct page *page);
97561@@ -411,7 +412,7 @@ extern u32 hwpoison_filter_enable;
97562
97563 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
97564 unsigned long, unsigned long,
97565- unsigned long, unsigned long);
97566+ unsigned long, unsigned long) __intentional_overflow(-1);
97567
97568 extern void set_pageblock_order(void);
97569 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
97570diff --git a/mm/kmemleak.c b/mm/kmemleak.c
97571index 5405aff..483406d 100644
97572--- a/mm/kmemleak.c
97573+++ b/mm/kmemleak.c
97574@@ -365,7 +365,7 @@ static void print_unreferenced(struct seq_file *seq,
97575
97576 for (i = 0; i < object->trace_len; i++) {
97577 void *ptr = (void *)object->trace[i];
97578- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
97579+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
97580 }
97581 }
97582
97583@@ -1911,7 +1911,7 @@ static int __init kmemleak_late_init(void)
97584 return -ENOMEM;
97585 }
97586
97587- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
97588+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
97589 &kmemleak_fops);
97590 if (!dentry)
97591 pr_warning("Failed to create the debugfs kmemleak file\n");
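
Editor's note: two hardening tweaks in kmemleak. The stack-trace dump switches to grsec's %pP/%pA extensions (by all appearances the non-hidden raw pointer and a symbolized variant of %pS; both specifiers exist only on a grsec tree), and the debugfs node tightens from world-readable to root-only. The latter uses the stock debugfs API unchanged except for the mode:

/* 0400 instead of 0444: only root may read the leak report */
dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
			     &kmemleak_fops);
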
97592diff --git a/mm/maccess.c b/mm/maccess.c
97593index d53adf9..03a24bf 100644
97594--- a/mm/maccess.c
97595+++ b/mm/maccess.c
97596@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
97597 set_fs(KERNEL_DS);
97598 pagefault_disable();
97599 ret = __copy_from_user_inatomic(dst,
97600- (__force const void __user *)src, size);
97601+ (const void __force_user *)src, size);
97602 pagefault_enable();
97603 set_fs(old_fs);
97604
97605@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
97606
97607 set_fs(KERNEL_DS);
97608 pagefault_disable();
97609- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
97610+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
97611 pagefault_enable();
97612 set_fs(old_fs);
97613
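
Editor's note: __force_user is grsec's combined annotation for "cast across the user/kernel address-space split, deliberately"; it replaces the separate (__force ... __user) spelling so sparse keeps checking on configurations where the two spaces genuinely differ. The mechanism is plain sparse address spaces, which this standalone snippet demonstrates (the attributes are ignored by gcc; run the file through sparse to see the diagnostics):

#define __user	__attribute__((noderef, address_space(1)))
#define __force	__attribute__((force))

void demo(void)
{
	const void *kptr = 0;
	const void __user *uptr;

	uptr = (__force const void __user *)kptr;	/* explicit: accepted */
	/* uptr = kptr;	<- sparse: incorrect type in assignment */
}
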
97614diff --git a/mm/madvise.c b/mm/madvise.c
97615index d551475..8fdd7f3 100644
97616--- a/mm/madvise.c
97617+++ b/mm/madvise.c
97618@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
97619 pgoff_t pgoff;
97620 unsigned long new_flags = vma->vm_flags;
97621
97622+#ifdef CONFIG_PAX_SEGMEXEC
97623+ struct vm_area_struct *vma_m;
97624+#endif
97625+
97626 switch (behavior) {
97627 case MADV_NORMAL:
97628 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
97629@@ -126,6 +130,13 @@ success:
97630 /*
97631 * vm_flags is protected by the mmap_sem held in write mode.
97632 */
97633+
97634+#ifdef CONFIG_PAX_SEGMEXEC
97635+ vma_m = pax_find_mirror_vma(vma);
97636+ if (vma_m)
97637+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
97638+#endif
97639+
97640 vma->vm_flags = new_flags;
97641
97642 out:
97643@@ -277,11 +288,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
97644 struct vm_area_struct **prev,
97645 unsigned long start, unsigned long end)
97646 {
97647+
97648+#ifdef CONFIG_PAX_SEGMEXEC
97649+ struct vm_area_struct *vma_m;
97650+#endif
97651+
97652 *prev = vma;
97653 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
97654 return -EINVAL;
97655
97656 zap_page_range(vma, start, end - start, NULL);
97657+
97658+#ifdef CONFIG_PAX_SEGMEXEC
97659+ vma_m = pax_find_mirror_vma(vma);
97660+ if (vma_m) {
97661+ if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
97662+ return -EINVAL;
97663+
97664+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
97665+ }
97666+#endif
97667+
97668 return 0;
97669 }
97670
97671@@ -484,6 +511,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
97672 if (end < start)
97673 return error;
97674
97675+#ifdef CONFIG_PAX_SEGMEXEC
97676+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
97677+ if (end > SEGMEXEC_TASK_SIZE)
97678+ return error;
97679+ } else
97680+#endif
97681+
97682+ if (end > TASK_SIZE)
97683+ return error;
97684+
97685 error = 0;
97686 if (end == start)
97687 return error;
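
Editor's note: the madvise changes keep a SEGMEXEC mirror in lockstep (same flags minus the write bits, plus a mirrored zap) and clamp every user-supplied range to the half of the address space the task actually owns. That clamp recurs across this patch (madvise, mbind, mlock); as a helper-shaped sketch, assuming the PaX mm fields:

static inline bool pax_range_ok(const struct mm_struct *mm, unsigned long end)
{
#ifdef CONFIG_PAX_SEGMEXEC
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		return end <= SEGMEXEC_TASK_SIZE;	/* lower half only */
#endif
	return end <= TASK_SIZE;
}
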
97688diff --git a/mm/memory-failure.c b/mm/memory-failure.c
97689index 72a5224..51ba846 100644
97690--- a/mm/memory-failure.c
97691+++ b/mm/memory-failure.c
97692@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
97693
97694 int sysctl_memory_failure_recovery __read_mostly = 1;
97695
97696-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
97697+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
97698
97699 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
97700
97701@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
97702 pfn, t->comm, t->pid);
97703 si.si_signo = SIGBUS;
97704 si.si_errno = 0;
97705- si.si_addr = (void *)addr;
97706+ si.si_addr = (void __user *)addr;
97707 #ifdef __ARCH_SI_TRAPNO
97708 si.si_trapno = trapno;
97709 #endif
97710@@ -779,7 +779,7 @@ static struct page_state {
97711 unsigned long res;
97712 char *msg;
97713 int (*action)(struct page *p, unsigned long pfn);
97714-} error_states[] = {
97715+} __do_const error_states[] = {
97716 { reserved, reserved, "reserved kernel", me_kernel },
97717 /*
97718 * free pages are specially detected outside this table:
97719@@ -1087,7 +1087,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
97720 nr_pages = 1 << compound_order(hpage);
97721 else /* normal page or thp */
97722 nr_pages = 1;
97723- atomic_long_add(nr_pages, &num_poisoned_pages);
97724+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
97725
97726 /*
97727 * We need/can do nothing about count=0 pages.
97728@@ -1116,7 +1116,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
97729 if (PageHWPoison(hpage)) {
97730 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
97731 || (p != hpage && TestSetPageHWPoison(hpage))) {
97732- atomic_long_sub(nr_pages, &num_poisoned_pages);
97733+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97734 unlock_page(hpage);
97735 return 0;
97736 }
97737@@ -1184,14 +1184,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
97738 */
97739 if (!PageHWPoison(p)) {
97740 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
97741- atomic_long_sub(nr_pages, &num_poisoned_pages);
97742+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97743 put_page(hpage);
97744 res = 0;
97745 goto out;
97746 }
97747 if (hwpoison_filter(p)) {
97748 if (TestClearPageHWPoison(p))
97749- atomic_long_sub(nr_pages, &num_poisoned_pages);
97750+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97751 unlock_page(hpage);
97752 put_page(hpage);
97753 return 0;
97754@@ -1421,7 +1421,7 @@ int unpoison_memory(unsigned long pfn)
97755 return 0;
97756 }
97757 if (TestClearPageHWPoison(p))
97758- atomic_long_dec(&num_poisoned_pages);
97759+ atomic_long_dec_unchecked(&num_poisoned_pages);
97760 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
97761 return 0;
97762 }
97763@@ -1435,7 +1435,7 @@ int unpoison_memory(unsigned long pfn)
97764 */
97765 if (TestClearPageHWPoison(page)) {
97766 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
97767- atomic_long_sub(nr_pages, &num_poisoned_pages);
97768+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97769 freeit = 1;
97770 if (PageHuge(page))
97771 clear_page_hwpoison_huge_page(page);
97772@@ -1560,11 +1560,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
97773 if (PageHuge(page)) {
97774 set_page_hwpoison_huge_page(hpage);
97775 dequeue_hwpoisoned_huge_page(hpage);
97776- atomic_long_add(1 << compound_order(hpage),
97777+ atomic_long_add_unchecked(1 << compound_order(hpage),
97778 &num_poisoned_pages);
97779 } else {
97780 SetPageHWPoison(page);
97781- atomic_long_inc(&num_poisoned_pages);
97782+ atomic_long_inc_unchecked(&num_poisoned_pages);
97783 }
97784 }
97785 return ret;
97786@@ -1603,7 +1603,7 @@ static int __soft_offline_page(struct page *page, int flags)
97787 put_page(page);
97788 pr_info("soft_offline: %#lx: invalidated\n", pfn);
97789 SetPageHWPoison(page);
97790- atomic_long_inc(&num_poisoned_pages);
97791+ atomic_long_inc_unchecked(&num_poisoned_pages);
97792 return 0;
97793 }
97794
97795@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
97796 if (!is_free_buddy_page(page))
97797 pr_info("soft offline: %#lx: page leaked\n",
97798 pfn);
97799- atomic_long_inc(&num_poisoned_pages);
97800+ atomic_long_inc_unchecked(&num_poisoned_pages);
97801 }
97802 } else {
97803 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
97804@@ -1722,11 +1722,11 @@ int soft_offline_page(struct page *page, int flags)
97805 if (PageHuge(page)) {
97806 set_page_hwpoison_huge_page(hpage);
97807 if (!dequeue_hwpoisoned_huge_page(hpage))
97808- atomic_long_add(1 << compound_order(hpage),
97809+ atomic_long_add_unchecked(1 << compound_order(hpage),
97810 &num_poisoned_pages);
97811 } else {
97812 if (!TestSetPageHWPoison(page))
97813- atomic_long_inc(&num_poisoned_pages);
97814+ atomic_long_inc_unchecked(&num_poisoned_pages);
97815 }
97816 }
97817 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
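
Editor's note: besides moving the poison counter to the unchecked atomic type (it is a statistic, not a refcount), the memory-failure hunks tag the error_states table __do_const, a grsec/PaX gcc-plugin attribute that forces instances into read-only memory so the embedded function pointers cannot be redirected at runtime. The shape of the annotation:

static struct demo_state {
	unsigned long mask, res;
	char *msg;
	int (*action)(struct page *p, unsigned long pfn);
} __do_const demo_states[] = {		/* placed read-only by the plugin */
	/* ... entries ... */
};
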
97818diff --git a/mm/memory.c b/mm/memory.c
97819index 97839f5..4bc5530 100644
97820--- a/mm/memory.c
97821+++ b/mm/memory.c
97822@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
97823 free_pte_range(tlb, pmd, addr);
97824 } while (pmd++, addr = next, addr != end);
97825
97826+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
97827 start &= PUD_MASK;
97828 if (start < floor)
97829 return;
97830@@ -429,6 +430,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
97831 pud_clear(pud);
97832 pmd_free_tlb(tlb, pmd, start);
97833 mm_dec_nr_pmds(tlb->mm);
97834+#endif
97835 }
97836
97837 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
97838@@ -448,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
97839 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
97840 } while (pud++, addr = next, addr != end);
97841
97842+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
97843 start &= PGDIR_MASK;
97844 if (start < floor)
97845 return;
97846@@ -462,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
97847 pud = pud_offset(pgd, start);
97848 pgd_clear(pgd);
97849 pud_free_tlb(tlb, pud, start);
97850+#endif
97851+
97852 }
97853
97854 /*
97855@@ -691,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
97856 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
97857 */
97858 if (vma->vm_ops)
97859- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
97860+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
97861 vma->vm_ops->fault);
97862 if (vma->vm_file)
97863- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
97864+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
97865 vma->vm_file->f_op->mmap);
97866 dump_stack();
97867 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
97868@@ -1464,6 +1469,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
97869 page_add_file_rmap(page);
97870 set_pte_at(mm, addr, pte, mk_pte(page, prot));
97871
97872+#ifdef CONFIG_PAX_SEGMEXEC
97873+ pax_mirror_file_pte(vma, addr, page, ptl);
97874+#endif
97875+
97876 retval = 0;
97877 pte_unmap_unlock(pte, ptl);
97878 return retval;
97879@@ -1508,9 +1517,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
97880 if (!page_count(page))
97881 return -EINVAL;
97882 if (!(vma->vm_flags & VM_MIXEDMAP)) {
97883+
97884+#ifdef CONFIG_PAX_SEGMEXEC
97885+ struct vm_area_struct *vma_m;
97886+#endif
97887+
97888 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
97889 BUG_ON(vma->vm_flags & VM_PFNMAP);
97890 vma->vm_flags |= VM_MIXEDMAP;
97891+
97892+#ifdef CONFIG_PAX_SEGMEXEC
97893+ vma_m = pax_find_mirror_vma(vma);
97894+ if (vma_m)
97895+ vma_m->vm_flags |= VM_MIXEDMAP;
97896+#endif
97897+
97898 }
97899 return insert_page(vma, addr, page, vma->vm_page_prot);
97900 }
97901@@ -1593,6 +1614,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
97902 unsigned long pfn)
97903 {
97904 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
97905+ BUG_ON(vma->vm_mirror);
97906
97907 if (addr < vma->vm_start || addr >= vma->vm_end)
97908 return -EFAULT;
97909@@ -1840,7 +1862,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
97910
97911 BUG_ON(pud_huge(*pud));
97912
97913- pmd = pmd_alloc(mm, pud, addr);
97914+ pmd = (mm == &init_mm) ?
97915+ pmd_alloc_kernel(mm, pud, addr) :
97916+ pmd_alloc(mm, pud, addr);
97917 if (!pmd)
97918 return -ENOMEM;
97919 do {
97920@@ -1860,7 +1884,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
97921 unsigned long next;
97922 int err;
97923
97924- pud = pud_alloc(mm, pgd, addr);
97925+ pud = (mm == &init_mm) ?
97926+ pud_alloc_kernel(mm, pgd, addr) :
97927+ pud_alloc(mm, pgd, addr);
97928 if (!pud)
97929 return -ENOMEM;
97930 do {
97931@@ -1982,6 +2008,185 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
97932 return ret;
97933 }
97934
97935+#ifdef CONFIG_PAX_SEGMEXEC
97936+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
97937+{
97938+ struct mm_struct *mm = vma->vm_mm;
97939+ spinlock_t *ptl;
97940+ pte_t *pte, entry;
97941+
97942+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
97943+ entry = *pte;
97944+ if (!pte_present(entry)) {
97945+ if (!pte_none(entry)) {
97946+ free_swap_and_cache(pte_to_swp_entry(entry));
97947+ pte_clear_not_present_full(mm, address, pte, 0);
97948+ }
97949+ } else {
97950+ struct page *page;
97951+
97952+ flush_cache_page(vma, address, pte_pfn(entry));
97953+ entry = ptep_clear_flush(vma, address, pte);
97954+ BUG_ON(pte_dirty(entry));
97955+ page = vm_normal_page(vma, address, entry);
97956+ if (page) {
97957+ update_hiwater_rss(mm);
97958+ if (PageAnon(page))
97959+ dec_mm_counter_fast(mm, MM_ANONPAGES);
97960+ else
97961+ dec_mm_counter_fast(mm, MM_FILEPAGES);
97962+ page_remove_rmap(page);
97963+ page_cache_release(page);
97964+ }
97965+ }
97966+ pte_unmap_unlock(pte, ptl);
97967+}
97968+
97969+/* PaX: if vma is mirrored, synchronize the mirror's PTE
97970+ *
97971+ * the ptl of the lower mapped page is held on entry and is not released on exit
97972+ * or inside, to keep changes to the PTE state (swapout, mremap, munmap, etc.) atomic
97973+ */
97974+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
97975+{
97976+ struct mm_struct *mm = vma->vm_mm;
97977+ unsigned long address_m;
97978+ spinlock_t *ptl_m;
97979+ struct vm_area_struct *vma_m;
97980+ pmd_t *pmd_m;
97981+ pte_t *pte_m, entry_m;
97982+
97983+ BUG_ON(!page_m || !PageAnon(page_m));
97984+
97985+ vma_m = pax_find_mirror_vma(vma);
97986+ if (!vma_m)
97987+ return;
97988+
97989+ BUG_ON(!PageLocked(page_m));
97990+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97991+ address_m = address + SEGMEXEC_TASK_SIZE;
97992+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
97993+ pte_m = pte_offset_map(pmd_m, address_m);
97994+ ptl_m = pte_lockptr(mm, pmd_m);
97995+ if (ptl != ptl_m) {
97996+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
97997+ if (!pte_none(*pte_m))
97998+ goto out;
97999+ }
98000+
98001+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
98002+ page_cache_get(page_m);
98003+ page_add_anon_rmap(page_m, vma_m, address_m);
98004+ inc_mm_counter_fast(mm, MM_ANONPAGES);
98005+ set_pte_at(mm, address_m, pte_m, entry_m);
98006+ update_mmu_cache(vma_m, address_m, pte_m);
98007+out:
98008+ if (ptl != ptl_m)
98009+ spin_unlock(ptl_m);
98010+ pte_unmap(pte_m);
98011+ unlock_page(page_m);
98012+}
98013+
98014+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
98015+{
98016+ struct mm_struct *mm = vma->vm_mm;
98017+ unsigned long address_m;
98018+ spinlock_t *ptl_m;
98019+ struct vm_area_struct *vma_m;
98020+ pmd_t *pmd_m;
98021+ pte_t *pte_m, entry_m;
98022+
98023+ BUG_ON(!page_m || PageAnon(page_m));
98024+
98025+ vma_m = pax_find_mirror_vma(vma);
98026+ if (!vma_m)
98027+ return;
98028+
98029+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
98030+ address_m = address + SEGMEXEC_TASK_SIZE;
98031+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
98032+ pte_m = pte_offset_map(pmd_m, address_m);
98033+ ptl_m = pte_lockptr(mm, pmd_m);
98034+ if (ptl != ptl_m) {
98035+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
98036+ if (!pte_none(*pte_m))
98037+ goto out;
98038+ }
98039+
98040+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
98041+ page_cache_get(page_m);
98042+ page_add_file_rmap(page_m);
98043+ inc_mm_counter_fast(mm, MM_FILEPAGES);
98044+ set_pte_at(mm, address_m, pte_m, entry_m);
98045+ update_mmu_cache(vma_m, address_m, pte_m);
98046+out:
98047+ if (ptl != ptl_m)
98048+ spin_unlock(ptl_m);
98049+ pte_unmap(pte_m);
98050+}
98051+
98052+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
98053+{
98054+ struct mm_struct *mm = vma->vm_mm;
98055+ unsigned long address_m;
98056+ spinlock_t *ptl_m;
98057+ struct vm_area_struct *vma_m;
98058+ pmd_t *pmd_m;
98059+ pte_t *pte_m, entry_m;
98060+
98061+ vma_m = pax_find_mirror_vma(vma);
98062+ if (!vma_m)
98063+ return;
98064+
98065+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
98066+ address_m = address + SEGMEXEC_TASK_SIZE;
98067+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
98068+ pte_m = pte_offset_map(pmd_m, address_m);
98069+ ptl_m = pte_lockptr(mm, pmd_m);
98070+ if (ptl != ptl_m) {
98071+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
98072+ if (!pte_none(*pte_m))
98073+ goto out;
98074+ }
98075+
98076+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
98077+ set_pte_at(mm, address_m, pte_m, entry_m);
98078+out:
98079+ if (ptl != ptl_m)
98080+ spin_unlock(ptl_m);
98081+ pte_unmap(pte_m);
98082+}
98083+
98084+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
98085+{
98086+ struct page *page_m;
98087+ pte_t entry;
98088+
98089+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
98090+ goto out;
98091+
98092+ entry = *pte;
98093+ page_m = vm_normal_page(vma, address, entry);
98094+ if (!page_m)
98095+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
98096+ else if (PageAnon(page_m)) {
98097+ if (pax_find_mirror_vma(vma)) {
98098+ pte_unmap_unlock(pte, ptl);
98099+ lock_page(page_m);
98100+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
98101+ if (pte_same(entry, *pte))
98102+ pax_mirror_anon_pte(vma, address, page_m, ptl);
98103+ else
98104+ unlock_page(page_m);
98105+ }
98106+ } else
98107+ pax_mirror_file_pte(vma, address, page_m, ptl);
98108+
98109+out:
98110+ pte_unmap_unlock(pte, ptl);
98111+}
98112+#endif
98113+
98114 /*
98115 * This routine handles present pages, when users try to write
98116 * to a shared page. It is done by copying the page to a new address
98117@@ -2172,6 +2377,12 @@ gotten:
98118 */
98119 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
98120 if (likely(pte_same(*page_table, orig_pte))) {
98121+
98122+#ifdef CONFIG_PAX_SEGMEXEC
98123+ if (pax_find_mirror_vma(vma))
98124+ BUG_ON(!trylock_page(new_page));
98125+#endif
98126+
98127 if (old_page) {
98128 if (!PageAnon(old_page)) {
98129 dec_mm_counter_fast(mm, MM_FILEPAGES);
98130@@ -2225,6 +2436,10 @@ gotten:
98131 page_remove_rmap(old_page);
98132 }
98133
98134+#ifdef CONFIG_PAX_SEGMEXEC
98135+ pax_mirror_anon_pte(vma, address, new_page, ptl);
98136+#endif
98137+
98138 /* Free the old page.. */
98139 new_page = old_page;
98140 ret |= VM_FAULT_WRITE;
98141@@ -2483,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
98142 swap_free(entry);
98143 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
98144 try_to_free_swap(page);
98145+
98146+#ifdef CONFIG_PAX_SEGMEXEC
98147+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
98148+#endif
98149+
98150 unlock_page(page);
98151 if (page != swapcache) {
98152 /*
98153@@ -2506,6 +2726,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
98154
98155 /* No need to invalidate - it was non-present before */
98156 update_mmu_cache(vma, address, page_table);
98157+
98158+#ifdef CONFIG_PAX_SEGMEXEC
98159+ pax_mirror_anon_pte(vma, address, page, ptl);
98160+#endif
98161+
98162 unlock:
98163 pte_unmap_unlock(page_table, ptl);
98164 out:
98165@@ -2525,40 +2750,6 @@ out_release:
98166 }
98167
98168 /*
98169- * This is like a special single-page "expand_{down|up}wards()",
98170- * except we must first make sure that 'address{-|+}PAGE_SIZE'
98171- * doesn't hit another vma.
98172- */
98173-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
98174-{
98175- address &= PAGE_MASK;
98176- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
98177- struct vm_area_struct *prev = vma->vm_prev;
98178-
98179- /*
98180- * Is there a mapping abutting this one below?
98181- *
98182- * That's only ok if it's the same stack mapping
98183- * that has gotten split..
98184- */
98185- if (prev && prev->vm_end == address)
98186- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
98187-
98188- return expand_downwards(vma, address - PAGE_SIZE);
98189- }
98190- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
98191- struct vm_area_struct *next = vma->vm_next;
98192-
98193- /* As VM_GROWSDOWN but s/below/above/ */
98194- if (next && next->vm_start == address + PAGE_SIZE)
98195- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
98196-
98197- return expand_upwards(vma, address + PAGE_SIZE);
98198- }
98199- return 0;
98200-}
98201-
98202-/*
98203 * We enter with non-exclusive mmap_sem (to exclude vma changes,
98204 * but allow concurrent faults), and pte mapped but not yet locked.
98205 * We return with mmap_sem still held, but pte unmapped and unlocked.
98206@@ -2568,27 +2759,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
98207 unsigned int flags)
98208 {
98209 struct mem_cgroup *memcg;
98210- struct page *page;
98211+ struct page *page = NULL;
98212 spinlock_t *ptl;
98213 pte_t entry;
98214
98215- pte_unmap(page_table);
98216-
98217- /* Check if we need to add a guard page to the stack */
98218- if (check_stack_guard_page(vma, address) < 0)
98219- return VM_FAULT_SIGSEGV;
98220-
98221- /* Use the zero-page for reads */
98222 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
98223 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
98224 vma->vm_page_prot));
98225- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
98226+ ptl = pte_lockptr(mm, pmd);
98227+ spin_lock(ptl);
98228 if (!pte_none(*page_table))
98229 goto unlock;
98230 goto setpte;
98231 }
98232
98233 /* Allocate our own private page. */
98234+ pte_unmap(page_table);
98235+
98236 if (unlikely(anon_vma_prepare(vma)))
98237 goto oom;
98238 page = alloc_zeroed_user_highpage_movable(vma, address);
98239@@ -2612,6 +2799,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
98240 if (!pte_none(*page_table))
98241 goto release;
98242
98243+#ifdef CONFIG_PAX_SEGMEXEC
98244+ if (pax_find_mirror_vma(vma))
98245+ BUG_ON(!trylock_page(page));
98246+#endif
98247+
98248 inc_mm_counter_fast(mm, MM_ANONPAGES);
98249 page_add_new_anon_rmap(page, vma, address);
98250 mem_cgroup_commit_charge(page, memcg, false);
98251@@ -2621,6 +2813,12 @@ setpte:
98252
98253 /* No need to invalidate - it was non-present before */
98254 update_mmu_cache(vma, address, page_table);
98255+
98256+#ifdef CONFIG_PAX_SEGMEXEC
98257+ if (page)
98258+ pax_mirror_anon_pte(vma, address, page, ptl);
98259+#endif
98260+
98261 unlock:
98262 pte_unmap_unlock(page_table, ptl);
98263 return 0;
98264@@ -2853,6 +3051,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98265 return ret;
98266 }
98267 do_set_pte(vma, address, fault_page, pte, false, false);
98268+
98269+#ifdef CONFIG_PAX_SEGMEXEC
98270+ pax_mirror_file_pte(vma, address, fault_page, ptl);
98271+#endif
98272+
98273 unlock_page(fault_page);
98274 unlock_out:
98275 pte_unmap_unlock(pte, ptl);
98276@@ -2904,7 +3107,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98277 }
98278 goto uncharge_out;
98279 }
98280+
98281+#ifdef CONFIG_PAX_SEGMEXEC
98282+ if (pax_find_mirror_vma(vma))
98283+ BUG_ON(!trylock_page(new_page));
98284+#endif
98285+
98286 do_set_pte(vma, address, new_page, pte, true, true);
98287+
98288+#ifdef CONFIG_PAX_SEGMEXEC
98289+ pax_mirror_anon_pte(vma, address, new_page, ptl);
98290+#endif
98291+
98292 mem_cgroup_commit_charge(new_page, memcg, false);
98293 lru_cache_add_active_or_unevictable(new_page, vma);
98294 pte_unmap_unlock(pte, ptl);
98295@@ -2962,6 +3176,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98296 return ret;
98297 }
98298 do_set_pte(vma, address, fault_page, pte, true, false);
98299+
98300+#ifdef CONFIG_PAX_SEGMEXEC
98301+ pax_mirror_file_pte(vma, address, fault_page, ptl);
98302+#endif
98303+
98304 pte_unmap_unlock(pte, ptl);
98305
98306 if (set_page_dirty(fault_page))
98307@@ -3185,6 +3404,12 @@ static int handle_pte_fault(struct mm_struct *mm,
98308 if (flags & FAULT_FLAG_WRITE)
98309 flush_tlb_fix_spurious_fault(vma, address);
98310 }
98311+
98312+#ifdef CONFIG_PAX_SEGMEXEC
98313+ pax_mirror_pte(vma, address, pte, pmd, ptl);
98314+ return 0;
98315+#endif
98316+
98317 unlock:
98318 pte_unmap_unlock(pte, ptl);
98319 return 0;
98320@@ -3204,9 +3429,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98321 pmd_t *pmd;
98322 pte_t *pte;
98323
98324+#ifdef CONFIG_PAX_SEGMEXEC
98325+ struct vm_area_struct *vma_m;
98326+#endif
98327+
98328 if (unlikely(is_vm_hugetlb_page(vma)))
98329 return hugetlb_fault(mm, vma, address, flags);
98330
98331+#ifdef CONFIG_PAX_SEGMEXEC
98332+ vma_m = pax_find_mirror_vma(vma);
98333+ if (vma_m) {
98334+ unsigned long address_m;
98335+ pgd_t *pgd_m;
98336+ pud_t *pud_m;
98337+ pmd_t *pmd_m;
98338+
98339+ if (vma->vm_start > vma_m->vm_start) {
98340+ address_m = address;
98341+ address -= SEGMEXEC_TASK_SIZE;
98342+ vma = vma_m;
98343+ } else
98344+ address_m = address + SEGMEXEC_TASK_SIZE;
98345+
98346+ pgd_m = pgd_offset(mm, address_m);
98347+ pud_m = pud_alloc(mm, pgd_m, address_m);
98348+ if (!pud_m)
98349+ return VM_FAULT_OOM;
98350+ pmd_m = pmd_alloc(mm, pud_m, address_m);
98351+ if (!pmd_m)
98352+ return VM_FAULT_OOM;
98353+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
98354+ return VM_FAULT_OOM;
98355+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
98356+ }
98357+#endif
98358+
98359 pgd = pgd_offset(mm, address);
98360 pud = pud_alloc(mm, pgd, address);
98361 if (!pud)
98362@@ -3341,6 +3598,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
98363 spin_unlock(&mm->page_table_lock);
98364 return 0;
98365 }
98366+
98367+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
98368+{
98369+ pud_t *new = pud_alloc_one(mm, address);
98370+ if (!new)
98371+ return -ENOMEM;
98372+
98373+ smp_wmb(); /* See comment in __pte_alloc */
98374+
98375+ spin_lock(&mm->page_table_lock);
98376+ if (pgd_present(*pgd)) /* Another has populated it */
98377+ pud_free(mm, new);
98378+ else
98379+ pgd_populate_kernel(mm, pgd, new);
98380+ spin_unlock(&mm->page_table_lock);
98381+ return 0;
98382+}
98383 #endif /* __PAGETABLE_PUD_FOLDED */
98384
98385 #ifndef __PAGETABLE_PMD_FOLDED
98386@@ -3373,6 +3647,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
98387 spin_unlock(&mm->page_table_lock);
98388 return 0;
98389 }
98390+
98391+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
98392+{
98393+ pmd_t *new = pmd_alloc_one(mm, address);
98394+ if (!new)
98395+ return -ENOMEM;
98396+
98397+ smp_wmb(); /* See comment in __pte_alloc */
98398+
98399+ spin_lock(&mm->page_table_lock);
98400+#ifndef __ARCH_HAS_4LEVEL_HACK
98401+ if (!pud_present(*pud)) {
98402+ mm_inc_nr_pmds(mm);
98403+ pud_populate_kernel(mm, pud, new);
98404+ } else /* Another has populated it */
98405+ pmd_free(mm, new);
98406+#else
98407+ if (!pgd_present(*pud)) {
98408+ mm_inc_nr_pmds(mm);
98409+ pgd_populate_kernel(mm, pud, new);
98410+ } else /* Another has populated it */
98411+ pmd_free(mm, new);
98412+#endif /* __ARCH_HAS_4LEVEL_HACK */
98413+ spin_unlock(&mm->page_table_lock);
98414+ return 0;
98415+}
98416 #endif /* __PAGETABLE_PMD_FOLDED */
98417
98418 static int __follow_pte(struct mm_struct *mm, unsigned long address,
98419@@ -3482,8 +3782,8 @@ out:
98420 return ret;
98421 }
98422
98423-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
98424- void *buf, int len, int write)
98425+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
98426+ void *buf, size_t len, int write)
98427 {
98428 resource_size_t phys_addr;
98429 unsigned long prot = 0;
98430@@ -3509,8 +3809,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
98431 * Access another process' address space as given in mm. If non-NULL, use the
98432 * given task for page fault accounting.
98433 */
98434-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98435- unsigned long addr, void *buf, int len, int write)
98436+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98437+ unsigned long addr, void *buf, size_t len, int write)
98438 {
98439 struct vm_area_struct *vma;
98440 void *old_buf = buf;
98441@@ -3518,7 +3818,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98442 down_read(&mm->mmap_sem);
98443 /* ignore errors, just check how much was successfully transferred */
98444 while (len) {
98445- int bytes, ret, offset;
98446+ ssize_t bytes, ret, offset;
98447 void *maddr;
98448 struct page *page = NULL;
98449
98450@@ -3579,8 +3879,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98451 *
98452 * The caller must hold a reference on @mm.
98453 */
98454-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98455- void *buf, int len, int write)
98456+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98457+ void *buf, size_t len, int write)
98458 {
98459 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98460 }
98461@@ -3590,11 +3890,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98462 * Source/target buffer must be kernel space,
98463 * Do not walk the page table directly, use get_user_pages
98464 */
98465-int access_process_vm(struct task_struct *tsk, unsigned long addr,
98466- void *buf, int len, int write)
98467+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
98468+ void *buf, size_t len, int write)
98469 {
98470 struct mm_struct *mm;
98471- int ret;
98472+ ssize_t ret;
98473
98474 mm = get_task_mm(tsk);
98475 if (!mm)
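
Editor's note: beyond the SEGMEXEC mirroring machinery and the *_alloc_kernel page-table populators (needed because PER_CPU_PGD gives init_mm its own populate path, hence the mm == &init_mm dispatch in apply_to_*_range), note the signature widening at the bottom of the memory.c diff: the access_*_vm family moves from int to size_t/ssize_t lengths. With int, a length above INT_MAX turns negative and the while (len) arithmetic misbehaves; the wider types make the contract explicit:

/* widened prototypes as introduced above */
ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
			 void *buf, size_t len, int write);
ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
			  void *buf, size_t len, int write);
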
98476diff --git a/mm/mempolicy.c b/mm/mempolicy.c
98477index de5dc5e..68a4ea3 100644
98478--- a/mm/mempolicy.c
98479+++ b/mm/mempolicy.c
98480@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
98481 unsigned long vmstart;
98482 unsigned long vmend;
98483
98484+#ifdef CONFIG_PAX_SEGMEXEC
98485+ struct vm_area_struct *vma_m;
98486+#endif
98487+
98488 vma = find_vma(mm, start);
98489 if (!vma || vma->vm_start > start)
98490 return -EFAULT;
98491@@ -746,6 +750,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
98492 err = vma_replace_policy(vma, new_pol);
98493 if (err)
98494 goto out;
98495+
98496+#ifdef CONFIG_PAX_SEGMEXEC
98497+ vma_m = pax_find_mirror_vma(vma);
98498+ if (vma_m) {
98499+ err = vma_replace_policy(vma_m, new_pol);
98500+ if (err)
98501+ goto out;
98502+ }
98503+#endif
98504+
98505 }
98506
98507 out:
98508@@ -1160,6 +1174,17 @@ static long do_mbind(unsigned long start, unsigned long len,
98509
98510 if (end < start)
98511 return -EINVAL;
98512+
98513+#ifdef CONFIG_PAX_SEGMEXEC
98514+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
98515+ if (end > SEGMEXEC_TASK_SIZE)
98516+ return -EINVAL;
98517+ } else
98518+#endif
98519+
98520+ if (end > TASK_SIZE)
98521+ return -EINVAL;
98522+
98523 if (end == start)
98524 return 0;
98525
98526@@ -1385,8 +1410,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
98527 */
98528 tcred = __task_cred(task);
98529 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
98530- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
98531- !capable(CAP_SYS_NICE)) {
98532+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
98533 rcu_read_unlock();
98534 err = -EPERM;
98535 goto out_put;
98536@@ -1417,6 +1441,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
98537 goto out;
98538 }
98539
98540+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
98541+ if (mm != current->mm &&
98542+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
98543+ mmput(mm);
98544+ err = -EPERM;
98545+ goto out;
98546+ }
98547+#endif
98548+
98549 err = do_migrate_pages(mm, old, new,
98550 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
98551
98552diff --git a/mm/migrate.c b/mm/migrate.c
98553index 85e0426..be49beb 100644
98554--- a/mm/migrate.c
98555+++ b/mm/migrate.c
98556@@ -1472,8 +1472,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
98557 */
98558 tcred = __task_cred(task);
98559 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
98560- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
98561- !capable(CAP_SYS_NICE)) {
98562+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
98563 rcu_read_unlock();
98564 err = -EPERM;
98565 goto out;
98566diff --git a/mm/mlock.c b/mm/mlock.c
98567index 8a54cd2..92f1747 100644
98568--- a/mm/mlock.c
98569+++ b/mm/mlock.c
98570@@ -14,6 +14,7 @@
98571 #include <linux/pagevec.h>
98572 #include <linux/mempolicy.h>
98573 #include <linux/syscalls.h>
98574+#include <linux/security.h>
98575 #include <linux/sched.h>
98576 #include <linux/export.h>
98577 #include <linux/rmap.h>
98578@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
98579 {
98580 unsigned long nstart, end, tmp;
98581 struct vm_area_struct * vma, * prev;
98582- int error;
98583+ int error = 0;
98584
98585 VM_BUG_ON(start & ~PAGE_MASK);
98586 VM_BUG_ON(len != PAGE_ALIGN(len));
98587@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
98588 return -EINVAL;
98589 if (end == start)
98590 return 0;
98591+ if (end > TASK_SIZE)
98592+ return -EINVAL;
98593+
98594 vma = find_vma(current->mm, start);
98595 if (!vma || vma->vm_start > start)
98596 return -ENOMEM;
98597@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
98598 for (nstart = start ; ; ) {
98599 vm_flags_t newflags;
98600
98601+#ifdef CONFIG_PAX_SEGMEXEC
98602+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
98603+ break;
98604+#endif
98605+
98606 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
98607
98608 newflags = vma->vm_flags & ~VM_LOCKED;
98609@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
98610 locked += current->mm->locked_vm;
98611
98612 /* check against resource limits */
98613+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
98614 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
98615 error = do_mlock(start, len, 1);
98616
98617@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
98618 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
98619 vm_flags_t newflags;
98620
98621+#ifdef CONFIG_PAX_SEGMEXEC
98622+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
98623+ break;
98624+#endif
98625+
98626 newflags = vma->vm_flags & ~VM_LOCKED;
98627 if (flags & MCL_CURRENT)
98628 newflags |= VM_LOCKED;
98629@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
98630 lock_limit >>= PAGE_SHIFT;
98631
98632 ret = -ENOMEM;
98633+
98634+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
98635+
98636 down_write(&current->mm->mmap_sem);
98637-
98638 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
98639 capable(CAP_IPC_LOCK))
98640 ret = do_mlockall(flags);
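
Editor's note: mlock picks up the same bounding (end > TASK_SIZE fails, SEGMEXEC VMAs above the split are skipped) plus RLIMIT_MEMLOCK learning hooks ahead of the stock limit check. The limit itself behaves as before and is easy to observe from userspace; an unprivileged mlock past the cap typically fails with ENOMEM:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	setrlimit(RLIMIT_MEMLOCK, &rl);

	void *p = malloc(64 * 1024);
	if (!p)
		return 1;
	if (mlock(p, 64 * 1024) != 0)		/* exceeds the 4 KiB cap */
		printf("mlock denied: %s\n", strerror(errno));
	free(p);
	return 0;
}
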
98641diff --git a/mm/mm_init.c b/mm/mm_init.c
98642index 5f420f7..dd42fb1b 100644
98643--- a/mm/mm_init.c
98644+++ b/mm/mm_init.c
98645@@ -177,7 +177,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
98646 return NOTIFY_OK;
98647 }
98648
98649-static struct notifier_block compute_batch_nb __meminitdata = {
98650+static struct notifier_block compute_batch_nb __meminitconst = {
98651 .notifier_call = mm_compute_batch_notifier,
98652 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
98653 };
98654diff --git a/mm/mmap.c b/mm/mmap.c
98655index 9ec50a3..0476e2d 100644
98656--- a/mm/mmap.c
98657+++ b/mm/mmap.c
98658@@ -41,6 +41,7 @@
98659 #include <linux/notifier.h>
98660 #include <linux/memory.h>
98661 #include <linux/printk.h>
98662+#include <linux/random.h>
98663
98664 #include <asm/uaccess.h>
98665 #include <asm/cacheflush.h>
98666@@ -57,6 +58,16 @@
98667 #define arch_rebalance_pgtables(addr, len) (addr)
98668 #endif
98669
98670+static inline void verify_mm_writelocked(struct mm_struct *mm)
98671+{
98672+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
98673+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98674+ up_read(&mm->mmap_sem);
98675+ BUG();
98676+ }
98677+#endif
98678+}
98679+
98680 static void unmap_region(struct mm_struct *mm,
98681 struct vm_area_struct *vma, struct vm_area_struct *prev,
98682 unsigned long start, unsigned long end);
98683@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
98684 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
98685 *
98686 */
98687-pgprot_t protection_map[16] = {
98688+pgprot_t protection_map[16] __read_only = {
98689 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
98690 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
98691 };
98692
98693-pgprot_t vm_get_page_prot(unsigned long vm_flags)
98694+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
98695 {
98696- return __pgprot(pgprot_val(protection_map[vm_flags &
98697+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
98698 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
98699 pgprot_val(arch_vm_get_page_prot(vm_flags)));
98700+
98701+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
98702+ if (!(__supported_pte_mask & _PAGE_NX) &&
98703+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
98704+ (vm_flags & (VM_READ | VM_WRITE)))
98705+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
98706+#endif
98707+
98708+ return prot;
98709 }
98710 EXPORT_SYMBOL(vm_get_page_prot);
98711
98712@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
98713 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
98714 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98715 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98716+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
98717 /*
98718 * Make sure vm_committed_as in one cacheline and not cacheline shared with
98719 * other variables. It can be updated by several CPUs frequently.
98720@@ -271,6 +292,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
98721 struct vm_area_struct *next = vma->vm_next;
98722
98723 might_sleep();
98724+ BUG_ON(vma->vm_mirror);
98725 if (vma->vm_ops && vma->vm_ops->close)
98726 vma->vm_ops->close(vma);
98727 if (vma->vm_file)
98728@@ -284,6 +306,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
98729
98730 SYSCALL_DEFINE1(brk, unsigned long, brk)
98731 {
98732+ unsigned long rlim;
98733 unsigned long retval;
98734 unsigned long newbrk, oldbrk;
98735 struct mm_struct *mm = current->mm;
98736@@ -314,7 +337,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97737 * segment grow beyond its set limit in the case where the limit is
98738 * not page aligned -Ram Gupta
98739 */
98740- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
98741+ rlim = rlimit(RLIMIT_DATA);
98742+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
98743+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
98744+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
98745+ rlim = 4096 * PAGE_SIZE;
98746+#endif
98747+ if (check_data_rlimit(rlim, brk, mm->start_brk,
98748 mm->end_data, mm->start_data))
98749 goto out;
98750
98751@@ -967,6 +996,12 @@ static int
98752 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
98753 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
98754 {
98755+
98756+#ifdef CONFIG_PAX_SEGMEXEC
98757+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
98758+ return 0;
98759+#endif
98760+
98761 if (is_mergeable_vma(vma, file, vm_flags) &&
98762 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
98763 if (vma->vm_pgoff == vm_pgoff)
98764@@ -986,6 +1021,12 @@ static int
98765 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
98766 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
98767 {
98768+
98769+#ifdef CONFIG_PAX_SEGMEXEC
98770+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
98771+ return 0;
98772+#endif
98773+
98774 if (is_mergeable_vma(vma, file, vm_flags) &&
98775 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
98776 pgoff_t vm_pglen;
98777@@ -1035,6 +1076,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98778 struct vm_area_struct *area, *next;
98779 int err;
98780
98781+#ifdef CONFIG_PAX_SEGMEXEC
98782+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
98783+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
98784+
98785+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
98786+#endif
98787+
98788 /*
98789 * We later require that vma->vm_flags == vm_flags,
98790 * so this tests vma->vm_flags & VM_SPECIAL, too.
98791@@ -1050,6 +1098,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98792 if (next && next->vm_end == end) /* cases 6, 7, 8 */
98793 next = next->vm_next;
98794
98795+#ifdef CONFIG_PAX_SEGMEXEC
98796+ if (prev)
98797+ prev_m = pax_find_mirror_vma(prev);
98798+ if (area)
98799+ area_m = pax_find_mirror_vma(area);
98800+ if (next)
98801+ next_m = pax_find_mirror_vma(next);
98802+#endif
98803+
98804 /*
98805 * Can it merge with the predecessor?
98806 */
98807@@ -1069,9 +1126,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98808 /* cases 1, 6 */
98809 err = vma_adjust(prev, prev->vm_start,
98810 next->vm_end, prev->vm_pgoff, NULL);
98811- } else /* cases 2, 5, 7 */
98812+
98813+#ifdef CONFIG_PAX_SEGMEXEC
98814+ if (!err && prev_m)
98815+ err = vma_adjust(prev_m, prev_m->vm_start,
98816+ next_m->vm_end, prev_m->vm_pgoff, NULL);
98817+#endif
98818+
98819+ } else { /* cases 2, 5, 7 */
98820 err = vma_adjust(prev, prev->vm_start,
98821 end, prev->vm_pgoff, NULL);
98822+
98823+#ifdef CONFIG_PAX_SEGMEXEC
98824+ if (!err && prev_m)
98825+ err = vma_adjust(prev_m, prev_m->vm_start,
98826+ end_m, prev_m->vm_pgoff, NULL);
98827+#endif
98828+
98829+ }
98830 if (err)
98831 return NULL;
98832 khugepaged_enter_vma_merge(prev, vm_flags);
98833@@ -1085,12 +1157,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98834 mpol_equal(policy, vma_policy(next)) &&
98835 can_vma_merge_before(next, vm_flags,
98836 anon_vma, file, pgoff+pglen)) {
98837- if (prev && addr < prev->vm_end) /* case 4 */
98838+ if (prev && addr < prev->vm_end) { /* case 4 */
98839 err = vma_adjust(prev, prev->vm_start,
98840 addr, prev->vm_pgoff, NULL);
98841- else /* cases 3, 8 */
98842+
98843+#ifdef CONFIG_PAX_SEGMEXEC
98844+ if (!err && prev_m)
98845+ err = vma_adjust(prev_m, prev_m->vm_start,
98846+ addr_m, prev_m->vm_pgoff, NULL);
98847+#endif
98848+
98849+ } else { /* cases 3, 8 */
98850 err = vma_adjust(area, addr, next->vm_end,
98851 next->vm_pgoff - pglen, NULL);
98852+
98853+#ifdef CONFIG_PAX_SEGMEXEC
98854+ if (!err && area_m)
98855+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
98856+ next_m->vm_pgoff - pglen, NULL);
98857+#endif
98858+
98859+ }
98860 if (err)
98861 return NULL;
98862 khugepaged_enter_vma_merge(area, vm_flags);
98863@@ -1199,8 +1286,10 @@ none:
98864 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
98865 struct file *file, long pages)
98866 {
98867- const unsigned long stack_flags
98868- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
98869+
98870+#ifdef CONFIG_PAX_RANDMMAP
98871+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
98872+#endif
98873
98874 mm->total_vm += pages;
98875
98876@@ -1208,7 +1297,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
98877 mm->shared_vm += pages;
98878 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
98879 mm->exec_vm += pages;
98880- } else if (flags & stack_flags)
98881+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
98882 mm->stack_vm += pages;
98883 }
98884 #endif /* CONFIG_PROC_FS */
98885@@ -1238,6 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
98886 locked += mm->locked_vm;
98887 lock_limit = rlimit(RLIMIT_MEMLOCK);
98888 lock_limit >>= PAGE_SHIFT;
98889+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
98890 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
98891 return -EAGAIN;
98892 }
98893@@ -1264,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98894 * (the exception is when the underlying filesystem is noexec
98895 * mounted, in which case we dont add PROT_EXEC.)
98896 */
98897- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98898+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98899 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
98900 prot |= PROT_EXEC;
98901
98902@@ -1290,7 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98903 /* Obtain the address to map to. we verify (or select) it and ensure
98904 * that it represents a valid section of the address space.
98905 */
98906- addr = get_unmapped_area(file, addr, len, pgoff, flags);
98907+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
98908 if (addr & ~PAGE_MASK)
98909 return addr;
98910
98911@@ -1301,6 +1391,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98912 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
98913 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
98914
98915+#ifdef CONFIG_PAX_MPROTECT
98916+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98917+
98918+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
98919+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
98920+ mm->binfmt->handle_mmap)
98921+ mm->binfmt->handle_mmap(file);
98922+#endif
98923+
98924+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98925+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
98926+ gr_log_rwxmmap(file);
98927+
98928+#ifdef CONFIG_PAX_EMUPLT
98929+ vm_flags &= ~VM_EXEC;
98930+#else
98931+ return -EPERM;
98932+#endif
98933+
98934+ }
98935+
98936+ if (!(vm_flags & VM_EXEC))
98937+ vm_flags &= ~VM_MAYEXEC;
98938+#else
98939+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98940+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98941+#endif
98942+ else
98943+ vm_flags &= ~VM_MAYWRITE;
98944+ }
98945+#endif
98946+
98947+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
98948+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
98949+ vm_flags &= ~VM_PAGEEXEC;
98950+#endif
98951+
98952 if (flags & MAP_LOCKED)
98953 if (!can_do_mlock())
98954 return -EPERM;
98955@@ -1388,6 +1515,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98956 vm_flags |= VM_NORESERVE;
98957 }
98958
98959+ if (!gr_acl_handle_mmap(file, prot))
98960+ return -EACCES;
98961+
98962 addr = mmap_region(file, addr, len, vm_flags, pgoff);
98963 if (!IS_ERR_VALUE(addr) &&
98964 ((vm_flags & VM_LOCKED) ||
98965@@ -1481,7 +1611,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
98966 vm_flags_t vm_flags = vma->vm_flags;
98967
98968 /* If it was private or non-writable, the write bit is already clear */
98969- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
98970+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
98971 return 0;
98972
98973 /* The backer wishes to know when pages are first written to? */
98974@@ -1532,7 +1662,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
98975 struct rb_node **rb_link, *rb_parent;
98976 unsigned long charged = 0;
98977
98978+#ifdef CONFIG_PAX_SEGMEXEC
98979+ struct vm_area_struct *vma_m = NULL;
98980+#endif
98981+
98982+ /*
98983+ * mm->mmap_sem is required to protect against another thread
98984+ * changing the mappings in case we sleep.
98985+ */
98986+ verify_mm_writelocked(mm);
98987+
98988 /* Check against address space limit. */
98989+
98990+#ifdef CONFIG_PAX_RANDMMAP
98991+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
98992+#endif
98993+
98994 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
98995 unsigned long nr_pages;
98996
98997@@ -1551,11 +1696,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
98998
98999 /* Clear old maps */
99000 error = -ENOMEM;
99001-munmap_back:
99002 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
99003 if (do_munmap(mm, addr, len))
99004 return -ENOMEM;
99005- goto munmap_back;
99006+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
99007 }
99008
99009 /*
99010@@ -1586,6 +1730,16 @@ munmap_back:
99011 goto unacct_error;
99012 }
99013
99014+#ifdef CONFIG_PAX_SEGMEXEC
99015+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
99016+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99017+ if (!vma_m) {
99018+ error = -ENOMEM;
99019+ goto free_vma;
99020+ }
99021+ }
99022+#endif
99023+
99024 vma->vm_mm = mm;
99025 vma->vm_start = addr;
99026 vma->vm_end = addr + len;
99027@@ -1616,6 +1770,13 @@ munmap_back:
99028 if (error)
99029 goto unmap_and_free_vma;
99030
99031+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
99032+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
99033+ vma->vm_flags |= VM_PAGEEXEC;
99034+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
99035+ }
99036+#endif
99037+
99038 /* Can addr have changed??
99039 *
99040 * Answer: Yes, several device drivers can do it in their
99041@@ -1634,6 +1795,12 @@ munmap_back:
99042 }
99043
99044 vma_link(mm, vma, prev, rb_link, rb_parent);
99045+
99046+#ifdef CONFIG_PAX_SEGMEXEC
99047+ if (vma_m)
99048+ BUG_ON(pax_mirror_vma(vma_m, vma));
99049+#endif
99050+
99051 /* Once vma denies write, undo our temporary denial count */
99052 if (file) {
99053 if (vm_flags & VM_SHARED)
99054@@ -1646,6 +1813,7 @@ out:
99055 perf_event_mmap(vma);
99056
99057 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
99058+ track_exec_limit(mm, addr, addr + len, vm_flags);
99059 if (vm_flags & VM_LOCKED) {
99060 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
99061 vma == get_gate_vma(current->mm)))
99062@@ -1683,6 +1851,12 @@ allow_write_and_free_vma:
99063 if (vm_flags & VM_DENYWRITE)
99064 allow_write_access(file);
99065 free_vma:
99066+
99067+#ifdef CONFIG_PAX_SEGMEXEC
99068+ if (vma_m)
99069+ kmem_cache_free(vm_area_cachep, vma_m);
99070+#endif
99071+
99072 kmem_cache_free(vm_area_cachep, vma);
99073 unacct_error:
99074 if (charged)
99075@@ -1690,7 +1864,63 @@ unacct_error:
99076 return error;
99077 }
99078
99079-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
99080+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
99081+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
99082+{
99083+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
99084+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
99085+
99086+ return 0;
99087+}
99088+#endif
99089+
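gr_rand_threadstack_offset() draws 8 bits of randomness and shifts them into page units. A quick standalone check of the resulting range, assuming 4 KiB pages (PAGE_SHIFT 12):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    /* same expression as above, evaluated at its two extremes */
    unsigned long lo = ((0x00u & 0xFF) + 1UL) << PAGE_SHIFT;
    unsigned long hi = ((0xFFu & 0xFF) + 1UL) << PAGE_SHIFT;

    printf("offset range: %lu KiB .. %lu KiB\n", lo >> 10, hi >> 10); /* 4 .. 1024 */
    return 0;
}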
99090+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
99091+{
99092+ if (!vma) {
99093+#ifdef CONFIG_STACK_GROWSUP
99094+ if (addr > sysctl_heap_stack_gap)
99095+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
99096+ else
99097+ vma = find_vma(current->mm, 0);
99098+ if (vma && (vma->vm_flags & VM_GROWSUP))
99099+ return false;
99100+#endif
99101+ return true;
99102+ }
99103+
99104+ if (addr + len > vma->vm_start)
99105+ return false;
99106+
99107+ if (vma->vm_flags & VM_GROWSDOWN)
99108+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
99109+#ifdef CONFIG_STACK_GROWSUP
99110+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
99111+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
99112+#endif
99113+ else if (offset)
99114+ return offset <= vma->vm_start - addr - len;
99115+
99116+ return true;
99117+}
99118+
99119+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
99120+{
99121+ if (vma->vm_start < len)
99122+ return -ENOMEM;
99123+
99124+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
99125+ if (offset <= vma->vm_start - len)
99126+ return vma->vm_start - len - offset;
99127+ else
99128+ return -ENOMEM;
99129+ }
99130+
99131+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
99132+ return vma->vm_start - len - sysctl_heap_stack_gap;
99133+ return -ENOMEM;
99134+}
99135+
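check_heap_stack_gap() is what the arch_get_unmapped_area() hunks further down call in place of the plain addr + len <= vma->vm_start test. A minimal userspace model of its VM_GROWSDOWN branch; the struct, the flag value, and the 64 KiB gap are assumed stand-ins, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x0100UL

struct vma { unsigned long vm_start, vm_end, vm_flags; };

static unsigned long sysctl_heap_stack_gap = 64 * 1024; /* assumed example value */

/* Mirrors the gap check: a mapping [addr, addr+len) may not come within
 * the configured gap of a stack (VM_GROWSDOWN) vma that starts above it. */
static bool gap_ok(const struct vma *vma, unsigned long addr, unsigned long len)
{
    if (!vma)
        return true;                      /* nothing above us */
    if (addr + len > vma->vm_start)
        return false;                     /* plain overlap */
    if (vma->vm_flags & VM_GROWSDOWN)
        return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
    return true;
}

int main(void)
{
    struct vma stack = { 0x7f0000000000UL, 0x7f0000021000UL, VM_GROWSDOWN };

    /* 4 KiB short of the stack: rejected; a full 64 KiB below: accepted. */
    printf("%d\n", gap_ok(&stack, stack.vm_start - 0x1000, 0x1000));  /* 0 */
    printf("%d\n", gap_ok(&stack, stack.vm_start - 0x11000, 0x1000)); /* 1 */
    return 0;
}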
99136+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
99137 {
99138 /*
99139 * We implement the search by looking for an rbtree node that
99140@@ -1738,11 +1968,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
99141 }
99142 }
99143
99144- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
99145+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
99146 check_current:
99147 /* Check if current node has a suitable gap */
99148 if (gap_start > high_limit)
99149 return -ENOMEM;
99150+
99151+ if (gap_end - gap_start > info->threadstack_offset)
99152+ gap_start += info->threadstack_offset;
99153+ else
99154+ gap_start = gap_end;
99155+
99156+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
99157+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99158+ gap_start += sysctl_heap_stack_gap;
99159+ else
99160+ gap_start = gap_end;
99161+ }
99162+ if (vma->vm_flags & VM_GROWSDOWN) {
99163+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99164+ gap_end -= sysctl_heap_stack_gap;
99165+ else
99166+ gap_end = gap_start;
99167+ }
99168 if (gap_end >= low_limit && gap_end - gap_start >= length)
99169 goto found;
99170
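The trimming just added to unmapped_area() shrinks each candidate gap before the size test: shift the bottom up by the per-thread random offset, then reserve the heap/stack gap on whichever side borders a growing stack. A worked example with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned long gap_start = 0x10000, gap_end = 0x60000;
    unsigned long threadstack_offset = 0x3000, heap_stack_gap = 0x10000;
    unsigned long length = 0x20000;

    gap_start += threadstack_offset;   /* randomized shift: now 0x13000 */
    gap_end   -= heap_stack_gap;       /* vma above the gap grows down: now 0x50000 */

    printf("usable %#lx, need %#lx -> %s\n", gap_end - gap_start, length,
           gap_end - gap_start >= length ? "fits" : "try next gap");
    return 0;
}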
99171@@ -1792,7 +2040,7 @@ found:
99172 return gap_start;
99173 }
99174
99175-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
99176+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
99177 {
99178 struct mm_struct *mm = current->mm;
99179 struct vm_area_struct *vma;
99180@@ -1846,6 +2094,24 @@ check_current:
99181 gap_end = vma->vm_start;
99182 if (gap_end < low_limit)
99183 return -ENOMEM;
99184+
99185+ if (gap_end - gap_start > info->threadstack_offset)
99186+ gap_end -= info->threadstack_offset;
99187+ else
99188+ gap_end = gap_start;
99189+
99190+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
99191+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99192+ gap_start += sysctl_heap_stack_gap;
99193+ else
99194+ gap_start = gap_end;
99195+ }
99196+ if (vma->vm_flags & VM_GROWSDOWN) {
99197+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99198+ gap_end -= sysctl_heap_stack_gap;
99199+ else
99200+ gap_end = gap_start;
99201+ }
99202 if (gap_start <= high_limit && gap_end - gap_start >= length)
99203 goto found;
99204
99205@@ -1909,6 +2175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
99206 struct mm_struct *mm = current->mm;
99207 struct vm_area_struct *vma;
99208 struct vm_unmapped_area_info info;
99209+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
99210
99211 if (len > TASK_SIZE - mmap_min_addr)
99212 return -ENOMEM;
99213@@ -1916,11 +2183,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
99214 if (flags & MAP_FIXED)
99215 return addr;
99216
99217+#ifdef CONFIG_PAX_RANDMMAP
99218+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
99219+#endif
99220+
99221 if (addr) {
99222 addr = PAGE_ALIGN(addr);
99223 vma = find_vma(mm, addr);
99224 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
99225- (!vma || addr + len <= vma->vm_start))
99226+ check_heap_stack_gap(vma, addr, len, offset))
99227 return addr;
99228 }
99229
99230@@ -1929,6 +2200,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
99231 info.low_limit = mm->mmap_base;
99232 info.high_limit = TASK_SIZE;
99233 info.align_mask = 0;
99234+ info.threadstack_offset = offset;
99235 return vm_unmapped_area(&info);
99236 }
99237 #endif
99238@@ -1947,6 +2219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99239 struct mm_struct *mm = current->mm;
99240 unsigned long addr = addr0;
99241 struct vm_unmapped_area_info info;
99242+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
99243
99244 /* requested length too big for entire address space */
99245 if (len > TASK_SIZE - mmap_min_addr)
99246@@ -1955,12 +2228,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99247 if (flags & MAP_FIXED)
99248 return addr;
99249
99250+#ifdef CONFIG_PAX_RANDMMAP
99251+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
99252+#endif
99253+
99254 /* requesting a specific address */
99255 if (addr) {
99256 addr = PAGE_ALIGN(addr);
99257 vma = find_vma(mm, addr);
99258 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
99259- (!vma || addr + len <= vma->vm_start))
99260+ check_heap_stack_gap(vma, addr, len, offset))
99261 return addr;
99262 }
99263
99264@@ -1969,6 +2246,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99265 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
99266 info.high_limit = mm->mmap_base;
99267 info.align_mask = 0;
99268+ info.threadstack_offset = offset;
99269 addr = vm_unmapped_area(&info);
99270
99271 /*
99272@@ -1981,6 +2259,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99273 VM_BUG_ON(addr != -ENOMEM);
99274 info.flags = 0;
99275 info.low_limit = TASK_UNMAPPED_BASE;
99276+
99277+#ifdef CONFIG_PAX_RANDMMAP
99278+ if (mm->pax_flags & MF_PAX_RANDMMAP)
99279+ info.low_limit += mm->delta_mmap;
99280+#endif
99281+
99282 info.high_limit = TASK_SIZE;
99283 addr = vm_unmapped_area(&info);
99284 }
99285@@ -2081,6 +2365,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
99286 return vma;
99287 }
99288
99289+#ifdef CONFIG_PAX_SEGMEXEC
99290+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
99291+{
99292+ struct vm_area_struct *vma_m;
99293+
99294+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
99295+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
99296+ BUG_ON(vma->vm_mirror);
99297+ return NULL;
99298+ }
99299+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
99300+ vma_m = vma->vm_mirror;
99301+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
99302+ BUG_ON(vma->vm_file != vma_m->vm_file);
99303+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
99304+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
99305+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
99306+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
99307+ return vma_m;
99308+}
99309+#endif
99310+
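pax_find_mirror_vma() only sanity-checks an invariant that pax_mirror_vma() (added further down in this file) establishes: the executable mirror of a vma lives exactly SEGMEXEC_TASK_SIZE higher. A standalone sketch of that relationship, assuming the usual i386 value of 1.5 GiB:

#include <assert.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL  /* assumed i386 value: 1.5 GiB */

struct vma { unsigned long vm_start, vm_end; const struct vma *vm_mirror; };

int main(void)
{
    struct vma text   = { 0x08048000UL, 0x08050000UL, NULL };
    struct vma mirror = { text.vm_start + SEGMEXEC_TASK_SIZE,
                          text.vm_end   + SEGMEXEC_TASK_SIZE, &text };
    text.vm_mirror = &mirror;

    /* the same sanity conditions the BUG_ON()s above enforce */
    assert(mirror.vm_mirror == &text && text.vm_mirror == &mirror);
    assert(mirror.vm_end - mirror.vm_start == text.vm_end - text.vm_start);
    printf("mirror at %#lx-%#lx\n", mirror.vm_start, mirror.vm_end);
    return 0;
}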
99311 /*
99312 * Verify that the stack growth is acceptable and
99313 * update accounting. This is shared with both the
99314@@ -2098,8 +2404,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
99315
99316 /* Stack limit test */
99317 actual_size = size;
99318- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
99319- actual_size -= PAGE_SIZE;
99320+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
99321 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
99322 return -ENOMEM;
99323
99324@@ -2110,6 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
99325 locked = mm->locked_vm + grow;
99326 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
99327 limit >>= PAGE_SHIFT;
99328+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
99329 if (locked > limit && !capable(CAP_IPC_LOCK))
99330 return -ENOMEM;
99331 }
99332@@ -2139,37 +2445,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
99333 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
99334 * vma is the last one with address > vma->vm_end. Have to extend vma.
99335 */
99336+#ifndef CONFIG_IA64
99337+static
99338+#endif
99339 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
99340 {
99341 int error;
99342+ bool locknext;
99343
99344 if (!(vma->vm_flags & VM_GROWSUP))
99345 return -EFAULT;
99346
99347+ /* Also guard against wrapping around to address 0. */
99348+ if (address < PAGE_ALIGN(address+1))
99349+ address = PAGE_ALIGN(address+1);
99350+ else
99351+ return -ENOMEM;
99352+
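The relocated guard works because PAGE_ALIGN(address + 1) can only fail to exceed address when the addition wrapped past the top of the address space. A standalone check, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long ok  = 0x7fffffffe123UL;  /* normal growth address */
    unsigned long top = ~0UL;              /* address + 1 wraps to 0 */

    /* same comparison as the patch: proceed only if aligning moved us up */
    printf("%d\n", ok  < PAGE_ALIGN(ok + 1));  /* 1: growth proceeds */
    printf("%d\n", top < PAGE_ALIGN(top + 1)); /* 0: wrapped, -ENOMEM */
    return 0;
}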
99353 /*
99354 * We must make sure the anon_vma is allocated
99355 * so that the anon_vma locking is not a noop.
99356 */
99357 if (unlikely(anon_vma_prepare(vma)))
99358 return -ENOMEM;
99359+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
99360+ if (locknext && anon_vma_prepare(vma->vm_next))
99361+ return -ENOMEM;
99362 vma_lock_anon_vma(vma);
99363+ if (locknext)
99364+ vma_lock_anon_vma(vma->vm_next);
99365
99366 /*
99367 * vma->vm_start/vm_end cannot change under us because the caller
99368 * is required to hold the mmap_sem in read mode. We need the
99369- * anon_vma lock to serialize against concurrent expand_stacks.
99370- * Also guard against wrapping around to address 0.
99371+ * anon_vma locks to serialize against concurrent expand_stacks
99372+ * and expand_upwards.
99373 */
99374- if (address < PAGE_ALIGN(address+4))
99375- address = PAGE_ALIGN(address+4);
99376- else {
99377- vma_unlock_anon_vma(vma);
99378- return -ENOMEM;
99379- }
99380 error = 0;
99381
99382 /* Somebody else might have raced and expanded it already */
99383- if (address > vma->vm_end) {
99384+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
99385+ error = -ENOMEM;
99386+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
99387 unsigned long size, grow;
99388
99389 size = address - vma->vm_start;
99390@@ -2204,6 +2521,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
99391 }
99392 }
99393 }
99394+ if (locknext)
99395+ vma_unlock_anon_vma(vma->vm_next);
99396 vma_unlock_anon_vma(vma);
99397 khugepaged_enter_vma_merge(vma, vma->vm_flags);
99398 validate_mm(vma->vm_mm);
99399@@ -2218,6 +2537,8 @@ int expand_downwards(struct vm_area_struct *vma,
99400 unsigned long address)
99401 {
99402 int error;
99403+ bool lockprev = false;
99404+ struct vm_area_struct *prev;
99405
99406 /*
99407 * We must make sure the anon_vma is allocated
99408@@ -2231,6 +2552,15 @@ int expand_downwards(struct vm_area_struct *vma,
99409 if (error)
99410 return error;
99411
99412+ prev = vma->vm_prev;
99413+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
99414+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
99415+#endif
99416+ if (lockprev && anon_vma_prepare(prev))
99417+ return -ENOMEM;
99418+ if (lockprev)
99419+ vma_lock_anon_vma(prev);
99420+
99421 vma_lock_anon_vma(vma);
99422
99423 /*
99424@@ -2240,9 +2570,17 @@ int expand_downwards(struct vm_area_struct *vma,
99425 */
99426
99427 /* Somebody else might have raced and expanded it already */
99428- if (address < vma->vm_start) {
99429+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
99430+ error = -ENOMEM;
99431+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
99432 unsigned long size, grow;
99433
99434+#ifdef CONFIG_PAX_SEGMEXEC
99435+ struct vm_area_struct *vma_m;
99436+
99437+ vma_m = pax_find_mirror_vma(vma);
99438+#endif
99439+
99440 size = vma->vm_end - address;
99441 grow = (vma->vm_start - address) >> PAGE_SHIFT;
99442
99443@@ -2267,13 +2605,27 @@ int expand_downwards(struct vm_area_struct *vma,
99444 vma->vm_pgoff -= grow;
99445 anon_vma_interval_tree_post_update_vma(vma);
99446 vma_gap_update(vma);
99447+
99448+#ifdef CONFIG_PAX_SEGMEXEC
99449+ if (vma_m) {
99450+ anon_vma_interval_tree_pre_update_vma(vma_m);
99451+ vma_m->vm_start -= grow << PAGE_SHIFT;
99452+ vma_m->vm_pgoff -= grow;
99453+ anon_vma_interval_tree_post_update_vma(vma_m);
99454+ vma_gap_update(vma_m);
99455+ }
99456+#endif
99457+
99458 spin_unlock(&vma->vm_mm->page_table_lock);
99459
99460+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
99461 perf_event_mmap(vma);
99462 }
99463 }
99464 }
99465 vma_unlock_anon_vma(vma);
99466+ if (lockprev)
99467+ vma_unlock_anon_vma(prev);
99468 khugepaged_enter_vma_merge(vma, vma->vm_flags);
99469 validate_mm(vma->vm_mm);
99470 return error;
99471@@ -2373,6 +2725,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
99472 do {
99473 long nrpages = vma_pages(vma);
99474
99475+#ifdef CONFIG_PAX_SEGMEXEC
99476+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
99477+ vma = remove_vma(vma);
99478+ continue;
99479+ }
99480+#endif
99481+
99482 if (vma->vm_flags & VM_ACCOUNT)
99483 nr_accounted += nrpages;
99484 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
99485@@ -2417,6 +2776,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
99486 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
99487 vma->vm_prev = NULL;
99488 do {
99489+
99490+#ifdef CONFIG_PAX_SEGMEXEC
99491+ if (vma->vm_mirror) {
99492+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
99493+ vma->vm_mirror->vm_mirror = NULL;
99494+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
99495+ vma->vm_mirror = NULL;
99496+ }
99497+#endif
99498+
99499 vma_rb_erase(vma, &mm->mm_rb);
99500 mm->map_count--;
99501 tail_vma = vma;
99502@@ -2444,14 +2813,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99503 struct vm_area_struct *new;
99504 int err = -ENOMEM;
99505
99506+#ifdef CONFIG_PAX_SEGMEXEC
99507+ struct vm_area_struct *vma_m, *new_m = NULL;
99508+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
99509+#endif
99510+
99511 if (is_vm_hugetlb_page(vma) && (addr &
99512 ~(huge_page_mask(hstate_vma(vma)))))
99513 return -EINVAL;
99514
99515+#ifdef CONFIG_PAX_SEGMEXEC
99516+ vma_m = pax_find_mirror_vma(vma);
99517+#endif
99518+
99519 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
99520 if (!new)
99521 goto out_err;
99522
99523+#ifdef CONFIG_PAX_SEGMEXEC
99524+ if (vma_m) {
99525+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
99526+ if (!new_m) {
99527+ kmem_cache_free(vm_area_cachep, new);
99528+ goto out_err;
99529+ }
99530+ }
99531+#endif
99532+
99533 /* most fields are the same, copy all, and then fixup */
99534 *new = *vma;
99535
99536@@ -2464,6 +2852,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99537 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
99538 }
99539
99540+#ifdef CONFIG_PAX_SEGMEXEC
99541+ if (vma_m) {
99542+ *new_m = *vma_m;
99543+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
99544+ new_m->vm_mirror = new;
99545+ new->vm_mirror = new_m;
99546+
99547+ if (new_below)
99548+ new_m->vm_end = addr_m;
99549+ else {
99550+ new_m->vm_start = addr_m;
99551+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
99552+ }
99553+ }
99554+#endif
99555+
99556 err = vma_dup_policy(vma, new);
99557 if (err)
99558 goto out_free_vma;
99559@@ -2484,6 +2888,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99560 else
99561 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
99562
99563+#ifdef CONFIG_PAX_SEGMEXEC
99564+ if (!err && vma_m) {
99565+ struct mempolicy *pol = vma_policy(new);
99566+
99567+ if (anon_vma_clone(new_m, vma_m))
99568+ goto out_free_mpol;
99569+
99570+ mpol_get(pol);
99571+ set_vma_policy(new_m, pol);
99572+
99573+ if (new_m->vm_file)
99574+ get_file(new_m->vm_file);
99575+
99576+ if (new_m->vm_ops && new_m->vm_ops->open)
99577+ new_m->vm_ops->open(new_m);
99578+
99579+ if (new_below)
99580+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
99581+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
99582+ else
99583+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
99584+
99585+ if (err) {
99586+ if (new_m->vm_ops && new_m->vm_ops->close)
99587+ new_m->vm_ops->close(new_m);
99588+ if (new_m->vm_file)
99589+ fput(new_m->vm_file);
99590+ mpol_put(pol);
99591+ }
99592+ }
99593+#endif
99594+
99595 /* Success. */
99596 if (!err)
99597 return 0;
99598@@ -2493,10 +2929,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99599 new->vm_ops->close(new);
99600 if (new->vm_file)
99601 fput(new->vm_file);
99602- unlink_anon_vmas(new);
99603 out_free_mpol:
99604 mpol_put(vma_policy(new));
99605 out_free_vma:
99606+
99607+#ifdef CONFIG_PAX_SEGMEXEC
99608+ if (new_m) {
99609+ unlink_anon_vmas(new_m);
99610+ kmem_cache_free(vm_area_cachep, new_m);
99611+ }
99612+#endif
99613+
99614+ unlink_anon_vmas(new);
99615 kmem_cache_free(vm_area_cachep, new);
99616 out_err:
99617 return err;
99618@@ -2509,6 +2953,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99619 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99620 unsigned long addr, int new_below)
99621 {
99622+
99623+#ifdef CONFIG_PAX_SEGMEXEC
99624+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
99625+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
99626+ if (mm->map_count >= sysctl_max_map_count-1)
99627+ return -ENOMEM;
99628+ } else
99629+#endif
99630+
99631 if (mm->map_count >= sysctl_max_map_count)
99632 return -ENOMEM;
99633
99634@@ -2520,11 +2973,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99635 * work. This now handles partial unmappings.
99636 * Jeremy Fitzhardinge <jeremy@goop.org>
99637 */
99638+#ifdef CONFIG_PAX_SEGMEXEC
99639 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99640 {
99641+ int ret = __do_munmap(mm, start, len);
99642+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
99643+ return ret;
99644+
99645+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
99646+}
99647+
99648+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99649+#else
99650+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99651+#endif
99652+{
99653 unsigned long end;
99654 struct vm_area_struct *vma, *prev, *last;
99655
99656+ /*
99657+ * mm->mmap_sem is required to protect against another thread
99658+ * changing the mappings in case we sleep.
99659+ */
99660+ verify_mm_writelocked(mm);
99661+
99662 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
99663 return -EINVAL;
99664
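The do_munmap() wrapper in this hunk repeats every unmap at the SEGMEXEC mirror address, so the executable mirror can never outlive the mapping it shadows. A minimal sketch of the pattern; do_real_munmap() is a stand-in for the kernel's __do_munmap():

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL  /* assumed i386 value */

static int do_real_munmap(unsigned long start, unsigned long len)
{
    printf("unmap %#lx-%#lx\n", start, start + len);
    return 0;
}

static int do_munmap_segmexec(unsigned long start, unsigned long len)
{
    int ret = do_real_munmap(start, len);

    if (ret)
        return ret;
    /* drop the mirror of the same range as well */
    return do_real_munmap(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
    return do_munmap_segmexec(0x08048000UL, 0x1000);
}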
99665@@ -2602,6 +3074,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99666 /* Fix up all other VM information */
99667 remove_vma_list(mm, vma);
99668
99669+ track_exec_limit(mm, start, end, 0UL);
99670+
99671 return 0;
99672 }
99673
99674@@ -2610,6 +3084,13 @@ int vm_munmap(unsigned long start, size_t len)
99675 int ret;
99676 struct mm_struct *mm = current->mm;
99677
99678+
99679+#ifdef CONFIG_PAX_SEGMEXEC
99680+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
99681+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
99682+ return -EINVAL;
99683+#endif
99684+
99685 down_write(&mm->mmap_sem);
99686 ret = do_munmap(mm, start, len);
99687 up_write(&mm->mmap_sem);
99688@@ -2656,6 +3137,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
99689 down_write(&mm->mmap_sem);
99690 vma = find_vma(mm, start);
99691
99692+#ifdef CONFIG_PAX_SEGMEXEC
99693+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
99694+ goto out;
99695+#endif
99696+
99697 if (!vma || !(vma->vm_flags & VM_SHARED))
99698 goto out;
99699
99700@@ -2692,16 +3178,6 @@ out:
99701 return ret;
99702 }
99703
99704-static inline void verify_mm_writelocked(struct mm_struct *mm)
99705-{
99706-#ifdef CONFIG_DEBUG_VM
99707- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
99708- WARN_ON(1);
99709- up_read(&mm->mmap_sem);
99710- }
99711-#endif
99712-}
99713-
99714 /*
99715 * this is really a simplified "do_mmap". it only handles
99716 * anonymous maps. eventually we may be able to do some
99717@@ -2715,6 +3191,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99718 struct rb_node **rb_link, *rb_parent;
99719 pgoff_t pgoff = addr >> PAGE_SHIFT;
99720 int error;
99721+ unsigned long charged;
99722
99723 len = PAGE_ALIGN(len);
99724 if (!len)
99725@@ -2722,10 +3199,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99726
99727 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
99728
99729+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
99730+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
99731+ flags &= ~VM_EXEC;
99732+
99733+#ifdef CONFIG_PAX_MPROTECT
99734+ if (mm->pax_flags & MF_PAX_MPROTECT)
99735+ flags &= ~VM_MAYEXEC;
99736+#endif
99737+
99738+ }
99739+#endif
99740+
99741 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
99742 if (error & ~PAGE_MASK)
99743 return error;
99744
99745+ charged = len >> PAGE_SHIFT;
99746+
99747 error = mlock_future_check(mm, mm->def_flags, len);
99748 if (error)
99749 return error;
99750@@ -2739,21 +3230,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99751 /*
99752 * Clear old maps. this also does some error checking for us
99753 */
99754- munmap_back:
99755 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
99756 if (do_munmap(mm, addr, len))
99757 return -ENOMEM;
99758- goto munmap_back;
99759+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
99760 }
99761
99762 /* Check against address space limits *after* clearing old maps... */
99763- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
99764+ if (!may_expand_vm(mm, charged))
99765 return -ENOMEM;
99766
99767 if (mm->map_count > sysctl_max_map_count)
99768 return -ENOMEM;
99769
99770- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
99771+ if (security_vm_enough_memory_mm(mm, charged))
99772 return -ENOMEM;
99773
99774 /* Can we just expand an old private anonymous mapping? */
99775@@ -2767,7 +3257,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99776 */
99777 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99778 if (!vma) {
99779- vm_unacct_memory(len >> PAGE_SHIFT);
99780+ vm_unacct_memory(charged);
99781 return -ENOMEM;
99782 }
99783
99784@@ -2781,10 +3271,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99785 vma_link(mm, vma, prev, rb_link, rb_parent);
99786 out:
99787 perf_event_mmap(vma);
99788- mm->total_vm += len >> PAGE_SHIFT;
99789+ mm->total_vm += charged;
99790 if (flags & VM_LOCKED)
99791- mm->locked_vm += (len >> PAGE_SHIFT);
99792+ mm->locked_vm += charged;
99793 vma->vm_flags |= VM_SOFTDIRTY;
99794+ track_exec_limit(mm, addr, addr + len, flags);
99795 return addr;
99796 }
99797
99798@@ -2846,6 +3337,7 @@ void exit_mmap(struct mm_struct *mm)
99799 while (vma) {
99800 if (vma->vm_flags & VM_ACCOUNT)
99801 nr_accounted += vma_pages(vma);
99802+ vma->vm_mirror = NULL;
99803 vma = remove_vma(vma);
99804 }
99805 vm_unacct_memory(nr_accounted);
99806@@ -2860,6 +3352,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
99807 struct vm_area_struct *prev;
99808 struct rb_node **rb_link, *rb_parent;
99809
99810+#ifdef CONFIG_PAX_SEGMEXEC
99811+ struct vm_area_struct *vma_m = NULL;
99812+#endif
99813+
99814+ if (security_mmap_addr(vma->vm_start))
99815+ return -EPERM;
99816+
99817 /*
99818 * The vm_pgoff of a purely anonymous vma should be irrelevant
99819 * until its first write fault, when page's anon_vma and index
99820@@ -2883,7 +3382,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
99821 security_vm_enough_memory_mm(mm, vma_pages(vma)))
99822 return -ENOMEM;
99823
99824+#ifdef CONFIG_PAX_SEGMEXEC
99825+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
99826+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99827+ if (!vma_m)
99828+ return -ENOMEM;
99829+ }
99830+#endif
99831+
99832 vma_link(mm, vma, prev, rb_link, rb_parent);
99833+
99834+#ifdef CONFIG_PAX_SEGMEXEC
99835+ if (vma_m)
99836+ BUG_ON(pax_mirror_vma(vma_m, vma));
99837+#endif
99838+
99839 return 0;
99840 }
99841
99842@@ -2902,6 +3415,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
99843 struct rb_node **rb_link, *rb_parent;
99844 bool faulted_in_anon_vma = true;
99845
99846+ BUG_ON(vma->vm_mirror);
99847+
99848 /*
99849 * If anonymous vma has not yet been faulted, update new pgoff
99850 * to match new location, to increase its chance of merging.
99851@@ -2966,6 +3481,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
99852 return NULL;
99853 }
99854
99855+#ifdef CONFIG_PAX_SEGMEXEC
99856+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
99857+{
99858+ struct vm_area_struct *prev_m;
99859+ struct rb_node **rb_link_m, *rb_parent_m;
99860+ struct mempolicy *pol_m;
99861+
99862+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
99863+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
99864+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
99865+ *vma_m = *vma;
99866+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
99867+ if (anon_vma_clone(vma_m, vma))
99868+ return -ENOMEM;
99869+ pol_m = vma_policy(vma_m);
99870+ mpol_get(pol_m);
99871+ set_vma_policy(vma_m, pol_m);
99872+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
99873+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
99874+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
99875+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
99876+ if (vma_m->vm_file)
99877+ get_file(vma_m->vm_file);
99878+ if (vma_m->vm_ops && vma_m->vm_ops->open)
99879+ vma_m->vm_ops->open(vma_m);
99880+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
99881+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
99882+ vma_m->vm_mirror = vma;
99883+ vma->vm_mirror = vma_m;
99884+ return 0;
99885+}
99886+#endif
99887+
99888 /*
99889 * Return true if the calling process may expand its vm space by the passed
99890 * number of pages
99891@@ -2977,6 +3525,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
99892
99893 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
99894
99895+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
99896 if (cur + npages > lim)
99897 return 0;
99898 return 1;
99899@@ -3059,6 +3608,22 @@ static struct vm_area_struct *__install_special_mapping(
99900 vma->vm_start = addr;
99901 vma->vm_end = addr + len;
99902
99903+#ifdef CONFIG_PAX_MPROTECT
99904+ if (mm->pax_flags & MF_PAX_MPROTECT) {
99905+#ifndef CONFIG_PAX_MPROTECT_COMPAT
99906+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
99907+ return ERR_PTR(-EPERM);
99908+ if (!(vm_flags & VM_EXEC))
99909+ vm_flags &= ~VM_MAYEXEC;
99910+#else
99911+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
99912+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
99913+#endif
99914+ else
99915+ vm_flags &= ~VM_MAYWRITE;
99916+ }
99917+#endif
99918+
99919 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
99920 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
99921
99922diff --git a/mm/mprotect.c b/mm/mprotect.c
99923index 8858483..8145fa5 100644
99924--- a/mm/mprotect.c
99925+++ b/mm/mprotect.c
99926@@ -24,10 +24,18 @@
99927 #include <linux/migrate.h>
99928 #include <linux/perf_event.h>
99929 #include <linux/ksm.h>
99930+#include <linux/sched/sysctl.h>
99931+
99932+#ifdef CONFIG_PAX_MPROTECT
99933+#include <linux/elf.h>
99934+#include <linux/binfmts.h>
99935+#endif
99936+
99937 #include <asm/uaccess.h>
99938 #include <asm/pgtable.h>
99939 #include <asm/cacheflush.h>
99940 #include <asm/tlbflush.h>
99941+#include <asm/mmu_context.h>
99942
99943 /*
99944 * For a prot_numa update we only hold mmap_sem for read so there is a
99945@@ -252,6 +260,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
99946 return pages;
99947 }
99948
99949+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
99950+/* called while holding the mmap semaphore for writing, except during stack expansion */

99951+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
99952+{
99953+ unsigned long oldlimit, newlimit = 0UL;
99954+
99955+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
99956+ return;
99957+
99958+ spin_lock(&mm->page_table_lock);
99959+ oldlimit = mm->context.user_cs_limit;
99960+ if ((prot & VM_EXEC) && oldlimit < end)
99961+ /* USER_CS limit moved up */
99962+ newlimit = end;
99963+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
99964+ /* USER_CS limit moved down */
99965+ newlimit = start;
99966+
99967+ if (newlimit) {
99968+ mm->context.user_cs_limit = newlimit;
99969+
99970+#ifdef CONFIG_SMP
99971+ wmb();
99972+ cpus_clear(mm->context.cpu_user_cs_mask);
99973+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
99974+#endif
99975+
99976+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
99977+ }
99978+ spin_unlock(&mm->page_table_lock);
99979+ if (newlimit == end) {
99980+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
99981+
99982+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
99983+ if (is_vm_hugetlb_page(vma))
99984+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
99985+ else
99986+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
99987+ }
99988+}
99989+#endif
99990+
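track_exec_limit() is the heart of the segment-based NX emulation on x86 CPUs without hardware NX: the USER_CS limit is kept just above the highest executable mapping, so instruction fetches beyond it fault. A standalone model of only the limit-update rules, with assumed addresses:

#include <stdio.h>

#define VM_EXEC 0x4UL

static unsigned long user_cs_limit = 0x08050000UL; /* assumed current limit */

static void track(unsigned long start, unsigned long end, unsigned long prot)
{
    if ((prot & VM_EXEC) && user_cs_limit < end)
        user_cs_limit = end;    /* new exec mapping above the limit */
    else if (!(prot & VM_EXEC) && start < user_cs_limit && user_cs_limit <= end)
        user_cs_limit = start;  /* the topmost exec region went away */
}

int main(void)
{
    track(0x40000000UL, 0x40010000UL, VM_EXEC); /* limit moves up */
    printf("%#lx\n", user_cs_limit);            /* 0x40010000 */
    track(0x40000000UL, 0x40010000UL, 0);       /* exec removed again */
    printf("%#lx\n", user_cs_limit);            /* 0x40000000 */
    return 0;
}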
99991 int
99992 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99993 unsigned long start, unsigned long end, unsigned long newflags)
99994@@ -264,11 +314,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99995 int error;
99996 int dirty_accountable = 0;
99997
99998+#ifdef CONFIG_PAX_SEGMEXEC
99999+ struct vm_area_struct *vma_m = NULL;
100000+ unsigned long start_m, end_m;
100001+
100002+ start_m = start + SEGMEXEC_TASK_SIZE;
100003+ end_m = end + SEGMEXEC_TASK_SIZE;
100004+#endif
100005+
100006 if (newflags == oldflags) {
100007 *pprev = vma;
100008 return 0;
100009 }
100010
100011+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
100012+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
100013+
100014+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
100015+ return -ENOMEM;
100016+
100017+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
100018+ return -ENOMEM;
100019+ }
100020+
100021 /*
100022 * If we make a private mapping writable we increase our commit;
100023 * but (without finer accounting) cannot reduce our commit if we
100024@@ -285,6 +353,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
100025 }
100026 }
100027
100028+#ifdef CONFIG_PAX_SEGMEXEC
100029+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
100030+ if (start != vma->vm_start) {
100031+ error = split_vma(mm, vma, start, 1);
100032+ if (error)
100033+ goto fail;
100034+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
100035+ *pprev = (*pprev)->vm_next;
100036+ }
100037+
100038+ if (end != vma->vm_end) {
100039+ error = split_vma(mm, vma, end, 0);
100040+ if (error)
100041+ goto fail;
100042+ }
100043+
100044+ if (pax_find_mirror_vma(vma)) {
100045+ error = __do_munmap(mm, start_m, end_m - start_m);
100046+ if (error)
100047+ goto fail;
100048+ } else {
100049+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
100050+ if (!vma_m) {
100051+ error = -ENOMEM;
100052+ goto fail;
100053+ }
100054+ vma->vm_flags = newflags;
100055+ error = pax_mirror_vma(vma_m, vma);
100056+ if (error) {
100057+ vma->vm_flags = oldflags;
100058+ goto fail;
100059+ }
100060+ }
100061+ }
100062+#endif
100063+
100064 /*
100065 * First try to merge with previous and/or next vma.
100066 */
100067@@ -315,7 +419,19 @@ success:
100068 * vm_flags and vm_page_prot are protected by the mmap_sem
100069 * held in write mode.
100070 */
100071+
100072+#ifdef CONFIG_PAX_SEGMEXEC
100073+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
100074+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
100075+#endif
100076+
100077 vma->vm_flags = newflags;
100078+
100079+#ifdef CONFIG_PAX_MPROTECT
100080+ if (mm->binfmt && mm->binfmt->handle_mprotect)
100081+ mm->binfmt->handle_mprotect(vma, newflags);
100082+#endif
100083+
100084 dirty_accountable = vma_wants_writenotify(vma);
100085 vma_set_page_prot(vma);
100086
100087@@ -351,6 +467,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100088 end = start + len;
100089 if (end <= start)
100090 return -ENOMEM;
100091+
100092+#ifdef CONFIG_PAX_SEGMEXEC
100093+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
100094+ if (end > SEGMEXEC_TASK_SIZE)
100095+ return -EINVAL;
100096+ } else
100097+#endif
100098+
100099+ if (end > TASK_SIZE)
100100+ return -EINVAL;
100101+
100102 if (!arch_validate_prot(prot))
100103 return -EINVAL;
100104
100105@@ -358,7 +485,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100106 /*
100107 * Does the application expect PROT_READ to imply PROT_EXEC:
100108 */
100109- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
100110+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
100111 prot |= PROT_EXEC;
100112
100113 vm_flags = calc_vm_prot_bits(prot);
100114@@ -390,6 +517,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100115 if (start > vma->vm_start)
100116 prev = vma;
100117
100118+#ifdef CONFIG_PAX_MPROTECT
100119+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
100120+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
100121+#endif
100122+
100123 for (nstart = start ; ; ) {
100124 unsigned long newflags;
100125
100126@@ -400,6 +532,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100127
100128 /* newflags >> 4 shift VM_MAY% in place of VM_% */
100129 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
100130+ if (prot & (PROT_WRITE | PROT_EXEC))
100131+ gr_log_rwxmprotect(vma);
100132+
100133+ error = -EACCES;
100134+ goto out;
100135+ }
100136+
100137+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
100138 error = -EACCES;
100139 goto out;
100140 }
100141@@ -414,6 +554,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100142 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
100143 if (error)
100144 goto out;
100145+
100146+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
100147+
100148 nstart = tmp;
100149
100150 if (nstart < prev->vm_end)
100151diff --git a/mm/mremap.c b/mm/mremap.c
100152index 2dc44b1..caa1819 100644
100153--- a/mm/mremap.c
100154+++ b/mm/mremap.c
100155@@ -142,6 +142,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
100156 continue;
100157 pte = ptep_get_and_clear(mm, old_addr, old_pte);
100158 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
100159+
100160+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
100161+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
100162+ pte = pte_exprotect(pte);
100163+#endif
100164+
100165 pte = move_soft_dirty_pte(pte);
100166 set_pte_at(mm, new_addr, new_pte, pte);
100167 }
100168@@ -350,6 +356,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
100169 if (is_vm_hugetlb_page(vma))
100170 goto Einval;
100171
100172+#ifdef CONFIG_PAX_SEGMEXEC
100173+ if (pax_find_mirror_vma(vma))
100174+ goto Einval;
100175+#endif
100176+
100177 /* We can't remap across vm area boundaries */
100178 if (old_len > vma->vm_end - addr)
100179 goto Efault;
100180@@ -405,20 +416,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
100181 unsigned long ret = -EINVAL;
100182 unsigned long charged = 0;
100183 unsigned long map_flags;
100184+ unsigned long pax_task_size = TASK_SIZE;
100185
100186 if (new_addr & ~PAGE_MASK)
100187 goto out;
100188
100189- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
100190+#ifdef CONFIG_PAX_SEGMEXEC
100191+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
100192+ pax_task_size = SEGMEXEC_TASK_SIZE;
100193+#endif
100194+
100195+ pax_task_size -= PAGE_SIZE;
100196+
100197+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
100198 goto out;
100199
100200 /* Check if the location we're moving into overlaps the
100201 * old location at all, and fail if it does.
100202 */
100203- if ((new_addr <= addr) && (new_addr+new_len) > addr)
100204- goto out;
100205-
100206- if ((addr <= new_addr) && (addr+old_len) > new_addr)
100207+ if (addr + old_len > new_addr && new_addr + new_len > addr)
100208 goto out;
100209
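The rewritten condition is the canonical overlap test for two half-open ranges: [a, a+alen) and [b, b+blen) intersect exactly when a + alen > b and b + blen > a, which folds the two removed checks into one symmetric expression. A quick standalone verification:

#include <stdbool.h>
#include <stdio.h>

static bool overlaps(unsigned long a, unsigned long alen,
                     unsigned long b, unsigned long blen)
{
    return a + alen > b && b + blen > a;
}

int main(void)
{
    printf("%d\n", overlaps(0x1000, 0x2000, 0x3000, 0x1000)); /* 0: adjacent */
    printf("%d\n", overlaps(0x1000, 0x2001, 0x3000, 0x1000)); /* 1: one byte in */
    printf("%d\n", overlaps(0x3000, 0x1000, 0x1000, 0x2000)); /* 0: symmetric */
    return 0;
}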
100210 ret = do_munmap(mm, new_addr, new_len);
100211@@ -487,6 +503,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100212 unsigned long ret = -EINVAL;
100213 unsigned long charged = 0;
100214 bool locked = false;
100215+ unsigned long pax_task_size = TASK_SIZE;
100216
100217 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
100218 return ret;
100219@@ -508,6 +525,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100220 if (!new_len)
100221 return ret;
100222
100223+#ifdef CONFIG_PAX_SEGMEXEC
100224+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
100225+ pax_task_size = SEGMEXEC_TASK_SIZE;
100226+#endif
100227+
100228+ pax_task_size -= PAGE_SIZE;
100229+
100230+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
100231+ old_len > pax_task_size || addr > pax_task_size-old_len)
100232+ return ret;
100233+
100234 down_write(&current->mm->mmap_sem);
100235
100236 if (flags & MREMAP_FIXED) {
100237@@ -558,6 +586,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100238 new_addr = addr;
100239 }
100240 ret = addr;
100241+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
100242 goto out;
100243 }
100244 }
100245@@ -581,7 +610,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100246 goto out;
100247 }
100248
100249+ map_flags = vma->vm_flags;
100250 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
100251+ if (!(ret & ~PAGE_MASK)) {
100252+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
100253+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
100254+ }
100255 }
100256 out:
100257 if (ret & ~PAGE_MASK)
100258diff --git a/mm/nommu.c b/mm/nommu.c
100259index 3fba2dc..fdad748 100644
100260--- a/mm/nommu.c
100261+++ b/mm/nommu.c
100262@@ -72,7 +72,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
100263 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
100264 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
100265 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
100266-int heap_stack_gap = 0;
100267
100268 atomic_long_t mmap_pages_allocated;
100269
100270@@ -892,15 +891,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
100271 EXPORT_SYMBOL(find_vma);
100272
100273 /*
100274- * find a VMA
100275- * - we don't extend stack VMAs under NOMMU conditions
100276- */
100277-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
100278-{
100279- return find_vma(mm, addr);
100280-}
100281-
100282-/*
100283 * expand a stack to a given address
100284 * - not supported under NOMMU conditions
100285 */
100286@@ -1585,6 +1575,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
100287
100288 /* most fields are the same, copy all, and then fixup */
100289 *new = *vma;
100290+ INIT_LIST_HEAD(&new->anon_vma_chain);
100291 *region = *vma->vm_region;
100292 new->vm_region = region;
100293
100294@@ -2007,8 +1998,8 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
100295 }
100296 EXPORT_SYMBOL(filemap_map_pages);
100297
100298-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100299- unsigned long addr, void *buf, int len, int write)
100300+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100301+ unsigned long addr, void *buf, size_t len, int write)
100302 {
100303 struct vm_area_struct *vma;
100304
100305@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100306 *
100307 * The caller must hold a reference on @mm.
100308 */
100309-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100310- void *buf, int len, int write)
100311+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
100312+ void *buf, size_t len, int write)
100313 {
100314 return __access_remote_vm(NULL, mm, addr, buf, len, write);
100315 }
100316@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100317 * Access another process' address space.
100318 * - source/target buffer must be kernel space
100319 */
100320-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
100321+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
100322 {
100323 struct mm_struct *mm;
100324
100325diff --git a/mm/page-writeback.c b/mm/page-writeback.c
100326index ad05f2f..cee723a 100644
100327--- a/mm/page-writeback.c
100328+++ b/mm/page-writeback.c
100329@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
100330 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
100331 * - the bdi dirty thresh drops quickly due to change of JBOD workload
100332 */
100333-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
100334+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
100335 unsigned long thresh,
100336 unsigned long bg_thresh,
100337 unsigned long dirty,
100338diff --git a/mm/page_alloc.c b/mm/page_alloc.c
100339index 40e2942..0eb29a2 100644
100340--- a/mm/page_alloc.c
100341+++ b/mm/page_alloc.c
100342@@ -61,6 +61,7 @@
100343 #include <linux/hugetlb.h>
100344 #include <linux/sched/rt.h>
100345 #include <linux/page_owner.h>
100346+#include <linux/random.h>
100347
100348 #include <asm/sections.h>
100349 #include <asm/tlbflush.h>
100350@@ -357,7 +358,7 @@ out:
100351 * This usage means that zero-order pages may not be compound.
100352 */
100353
100354-static void free_compound_page(struct page *page)
100355+void free_compound_page(struct page *page)
100356 {
100357 __free_pages_ok(page, compound_order(page));
100358 }
100359@@ -480,7 +481,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
100360 __mod_zone_freepage_state(zone, (1 << order), migratetype);
100361 }
100362 #else
100363-struct page_ext_operations debug_guardpage_ops = { NULL, };
100364+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
100365 static inline void set_page_guard(struct zone *zone, struct page *page,
100366 unsigned int order, int migratetype) {}
100367 static inline void clear_page_guard(struct zone *zone, struct page *page,
100368@@ -783,6 +784,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
100369 bool compound = PageCompound(page);
100370 int i, bad = 0;
100371
100372+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100373+ unsigned long index = 1UL << order;
100374+#endif
100375+
100376 VM_BUG_ON_PAGE(PageTail(page), page);
100377 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
100378
100379@@ -809,6 +814,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
100380 debug_check_no_obj_freed(page_address(page),
100381 PAGE_SIZE << order);
100382 }
100383+
100384+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100385+ for (; index; --index)
100386+ sanitize_highpage(page + index - 1);
100387+#endif
100388+
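With PAX_MEMORY_SANITIZE every page of a freed 2^order block is scrubbed, which is also why the prep_new_page() hunk below can skip the __GFP_ZERO work. A minimal userspace model; memset() is a stand-in for the kernel's sanitize_highpage():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void sanitize_block(unsigned char *pages, unsigned int order)
{
    unsigned long index = 1UL << order;

    /* walk the block back to front, as the loop above does */
    for (; index; --index)
        memset(pages + (index - 1) * PAGE_SIZE, 0, PAGE_SIZE);
}

int main(void)
{
    static unsigned char block[4 * PAGE_SIZE];

    block[123] = 0xAA;
    sanitize_block(block, 2);   /* order-2 block = 4 pages */
    printf("%d\n", block[123]); /* 0: stale data gone */
    return 0;
}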
100389 arch_free_page(page, order);
100390 kernel_map_pages(page, 1 << order, 0);
100391
100392@@ -832,6 +843,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
100393 local_irq_restore(flags);
100394 }
100395
100396+#ifdef CONFIG_PAX_LATENT_ENTROPY
100397+bool __meminitdata extra_latent_entropy;
100398+
100399+static int __init setup_pax_extra_latent_entropy(char *str)
100400+{
100401+ extra_latent_entropy = true;
100402+ return 0;
100403+}
100404+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
100405+
100406+volatile u64 latent_entropy __latent_entropy;
100407+EXPORT_SYMBOL(latent_entropy);
100408+#endif
100409+
100410 void __init __free_pages_bootmem(struct page *page, unsigned int order)
100411 {
100412 unsigned int nr_pages = 1 << order;
100413@@ -847,6 +872,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
100414 __ClearPageReserved(p);
100415 set_page_count(p, 0);
100416
100417+#ifdef CONFIG_PAX_LATENT_ENTROPY
100418+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
100419+ u64 hash = 0;
100420+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
100421+ const u64 *data = lowmem_page_address(page);
100422+
100423+ for (index = 0; index < end; index++)
100424+ hash ^= hash + data[index];
100425+ latent_entropy ^= hash;
100426+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
100427+ }
100428+#endif
100429+
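The loop above folds early boot page contents into the latent entropy word with a deliberately cheap, non-cryptographic mix; it only needs to be hard to predict, not collision resistant. A userspace sketch of the same update step:

#include <stdint.h>
#include <stdio.h>

static uint64_t latent_entropy;

static void mix_page(const uint64_t *data, size_t words)
{
    uint64_t hash = 0;
    size_t i;

    for (i = 0; i < words; i++)
        hash ^= hash + data[i]; /* same update step as the patch */
    latent_entropy ^= hash;
}

int main(void)
{
    uint64_t page[512] = { 0xdeadbeefULL, 42 }; /* 4 KiB, mostly zeroes */

    mix_page(page, 512);
    printf("%#llx\n", (unsigned long long)latent_entropy);
    return 0;
}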
100430 page_zone(page)->managed_pages += nr_pages;
100431 set_page_refcounted(page);
100432 __free_pages(page, order);
100433@@ -974,8 +1012,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
100434 kernel_map_pages(page, 1 << order, 1);
100435 kasan_alloc_pages(page, order);
100436
100437+#ifndef CONFIG_PAX_MEMORY_SANITIZE
100438 if (gfp_flags & __GFP_ZERO)
100439 prep_zero_page(page, order, gfp_flags);
100440+#endif
100441
100442 if (order && (gfp_flags & __GFP_COMP))
100443 prep_compound_page(page, order);
100444@@ -1699,7 +1739,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
100445 }
100446
100447 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
100448- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
100449+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
100450 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
100451 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
100452
100453@@ -2018,7 +2058,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
100454 do {
100455 mod_zone_page_state(zone, NR_ALLOC_BATCH,
100456 high_wmark_pages(zone) - low_wmark_pages(zone) -
100457- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
100458+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
100459 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
100460 } while (zone++ != preferred_zone);
100461 }
100462@@ -5738,7 +5778,7 @@ static void __setup_per_zone_wmarks(void)
100463
100464 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
100465 high_wmark_pages(zone) - low_wmark_pages(zone) -
100466- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
100467+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
100468
100469 setup_zone_migrate_reserve(zone);
100470 spin_unlock_irqrestore(&zone->lock, flags);
100471diff --git a/mm/percpu.c b/mm/percpu.c
100472index 73c97a5..508ee25 100644
100473--- a/mm/percpu.c
100474+++ b/mm/percpu.c
100475@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
100476 static unsigned int pcpu_high_unit_cpu __read_mostly;
100477
100478 /* the address of the first chunk which starts with the kernel static area */
100479-void *pcpu_base_addr __read_mostly;
100480+void *pcpu_base_addr __read_only;
100481 EXPORT_SYMBOL_GPL(pcpu_base_addr);
100482
100483 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
100484diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
100485index b159769..d07037f 100644
100486--- a/mm/process_vm_access.c
100487+++ b/mm/process_vm_access.c
100488@@ -13,6 +13,7 @@
100489 #include <linux/uio.h>
100490 #include <linux/sched.h>
100491 #include <linux/highmem.h>
100492+#include <linux/security.h>
100493 #include <linux/ptrace.h>
100494 #include <linux/slab.h>
100495 #include <linux/syscalls.h>
100496@@ -154,19 +155,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
100497 ssize_t iov_len;
100498 size_t total_len = iov_iter_count(iter);
100499
100500+ return -ENOSYS; // PaX: until properly audited
100501+
100502 /*
100503 * Work out how many pages of struct pages we're going to need
100504 * when eventually calling get_user_pages
100505 */
100506 for (i = 0; i < riovcnt; i++) {
100507 iov_len = rvec[i].iov_len;
100508- if (iov_len > 0) {
100509- nr_pages_iov = ((unsigned long)rvec[i].iov_base
100510- + iov_len)
100511- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
100512- / PAGE_SIZE + 1;
100513- nr_pages = max(nr_pages, nr_pages_iov);
100514- }
100515+ if (iov_len <= 0)
100516+ continue;
100517+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
100518+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
100519+ nr_pages = max(nr_pages, nr_pages_iov);
100520 }
100521
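The restructured loop keeps the original page-count formula: an iovec spanning [base, base + len) needs at most (last page index - first page index + 1) page pointers. It is a safe upper bound; an end that falls exactly on a page boundary counts one page extra. A standalone check:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long base = 0x1ff0, len = 0x30; /* straddles one page boundary */
    unsigned long nr = (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;

    printf("%lu\n", nr); /* 2: pages 1 and 2 are touched */
    return 0;
}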
100522 if (nr_pages == 0)
100523@@ -194,6 +195,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
100524 goto free_proc_pages;
100525 }
100526
100527+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
100528+ rc = -EPERM;
100529+ goto put_task_struct;
100530+ }
100531+
100532 mm = mm_access(task, PTRACE_MODE_ATTACH);
100533 if (!mm || IS_ERR(mm)) {
100534 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
100535diff --git a/mm/rmap.c b/mm/rmap.c
100536index c161a14..8a069bb 100644
100537--- a/mm/rmap.c
100538+++ b/mm/rmap.c
100539@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100540 struct anon_vma *anon_vma = vma->anon_vma;
100541 struct anon_vma_chain *avc;
100542
100543+#ifdef CONFIG_PAX_SEGMEXEC
100544+ struct anon_vma_chain *avc_m = NULL;
100545+#endif
100546+
100547 might_sleep();
100548 if (unlikely(!anon_vma)) {
100549 struct mm_struct *mm = vma->vm_mm;
100550@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100551 if (!avc)
100552 goto out_enomem;
100553
100554+#ifdef CONFIG_PAX_SEGMEXEC
100555+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
100556+ if (!avc_m)
100557+ goto out_enomem_free_avc;
100558+#endif
100559+
100560 anon_vma = find_mergeable_anon_vma(vma);
100561 allocated = NULL;
100562 if (!anon_vma) {
100563@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100564 /* page_table_lock to protect against threads */
100565 spin_lock(&mm->page_table_lock);
100566 if (likely(!vma->anon_vma)) {
100567+
100568+#ifdef CONFIG_PAX_SEGMEXEC
100569+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
100570+
100571+ if (vma_m) {
100572+ BUG_ON(vma_m->anon_vma);
100573+ vma_m->anon_vma = anon_vma;
100574+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
100575+ anon_vma->degree++;
100576+ avc_m = NULL;
100577+ }
100578+#endif
100579+
100580 vma->anon_vma = anon_vma;
100581 anon_vma_chain_link(vma, avc, anon_vma);
100582 /* vma reference or self-parent link for new root */
100583@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100584
100585 if (unlikely(allocated))
100586 put_anon_vma(allocated);
100587+
100588+#ifdef CONFIG_PAX_SEGMEXEC
100589+ if (unlikely(avc_m))
100590+ anon_vma_chain_free(avc_m);
100591+#endif
100592+
100593 if (unlikely(avc))
100594 anon_vma_chain_free(avc);
100595 }
100596 return 0;
100597
100598 out_enomem_free_avc:
100599+
100600+#ifdef CONFIG_PAX_SEGMEXEC
100601+ if (avc_m)
100602+ anon_vma_chain_free(avc_m);
100603+#endif
100604+
100605 anon_vma_chain_free(avc);
100606 out_enomem:
100607 return -ENOMEM;
100608@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
100609 * good chance of avoiding scanning the whole hierarchy when it searches where
100610 * page is mapped.
100611 */
100612-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
100613+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
100614 {
100615 struct anon_vma_chain *avc, *pavc;
100616 struct anon_vma *root = NULL;
100617@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
100618 * the corresponding VMA in the parent process is attached to.
100619 * Returns 0 on success, non-zero on failure.
100620 */
100621-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
100622+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
100623 {
100624 struct anon_vma_chain *avc;
100625 struct anon_vma *anon_vma;
100626@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
100627 void __init anon_vma_init(void)
100628 {
100629 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
100630- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
100631- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
100632+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
100633+ anon_vma_ctor);
100634+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
100635+ SLAB_PANIC|SLAB_NO_SANITIZE);
100636 }
100637
100638 /*
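
The SEGMEXEC additions pre-allocate a second anon_vma_chain (avc_m) before page_table_lock is taken, attach it to the mirror VMA returned by pax_find_mirror_vma() while the lock is held, and release it on every path where it went unused. A small pthreads sketch of that allocate-outside-the-lock, free-if-unused shape (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_slot;   /* stands in for vma_m->anon_vma */
static int have_mirror = 1; /* stands in for pax_find_mirror_vma() != NULL */

static int prepare(void)
{
    /* Allocate before taking the lock, as the hunk does for avc_m:
     * allocation may sleep, the spinlocked section may not. */
    void *spare = malloc(64);
    if (!spare)
        return -1;

    pthread_mutex_lock(&lock);
    if (have_mirror && !shared_slot) {
        shared_slot = spare; /* consumed under the lock */
        spare = NULL;
    }
    pthread_mutex_unlock(&lock);

    free(spare); /* free-if-unused, like anon_vma_chain_free(avc_m) */
    return 0;
}

int main(void)
{
    prepare();
    printf("slot = %p\n", shared_slot);
    free(shared_slot);
    return 0;
}
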
100639diff --git a/mm/shmem.c b/mm/shmem.c
100640index cf2d0ca..ec06b8b 100644
100641--- a/mm/shmem.c
100642+++ b/mm/shmem.c
100643@@ -33,7 +33,7 @@
100644 #include <linux/swap.h>
100645 #include <linux/aio.h>
100646
100647-static struct vfsmount *shm_mnt;
100648+struct vfsmount *shm_mnt;
100649
100650 #ifdef CONFIG_SHMEM
100651 /*
100652@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
100653 #define BOGO_DIRENT_SIZE 20
100654
100655 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
100656-#define SHORT_SYMLINK_LEN 128
100657+#define SHORT_SYMLINK_LEN 64
100658
100659 /*
100660 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
100661@@ -2555,6 +2555,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
100662 static int shmem_xattr_validate(const char *name)
100663 {
100664 struct { const char *prefix; size_t len; } arr[] = {
100665+
100666+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
100667+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
100668+#endif
100669+
100670 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
100671 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
100672 };
100673@@ -2610,6 +2615,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
100674 if (err)
100675 return err;
100676
100677+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
100678+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
100679+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
100680+ return -EOPNOTSUPP;
100681+ if (size > 8)
100682+ return -EINVAL;
100683+ }
100684+#endif
100685+
100686 return simple_xattr_set(&info->xattrs, name, value, size, flags);
100687 }
100688
100689@@ -2993,8 +3007,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
100690 int err = -ENOMEM;
100691
100692 /* Round up to L1_CACHE_BYTES to resist false sharing */
100693- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
100694- L1_CACHE_BYTES), GFP_KERNEL);
100695+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
100696 if (!sbinfo)
100697 return -ENOMEM;
100698
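
The shmem hunks admit the user.* xattr namespace only so the PaX flags attribute can be stored on tmpfs: every other user.* name is rejected with -EOPNOTSUPP and the value is capped at 8 bytes. A runnable restatement of that filter; the prefix and name literals mirror XATTR_USER_PREFIX and XATTR_NAME_PAX_FLAGS, while the function name is illustrative:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX     "user."
#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS  "user.pax.flags"

/* 0 if acceptable, negative errno otherwise: same policy as the hunk. */
static int pax_xattr_ok(const char *name, size_t size)
{
    if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
        if (strcmp(name, XATTR_NAME_PAX_FLAGS))
            return -EOPNOTSUPP; /* only the PaX flags may use user.* */
        if (size > 8)
            return -EINVAL;     /* flags value is at most 8 bytes */
    }
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           pax_xattr_ok("user.pax.flags", 4),   /* 0 */
           pax_xattr_ok("user.mime_type", 4),   /* -EOPNOTSUPP */
           pax_xattr_ok("user.pax.flags", 16)); /* -EINVAL */
    return 0;
}
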
100699diff --git a/mm/slab.c b/mm/slab.c
100700index c4b89ea..20990be 100644
100701--- a/mm/slab.c
100702+++ b/mm/slab.c
100703@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
100704 if ((x)->max_freeable < i) \
100705 (x)->max_freeable = i; \
100706 } while (0)
100707-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
100708-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
100709-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
100710-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
100711+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
100712+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
100713+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
100714+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
100715+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
100716+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
100717 #else
100718 #define STATS_INC_ACTIVE(x) do { } while (0)
100719 #define STATS_DEC_ACTIVE(x) do { } while (0)
100720@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
100721 #define STATS_INC_ALLOCMISS(x) do { } while (0)
100722 #define STATS_INC_FREEHIT(x) do { } while (0)
100723 #define STATS_INC_FREEMISS(x) do { } while (0)
100724+#define STATS_INC_SANITIZED(x) do { } while (0)
100725+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
100726 #endif
100727
100728 #if DEBUG
100729@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
100730 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
100731 */
100732 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
100733- const struct page *page, void *obj)
100734+ const struct page *page, const void *obj)
100735 {
100736 u32 offset = (obj - page->s_mem);
100737 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
100738@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
100739 * structures first. Without this, further allocations will bug.
100740 */
100741 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
100742- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
100743+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
100744 slab_state = PARTIAL_NODE;
100745
100746 slab_early_init = 0;
100747@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100748
100749 cachep = find_mergeable(size, align, flags, name, ctor);
100750 if (cachep) {
100751- cachep->refcount++;
100752+ atomic_inc(&cachep->refcount);
100753
100754 /*
100755 * Adjust the object sizes so that we clear
100756@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
100757 struct array_cache *ac = cpu_cache_get(cachep);
100758
100759 check_irq_off();
100760+
100761+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100762+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
100763+ STATS_INC_NOT_SANITIZED(cachep);
100764+ else {
100765+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
100766+
100767+ if (cachep->ctor)
100768+ cachep->ctor(objp);
100769+
100770+ STATS_INC_SANITIZED(cachep);
100771+ }
100772+#endif
100773+
100774 kmemleak_free_recursive(objp, cachep->flags);
100775 objp = cache_free_debugcheck(cachep, objp, caller);
100776
100777@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
100778 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
100779 }
100780
100781-void *__kmalloc_node(size_t size, gfp_t flags, int node)
100782+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
100783 {
100784 return __do_kmalloc_node(size, flags, node, _RET_IP_);
100785 }
100786@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
100787 * @flags: the type of memory to allocate (see kmalloc).
100788 * @caller: function caller for debug tracking of the caller
100789 */
100790-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
100791+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
100792 unsigned long caller)
100793 {
100794 struct kmem_cache *cachep;
100795@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
100796
100797 if (unlikely(ZERO_OR_NULL_PTR(objp)))
100798 return;
100799+ VM_BUG_ON(!virt_addr_valid(objp));
100800 local_irq_save(flags);
100801 kfree_debugcheck(objp);
100802 c = virt_to_cache(objp);
100803@@ -3981,14 +4000,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
100804 }
100805 /* cpu stats */
100806 {
100807- unsigned long allochit = atomic_read(&cachep->allochit);
100808- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
100809- unsigned long freehit = atomic_read(&cachep->freehit);
100810- unsigned long freemiss = atomic_read(&cachep->freemiss);
100811+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
100812+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
100813+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
100814+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
100815
100816 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
100817 allochit, allocmiss, freehit, freemiss);
100818 }
100819+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100820+ {
100821+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
100822+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
100823+
100824+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
100825+ }
100826+#endif
100827 #endif
100828 }
100829
100830@@ -4196,13 +4223,69 @@ static const struct file_operations proc_slabstats_operations = {
100831 static int __init slab_proc_init(void)
100832 {
100833 #ifdef CONFIG_DEBUG_SLAB_LEAK
100834- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
100835+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
100836 #endif
100837 return 0;
100838 }
100839 module_init(slab_proc_init);
100840 #endif
100841
100842+bool is_usercopy_object(const void *ptr)
100843+{
100844+ struct page *page;
100845+ struct kmem_cache *cachep;
100846+
100847+ if (ZERO_OR_NULL_PTR(ptr))
100848+ return false;
100849+
100850+ if (!slab_is_available())
100851+ return false;
100852+
100853+ if (!virt_addr_valid(ptr))
100854+ return false;
100855+
100856+ page = virt_to_head_page(ptr);
100857+
100858+ if (!PageSlab(page))
100859+ return false;
100860+
100861+ cachep = page->slab_cache;
100862+ return cachep->flags & SLAB_USERCOPY;
100863+}
100864+
100865+#ifdef CONFIG_PAX_USERCOPY
100866+const char *check_heap_object(const void *ptr, unsigned long n)
100867+{
100868+ struct page *page;
100869+ struct kmem_cache *cachep;
100870+ unsigned int objnr;
100871+ unsigned long offset;
100872+
100873+ if (ZERO_OR_NULL_PTR(ptr))
100874+ return "<null>";
100875+
100876+ if (!virt_addr_valid(ptr))
100877+ return NULL;
100878+
100879+ page = virt_to_head_page(ptr);
100880+
100881+ if (!PageSlab(page))
100882+ return NULL;
100883+
100884+ cachep = page->slab_cache;
100885+ if (!(cachep->flags & SLAB_USERCOPY))
100886+ return cachep->name;
100887+
100888+ objnr = obj_to_index(cachep, page, ptr);
100889+ BUG_ON(objnr >= cachep->num);
100890+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
100891+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
100892+ return NULL;
100893+
100894+ return cachep->name;
100895+}
100896+#endif
100897+
100898 /**
100899 * ksize - get the actual amount of memory allocated for a given object
100900 * @objp: Pointer to the object
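
The new check_heap_object() reduces the USERCOPY verdict to a single comparison: copying n bytes starting offset bytes into a slab object is acceptable iff both values fit within object_size. Keeping the subtraction on the right-hand side means the test cannot wrap even for an attacker-chosen n. A runnable restatement (helper name assumed):

#include <stdbool.h>
#include <stdio.h>

/* True iff [offset, offset + n) lies inside an object of objsize bytes.
 * Same shape as the hunk: n <= objsize - offset avoids computing
 * offset + n, which could overflow for attacker-controlled n. */
static bool copy_within_object(unsigned long offset, unsigned long n,
                               unsigned long objsize)
{
    return offset <= objsize && n <= objsize - offset;
}

int main(void)
{
    printf("%d\n", copy_within_object(0, 64, 64));   /* 1: exact fit */
    printf("%d\n", copy_within_object(8, 60, 64));   /* 0: runs past end */
    printf("%d\n", copy_within_object(8, ~0UL, 64)); /* 0: no wraparound */
    return 0;
}
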
100901diff --git a/mm/slab.h b/mm/slab.h
100902index 4c3ac12..7b2e470 100644
100903--- a/mm/slab.h
100904+++ b/mm/slab.h
100905@@ -22,7 +22,7 @@ struct kmem_cache {
100906 unsigned int align; /* Alignment as calculated */
100907 unsigned long flags; /* Active flags on the slab */
100908 const char *name; /* Slab name for sysfs */
100909- int refcount; /* Use counter */
100910+ atomic_t refcount; /* Use counter */
100911 void (*ctor)(void *); /* Called on object slot creation */
100912 struct list_head list; /* List of all slab caches on the system */
100913 };
100914@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
100915 /* The slab cache that manages slab cache information */
100916 extern struct kmem_cache *kmem_cache;
100917
100918+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100919+#ifdef CONFIG_X86_64
100920+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
100921+#else
100922+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
100923+#endif
100924+enum pax_sanitize_mode {
100925+ PAX_SANITIZE_SLAB_OFF = 0,
100926+ PAX_SANITIZE_SLAB_FAST,
100927+ PAX_SANITIZE_SLAB_FULL,
100928+};
100929+extern enum pax_sanitize_mode pax_sanitize_slab;
100930+#endif
100931+
100932 unsigned long calculate_alignment(unsigned long flags,
100933 unsigned long align, unsigned long size);
100934
100935@@ -114,7 +128,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
100936
100937 /* Legal flag mask for kmem_cache_create(), for various configurations */
100938 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
100939- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
100940+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
100941+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
100942
100943 #if defined(CONFIG_DEBUG_SLAB)
100944 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
100945@@ -315,6 +330,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
100946 return s;
100947
100948 page = virt_to_head_page(x);
100949+
100950+ BUG_ON(!PageSlab(page));
100951+
100952 cachep = page->slab_cache;
100953 if (slab_equal_or_root(cachep, s))
100954 return cachep;
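
PAX_MEMORY_SANITIZE_VALUE is 0xfe on x86_64, chosen so that a pointer sprayed over freed memory becomes a non-canonical address (0xfefefefefefefefe) that faults when dereferenced; other architectures use 0xff. A userspace analogue of the sanitize-on-free hook the slab hunks add (sanitizing_free is an illustrative wrapper, not a kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef __x86_64__
#define SANITIZE_VALUE 0xfe /* 0xfefe... is a non-canonical address */
#else
#define SANITIZE_VALUE 0xff
#endif

/* Userspace analogue of the sanitize-on-free hook: scrub an object as it
 * returns to the allocator so stale contents cannot leak or be reused. */
static void sanitizing_free(void *p, size_t size)
{
    if (p)
        memset(p, SANITIZE_VALUE, size);
    free(p);
}

int main(void)
{
    char *secret = malloc(32);
    if (!secret)
        return 1;
    snprintf(secret, 32, "hunter2");
    sanitizing_free(secret, 32); /* bytes are poisoned before release */
    puts("freed");
    return 0;
}
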
100955diff --git a/mm/slab_common.c b/mm/slab_common.c
100956index 999bb34..9843aea 100644
100957--- a/mm/slab_common.c
100958+++ b/mm/slab_common.c
100959@@ -25,11 +25,35 @@
100960
100961 #include "slab.h"
100962
100963-enum slab_state slab_state;
100964+enum slab_state slab_state __read_only;
100965 LIST_HEAD(slab_caches);
100966 DEFINE_MUTEX(slab_mutex);
100967 struct kmem_cache *kmem_cache;
100968
100969+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100970+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
100971+static int __init pax_sanitize_slab_setup(char *str)
100972+{
100973+ if (!str)
100974+ return 0;
100975+
100976+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
100977+ pr_info("PaX slab sanitization: %s\n", "disabled");
100978+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
100979+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
100980+ pr_info("PaX slab sanitization: %s\n", "fast");
100981+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
100982+ } else if (!strcmp(str, "full")) {
100983+ pr_info("PaX slab sanitization: %s\n", "full");
100984+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
100985+ } else
100986+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
100987+
100988+ return 0;
100989+}
100990+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
100991+#endif
100992+
100993 /*
100994 * Set of flags that will prevent slab merging
100995 */
100996@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
100997 * Merge control. If this is set then no merging of slab caches will occur.
100998 * (Could be removed. This was introduced to pacify the merge skeptics.)
100999 */
101000-static int slab_nomerge;
101001+static int slab_nomerge = 1;
101002
101003 static int __init setup_slab_nomerge(char *str)
101004 {
101005@@ -217,7 +241,7 @@ int slab_unmergeable(struct kmem_cache *s)
101006 /*
101007 * We may have set a slab to be unmergeable during bootstrap.
101008 */
101009- if (s->refcount < 0)
101010+ if (atomic_read(&s->refcount) < 0)
101011 return 1;
101012
101013 return 0;
101014@@ -321,7 +345,7 @@ do_kmem_cache_create(const char *name, size_t object_size, size_t size,
101015 if (err)
101016 goto out_free_cache;
101017
101018- s->refcount = 1;
101019+ atomic_set(&s->refcount, 1);
101020 list_add(&s->list, &slab_caches);
101021 out:
101022 if (err)
101023@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
101024 */
101025 flags &= CACHE_CREATE_MASK;
101026
101027+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101028+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
101029+ flags |= SLAB_NO_SANITIZE;
101030+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
101031+ flags &= ~SLAB_NO_SANITIZE;
101032+#endif
101033+
101034 s = __kmem_cache_alias(name, size, align, flags, ctor);
101035 if (s)
101036 goto out_unlock;
101037@@ -456,7 +487,7 @@ static void do_kmem_cache_release(struct list_head *release,
101038 rcu_barrier();
101039
101040 list_for_each_entry_safe(s, s2, release, list) {
101041-#ifdef SLAB_SUPPORTS_SYSFS
101042+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101043 sysfs_slab_remove(s);
101044 #else
101045 slab_kmem_cache_release(s);
101046@@ -625,8 +656,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
101047
101048 mutex_lock(&slab_mutex);
101049
101050- s->refcount--;
101051- if (s->refcount)
101052+ if (!atomic_dec_and_test(&s->refcount))
101053 goto out_unlock;
101054
101055 for_each_memcg_cache_safe(c, c2, s) {
101056@@ -691,7 +721,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
101057 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
101058 name, size, err);
101059
101060- s->refcount = -1; /* Exempt from merging for now */
101061+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
101062 }
101063
101064 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
101065@@ -704,7 +734,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
101066
101067 create_boot_cache(s, name, size, flags);
101068 list_add(&s->list, &slab_caches);
101069- s->refcount = 1;
101070+ atomic_set(&s->refcount, 1);
101071 return s;
101072 }
101073
101074@@ -716,6 +746,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
101075 EXPORT_SYMBOL(kmalloc_dma_caches);
101076 #endif
101077
101078+#ifdef CONFIG_PAX_USERCOPY_SLABS
101079+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
101080+EXPORT_SYMBOL(kmalloc_usercopy_caches);
101081+#endif
101082+
101083 /*
101084 * Conversion table for small slabs sizes / 8 to the index in the
101085 * kmalloc array. This is necessary for slabs < 192 since we have non power
101086@@ -780,6 +815,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
101087 return kmalloc_dma_caches[index];
101088
101089 #endif
101090+
101091+#ifdef CONFIG_PAX_USERCOPY_SLABS
101092+ if (unlikely((flags & GFP_USERCOPY)))
101093+ return kmalloc_usercopy_caches[index];
101094+
101095+#endif
101096+
101097 return kmalloc_caches[index];
101098 }
101099
101100@@ -836,7 +878,7 @@ void __init create_kmalloc_caches(unsigned long flags)
101101 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
101102 if (!kmalloc_caches[i]) {
101103 kmalloc_caches[i] = create_kmalloc_cache(NULL,
101104- 1 << i, flags);
101105+ 1 << i, SLAB_USERCOPY | flags);
101106 }
101107
101108 /*
101109@@ -845,10 +887,10 @@ void __init create_kmalloc_caches(unsigned long flags)
101110 * earlier power of two caches
101111 */
101112 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
101113- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
101114+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
101115
101116 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
101117- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
101118+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
101119 }
101120
101121 /* Kmalloc array is now usable */
101122@@ -881,6 +923,23 @@ void __init create_kmalloc_caches(unsigned long flags)
101123 }
101124 }
101125 #endif
101126+
101127+#ifdef CONFIG_PAX_USERCOPY_SLABS
101128+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
101129+ struct kmem_cache *s = kmalloc_caches[i];
101130+
101131+ if (s) {
101132+ int size = kmalloc_size(i);
101133+ char *n = kasprintf(GFP_NOWAIT,
101134+ "usercopy-kmalloc-%d", size);
101135+
101136+ BUG_ON(!n);
101137+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
101138+ size, SLAB_USERCOPY | flags);
101139+ }
101140+ }
101141+#endif
101142+
101143 }
101144 #endif /* !CONFIG_SLOB */
101145
101146@@ -940,6 +999,9 @@ static void print_slabinfo_header(struct seq_file *m)
101147 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
101148 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
101149 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
101150+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101151+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
101152+#endif
101153 #endif
101154 seq_putc(m, '\n');
101155 }
101156@@ -1069,7 +1131,7 @@ static int __init slab_proc_init(void)
101157 module_init(slab_proc_init);
101158 #endif /* CONFIG_SLABINFO */
101159
101160-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
101161+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
101162 gfp_t flags)
101163 {
101164 void *ret;
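
pax_sanitize_slab_setup() maps the boot option onto the three-mode enum declared in mm/slab.h, accepting 0/off, 1/fast and full, with fast as the compiled-in default. The same parsing, runnable in userspace (main() stands in for the early_param plumbing):

#include <stdio.h>
#include <string.h>

enum pax_sanitize_mode {
    PAX_SANITIZE_SLAB_OFF = 0,
    PAX_SANITIZE_SLAB_FAST,
    PAX_SANITIZE_SLAB_FULL,
};

static enum pax_sanitize_mode mode = PAX_SANITIZE_SLAB_FAST; /* default */

/* Same accepted spellings as the early_param handler above. */
static void parse_sanitize(const char *str)
{
    if (!str)
        return;
    if (!strcmp(str, "0") || !strcmp(str, "off"))
        mode = PAX_SANITIZE_SLAB_OFF;
    else if (!strcmp(str, "1") || !strcmp(str, "fast"))
        mode = PAX_SANITIZE_SLAB_FAST;
    else if (!strcmp(str, "full"))
        mode = PAX_SANITIZE_SLAB_FULL;
    else
        fprintf(stderr, "unsupported option '%s'\n", str);
}

int main(int argc, char **argv)
{
    parse_sanitize(argc > 1 ? argv[1] : "full");
    printf("mode = %d\n", mode);
    return 0;
}
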
101165diff --git a/mm/slob.c b/mm/slob.c
101166index 94a7fed..cf3fb1a 100644
101167--- a/mm/slob.c
101168+++ b/mm/slob.c
101169@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
101170 /*
101171 * Return the size of a slob block.
101172 */
101173-static slobidx_t slob_units(slob_t *s)
101174+static slobidx_t slob_units(const slob_t *s)
101175 {
101176 if (s->units > 0)
101177 return s->units;
101178@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
101179 /*
101180 * Return the next free slob block pointer after this one.
101181 */
101182-static slob_t *slob_next(slob_t *s)
101183+static slob_t *slob_next(const slob_t *s)
101184 {
101185 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
101186 slobidx_t next;
101187@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
101188 /*
101189 * Returns true if s is the last free block in its page.
101190 */
101191-static int slob_last(slob_t *s)
101192+static int slob_last(const slob_t *s)
101193 {
101194 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
101195 }
101196
101197-static void *slob_new_pages(gfp_t gfp, int order, int node)
101198+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
101199 {
101200- void *page;
101201+ struct page *page;
101202
101203 #ifdef CONFIG_NUMA
101204 if (node != NUMA_NO_NODE)
101205@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
101206 if (!page)
101207 return NULL;
101208
101209- return page_address(page);
101210+ __SetPageSlab(page);
101211+ return page;
101212 }
101213
101214-static void slob_free_pages(void *b, int order)
101215+static void slob_free_pages(struct page *sp, int order)
101216 {
101217 if (current->reclaim_state)
101218 current->reclaim_state->reclaimed_slab += 1 << order;
101219- free_pages((unsigned long)b, order);
101220+ __ClearPageSlab(sp);
101221+ page_mapcount_reset(sp);
101222+ sp->private = 0;
101223+ __free_pages(sp, order);
101224 }
101225
101226 /*
101227@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
101228
101229 /* Not enough space: must allocate a new page */
101230 if (!b) {
101231- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
101232- if (!b)
101233+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
101234+ if (!sp)
101235 return NULL;
101236- sp = virt_to_page(b);
101237- __SetPageSlab(sp);
101238+ b = page_address(sp);
101239
101240 spin_lock_irqsave(&slob_lock, flags);
101241 sp->units = SLOB_UNITS(PAGE_SIZE);
101242 sp->freelist = b;
101243+ sp->private = 0;
101244 INIT_LIST_HEAD(&sp->lru);
101245 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
101246 set_slob_page_free(sp, slob_list);
101247@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
101248 /*
101249 * slob_free: entry point into the slob allocator.
101250 */
101251-static void slob_free(void *block, int size)
101252+static void slob_free(struct kmem_cache *c, void *block, int size)
101253 {
101254 struct page *sp;
101255 slob_t *prev, *next, *b = (slob_t *)block;
101256@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
101257 if (slob_page_free(sp))
101258 clear_slob_page_free(sp);
101259 spin_unlock_irqrestore(&slob_lock, flags);
101260- __ClearPageSlab(sp);
101261- page_mapcount_reset(sp);
101262- slob_free_pages(b, 0);
101263+ slob_free_pages(sp, 0);
101264 return;
101265 }
101266
101267+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101268+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
101269+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
101270+#endif
101271+
101272 if (!slob_page_free(sp)) {
101273 /* This slob page is about to become partially free. Easy! */
101274 sp->units = units;
101275@@ -424,11 +431,10 @@ out:
101276 */
101277
101278 static __always_inline void *
101279-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
101280+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
101281 {
101282- unsigned int *m;
101283- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101284- void *ret;
101285+ slob_t *m;
101286+ void *ret = NULL;
101287
101288 gfp &= gfp_allowed_mask;
101289
101290@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
101291
101292 if (!m)
101293 return NULL;
101294- *m = size;
101295+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
101296+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
101297+ m[0].units = size;
101298+ m[1].units = align;
101299 ret = (void *)m + align;
101300
101301 trace_kmalloc_node(caller, ret,
101302 size, size + align, gfp, node);
101303 } else {
101304 unsigned int order = get_order(size);
101305+ struct page *page;
101306
101307 if (likely(order))
101308 gfp |= __GFP_COMP;
101309- ret = slob_new_pages(gfp, order, node);
101310+ page = slob_new_pages(gfp, order, node);
101311+ if (page) {
101312+ ret = page_address(page);
101313+ page->private = size;
101314+ }
101315
101316 trace_kmalloc_node(caller, ret,
101317 size, PAGE_SIZE << order, gfp, node);
101318 }
101319
101320- kmemleak_alloc(ret, size, 1, gfp);
101321 return ret;
101322 }
101323
101324-void *__kmalloc(size_t size, gfp_t gfp)
101325+static __always_inline void *
101326+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
101327+{
101328+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101329+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
101330+
101331+ if (!ZERO_OR_NULL_PTR(ret))
101332+ kmemleak_alloc(ret, size, 1, gfp);
101333+ return ret;
101334+}
101335+
101336+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
101337 {
101338 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
101339 }
101340@@ -491,34 +515,112 @@ void kfree(const void *block)
101341 return;
101342 kmemleak_free(block);
101343
101344+ VM_BUG_ON(!virt_addr_valid(block));
101345 sp = virt_to_page(block);
101346- if (PageSlab(sp)) {
101347+ VM_BUG_ON(!PageSlab(sp));
101348+ if (!sp->private) {
101349 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101350- unsigned int *m = (unsigned int *)(block - align);
101351- slob_free(m, *m + align);
101352- } else
101353+ slob_t *m = (slob_t *)(block - align);
101354+ slob_free(NULL, m, m[0].units + align);
101355+ } else {
101356+ __ClearPageSlab(sp);
101357+ page_mapcount_reset(sp);
101358+ sp->private = 0;
101359 __free_pages(sp, compound_order(sp));
101360+ }
101361 }
101362 EXPORT_SYMBOL(kfree);
101363
101364+bool is_usercopy_object(const void *ptr)
101365+{
101366+ if (!slab_is_available())
101367+ return false;
101368+
101369+ // PAX: TODO
101370+
101371+ return false;
101372+}
101373+
101374+#ifdef CONFIG_PAX_USERCOPY
101375+const char *check_heap_object(const void *ptr, unsigned long n)
101376+{
101377+ struct page *page;
101378+ const slob_t *free;
101379+ const void *base;
101380+ unsigned long flags;
101381+
101382+ if (ZERO_OR_NULL_PTR(ptr))
101383+ return "<null>";
101384+
101385+ if (!virt_addr_valid(ptr))
101386+ return NULL;
101387+
101388+ page = virt_to_head_page(ptr);
101389+ if (!PageSlab(page))
101390+ return NULL;
101391+
101392+ if (page->private) {
101393+ base = page;
101394+ if (base <= ptr && n <= page->private - (ptr - base))
101395+ return NULL;
101396+ return "<slob>";
101397+ }
101398+
101399+ /* some tricky double walking to find the chunk */
101400+ spin_lock_irqsave(&slob_lock, flags);
101401+ base = (void *)((unsigned long)ptr & PAGE_MASK);
101402+ free = page->freelist;
101403+
101404+ while (!slob_last(free) && (void *)free <= ptr) {
101405+ base = free + slob_units(free);
101406+ free = slob_next(free);
101407+ }
101408+
101409+ while (base < (void *)free) {
101410+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
101411+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
101412+ int offset;
101413+
101414+ if (ptr < base + align)
101415+ break;
101416+
101417+ offset = ptr - base - align;
101418+ if (offset >= m) {
101419+ base += size;
101420+ continue;
101421+ }
101422+
101423+ if (n > m - offset)
101424+ break;
101425+
101426+ spin_unlock_irqrestore(&slob_lock, flags);
101427+ return NULL;
101428+ }
101429+
101430+ spin_unlock_irqrestore(&slob_lock, flags);
101431+ return "<slob>";
101432+}
101433+#endif
101434+
101435 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
101436 size_t ksize(const void *block)
101437 {
101438 struct page *sp;
101439 int align;
101440- unsigned int *m;
101441+ slob_t *m;
101442
101443 BUG_ON(!block);
101444 if (unlikely(block == ZERO_SIZE_PTR))
101445 return 0;
101446
101447 sp = virt_to_page(block);
101448- if (unlikely(!PageSlab(sp)))
101449- return PAGE_SIZE << compound_order(sp);
101450+ VM_BUG_ON(!PageSlab(sp));
101451+ if (sp->private)
101452+ return sp->private;
101453
101454 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101455- m = (unsigned int *)(block - align);
101456- return SLOB_UNITS(*m) * SLOB_UNIT;
101457+ m = (slob_t *)(block - align);
101458+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
101459 }
101460 EXPORT_SYMBOL(ksize);
101461
101462@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
101463
101464 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
101465 {
101466- void *b;
101467+ void *b = NULL;
101468
101469 flags &= gfp_allowed_mask;
101470
101471 lockdep_trace_alloc(flags);
101472
101473+#ifdef CONFIG_PAX_USERCOPY_SLABS
101474+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
101475+#else
101476 if (c->size < PAGE_SIZE) {
101477 b = slob_alloc(c->size, flags, c->align, node);
101478 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
101479 SLOB_UNITS(c->size) * SLOB_UNIT,
101480 flags, node);
101481 } else {
101482- b = slob_new_pages(flags, get_order(c->size), node);
101483+ struct page *sp;
101484+
101485+ sp = slob_new_pages(flags, get_order(c->size), node);
101486+ if (sp) {
101487+ b = page_address(sp);
101488+ sp->private = c->size;
101489+ }
101490 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
101491 PAGE_SIZE << get_order(c->size),
101492 flags, node);
101493 }
101494+#endif
101495
101496 if (b && c->ctor)
101497 c->ctor(b);
101498@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
101499 EXPORT_SYMBOL(kmem_cache_alloc);
101500
101501 #ifdef CONFIG_NUMA
101502-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
101503+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
101504 {
101505 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
101506 }
101507@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
101508 EXPORT_SYMBOL(kmem_cache_alloc_node);
101509 #endif
101510
101511-static void __kmem_cache_free(void *b, int size)
101512+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
101513 {
101514- if (size < PAGE_SIZE)
101515- slob_free(b, size);
101516+ struct page *sp;
101517+
101518+ sp = virt_to_page(b);
101519+ BUG_ON(!PageSlab(sp));
101520+ if (!sp->private)
101521+ slob_free(c, b, size);
101522 else
101523- slob_free_pages(b, get_order(size));
101524+ slob_free_pages(sp, get_order(size));
101525 }
101526
101527 static void kmem_rcu_free(struct rcu_head *head)
101528@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
101529 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
101530 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
101531
101532- __kmem_cache_free(b, slob_rcu->size);
101533+ __kmem_cache_free(NULL, b, slob_rcu->size);
101534 }
101535
101536 void kmem_cache_free(struct kmem_cache *c, void *b)
101537 {
101538+ int size = c->size;
101539+
101540+#ifdef CONFIG_PAX_USERCOPY_SLABS
101541+ if (size + c->align < PAGE_SIZE) {
101542+ size += c->align;
101543+ b -= c->align;
101544+ }
101545+#endif
101546+
101547 kmemleak_free_recursive(b, c->flags);
101548 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
101549 struct slob_rcu *slob_rcu;
101550- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
101551- slob_rcu->size = c->size;
101552+ slob_rcu = b + (size - sizeof(struct slob_rcu));
101553+ slob_rcu->size = size;
101554 call_rcu(&slob_rcu->head, kmem_rcu_free);
101555 } else {
101556- __kmem_cache_free(b, c->size);
101557+ __kmem_cache_free(c, b, size);
101558 }
101559
101560+#ifdef CONFIG_PAX_USERCOPY_SLABS
101561+ trace_kfree(_RET_IP_, b);
101562+#else
101563 trace_kmem_cache_free(_RET_IP_, b);
101564+#endif
101565+
101566 }
101567 EXPORT_SYMBOL(kmem_cache_free);
101568
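
The slob rework widens the per-allocation header from a single unsigned int to two slob_t slots, m[0].units carrying the size and m[1].units the alignment gap, so that kfree(), ksize() and the USERCOPY walker can all recover the block layout. A userspace sketch of such a two-field header under an assumed fixed minimum alignment (struct hdr and the demo_* names are invented for illustration):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MINALIGN 16 /* stand-in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN) */

/* Header slots in front of each block, as in the reworked slob:
 * slot 0 holds the requested size, slot 1 the alignment gap. */
struct hdr { long units[2]; };

static void *demo_kmalloc(size_t size)
{
    char *raw = malloc(MINALIGN + size);
    if (!raw)
        return NULL;
    ((struct hdr *)raw)->units[0] = (long)size; /* m[0].units = size  */
    ((struct hdr *)raw)->units[1] = MINALIGN;   /* m[1].units = align */
    return raw + MINALIGN;
}

static size_t demo_ksize(const void *block)
{
    const struct hdr *m = (const struct hdr *)((const char *)block - MINALIGN);
    return (size_t)m->units[0];
}

static void demo_kfree(void *block)
{
    free((char *)block - MINALIGN);
}

int main(void)
{
    void *p = demo_kmalloc(100);
    assert(demo_ksize(p) == 100);
    demo_kfree(p);
    puts("ok");
    return 0;
}

In the patch, the stored alignment slot is what lets check_heap_object() walk a slob page and recover each block's true start.
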
101569diff --git a/mm/slub.c b/mm/slub.c
101570index 82c4737..55c316a 100644
101571--- a/mm/slub.c
101572+++ b/mm/slub.c
101573@@ -198,7 +198,7 @@ struct track {
101574
101575 enum track_item { TRACK_ALLOC, TRACK_FREE };
101576
101577-#ifdef CONFIG_SYSFS
101578+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101579 static int sysfs_slab_add(struct kmem_cache *);
101580 static int sysfs_slab_alias(struct kmem_cache *, const char *);
101581 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
101582@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
101583 if (!t->addr)
101584 return;
101585
101586- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
101587+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
101588 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
101589 #ifdef CONFIG_STACKTRACE
101590 {
101591@@ -2709,6 +2709,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
101592
101593 slab_free_hook(s, x);
101594
101595+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101596+ if (!(s->flags & SLAB_NO_SANITIZE)) {
101597+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
101598+ if (s->ctor)
101599+ s->ctor(x);
101600+ }
101601+#endif
101602+
101603 redo:
101604 /*
101605 * Determine the currently cpus per cpu slab.
101606@@ -3050,6 +3058,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
101607 s->inuse = size;
101608
101609 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
101610+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101611+ (!(flags & SLAB_NO_SANITIZE)) ||
101612+#endif
101613 s->ctor)) {
101614 /*
101615 * Relocate free pointer after the object if it is not
101616@@ -3304,7 +3315,7 @@ static int __init setup_slub_min_objects(char *str)
101617
101618 __setup("slub_min_objects=", setup_slub_min_objects);
101619
101620-void *__kmalloc(size_t size, gfp_t flags)
101621+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
101622 {
101623 struct kmem_cache *s;
101624 void *ret;
101625@@ -3342,7 +3353,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
101626 return ptr;
101627 }
101628
101629-void *__kmalloc_node(size_t size, gfp_t flags, int node)
101630+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
101631 {
101632 struct kmem_cache *s;
101633 void *ret;
101634@@ -3390,6 +3401,59 @@ static size_t __ksize(const void *object)
101635 return slab_ksize(page->slab_cache);
101636 }
101637
101638+bool is_usercopy_object(const void *ptr)
101639+{
101640+ struct page *page;
101641+ struct kmem_cache *s;
101642+
101643+ if (ZERO_OR_NULL_PTR(ptr))
101644+ return false;
101645+
101646+ if (!slab_is_available())
101647+ return false;
101648+
101649+ if (!virt_addr_valid(ptr))
101650+ return false;
101651+
101652+ page = virt_to_head_page(ptr);
101653+
101654+ if (!PageSlab(page))
101655+ return false;
101656+
101657+ s = page->slab_cache;
101658+ return s->flags & SLAB_USERCOPY;
101659+}
101660+
101661+#ifdef CONFIG_PAX_USERCOPY
101662+const char *check_heap_object(const void *ptr, unsigned long n)
101663+{
101664+ struct page *page;
101665+ struct kmem_cache *s;
101666+ unsigned long offset;
101667+
101668+ if (ZERO_OR_NULL_PTR(ptr))
101669+ return "<null>";
101670+
101671+ if (!virt_addr_valid(ptr))
101672+ return NULL;
101673+
101674+ page = virt_to_head_page(ptr);
101675+
101676+ if (!PageSlab(page))
101677+ return NULL;
101678+
101679+ s = page->slab_cache;
101680+ if (!(s->flags & SLAB_USERCOPY))
101681+ return s->name;
101682+
101683+ offset = (ptr - page_address(page)) % s->size;
101684+ if (offset <= s->object_size && n <= s->object_size - offset)
101685+ return NULL;
101686+
101687+ return s->name;
101688+}
101689+#endif
101690+
101691 size_t ksize(const void *object)
101692 {
101693 size_t size = __ksize(object);
101694@@ -3410,6 +3474,7 @@ void kfree(const void *x)
101695 if (unlikely(ZERO_OR_NULL_PTR(x)))
101696 return;
101697
101698+ VM_BUG_ON(!virt_addr_valid(x));
101699 page = virt_to_head_page(x);
101700 if (unlikely(!PageSlab(page))) {
101701 BUG_ON(!PageCompound(page));
101702@@ -3726,7 +3791,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
101703
101704 s = find_mergeable(size, align, flags, name, ctor);
101705 if (s) {
101706- s->refcount++;
101707+ atomic_inc(&s->refcount);
101708
101709 /*
101710 * Adjust the object sizes so that we clear
101711@@ -3742,7 +3807,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
101712 }
101713
101714 if (sysfs_slab_alias(s, name)) {
101715- s->refcount--;
101716+ atomic_dec(&s->refcount);
101717 s = NULL;
101718 }
101719 }
101720@@ -3859,7 +3924,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
101721 }
101722 #endif
101723
101724-#ifdef CONFIG_SYSFS
101725+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101726 static int count_inuse(struct page *page)
101727 {
101728 return page->inuse;
101729@@ -4140,7 +4205,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
101730 len += sprintf(buf + len, "%7ld ", l->count);
101731
101732 if (l->addr)
101733+#ifdef CONFIG_GRKERNSEC_HIDESYM
101734+ len += sprintf(buf + len, "%pS", NULL);
101735+#else
101736 len += sprintf(buf + len, "%pS", (void *)l->addr);
101737+#endif
101738 else
101739 len += sprintf(buf + len, "<not-available>");
101740
101741@@ -4238,12 +4307,12 @@ static void __init resiliency_test(void)
101742 validate_slab_cache(kmalloc_caches[9]);
101743 }
101744 #else
101745-#ifdef CONFIG_SYSFS
101746+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101747 static void resiliency_test(void) {};
101748 #endif
101749 #endif
101750
101751-#ifdef CONFIG_SYSFS
101752+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101753 enum slab_stat_type {
101754 SL_ALL, /* All slabs */
101755 SL_PARTIAL, /* Only partially allocated slabs */
101756@@ -4480,13 +4549,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
101757 {
101758 if (!s->ctor)
101759 return 0;
101760+#ifdef CONFIG_GRKERNSEC_HIDESYM
101761+ return sprintf(buf, "%pS\n", NULL);
101762+#else
101763 return sprintf(buf, "%pS\n", s->ctor);
101764+#endif
101765 }
101766 SLAB_ATTR_RO(ctor);
101767
101768 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
101769 {
101770- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
101771+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
101772 }
101773 SLAB_ATTR_RO(aliases);
101774
101775@@ -4574,6 +4647,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
101776 SLAB_ATTR_RO(cache_dma);
101777 #endif
101778
101779+#ifdef CONFIG_PAX_USERCOPY_SLABS
101780+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
101781+{
101782+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
101783+}
101784+SLAB_ATTR_RO(usercopy);
101785+#endif
101786+
101787+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101788+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
101789+{
101790+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
101791+}
101792+SLAB_ATTR_RO(sanitize);
101793+#endif
101794+
101795 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
101796 {
101797 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
101798@@ -4629,7 +4718,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
101799 * as well as cause other issues like converting a mergeable
101800 * cache into an umergeable one.
101801 */
101802- if (s->refcount > 1)
101803+ if (atomic_read(&s->refcount) > 1)
101804 return -EINVAL;
101805
101806 s->flags &= ~SLAB_TRACE;
101807@@ -4749,7 +4838,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
101808 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
101809 size_t length)
101810 {
101811- if (s->refcount > 1)
101812+ if (atomic_read(&s->refcount) > 1)
101813 return -EINVAL;
101814
101815 s->flags &= ~SLAB_FAILSLAB;
101816@@ -4916,6 +5005,12 @@ static struct attribute *slab_attrs[] = {
101817 #ifdef CONFIG_ZONE_DMA
101818 &cache_dma_attr.attr,
101819 #endif
101820+#ifdef CONFIG_PAX_USERCOPY_SLABS
101821+ &usercopy_attr.attr,
101822+#endif
101823+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101824+ &sanitize_attr.attr,
101825+#endif
101826 #ifdef CONFIG_NUMA
101827 &remote_node_defrag_ratio_attr.attr,
101828 #endif
101829@@ -5157,6 +5252,7 @@ static char *create_unique_id(struct kmem_cache *s)
101830 return name;
101831 }
101832
101833+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101834 static int sysfs_slab_add(struct kmem_cache *s)
101835 {
101836 int err;
101837@@ -5230,6 +5326,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
101838 kobject_del(&s->kobj);
101839 kobject_put(&s->kobj);
101840 }
101841+#endif
101842
101843 /*
101844 * Need to buffer aliases during bootup until sysfs becomes
101845@@ -5243,6 +5340,7 @@ struct saved_alias {
101846
101847 static struct saved_alias *alias_list;
101848
101849+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101850 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
101851 {
101852 struct saved_alias *al;
101853@@ -5265,6 +5363,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
101854 alias_list = al;
101855 return 0;
101856 }
101857+#endif
101858
101859 static int __init slab_sysfs_init(void)
101860 {
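
Because SLUB lays equal-sized objects out back to back at stride s->size, the new check_heap_object() locates the offset of an arbitrary pointer within its containing object with a single modulo. A runnable restatement of that address arithmetic (names assumed):

#include <stdio.h>

/* Given the page base, the stride s->size and a pointer into the slab,
 * recover the offset of ptr inside its object: the modulo used by the
 * SLUB check_heap_object() above. The stride covers object plus any
 * per-object metadata, so the offset is relative to the object start. */
static unsigned long offset_in_object(const char *page_base, const char *ptr,
                                      unsigned long stride)
{
    return (unsigned long)(ptr - page_base) % stride;
}

int main(void)
{
    char slab[4096];            /* pretend slab page */
    unsigned long stride = 128; /* s->size */

    const char *p = slab + 3 * stride + 40; /* 40 bytes into object 3 */
    printf("offset = %lu\n", offset_in_object(slab, p, stride)); /* 40 */
    return 0;
}
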
101861diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
101862index 4cba9c2..b4f9fcc 100644
101863--- a/mm/sparse-vmemmap.c
101864+++ b/mm/sparse-vmemmap.c
101865@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
101866 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
101867 if (!p)
101868 return NULL;
101869- pud_populate(&init_mm, pud, p);
101870+ pud_populate_kernel(&init_mm, pud, p);
101871 }
101872 return pud;
101873 }
101874@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
101875 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
101876 if (!p)
101877 return NULL;
101878- pgd_populate(&init_mm, pgd, p);
101879+ pgd_populate_kernel(&init_mm, pgd, p);
101880 }
101881 return pgd;
101882 }
101883diff --git a/mm/sparse.c b/mm/sparse.c
101884index d1b48b6..6e8590e 100644
101885--- a/mm/sparse.c
101886+++ b/mm/sparse.c
101887@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
101888
101889 for (i = 0; i < PAGES_PER_SECTION; i++) {
101890 if (PageHWPoison(&memmap[i])) {
101891- atomic_long_sub(1, &num_poisoned_pages);
101892+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
101893 ClearPageHWPoison(&memmap[i]);
101894 }
101895 }
101896diff --git a/mm/swap.c b/mm/swap.c
101897index cd3a5e6..40c0c8f 100644
101898--- a/mm/swap.c
101899+++ b/mm/swap.c
101900@@ -31,6 +31,7 @@
101901 #include <linux/memcontrol.h>
101902 #include <linux/gfp.h>
101903 #include <linux/uio.h>
101904+#include <linux/hugetlb.h>
101905
101906 #include "internal.h"
101907
101908@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
101909
101910 __page_cache_release(page);
101911 dtor = get_compound_page_dtor(page);
101912+ if (!PageHuge(page))
101913+ BUG_ON(dtor != free_compound_page);
101914 (*dtor)(page);
101915 }
101916
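
The swap.c hunk is a piece of indirect-call hardening: before invoking the compound-page destructor fetched from page metadata, it asserts that the pointer still equals the one legitimate non-hugetlb target, so an overwritten dtor cannot redirect control flow. A minimal sketch of the pattern (types and names invented):

#include <assert.h>
#include <stdio.h>

typedef void (*dtor_t)(void *);

static void free_compound_page(void *p) { printf("free %p\n", p); }

/* Before calling a function pointer fetched from object metadata, check
 * it still names the one legitimate target, as the hunk does for the
 * compound-page destructor of non-hugetlb pages. */
static void call_dtor(dtor_t dtor, void *page, int is_huge)
{
    if (!is_huge)
        assert(dtor == free_compound_page); /* BUG_ON analogue */
    dtor(page);
}

int main(void)
{
    int page;
    call_dtor(free_compound_page, &page, 0);
    return 0;
}
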
101917diff --git a/mm/swapfile.c b/mm/swapfile.c
101918index 63f55cc..31874e6 100644
101919--- a/mm/swapfile.c
101920+++ b/mm/swapfile.c
101921@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
101922
101923 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
101924 /* Activity counter to indicate that a swapon or swapoff has occurred */
101925-static atomic_t proc_poll_event = ATOMIC_INIT(0);
101926+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
101927
101928 static inline unsigned char swap_count(unsigned char ent)
101929 {
101930@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
101931 spin_unlock(&swap_lock);
101932
101933 err = 0;
101934- atomic_inc(&proc_poll_event);
101935+ atomic_inc_unchecked(&proc_poll_event);
101936 wake_up_interruptible(&proc_poll_wait);
101937
101938 out_dput:
101939@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
101940
101941 poll_wait(file, &proc_poll_wait, wait);
101942
101943- if (seq->poll_event != atomic_read(&proc_poll_event)) {
101944- seq->poll_event = atomic_read(&proc_poll_event);
101945+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
101946+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
101947 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
101948 }
101949
101950@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
101951 return ret;
101952
101953 seq = file->private_data;
101954- seq->poll_event = atomic_read(&proc_poll_event);
101955+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
101956 return 0;
101957 }
101958
101959@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
101960 (frontswap_map) ? "FS" : "");
101961
101962 mutex_unlock(&swapon_mutex);
101963- atomic_inc(&proc_poll_event);
101964+ atomic_inc_unchecked(&proc_poll_event);
101965 wake_up_interruptible(&proc_poll_wait);
101966
101967 if (S_ISREG(inode->i_mode))
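
proc_poll_event is an event counter, not a reference count, so the hunk converts it to atomic_unchecked_t: under the PaX REFCOUNT hardening, ordinary atomic_t arithmetic traps on overflow, and counters that may legitimately wrap must opt out. A userspace analogue of that two-type split using C11 atomics (the assert stands in for the REFCOUNT trap; the kernel types are different):

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;               /* protected: must not wrap   */
static atomic_uint poll_event = UINT_MAX; /* unchecked: wrapping is fine */

static void ref_inc(atomic_int *v)
{
    int old = atomic_fetch_add(v, 1);
    assert(old != INT_MAX && "refcount overflow"); /* REFCOUNT-style trap */
}

int main(void)
{
    atomic_fetch_add(&poll_event, 1); /* wraps UINT_MAX -> 0, by design */
    ref_inc(&refcount);               /* would abort at INT_MAX */
    printf("poll_event = %u, refcount = %d\n",
           atomic_load(&poll_event), atomic_load(&refcount));
    return 0;
}
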
101968diff --git a/mm/util.c b/mm/util.c
101969index 3981ae9..28b585b 100644
101970--- a/mm/util.c
101971+++ b/mm/util.c
101972@@ -233,6 +233,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
101973 void arch_pick_mmap_layout(struct mm_struct *mm)
101974 {
101975 mm->mmap_base = TASK_UNMAPPED_BASE;
101976+
101977+#ifdef CONFIG_PAX_RANDMMAP
101978+ if (mm->pax_flags & MF_PAX_RANDMMAP)
101979+ mm->mmap_base += mm->delta_mmap;
101980+#endif
101981+
101982 mm->get_unmapped_area = arch_get_unmapped_area;
101983 }
101984 #endif
101985@@ -403,6 +409,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
101986 if (!mm->arg_end)
101987 goto out_mm; /* Shh! No looking before we're done */
101988
101989+ if (gr_acl_handle_procpidmem(task))
101990+ goto out_mm;
101991+
101992 len = mm->arg_end - mm->arg_start;
101993
101994 if (len > buflen)
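
With RANDMMAP active, arch_pick_mmap_layout() shifts mmap_base by the per-process delta_mmap chosen at exec time. A sketch of how a page-aligned random delta of that shape could be derived; the entropy width, base constant and rand() are illustrative stand-ins, since the real implementation draws from the kernel RNG:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define TASK_UNMAPPED_BASE 0x40000000UL /* illustrative 32-bit value */
#define RAND_BITS 16                    /* illustrative entropy width */

/* Sketch of a RANDMMAP-style base: a page-aligned random delta, chosen
 * once per process, added to the legacy mmap base. */
static unsigned long pick_mmap_base(void)
{
    unsigned long delta_mmap =
        ((unsigned long)rand() & ((1UL << RAND_BITS) - 1)) << PAGE_SHIFT;
    return TASK_UNMAPPED_BASE + delta_mmap;
}

int main(void)
{
    srand(1234); /* a real kernel uses a CSPRNG, not rand() */
    printf("mmap_base = %#lx\n", pick_mmap_base());
    return 0;
}
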
101995diff --git a/mm/vmalloc.c b/mm/vmalloc.c
101996index 49abccf..7bd1931 100644
101997--- a/mm/vmalloc.c
101998+++ b/mm/vmalloc.c
101999@@ -39,20 +39,65 @@ struct vfree_deferred {
102000 struct work_struct wq;
102001 };
102002 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
102003+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
102004+
102005+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102006+struct stack_deferred_llist {
102007+ struct llist_head list;
102008+ void *stack;
102009+ void *lowmem_stack;
102010+};
102011+
102012+struct stack_deferred {
102013+ struct stack_deferred_llist list;
102014+ struct work_struct wq;
102015+};
102016+
102017+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
102018+#endif
102019
102020 static void __vunmap(const void *, int);
102021
102022-static void free_work(struct work_struct *w)
102023+static void vfree_work(struct work_struct *w)
102024 {
102025 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
102026 struct llist_node *llnode = llist_del_all(&p->list);
102027 while (llnode) {
102028- void *p = llnode;
102029+ void *x = llnode;
102030 llnode = llist_next(llnode);
102031- __vunmap(p, 1);
102032+ __vunmap(x, 1);
102033 }
102034 }
102035
102036+static void vunmap_work(struct work_struct *w)
102037+{
102038+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
102039+ struct llist_node *llnode = llist_del_all(&p->list);
102040+ while (llnode) {
102041+ void *x = llnode;
102042+ llnode = llist_next(llnode);
102043+ __vunmap(x, 0);
102044+ }
102045+}
102046+
102047+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102048+static void unmap_work(struct work_struct *w)
102049+{
102050+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
102051+ struct llist_node *llnode = llist_del_all(&p->list.list);
102052+ while (llnode) {
102053+ struct stack_deferred_llist *x =
102054+ llist_entry((struct llist_head *)llnode,
102055+ struct stack_deferred_llist, list);
102056+ void *stack = ACCESS_ONCE(x->stack);
102057+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
102058+ llnode = llist_next(llnode);
102059+ __vunmap(stack, 0);
102060+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
102061+ }
102062+}
102063+#endif
102064+
102065 /*** Page table manipulation functions ***/
102066
102067 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
102068@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
102069
102070 pte = pte_offset_kernel(pmd, addr);
102071 do {
102072- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
102073- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
102074+
102075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
102076+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
102077+ BUG_ON(!pte_exec(*pte));
102078+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
102079+ continue;
102080+ }
102081+#endif
102082+
102083+ {
102084+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
102085+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
102086+ }
102087 } while (pte++, addr += PAGE_SIZE, addr != end);
102088 }
102089
102090@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
102091 pte = pte_alloc_kernel(pmd, addr);
102092 if (!pte)
102093 return -ENOMEM;
102094+
102095+ pax_open_kernel();
102096 do {
102097 struct page *page = pages[*nr];
102098
102099- if (WARN_ON(!pte_none(*pte)))
102100+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
102101+ if (pgprot_val(prot) & _PAGE_NX)
102102+#endif
102103+
102104+ if (!pte_none(*pte)) {
102105+ pax_close_kernel();
102106+ WARN_ON(1);
102107 return -EBUSY;
102108- if (WARN_ON(!page))
102109+ }
102110+ if (!page) {
102111+ pax_close_kernel();
102112+ WARN_ON(1);
102113 return -ENOMEM;
102114+ }
102115 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
102116 (*nr)++;
102117 } while (pte++, addr += PAGE_SIZE, addr != end);
102118+ pax_close_kernel();
102119 return 0;
102120 }
102121
102122@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
102123 pmd_t *pmd;
102124 unsigned long next;
102125
102126- pmd = pmd_alloc(&init_mm, pud, addr);
102127+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
102128 if (!pmd)
102129 return -ENOMEM;
102130 do {
102131@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
102132 pud_t *pud;
102133 unsigned long next;
102134
102135- pud = pud_alloc(&init_mm, pgd, addr);
102136+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
102137 if (!pud)
102138 return -ENOMEM;
102139 do {
102140@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
102141 if (addr >= MODULES_VADDR && addr < MODULES_END)
102142 return 1;
102143 #endif
102144+
102145+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
102146+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
102147+ return 1;
102148+#endif
102149+
102150 return is_vmalloc_addr(x);
102151 }
102152
102153@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
102154
102155 if (!pgd_none(*pgd)) {
102156 pud_t *pud = pud_offset(pgd, addr);
102157+#ifdef CONFIG_X86
102158+ if (!pud_large(*pud))
102159+#endif
102160 if (!pud_none(*pud)) {
102161 pmd_t *pmd = pmd_offset(pud, addr);
102162+#ifdef CONFIG_X86
102163+ if (!pmd_large(*pmd))
102164+#endif
102165 if (!pmd_none(*pmd)) {
102166 pte_t *ptep, pte;
102167
102168@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
102169 * Allocate a region of KVA of the specified size and alignment, within the
102170 * vstart and vend.
102171 */
102172-static struct vmap_area *alloc_vmap_area(unsigned long size,
102173+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
102174 unsigned long align,
102175 unsigned long vstart, unsigned long vend,
102176 int node, gfp_t gfp_mask)
102177@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
102178 for_each_possible_cpu(i) {
102179 struct vmap_block_queue *vbq;
102180 struct vfree_deferred *p;
102181+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102182+ struct stack_deferred *p2;
102183+#endif
102184
102185 vbq = &per_cpu(vmap_block_queue, i);
102186 spin_lock_init(&vbq->lock);
102187 INIT_LIST_HEAD(&vbq->free);
102188+
102189 p = &per_cpu(vfree_deferred, i);
102190 init_llist_head(&p->list);
102191- INIT_WORK(&p->wq, free_work);
102192+ INIT_WORK(&p->wq, vfree_work);
102193+
102194+ p = &per_cpu(vunmap_deferred, i);
102195+ init_llist_head(&p->list);
102196+ INIT_WORK(&p->wq, vunmap_work);
102197+
102198+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102199+ p2 = &per_cpu(stack_deferred, i);
102200+ init_llist_head(&p2->list.list);
102201+ INIT_WORK(&p2->wq, unmap_work);
102202+#endif
102203 }
102204
102205 /* Import existing vmlist entries. */
102206@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
102207 struct vm_struct *area;
102208
102209 BUG_ON(in_interrupt());
102210+
102211+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
102212+ if (flags & VM_KERNEXEC) {
102213+ if (start != VMALLOC_START || end != VMALLOC_END)
102214+ return NULL;
102215+ start = (unsigned long)MODULES_EXEC_VADDR;
102216+ end = (unsigned long)MODULES_EXEC_END;
102217+ }
102218+#endif
102219+
102220 if (flags & VM_IOREMAP)
102221 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
102222
102223@@ -1510,13 +1615,36 @@ EXPORT_SYMBOL(vfree);
102224 */
102225 void vunmap(const void *addr)
102226 {
102227- BUG_ON(in_interrupt());
102228- might_sleep();
102229- if (addr)
102230+ if (!addr)
102231+ return;
102232+ if (unlikely(in_interrupt())) {
102233+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
102234+ if (llist_add((struct llist_node *)addr, &p->list))
102235+ schedule_work(&p->wq);
102236+ } else {
102237+ might_sleep();
102238 __vunmap(addr, 0);
102239+ }
102240 }
102241 EXPORT_SYMBOL(vunmap);
102242
102243+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102244+void unmap_process_stacks(struct task_struct *task)
102245+{
102246+ if (unlikely(in_interrupt())) {
102247+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
102248+ struct stack_deferred_llist *list = task->stack;
102249+ list->stack = task->stack;
102250+ list->lowmem_stack = task->lowmem_stack;
102251+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
102252+ schedule_work(&p->wq);
102253+ } else {
102254+ __vunmap(task->stack, 0);
102255+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
102256+ }
102257+}
102258+#endif
102259+
102260 /**
102261 * vmap - map an array of pages into virtually contiguous space
102262 * @pages: array of page pointers
102263@@ -1537,6 +1665,11 @@ void *vmap(struct page **pages, unsigned int count,
102264 if (count > totalram_pages)
102265 return NULL;
102266
102267+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
102268+ if (!(pgprot_val(prot) & _PAGE_NX))
102269+ flags |= VM_KERNEXEC;
102270+#endif
102271+
102272 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
102273 __builtin_return_address(0));
102274 if (!area)
102275@@ -1641,6 +1774,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
102276 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
102277 goto fail;
102278
102279+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
102280+ if (!(pgprot_val(prot) & _PAGE_NX)) {
102281+ vm_flags |= VM_KERNEXEC;
102282+ start = VMALLOC_START;
102283+ end = VMALLOC_END;
102284+ }
102285+#endif
102286+
102287 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
102288 vm_flags, start, end, node, gfp_mask, caller);
102289 if (!area)
102290@@ -1817,10 +1958,9 @@ EXPORT_SYMBOL(vzalloc_node);
102291 * For tight control over page level allocator and protection flags
102292 * use __vmalloc() instead.
102293 */
102294-
102295 void *vmalloc_exec(unsigned long size)
102296 {
102297- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
102298+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
102299 NUMA_NO_NODE, __builtin_return_address(0));
102300 }
102301
102302@@ -2127,6 +2267,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
102303 {
102304 struct vm_struct *area;
102305
102306+ BUG_ON(vma->vm_mirror);
102307+
102308 size = PAGE_ALIGN(size);
102309
102310 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
102311@@ -2609,7 +2751,11 @@ static int s_show(struct seq_file *m, void *p)
102312 v->addr, v->addr + v->size, v->size);
102313
102314 if (v->caller)
102315+#ifdef CONFIG_GRKERNSEC_HIDESYM
102316+ seq_printf(m, " %pK", v->caller);
102317+#else
102318 seq_printf(m, " %pS", v->caller);
102319+#endif
102320
102321 if (v->nr_pages)
102322 seq_printf(m, " pages=%d", v->nr_pages);
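The vunmap() and unmap_process_stacks() hunks above share one idiom: from interrupt context the address is pushed onto a lock-free llist and a workqueue item drains it later in process context, where __vunmap() is allowed to sleep. A minimal sketch of that deferral pattern, with illustrative names (my_deferred, my_drain, my_defer) rather than the patch's:

#include <linux/llist.h>
#include <linux/workqueue.h>

struct my_deferred {
        struct llist_head list;
        struct work_struct wq;          /* INIT_WORK(&wq, my_drain) at setup */
};

static void my_drain(struct work_struct *w)
{
        struct my_deferred *p = container_of(w, struct my_deferred, wq);
        struct llist_node *n = llist_del_all(&p->list);

        while (n) {
                struct llist_node *next = n->next;      /* read before freeing */
                /* ... unmap/free the object that embeds n ... */
                n = next;
        }
}

/* safe to call from interrupt context */
static void my_defer(struct my_deferred *p, struct llist_node *node)
{
        /* llist_add() returns true only when the list was empty,
         * so the work item is scheduled exactly once per batch */
        if (llist_add(node, &p->list))
                schedule_work(&p->wq);
}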
102323diff --git a/mm/vmstat.c b/mm/vmstat.c
102324index 4f5cd97..9fb715a 100644
102325--- a/mm/vmstat.c
102326+++ b/mm/vmstat.c
102327@@ -27,6 +27,7 @@
102328 #include <linux/mm_inline.h>
102329 #include <linux/page_ext.h>
102330 #include <linux/page_owner.h>
102331+#include <linux/grsecurity.h>
102332
102333 #include "internal.h"
102334
102335@@ -86,7 +87,7 @@ void vm_events_fold_cpu(int cpu)
102336 *
102337 * vm_stat contains the global counters
102338 */
102339-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
102340+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
102341 EXPORT_SYMBOL(vm_stat);
102342
102343 #ifdef CONFIG_SMP
102344@@ -438,7 +439,7 @@ static int fold_diff(int *diff)
102345
102346 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
102347 if (diff[i]) {
102348- atomic_long_add(diff[i], &vm_stat[i]);
102349+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
102350 changes++;
102351 }
102352 return changes;
102353@@ -476,7 +477,7 @@ static int refresh_cpu_vm_stats(void)
102354 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
102355 if (v) {
102356
102357- atomic_long_add(v, &zone->vm_stat[i]);
102358+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
102359 global_diff[i] += v;
102360 #ifdef CONFIG_NUMA
102361 /* 3 seconds idle till flush */
102362@@ -540,7 +541,7 @@ void cpu_vm_stats_fold(int cpu)
102363
102364 v = p->vm_stat_diff[i];
102365 p->vm_stat_diff[i] = 0;
102366- atomic_long_add(v, &zone->vm_stat[i]);
102367+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
102368 global_diff[i] += v;
102369 }
102370 }
102371@@ -560,8 +561,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
102372 if (pset->vm_stat_diff[i]) {
102373 int v = pset->vm_stat_diff[i];
102374 pset->vm_stat_diff[i] = 0;
102375- atomic_long_add(v, &zone->vm_stat[i]);
102376- atomic_long_add(v, &vm_stat[i]);
102377+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
102378+ atomic_long_add_unchecked(v, &vm_stat[i]);
102379 }
102380 }
102381 #endif
102382@@ -1293,10 +1294,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
102383 stat_items_size += sizeof(struct vm_event_state);
102384 #endif
102385
102386- v = kmalloc(stat_items_size, GFP_KERNEL);
102387+ v = kzalloc(stat_items_size, GFP_KERNEL);
102388 m->private = v;
102389 if (!v)
102390 return ERR_PTR(-ENOMEM);
102391+
102392+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102393+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
102394+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
102395+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
102396+ && !in_group_p(grsec_proc_gid)
102397+#endif
102398+ )
102399+ return (unsigned long *)m->private + *pos;
102400+#endif
102401+#endif
102402+
102403 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
102404 v[i] = global_page_state(i);
102405 v += NR_VM_ZONE_STAT_ITEMS;
102406@@ -1528,10 +1541,16 @@ static int __init setup_vmstat(void)
102407 cpu_notifier_register_done();
102408 #endif
102409 #ifdef CONFIG_PROC_FS
102410- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
102411- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
102412- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
102413- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
102414+ {
102415+ mode_t gr_mode = S_IRUGO;
102416+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102417+ gr_mode = S_IRUSR;
102418+#endif
102419+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
102420+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
102421+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
102422+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
102423+ }
102424 #endif
102425 return 0;
102426 }
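Every counter switched to the *_unchecked API above is a pure statistic: wrapping it is harmless, so it is exempted from the PAX_REFCOUNT overflow trap that instruments the ordinary atomics. When that feature is compiled out, the unchecked helpers conventionally collapse to the plain primitives, roughly as sketched here (an assumption about the fallback, not a quote from the patch):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
typedef atomic_long_t atomic_long_unchecked_t;

#define atomic_read_unchecked(v)                atomic_read(v)
#define atomic_inc_unchecked(v)                 atomic_inc(v)
#define atomic_add_unchecked(i, v)              atomic_add((i), (v))
#define atomic_long_read_unchecked(v)           atomic_long_read(v)
#define atomic_long_add_unchecked(i, v)         atomic_long_add((i), (v))
#endif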
102427diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
102428index 64c6bed..b79a5de 100644
102429--- a/net/8021q/vlan.c
102430+++ b/net/8021q/vlan.c
102431@@ -481,7 +481,7 @@ out:
102432 return NOTIFY_DONE;
102433 }
102434
102435-static struct notifier_block vlan_notifier_block __read_mostly = {
102436+static struct notifier_block vlan_notifier_block = {
102437 .notifier_call = vlan_device_event,
102438 };
102439
102440@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
102441 err = -EPERM;
102442 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
102443 break;
102444- if ((args.u.name_type >= 0) &&
102445- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
102446+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
102447 struct vlan_net *vn;
102448
102449 vn = net_generic(net, vlan_net_id);
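name_type in struct vlan_ioctl_args is an unsigned int, so the dropped ">= 0" half of the test above was always true (compilers flag it under -Wtype-limits); only the upper bound carries information. The same reduction in miniature, with an illustrative struct:

struct example_args {
        unsigned int name_type;
};

static int name_type_valid(const struct example_args *a)
{
        /* "a->name_type >= 0" would be vacuous for an unsigned field */
        return a->name_type < 4;        /* 4 standing in for VLAN_NAME_TYPE_HIGHEST */
}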
102450diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
102451index c92b52f..006c052 100644
102452--- a/net/8021q/vlan_netlink.c
102453+++ b/net/8021q/vlan_netlink.c
102454@@ -245,7 +245,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
102455 return dev_net(real_dev);
102456 }
102457
102458-struct rtnl_link_ops vlan_link_ops __read_mostly = {
102459+struct rtnl_link_ops vlan_link_ops = {
102460 .kind = "vlan",
102461 .maxtype = IFLA_VLAN_MAX,
102462 .policy = vlan_policy,
102463diff --git a/net/9p/client.c b/net/9p/client.c
102464index e86a9bea..e91f70e 100644
102465--- a/net/9p/client.c
102466+++ b/net/9p/client.c
102467@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
102468 len - inline_len);
102469 } else {
102470 err = copy_from_user(ename + inline_len,
102471- uidata, len - inline_len);
102472+ (char __force_user *)uidata, len - inline_len);
102473 if (err) {
102474 err = -EFAULT;
102475 goto out_err;
102476@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
102477 kernel_buf = 1;
102478 indata = data;
102479 } else
102480- indata = (__force char *)udata;
102481+ indata = (__force_kernel char *)udata;
102482 /*
102483 * response header len is 11
102484 * PDU Header(7) + IO Size (4)
102485@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
102486 kernel_buf = 1;
102487 odata = data;
102488 } else
102489- odata = (char *)udata;
102490+ odata = (char __force_kernel *)udata;
102491 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
102492 P9_ZC_HDR_SZ, kernel_buf, "dqd",
102493 fid->fid, offset, rsize);
102494diff --git a/net/9p/mod.c b/net/9p/mod.c
102495index 6ab36ae..6f1841b 100644
102496--- a/net/9p/mod.c
102497+++ b/net/9p/mod.c
102498@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
102499 void v9fs_register_trans(struct p9_trans_module *m)
102500 {
102501 spin_lock(&v9fs_trans_lock);
102502- list_add_tail(&m->list, &v9fs_trans_list);
102503+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
102504 spin_unlock(&v9fs_trans_lock);
102505 }
102506 EXPORT_SYMBOL(v9fs_register_trans);
102507@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
102508 void v9fs_unregister_trans(struct p9_trans_module *m)
102509 {
102510 spin_lock(&v9fs_trans_lock);
102511- list_del_init(&m->list);
102512+ pax_list_del_init((struct list_head *)&m->list);
102513 spin_unlock(&v9fs_trans_lock);
102514 }
102515 EXPORT_SYMBOL(v9fs_unregister_trans);
102516diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
102517index 80d08f6..de63fd1 100644
102518--- a/net/9p/trans_fd.c
102519+++ b/net/9p/trans_fd.c
102520@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
102521 oldfs = get_fs();
102522 set_fs(get_ds());
102523 /* The cast to a user pointer is valid due to the set_fs() */
102524- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
102525+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
102526 set_fs(oldfs);
102527
102528 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
102529diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
102530index af46bc4..f9adfcd 100644
102531--- a/net/appletalk/atalk_proc.c
102532+++ b/net/appletalk/atalk_proc.c
102533@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
102534 struct proc_dir_entry *p;
102535 int rc = -ENOMEM;
102536
102537- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
102538+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
102539 if (!atalk_proc_dir)
102540 goto out;
102541
102542diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
102543index 876fbe8..8bbea9f 100644
102544--- a/net/atm/atm_misc.c
102545+++ b/net/atm/atm_misc.c
102546@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
102547 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
102548 return 1;
102549 atm_return(vcc, truesize);
102550- atomic_inc(&vcc->stats->rx_drop);
102551+ atomic_inc_unchecked(&vcc->stats->rx_drop);
102552 return 0;
102553 }
102554 EXPORT_SYMBOL(atm_charge);
102555@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
102556 }
102557 }
102558 atm_return(vcc, guess);
102559- atomic_inc(&vcc->stats->rx_drop);
102560+ atomic_inc_unchecked(&vcc->stats->rx_drop);
102561 return NULL;
102562 }
102563 EXPORT_SYMBOL(atm_alloc_charge);
102564@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
102565
102566 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
102567 {
102568-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
102569+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
102570 __SONET_ITEMS
102571 #undef __HANDLE_ITEM
102572 }
102573@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
102574
102575 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
102576 {
102577-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
102578+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
102579 __SONET_ITEMS
102580 #undef __HANDLE_ITEM
102581 }
102582diff --git a/net/atm/lec.c b/net/atm/lec.c
102583index 4b98f89..5a2f6cb 100644
102584--- a/net/atm/lec.c
102585+++ b/net/atm/lec.c
102586@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
102587 }
102588
102589 static struct lane2_ops lane2_ops = {
102590- lane2_resolve, /* resolve, spec 3.1.3 */
102591- lane2_associate_req, /* associate_req, spec 3.1.4 */
102592- NULL /* associate indicator, spec 3.1.5 */
102593+ .resolve = lane2_resolve,
102594+ .associate_req = lane2_associate_req,
102595+ .associate_indicator = NULL
102596 };
102597
102598 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
102599diff --git a/net/atm/lec.h b/net/atm/lec.h
102600index 4149db1..f2ab682 100644
102601--- a/net/atm/lec.h
102602+++ b/net/atm/lec.h
102603@@ -48,7 +48,7 @@ struct lane2_ops {
102604 const u8 *tlvs, u32 sizeoftlvs);
102605 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
102606 const u8 *tlvs, u32 sizeoftlvs);
102607-};
102608+} __no_const;
102609
102610 /*
102611 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
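__no_const above is the escape hatch for the constify gcc plugin: ops-style structures holding only function pointers are made read-only wholesale, and a struct whose members really are written at run time must opt out. A sketch of how the annotation is conventionally wired up (the attribute name is an assumption here):

#ifdef CONSTIFY_PLUGIN
# define __no_const __attribute__((no_const))
#else
# define __no_const
#endif

struct runtime_ops {
        void (*handler)(struct net_device *dev);
} __no_const;   /* members assigned after init, so the struct must stay writable */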
102612diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
102613index d1b2d9a..d549f7f 100644
102614--- a/net/atm/mpoa_caches.c
102615+++ b/net/atm/mpoa_caches.c
102616@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
102617
102618
102619 static struct in_cache_ops ingress_ops = {
102620- in_cache_add_entry, /* add_entry */
102621- in_cache_get, /* get */
102622- in_cache_get_with_mask, /* get_with_mask */
102623- in_cache_get_by_vcc, /* get_by_vcc */
102624- in_cache_put, /* put */
102625- in_cache_remove_entry, /* remove_entry */
102626- cache_hit, /* cache_hit */
102627- clear_count_and_expired, /* clear_count */
102628- check_resolving_entries, /* check_resolving */
102629- refresh_entries, /* refresh */
102630- in_destroy_cache /* destroy_cache */
102631+ .add_entry = in_cache_add_entry,
102632+ .get = in_cache_get,
102633+ .get_with_mask = in_cache_get_with_mask,
102634+ .get_by_vcc = in_cache_get_by_vcc,
102635+ .put = in_cache_put,
102636+ .remove_entry = in_cache_remove_entry,
102637+ .cache_hit = cache_hit,
102638+ .clear_count = clear_count_and_expired,
102639+ .check_resolving = check_resolving_entries,
102640+ .refresh = refresh_entries,
102641+ .destroy_cache = in_destroy_cache
102642 };
102643
102644 static struct eg_cache_ops egress_ops = {
102645- eg_cache_add_entry, /* add_entry */
102646- eg_cache_get_by_cache_id, /* get_by_cache_id */
102647- eg_cache_get_by_tag, /* get_by_tag */
102648- eg_cache_get_by_vcc, /* get_by_vcc */
102649- eg_cache_get_by_src_ip, /* get_by_src_ip */
102650- eg_cache_put, /* put */
102651- eg_cache_remove_entry, /* remove_entry */
102652- update_eg_cache_entry, /* update */
102653- clear_expired, /* clear_expired */
102654- eg_destroy_cache /* destroy_cache */
102655+ .add_entry = eg_cache_add_entry,
102656+ .get_by_cache_id = eg_cache_get_by_cache_id,
102657+ .get_by_tag = eg_cache_get_by_tag,
102658+ .get_by_vcc = eg_cache_get_by_vcc,
102659+ .get_by_src_ip = eg_cache_get_by_src_ip,
102660+ .put = eg_cache_put,
102661+ .remove_entry = eg_cache_remove_entry,
102662+ .update = update_eg_cache_entry,
102663+ .clear_expired = clear_expired,
102664+ .destroy_cache = eg_destroy_cache
102665 };
102666
102667
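The rewrite of ingress_ops/egress_ops above is purely mechanical: positional initializers bind by member order and misbind silently when a struct gains or reorders fields, while C99 designated initializers bind by name and leave omitted members zeroed. The pattern in miniature:

struct cache_ops {
        int  (*add_entry)(void *entry);
        void (*remove_entry)(void *entry);
};

static int  my_add(void *entry)    { return 0; }
static void my_remove(void *entry) { }

/* fragile: silently misbinds if members are reordered */
static struct cache_ops positional = { my_add, my_remove };

/* robust: binds by name, gaps stay NULL */
static struct cache_ops designated = {
        .add_entry    = my_add,
        .remove_entry = my_remove,
};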
102668diff --git a/net/atm/proc.c b/net/atm/proc.c
102669index bbb6461..cf04016 100644
102670--- a/net/atm/proc.c
102671+++ b/net/atm/proc.c
102672@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
102673 const struct k_atm_aal_stats *stats)
102674 {
102675 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
102676- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
102677- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
102678- atomic_read(&stats->rx_drop));
102679+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
102680+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
102681+ atomic_read_unchecked(&stats->rx_drop));
102682 }
102683
102684 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
102685diff --git a/net/atm/resources.c b/net/atm/resources.c
102686index 0447d5d..3cf4728 100644
102687--- a/net/atm/resources.c
102688+++ b/net/atm/resources.c
102689@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
102690 static void copy_aal_stats(struct k_atm_aal_stats *from,
102691 struct atm_aal_stats *to)
102692 {
102693-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
102694+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
102695 __AAL_STAT_ITEMS
102696 #undef __HANDLE_ITEM
102697 }
102698@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
102699 static void subtract_aal_stats(struct k_atm_aal_stats *from,
102700 struct atm_aal_stats *to)
102701 {
102702-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
102703+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
102704 __AAL_STAT_ITEMS
102705 #undef __HANDLE_ITEM
102706 }
102707diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
102708index 919a5ce..cc6b444 100644
102709--- a/net/ax25/sysctl_net_ax25.c
102710+++ b/net/ax25/sysctl_net_ax25.c
102711@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
102712 {
102713 char path[sizeof("net/ax25/") + IFNAMSIZ];
102714 int k;
102715- struct ctl_table *table;
102716+ ctl_table_no_const *table;
102717
102718 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
102719 if (!table)
102720diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
102721index 00e00e0..710fcd2 100644
102722--- a/net/batman-adv/bat_iv_ogm.c
102723+++ b/net/batman-adv/bat_iv_ogm.c
102724@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
102725
102726 /* randomize initial seqno to avoid collision */
102727 get_random_bytes(&random_seqno, sizeof(random_seqno));
102728- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
102729+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
102730
102731 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
102732 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
102733@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
102734 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
102735
102736 /* change sequence number to network order */
102737- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
102738+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
102739 batadv_ogm_packet->seqno = htonl(seqno);
102740- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
102741+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
102742
102743 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
102744
102745@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
102746 return;
102747
102748 /* could be changed by schedule_own_packet() */
102749- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
102750+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
102751
102752 if (ogm_packet->flags & BATADV_DIRECTLINK)
102753 has_directlink_flag = true;
102754diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
102755index 3d1dcaa..4699f4e 100644
102756--- a/net/batman-adv/fragmentation.c
102757+++ b/net/batman-adv/fragmentation.c
102758@@ -449,7 +449,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
102759 frag_header.packet_type = BATADV_UNICAST_FRAG;
102760 frag_header.version = BATADV_COMPAT_VERSION;
102761 frag_header.ttl = BATADV_TTL;
102762- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
102763+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
102764 frag_header.reserved = 0;
102765 frag_header.no = 0;
102766 frag_header.total_size = htons(skb->len);
102767diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
102768index 5ec31d7..e371631 100644
102769--- a/net/batman-adv/soft-interface.c
102770+++ b/net/batman-adv/soft-interface.c
102771@@ -295,7 +295,7 @@ send:
102772 primary_if->net_dev->dev_addr);
102773
102774 /* set broadcast sequence number */
102775- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
102776+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
102777 bcast_packet->seqno = htonl(seqno);
102778
102779 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
102780@@ -760,7 +760,7 @@ static int batadv_softif_init_late(struct net_device *dev)
102781 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
102782
102783 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
102784- atomic_set(&bat_priv->bcast_seqno, 1);
102785+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
102786 atomic_set(&bat_priv->tt.vn, 0);
102787 atomic_set(&bat_priv->tt.local_changes, 0);
102788 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
102789@@ -774,7 +774,7 @@ static int batadv_softif_init_late(struct net_device *dev)
102790
102791 /* randomize initial seqno to avoid collision */
102792 get_random_bytes(&random_seqno, sizeof(random_seqno));
102793- atomic_set(&bat_priv->frag_seqno, random_seqno);
102794+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
102795
102796 bat_priv->primary_if = NULL;
102797 bat_priv->num_ifaces = 0;
102798@@ -982,7 +982,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
102799 return 0;
102800 }
102801
102802-struct rtnl_link_ops batadv_link_ops __read_mostly = {
102803+struct rtnl_link_ops batadv_link_ops = {
102804 .kind = "batadv",
102805 .priv_size = sizeof(struct batadv_priv),
102806 .setup = batadv_softif_init_early,
102807diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
102808index 9398c3f..0e79657 100644
102809--- a/net/batman-adv/types.h
102810+++ b/net/batman-adv/types.h
102811@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
102812 struct batadv_hard_iface_bat_iv {
102813 unsigned char *ogm_buff;
102814 int ogm_buff_len;
102815- atomic_t ogm_seqno;
102816+ atomic_unchecked_t ogm_seqno;
102817 };
102818
102819 /**
102820@@ -766,7 +766,7 @@ struct batadv_priv {
102821 atomic_t bonding;
102822 atomic_t fragmentation;
102823 atomic_t packet_size_max;
102824- atomic_t frag_seqno;
102825+ atomic_unchecked_t frag_seqno;
102826 #ifdef CONFIG_BATMAN_ADV_BLA
102827 atomic_t bridge_loop_avoidance;
102828 #endif
102829@@ -785,7 +785,7 @@ struct batadv_priv {
102830 #endif
102831 uint32_t isolation_mark;
102832 uint32_t isolation_mark_mask;
102833- atomic_t bcast_seqno;
102834+ atomic_unchecked_t bcast_seqno;
102835 atomic_t bcast_queue_left;
102836 atomic_t batman_queue_left;
102837 char num_ifaces;
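All of the batman-adv fields converted above are on-wire sequence numbers, which wrap around by design; under an overflow-trapping atomic that wrap would be a false positive, hence atomic_unchecked_t rather than any bug fix. The usage pattern, assuming the patch's unchecked API accepts the usual initializer:

static atomic_unchecked_t seqno = ATOMIC_INIT(0);

static __be32 next_seqno(void)
{
        /* wraps silently at 2^32, which is correct for a sequence number */
        return htonl((u32)atomic_inc_return_unchecked(&seqno));
}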
102838diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
102839index 1d65c5b..43e55fd 100644
102840--- a/net/bluetooth/hci_sock.c
102841+++ b/net/bluetooth/hci_sock.c
102842@@ -1042,7 +1042,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
102843 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
102844 }
102845
102846- len = min_t(unsigned int, len, sizeof(uf));
102847+ len = min((size_t)len, sizeof(uf));
102848 if (copy_from_user(&uf, optval, len)) {
102849 err = -EFAULT;
102850 break;
102851diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
102852index 6ba33f9..4afc26f 100644
102853--- a/net/bluetooth/l2cap_core.c
102854+++ b/net/bluetooth/l2cap_core.c
102855@@ -3534,8 +3534,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
102856 break;
102857
102858 case L2CAP_CONF_RFC:
102859- if (olen == sizeof(rfc))
102860- memcpy(&rfc, (void *)val, olen);
102861+ if (olen != sizeof(rfc))
102862+ break;
102863+
102864+ memcpy(&rfc, (void *)val, olen);
102865
102866 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
102867 rfc.mode != chan->mode)
102868diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
102869index 60694f0..32623ed 100644
102870--- a/net/bluetooth/l2cap_sock.c
102871+++ b/net/bluetooth/l2cap_sock.c
102872@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
102873 struct sock *sk = sock->sk;
102874 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
102875 struct l2cap_options opts;
102876- int len, err = 0;
102877+ int err = 0;
102878+ size_t len = optlen;
102879 u32 opt;
102880
102881 BT_DBG("sk %p", sk);
102882@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
102883 opts.max_tx = chan->max_tx;
102884 opts.txwin_size = chan->tx_win;
102885
102886- len = min_t(unsigned int, sizeof(opts), optlen);
102887+ len = min(sizeof(opts), len);
102888 if (copy_from_user((char *) &opts, optval, len)) {
102889 err = -EFAULT;
102890 break;
102891@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
102892 struct bt_security sec;
102893 struct bt_power pwr;
102894 struct l2cap_conn *conn;
102895- int len, err = 0;
102896+ int err = 0;
102897+ size_t len = optlen;
102898 u32 opt;
102899
102900 BT_DBG("sk %p", sk);
102901@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
102902
102903 sec.level = BT_SECURITY_LOW;
102904
102905- len = min_t(unsigned int, sizeof(sec), optlen);
102906+ len = min(sizeof(sec), len);
102907 if (copy_from_user((char *) &sec, optval, len)) {
102908 err = -EFAULT;
102909 break;
102910@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
102911
102912 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
102913
102914- len = min_t(unsigned int, sizeof(pwr), optlen);
102915+ len = min(sizeof(pwr), len);
102916 if (copy_from_user((char *) &pwr, optval, len)) {
102917 err = -EFAULT;
102918 break;
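The l2cap/rfcomm conversions above keep the user-supplied option length in a size_t from entry to copy_from_user() instead of bouncing it through int and min_t(unsigned int, ...); the clamp still bounds the copy either way, but a single unsigned type removes the signed/unsigned conversions that the size_overflow plugin would otherwise have to reason about. A sketch of the shape, under assumed names:

#include <linux/uaccess.h>

static int set_opts_sketch(const void __user *optval, unsigned int optlen)
{
        struct { u32 mtu, mode; } opts = { 0, 0 };
        size_t len = optlen;

        len = min(sizeof(opts), len);   /* both operands are size_t */
        if (copy_from_user(&opts, optval, len))
                return -EFAULT;
        /* ... apply opts ... */
        return 0;
}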
102919diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
102920index 3c6d2c8..6afc970 100644
102921--- a/net/bluetooth/rfcomm/sock.c
102922+++ b/net/bluetooth/rfcomm/sock.c
102923@@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
102924 struct sock *sk = sock->sk;
102925 struct bt_security sec;
102926 int err = 0;
102927- size_t len;
102928+ size_t len = optlen;
102929 u32 opt;
102930
102931 BT_DBG("sk %p", sk);
102932@@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
102933
102934 sec.level = BT_SECURITY_LOW;
102935
102936- len = min_t(unsigned int, sizeof(sec), optlen);
102937+ len = min(sizeof(sec), len);
102938 if (copy_from_user((char *) &sec, optval, len)) {
102939 err = -EFAULT;
102940 break;
102941diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
102942index 8e385a0..a5bdd8e 100644
102943--- a/net/bluetooth/rfcomm/tty.c
102944+++ b/net/bluetooth/rfcomm/tty.c
102945@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
102946 BT_DBG("tty %p id %d", tty, tty->index);
102947
102948 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
102949- dev->channel, dev->port.count);
102950+ dev->channel, atomic_read(&dev->port.count));
102951
102952 err = tty_port_open(&dev->port, tty, filp);
102953 if (err)
102954@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
102955 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
102956
102957 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
102958- dev->port.count);
102959+ atomic_read(&dev->port.count));
102960
102961 tty_port_close(&dev->port, tty, filp);
102962 }
102963diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
102964index 4fbcea0..69a6786 100644
102965--- a/net/bridge/br_netlink.c
102966+++ b/net/bridge/br_netlink.c
102967@@ -726,7 +726,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
102968 .get_link_af_size = br_get_link_af_size,
102969 };
102970
102971-struct rtnl_link_ops br_link_ops __read_mostly = {
102972+struct rtnl_link_ops br_link_ops = {
102973 .kind = "bridge",
102974 .priv_size = sizeof(struct net_bridge),
102975 .setup = br_dev_setup,
102976diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
102977index 91180a7..1301daa 100644
102978--- a/net/bridge/netfilter/ebtables.c
102979+++ b/net/bridge/netfilter/ebtables.c
102980@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102981 tmp.valid_hooks = t->table->valid_hooks;
102982 }
102983 mutex_unlock(&ebt_mutex);
102984- if (copy_to_user(user, &tmp, *len) != 0) {
102985+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
102986 BUGPRINT("c2u Didn't work\n");
102987 ret = -EFAULT;
102988 break;
102989@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
102990 goto out;
102991 tmp.valid_hooks = t->valid_hooks;
102992
102993- if (copy_to_user(user, &tmp, *len) != 0) {
102994+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
102995 ret = -EFAULT;
102996 break;
102997 }
102998@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
102999 tmp.entries_size = t->table->entries_size;
103000 tmp.valid_hooks = t->table->valid_hooks;
103001
103002- if (copy_to_user(user, &tmp, *len) != 0) {
103003+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
103004 ret = -EFAULT;
103005 break;
103006 }
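The ebtables hunks bound a user-controlled *len before copy_to_user(): without the check, a caller passing *len larger than sizeof(tmp) reads kernel stack beyond the structure. The clamp in isolation, with an illustrative struct:

#include <linux/uaccess.h>

static int copy_info_out(void __user *user, const int *len)
{
        struct { u32 num_entries; char name[32]; } tmp = { };

        /* *len is user-controlled; a negative value converts to a huge
         * size_t here and is rejected along with oversized requests */
        if (*len > sizeof(tmp))
                return -EINVAL;
        if (copy_to_user(user, &tmp, *len))
                return -EFAULT;
        return 0;
}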
103007diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
103008index f5afda1..dcf770a 100644
103009--- a/net/caif/cfctrl.c
103010+++ b/net/caif/cfctrl.c
103011@@ -10,6 +10,7 @@
103012 #include <linux/spinlock.h>
103013 #include <linux/slab.h>
103014 #include <linux/pkt_sched.h>
103015+#include <linux/sched.h>
103016 #include <net/caif/caif_layer.h>
103017 #include <net/caif/cfpkt.h>
103018 #include <net/caif/cfctrl.h>
103019@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
103020 memset(&dev_info, 0, sizeof(dev_info));
103021 dev_info.id = 0xff;
103022 cfsrvl_init(&this->serv, 0, &dev_info, false);
103023- atomic_set(&this->req_seq_no, 1);
103024- atomic_set(&this->rsp_seq_no, 1);
103025+ atomic_set_unchecked(&this->req_seq_no, 1);
103026+ atomic_set_unchecked(&this->rsp_seq_no, 1);
103027 this->serv.layer.receive = cfctrl_recv;
103028 sprintf(this->serv.layer.name, "ctrl");
103029 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
103030@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
103031 struct cfctrl_request_info *req)
103032 {
103033 spin_lock_bh(&ctrl->info_list_lock);
103034- atomic_inc(&ctrl->req_seq_no);
103035- req->sequence_no = atomic_read(&ctrl->req_seq_no);
103036+ atomic_inc_unchecked(&ctrl->req_seq_no);
103037+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
103038 list_add_tail(&req->list, &ctrl->list);
103039 spin_unlock_bh(&ctrl->info_list_lock);
103040 }
103041@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
103042 if (p != first)
103043 pr_warn("Requests are not received in order\n");
103044
103045- atomic_set(&ctrl->rsp_seq_no,
103046+ atomic_set_unchecked(&ctrl->rsp_seq_no,
103047 p->sequence_no);
103048 list_del(&p->list);
103049 goto out;
103050diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
103051index 67a4a36..8d28068 100644
103052--- a/net/caif/chnl_net.c
103053+++ b/net/caif/chnl_net.c
103054@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
103055 };
103056
103057
103058-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
103059+static struct rtnl_link_ops ipcaif_link_ops = {
103060 .kind = "caif",
103061 .priv_size = sizeof(struct chnl_net),
103062 .setup = ipcaif_net_setup,
103063diff --git a/net/can/af_can.c b/net/can/af_can.c
103064index 32d710e..93bcf05 100644
103065--- a/net/can/af_can.c
103066+++ b/net/can/af_can.c
103067@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
103068 };
103069
103070 /* notifier block for netdevice event */
103071-static struct notifier_block can_netdev_notifier __read_mostly = {
103072+static struct notifier_block can_netdev_notifier = {
103073 .notifier_call = can_notifier,
103074 };
103075
103076diff --git a/net/can/bcm.c b/net/can/bcm.c
103077index ee9ffd9..dfdf3d4 100644
103078--- a/net/can/bcm.c
103079+++ b/net/can/bcm.c
103080@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
103081 }
103082
103083 /* create /proc/net/can-bcm directory */
103084- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
103085+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
103086 return 0;
103087 }
103088
103089diff --git a/net/can/gw.c b/net/can/gw.c
103090index a6f448e..5902171 100644
103091--- a/net/can/gw.c
103092+++ b/net/can/gw.c
103093@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
103094 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
103095
103096 static HLIST_HEAD(cgw_list);
103097-static struct notifier_block notifier;
103098
103099 static struct kmem_cache *cgw_cache __read_mostly;
103100
103101@@ -948,6 +947,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
103102 return err;
103103 }
103104
103105+static struct notifier_block notifier = {
103106+ .notifier_call = cgw_notifier
103107+};
103108+
103109 static __init int cgw_module_init(void)
103110 {
103111 /* sanitize given module parameter */
103112@@ -963,7 +966,6 @@ static __init int cgw_module_init(void)
103113 return -ENOMEM;
103114
103115 /* set notifier */
103116- notifier.notifier_call = cgw_notifier;
103117 register_netdevice_notifier(&notifier);
103118
103119 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
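Moving the cgw notifier's initialization from cgw_module_init() into its definition is what lets the structure leave writable memory: with no runtime store remaining, the constify pass (or a plain const qualifier) can apply. The same move in standalone form:

#include <linux/notifier.h>

static int my_event_cb(struct notifier_block *nb,
                       unsigned long event, void *ptr)
{
        return NOTIFY_DONE;
}

/* fully initialized at compile time; nothing writes it later */
static struct notifier_block my_notifier = {
        .notifier_call = my_event_cb,
};

/* in module init: register_netdevice_notifier(&my_notifier); */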
103120diff --git a/net/can/proc.c b/net/can/proc.c
103121index 1a19b98..df2b4ec 100644
103122--- a/net/can/proc.c
103123+++ b/net/can/proc.c
103124@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
103125 void can_init_proc(void)
103126 {
103127 /* create /proc/net/can directory */
103128- can_dir = proc_mkdir("can", init_net.proc_net);
103129+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
103130
103131 if (!can_dir) {
103132 printk(KERN_INFO "can: failed to create /proc/net/can . "
103133diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
103134index a9f4ae4..ee19b92 100644
103135--- a/net/ceph/messenger.c
103136+++ b/net/ceph/messenger.c
103137@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
103138 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
103139
103140 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
103141-static atomic_t addr_str_seq = ATOMIC_INIT(0);
103142+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
103143
103144 static struct page *zero_page; /* used in certain error cases */
103145
103146@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
103147 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
103148 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
103149
103150- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
103151+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
103152 s = addr_str[i];
103153
103154 switch (ss->ss_family) {
103155diff --git a/net/compat.c b/net/compat.c
103156index f7bd286..76ea56a 100644
103157--- a/net/compat.c
103158+++ b/net/compat.c
103159@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
103160
103161 #define CMSG_COMPAT_FIRSTHDR(msg) \
103162 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
103163- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
103164+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
103165 (struct compat_cmsghdr __user *)NULL)
103166
103167 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
103168 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
103169 (ucmlen) <= (unsigned long) \
103170 ((mhdr)->msg_controllen - \
103171- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
103172+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
103173
103174 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
103175 struct compat_cmsghdr __user *cmsg, int cmsg_len)
103176 {
103177 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
103178- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
103179+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
103180 msg->msg_controllen)
103181 return NULL;
103182 return (struct compat_cmsghdr __user *)ptr;
103183@@ -203,7 +203,7 @@ Efault:
103184
103185 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
103186 {
103187- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
103188+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
103189 struct compat_cmsghdr cmhdr;
103190 struct compat_timeval ctv;
103191 struct compat_timespec cts[3];
103192@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
103193
103194 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
103195 {
103196- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
103197+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
103198 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
103199 int fdnum = scm->fp->count;
103200 struct file **fp = scm->fp->fp;
103201@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
103202 return -EFAULT;
103203 old_fs = get_fs();
103204 set_fs(KERNEL_DS);
103205- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
103206+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
103207 set_fs(old_fs);
103208
103209 return err;
103210@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
103211 len = sizeof(ktime);
103212 old_fs = get_fs();
103213 set_fs(KERNEL_DS);
103214- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
103215+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
103216 set_fs(old_fs);
103217
103218 if (!err) {
103219@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
103220 case MCAST_JOIN_GROUP:
103221 case MCAST_LEAVE_GROUP:
103222 {
103223- struct compat_group_req __user *gr32 = (void *)optval;
103224+ struct compat_group_req __user *gr32 = (void __user *)optval;
103225 struct group_req __user *kgr =
103226 compat_alloc_user_space(sizeof(struct group_req));
103227 u32 interface;
103228@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
103229 case MCAST_BLOCK_SOURCE:
103230 case MCAST_UNBLOCK_SOURCE:
103231 {
103232- struct compat_group_source_req __user *gsr32 = (void *)optval;
103233+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
103234 struct group_source_req __user *kgsr = compat_alloc_user_space(
103235 sizeof(struct group_source_req));
103236 u32 interface;
103237@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
103238 }
103239 case MCAST_MSFILTER:
103240 {
103241- struct compat_group_filter __user *gf32 = (void *)optval;
103242+ struct compat_group_filter __user *gf32 = (void __user *)optval;
103243 struct group_filter __user *kgf;
103244 u32 interface, fmode, numsrc;
103245
103246@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
103247 char __user *optval, int __user *optlen,
103248 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
103249 {
103250- struct compat_group_filter __user *gf32 = (void *)optval;
103251+ struct compat_group_filter __user *gf32 = (void __user *)optval;
103252 struct group_filter __user *kgf;
103253 int __user *koptlen;
103254 u32 interface, fmode, numsrc;
103255@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
103256
103257 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
103258 return -EINVAL;
103259- if (copy_from_user(a, args, nas[call]))
103260+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
103261 return -EFAULT;
103262 a0 = a[0];
103263 a1 = a[1];
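The compat_sys_socketcall change re-checks the table-derived copy length against the destination array before copy_from_user(); if the nas[] table and the on-stack buffer ever disagree, the copy is refused instead of overflowing. Reduced to its essentials (sizes illustrative):

#include <linux/uaccess.h>

static const unsigned char arg_bytes[3] = { 8, 12, 24 };

static int fetch_call_args(u32 a[6], const u32 __user *uargs, int call)
{
        size_t n;

        if (call < 0 || call >= ARRAY_SIZE(arg_bytes))
                return -EINVAL;
        n = arg_bytes[call];
        if (n > 6 * sizeof(u32))        /* never exceed the destination */
                return -EINVAL;
        if (copy_from_user(a, uargs, n))
                return -EFAULT;
        return 0;
}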
103264diff --git a/net/core/datagram.c b/net/core/datagram.c
103265index df493d6..1145766 100644
103266--- a/net/core/datagram.c
103267+++ b/net/core/datagram.c
103268@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
103269 }
103270
103271 kfree_skb(skb);
103272- atomic_inc(&sk->sk_drops);
103273+ atomic_inc_unchecked(&sk->sk_drops);
103274 sk_mem_reclaim_partial(sk);
103275
103276 return err;
103277diff --git a/net/core/dev.c b/net/core/dev.c
103278index 22a53ac..1d19af7 100644
103279--- a/net/core/dev.c
103280+++ b/net/core/dev.c
103281@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
103282 {
103283 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
103284 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
103285- atomic_long_inc(&dev->rx_dropped);
103286+ atomic_long_inc_unchecked(&dev->rx_dropped);
103287 kfree_skb(skb);
103288 return NET_RX_DROP;
103289 }
103290 }
103291
103292 if (unlikely(!is_skb_forwardable(dev, skb))) {
103293- atomic_long_inc(&dev->rx_dropped);
103294+ atomic_long_inc_unchecked(&dev->rx_dropped);
103295 kfree_skb(skb);
103296 return NET_RX_DROP;
103297 }
103298@@ -2987,7 +2987,7 @@ recursion_alert:
103299 drop:
103300 rcu_read_unlock_bh();
103301
103302- atomic_long_inc(&dev->tx_dropped);
103303+ atomic_long_inc_unchecked(&dev->tx_dropped);
103304 kfree_skb_list(skb);
103305 return rc;
103306 out:
103307@@ -3336,7 +3336,7 @@ enqueue:
103308
103309 local_irq_restore(flags);
103310
103311- atomic_long_inc(&skb->dev->rx_dropped);
103312+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
103313 kfree_skb(skb);
103314 return NET_RX_DROP;
103315 }
103316@@ -3413,7 +3413,7 @@ int netif_rx_ni(struct sk_buff *skb)
103317 }
103318 EXPORT_SYMBOL(netif_rx_ni);
103319
103320-static void net_tx_action(struct softirq_action *h)
103321+static __latent_entropy void net_tx_action(void)
103322 {
103323 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
103324
103325@@ -3751,7 +3751,7 @@ ncls:
103326 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
103327 } else {
103328 drop:
103329- atomic_long_inc(&skb->dev->rx_dropped);
103330+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
103331 kfree_skb(skb);
103332 /* Jamal, now you will not able to escape explaining
103333 * me how you were going to use this. :-)
103334@@ -4640,7 +4640,7 @@ out_unlock:
103335 return work;
103336 }
103337
103338-static void net_rx_action(struct softirq_action *h)
103339+static __latent_entropy void net_rx_action(void)
103340 {
103341 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
103342 unsigned long time_limit = jiffies + 2;
103343@@ -6676,8 +6676,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
103344 } else {
103345 netdev_stats_to_stats64(storage, &dev->stats);
103346 }
103347- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
103348- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
103349+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
103350+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
103351 return storage;
103352 }
103353 EXPORT_SYMBOL(dev_get_stats);
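__latent_entropy, applied to net_tx_action()/net_rx_action() above (the unused struct softirq_action * parameter is dropped in the same stroke), marks functions for the latent_entropy gcc plugin, which instruments them to stir their control flow into a per-cpu entropy pool; without the plugin the attribute expands to nothing. The conventional wiring:

#ifdef LATENT_ENTROPY_PLUGIN
# define __latent_entropy __attribute__((latent_entropy))
#else
# define __latent_entropy
#endif

static __latent_entropy void my_softirq_action(void)
{
        /* body is instrumented by the plugin when it is enabled */
}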
103354diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
103355index b94b1d2..da3ed7c 100644
103356--- a/net/core/dev_ioctl.c
103357+++ b/net/core/dev_ioctl.c
103358@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
103359 no_module = !dev;
103360 if (no_module && capable(CAP_NET_ADMIN))
103361 no_module = request_module("netdev-%s", name);
103362- if (no_module && capable(CAP_SYS_MODULE))
103363+ if (no_module && capable(CAP_SYS_MODULE)) {
103364+#ifdef CONFIG_GRKERNSEC_MODHARDEN
103365+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
103366+#else
103367 request_module("%s", name);
103368+#endif
103369+ }
103370 }
103371 EXPORT_SYMBOL(dev_load);
103372
103373diff --git a/net/core/filter.c b/net/core/filter.c
103374index f6bdc2b..76eba8e 100644
103375--- a/net/core/filter.c
103376+++ b/net/core/filter.c
103377@@ -533,7 +533,11 @@ do_pass:
103378
103379 /* Unknown instruction. */
103380 default:
103381- goto err;
102382+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
103383+ fp->code, fp->jt, fp->jf, fp->k);
103384+ kfree(addrs);
103385+ BUG();
103386+ return -EINVAL;
103387 }
103388
103389 insn++;
103390@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
103391 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
103392 int pc, ret = 0;
103393
103394- BUILD_BUG_ON(BPF_MEMWORDS > 16);
103395+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
103396
103397 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
103398 if (!masks)
103399@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
103400 if (!fp)
103401 return -ENOMEM;
103402
103403- memcpy(fp->insns, fprog->filter, fsize);
103404+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
103405
103406 fp->len = fprog->len;
103407 /* Since unattached filters are not copied back to user
103408diff --git a/net/core/flow.c b/net/core/flow.c
103409index 1033725..340f65d 100644
103410--- a/net/core/flow.c
103411+++ b/net/core/flow.c
103412@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
103413 static int flow_entry_valid(struct flow_cache_entry *fle,
103414 struct netns_xfrm *xfrm)
103415 {
103416- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
103417+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
103418 return 0;
103419 if (fle->object && !fle->object->ops->check(fle->object))
103420 return 0;
103421@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
103422 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
103423 fcp->hash_count++;
103424 }
103425- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
103426+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
103427 flo = fle->object;
103428 if (!flo)
103429 goto ret_object;
103430@@ -263,7 +263,7 @@ nocache:
103431 }
103432 flo = resolver(net, key, family, dir, flo, ctx);
103433 if (fle) {
103434- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
103435+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
103436 if (!IS_ERR(flo))
103437 fle->object = flo;
103438 else
103439diff --git a/net/core/neighbour.c b/net/core/neighbour.c
103440index 70fe9e1..926784c 100644
103441--- a/net/core/neighbour.c
103442+++ b/net/core/neighbour.c
103443@@ -2806,7 +2806,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
103444 void __user *buffer, size_t *lenp, loff_t *ppos)
103445 {
103446 int size, ret;
103447- struct ctl_table tmp = *ctl;
103448+ ctl_table_no_const tmp = *ctl;
103449
103450 tmp.extra1 = &zero;
103451 tmp.extra2 = &unres_qlen_max;
103452@@ -2868,7 +2868,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
103453 void __user *buffer,
103454 size_t *lenp, loff_t *ppos)
103455 {
103456- struct ctl_table tmp = *ctl;
103457+ ctl_table_no_const tmp = *ctl;
103458 int ret;
103459
103460 tmp.extra1 = &zero;
103461diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
103462index 2bf8329..2eb1423 100644
103463--- a/net/core/net-procfs.c
103464+++ b/net/core/net-procfs.c
103465@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
103466 struct rtnl_link_stats64 temp;
103467 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
103468
103469- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
103470+ if (gr_proc_is_restricted())
103471+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
103472+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
103473+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
103474+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
103475+ else
103476+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
103477 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
103478 dev->name, stats->rx_bytes, stats->rx_packets,
103479 stats->rx_errors,
103480@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
103481 return 0;
103482 }
103483
103484-static const struct seq_operations dev_seq_ops = {
103485+const struct seq_operations dev_seq_ops = {
103486 .start = dev_seq_start,
103487 .next = dev_seq_next,
103488 .stop = dev_seq_stop,
103489@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
103490
103491 static int softnet_seq_open(struct inode *inode, struct file *file)
103492 {
103493- return seq_open(file, &softnet_seq_ops);
103494+ return seq_open_restrict(file, &softnet_seq_ops);
103495 }
103496
103497 static const struct file_operations softnet_seq_fops = {
103498@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
103499 else
103500 seq_printf(seq, "%04x", ntohs(pt->type));
103501
103502+#ifdef CONFIG_GRKERNSEC_HIDESYM
103503+ seq_printf(seq, " %-8s %pf\n",
103504+ pt->dev ? pt->dev->name : "", NULL);
103505+#else
103506 seq_printf(seq, " %-8s %pf\n",
103507 pt->dev ? pt->dev->name : "", pt->func);
103508+#endif
103509 }
103510
103511 return 0;
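Under GRKERNSEC_HIDESYM the ptype handler address above is compiled out entirely (printed as NULL); the nearest mainline analogue, used in the vmalloc s_show() hunk earlier, is %pK, which zeroes the pointer at run time whenever kptr_restrict denies the reader:

#include <linux/seq_file.h>

static void show_caller(struct seq_file *seq, const void *caller)
{
        /* %pK honours kptr_restrict: unprivileged readers see zeros */
        seq_printf(seq, " caller=%pK\n", caller);
}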
103512diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
103513index f2aa73b..0d1a1ea 100644
103514--- a/net/core/net-sysfs.c
103515+++ b/net/core/net-sysfs.c
103516@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
103517 {
103518 struct net_device *netdev = to_net_dev(dev);
103519 return sprintf(buf, fmt_dec,
103520- atomic_read(&netdev->carrier_changes));
103521+ atomic_read_unchecked(&netdev->carrier_changes));
103522 }
103523 static DEVICE_ATTR_RO(carrier_changes);
103524
103525diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
103526index 70d3450..eb7c528 100644
103527--- a/net/core/net_namespace.c
103528+++ b/net/core/net_namespace.c
103529@@ -663,7 +663,7 @@ static int __register_pernet_operations(struct list_head *list,
103530 int error;
103531 LIST_HEAD(net_exit_list);
103532
103533- list_add_tail(&ops->list, list);
103534+ pax_list_add_tail((struct list_head *)&ops->list, list);
103535 if (ops->init || (ops->id && ops->size)) {
103536 for_each_net(net) {
103537 error = ops_init(ops, net);
103538@@ -676,7 +676,7 @@ static int __register_pernet_operations(struct list_head *list,
103539
103540 out_undo:
103541 /* If I have an error cleanup all namespaces I initialized */
103542- list_del(&ops->list);
103543+ pax_list_del((struct list_head *)&ops->list);
103544 ops_exit_list(ops, &net_exit_list);
103545 ops_free_list(ops, &net_exit_list);
103546 return error;
103547@@ -687,7 +687,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
103548 struct net *net;
103549 LIST_HEAD(net_exit_list);
103550
103551- list_del(&ops->list);
103552+ pax_list_del((struct list_head *)&ops->list);
103553 for_each_net(net)
103554 list_add_tail(&net->exit_list, &net_exit_list);
103555 ops_exit_list(ops, &net_exit_list);
103556@@ -821,7 +821,7 @@ int register_pernet_device(struct pernet_operations *ops)
103557 mutex_lock(&net_mutex);
103558 error = register_pernet_operations(&pernet_list, ops);
103559 if (!error && (first_device == &pernet_list))
103560- first_device = &ops->list;
103561+ first_device = (struct list_head *)&ops->list;
103562 mutex_unlock(&net_mutex);
103563 return error;
103564 }
103565diff --git a/net/core/netpoll.c b/net/core/netpoll.c
103566index c126a87..10ad89d 100644
103567--- a/net/core/netpoll.c
103568+++ b/net/core/netpoll.c
103569@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
103570 struct udphdr *udph;
103571 struct iphdr *iph;
103572 struct ethhdr *eth;
103573- static atomic_t ip_ident;
103574+ static atomic_unchecked_t ip_ident;
103575 struct ipv6hdr *ip6h;
103576
103577 udp_len = len + sizeof(*udph);
103578@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
103579 put_unaligned(0x45, (unsigned char *)iph);
103580 iph->tos = 0;
103581 put_unaligned(htons(ip_len), &(iph->tot_len));
103582- iph->id = htons(atomic_inc_return(&ip_ident));
103583+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
103584 iph->frag_off = 0;
103585 iph->ttl = 64;
103586 iph->protocol = IPPROTO_UDP;
103587diff --git a/net/core/pktgen.c b/net/core/pktgen.c
103588index 508155b..fad080f 100644
103589--- a/net/core/pktgen.c
103590+++ b/net/core/pktgen.c
103591@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
103592 pn->net = net;
103593 INIT_LIST_HEAD(&pn->pktgen_threads);
103594 pn->pktgen_exiting = false;
103595- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
103596+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
103597 if (!pn->proc_dir) {
103598 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
103599 return -ENODEV;
103600diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
103601index 7ebed55..378bf34 100644
103602--- a/net/core/rtnetlink.c
103603+++ b/net/core/rtnetlink.c
103604@@ -61,7 +61,7 @@ struct rtnl_link {
103605 rtnl_doit_func doit;
103606 rtnl_dumpit_func dumpit;
103607 rtnl_calcit_func calcit;
103608-};
103609+} __no_const;
103610
103611 static DEFINE_MUTEX(rtnl_mutex);
103612
103613@@ -307,10 +307,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
103614 * to use the ops for creating device. So do not
103615 * fill up dellink as well. That disables rtnl_dellink.
103616 */
103617- if (ops->setup && !ops->dellink)
103618- ops->dellink = unregister_netdevice_queue;
103619+ if (ops->setup && !ops->dellink) {
103620+ pax_open_kernel();
103621+ *(void **)&ops->dellink = unregister_netdevice_queue;
103622+ pax_close_kernel();
103623+ }
103624
103625- list_add_tail(&ops->list, &link_ops);
103626+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
103627 return 0;
103628 }
103629 EXPORT_SYMBOL_GPL(__rtnl_link_register);
103630@@ -357,7 +360,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
103631 for_each_net(net) {
103632 __rtnl_kill_links(net, ops);
103633 }
103634- list_del(&ops->list);
103635+ pax_list_del((struct list_head *)&ops->list);
103636 }
103637 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
103638
103639@@ -1047,7 +1050,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
103640 (dev->ifalias &&
103641 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
103642 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
103643- atomic_read(&dev->carrier_changes)))
103644+ atomic_read_unchecked(&dev->carrier_changes)))
103645 goto nla_put_failure;
103646
103647 if (1) {
103648diff --git a/net/core/scm.c b/net/core/scm.c
103649index 3b6899b..cf36238 100644
103650--- a/net/core/scm.c
103651+++ b/net/core/scm.c
103652@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
103653 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
103654 {
103655 struct cmsghdr __user *cm
103656- = (__force struct cmsghdr __user *)msg->msg_control;
103657+ = (struct cmsghdr __force_user *)msg->msg_control;
103658 struct cmsghdr cmhdr;
103659 int cmlen = CMSG_LEN(len);
103660 int err;
103661@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
103662 err = -EFAULT;
103663 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
103664 goto out;
103665- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
103666+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
103667 goto out;
103668 cmlen = CMSG_SPACE(len);
103669 if (msg->msg_controllen < cmlen)
103670@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
103671 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
103672 {
103673 struct cmsghdr __user *cm
103674- = (__force struct cmsghdr __user*)msg->msg_control;
103675+ = (struct cmsghdr __force_user *)msg->msg_control;
103676
103677 int fdmax = 0;
103678 int fdnum = scm->fp->count;
103679@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
103680 if (fdnum < fdmax)
103681 fdmax = fdnum;
103682
103683- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
103684+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
103685 i++, cmfptr++)
103686 {
103687 struct socket *sock;
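
The scm.c changes are annotation-only: a bare __force tells sparse to drop address-space checking entirely, while the patch's __force_user and __force_kernel keep the check but assert which space the pointer really targets, which matters under PaX UDEREF where user and kernel pointers are no longer interchangeable. On a stock kernel the closest spelling is __force combined with __user, sketched here (copy_out is an illustrative name):

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* Sketch: re-assert that an opaque pointer is a *userland* pointer
	 * before handing it to copy_to_user().  The patch's __force_user is
	 * this pair expressed as a single sparse annotation. */
	static int copy_out(const void *kbuf, void *opaque_uptr, size_t len)
	{
		void __user *uptr = (__force void __user *)opaque_uptr;

		return copy_to_user(uptr, kbuf, len) ? -EFAULT : 0;
	}
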
103688diff --git a/net/core/skbuff.c b/net/core/skbuff.c
103689index e9f9a15..6eb024e 100644
103690--- a/net/core/skbuff.c
103691+++ b/net/core/skbuff.c
103692@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
103693 __wsum skb_checksum(const struct sk_buff *skb, int offset,
103694 int len, __wsum csum)
103695 {
103696- const struct skb_checksum_ops ops = {
103697+ static const struct skb_checksum_ops ops = {
103698 .update = csum_partial_ext,
103699 .combine = csum_block_add_ext,
103700 };
103701@@ -3379,12 +3379,14 @@ void __init skb_init(void)
103702 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
103703 sizeof(struct sk_buff),
103704 0,
103705- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
103706+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
103707+ SLAB_NO_SANITIZE,
103708 NULL);
103709 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
103710 sizeof(struct sk_buff_fclones),
103711 0,
103712- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
103713+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
103714+ SLAB_NO_SANITIZE,
103715 NULL);
103716 }
103717
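
Two separate hardenings in skbuff.c. Making skb_checksum()'s ops table static const emits it once into .rodata instead of rebuilding it in writable stack memory on every call, so a stack overwrite can no longer redirect its function pointers. SLAB_NO_SANITIZE is a grsecurity flag exempting the extremely hot skb head caches from sanitization (scrubbing objects on free), presumably a performance trade for caches churned on every packet. The static-const idiom, with illustrative names:

	#include <linux/types.h>

	struct checksum_ops {
		u32 (*update)(const void *p, int len, u32 csum);
	};

	static u32 update_stub(const void *p, int len, u32 csum)
	{
		return csum;	/* placeholder body for the sketch */
	}

	/* One read-only instance in .rodata, not a per-call stack copy
	 * whose function pointers sit in writable memory: */
	static const struct checksum_ops ops = {
		.update = update_stub,
	};
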
103718diff --git a/net/core/sock.c b/net/core/sock.c
103719index 71e3e5f..ab90920 100644
103720--- a/net/core/sock.c
103721+++ b/net/core/sock.c
103722@@ -443,7 +443,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
103723 struct sk_buff_head *list = &sk->sk_receive_queue;
103724
103725 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
103726- atomic_inc(&sk->sk_drops);
103727+ atomic_inc_unchecked(&sk->sk_drops);
103728 trace_sock_rcvqueue_full(sk, skb);
103729 return -ENOMEM;
103730 }
103731@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
103732 return err;
103733
103734 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
103735- atomic_inc(&sk->sk_drops);
103736+ atomic_inc_unchecked(&sk->sk_drops);
103737 return -ENOBUFS;
103738 }
103739
103740@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
103741 skb_dst_force(skb);
103742
103743 spin_lock_irqsave(&list->lock, flags);
103744- skb->dropcount = atomic_read(&sk->sk_drops);
103745+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
103746 __skb_queue_tail(list, skb);
103747 spin_unlock_irqrestore(&list->lock, flags);
103748
103749@@ -486,7 +486,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
103750 skb->dev = NULL;
103751
103752 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
103753- atomic_inc(&sk->sk_drops);
103754+ atomic_inc_unchecked(&sk->sk_drops);
103755 goto discard_and_relse;
103756 }
103757 if (nested)
103758@@ -504,7 +504,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
103759 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
103760 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
103761 bh_unlock_sock(sk);
103762- atomic_inc(&sk->sk_drops);
103763+ atomic_inc_unchecked(&sk->sk_drops);
103764 goto discard_and_relse;
103765 }
103766
103767@@ -910,6 +910,7 @@ set_rcvbuf:
103768 }
103769 break;
103770
103771+#ifndef GRKERNSEC_BPF_HARDEN
103772 case SO_ATTACH_BPF:
103773 ret = -EINVAL;
103774 if (optlen == sizeof(u32)) {
103775@@ -922,7 +923,7 @@ set_rcvbuf:
103776 ret = sk_attach_bpf(ufd, sk);
103777 }
103778 break;
103779-
103780+#endif
103781 case SO_DETACH_FILTER:
103782 ret = sk_detach_filter(sk);
103783 break;
103784@@ -1026,12 +1027,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
103785 struct timeval tm;
103786 } v;
103787
103788- int lv = sizeof(int);
103789- int len;
103790+ unsigned int lv = sizeof(int);
103791+ unsigned int len;
103792
103793 if (get_user(len, optlen))
103794 return -EFAULT;
103795- if (len < 0)
103796+ if (len > INT_MAX)
103797 return -EINVAL;
103798
103799 memset(&v, 0, sizeof(v));
103800@@ -1169,11 +1170,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
103801
103802 case SO_PEERNAME:
103803 {
103804- char address[128];
103805+ char address[_K_SS_MAXSIZE];
103806
103807 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
103808 return -ENOTCONN;
103809- if (lv < len)
103810+ if (lv < len || sizeof address < len)
103811 return -EINVAL;
103812 if (copy_to_user(optval, address, len))
103813 return -EFAULT;
103814@@ -1258,7 +1259,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
103815
103816 if (len > lv)
103817 len = lv;
103818- if (copy_to_user(optval, &v, len))
103819+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
103820 return -EFAULT;
103821 lenout:
103822 if (put_user(len, optlen))
103823@@ -2375,7 +2376,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
103824 */
103825 smp_wmb();
103826 atomic_set(&sk->sk_refcnt, 1);
103827- atomic_set(&sk->sk_drops, 0);
103828+ atomic_set_unchecked(&sk->sk_drops, 0);
103829 }
103830 EXPORT_SYMBOL(sock_init_data);
103831
103832@@ -2503,6 +2504,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
103833 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
103834 int level, int type)
103835 {
103836+ struct sock_extended_err ee;
103837 struct sock_exterr_skb *serr;
103838 struct sk_buff *skb;
103839 int copied, err;
103840@@ -2524,7 +2526,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
103841 sock_recv_timestamp(msg, sk, skb);
103842
103843 serr = SKB_EXT_ERR(skb);
103844- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
103845+ ee = serr->ee;
103846+ put_cmsg(msg, level, type, sizeof ee, &ee);
103847
103848 msg->msg_flags |= MSG_ERRQUEUE;
103849 err = copied;
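
The sock.c hunks apply one rule to the sockopt paths: a userland-supplied length must never drive a copy past the kernel buffer. len and lv become unsigned with an explicit > INT_MAX reject, SO_PEERNAME gets a buffer sized to _K_SS_MAXSIZE plus a recheck, the final copy refuses len > sizeof(v), and sock_recv_errqueue() bounces serr->ee through a stack copy so put_cmsg() can only ever expose those bytes. (The sk_drops conversions to atomic_*_unchecked opt a statistics counter out of PaX REFCOUNT overflow detection, where wraparound is harmless.) The clamping pattern as a hedged sketch; bounded_copy_out is an illustrative name:

	#include <linux/uaccess.h>
	#include <linux/kernel.h>
	#include <linux/errno.h>

	static int bounded_copy_out(void __user *uval, unsigned int __user *ulen,
				    const void *kbuf, unsigned int datalen,
				    unsigned int bufsize)
	{
		unsigned int len;

		if (get_user(len, ulen))
			return -EFAULT;
		if (len > INT_MAX)	/* what a signed check called negative */
			return -EINVAL;
		if (len > datalen)
			len = datalen;
		if (len > bufsize ||	/* never run past the kernel buffer */
		    copy_to_user(uval, kbuf, len))
			return -EFAULT;
		return put_user(len, ulen) ? -EFAULT : 0;
	}
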
103850diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
103851index ad704c7..ca48aff 100644
103852--- a/net/core/sock_diag.c
103853+++ b/net/core/sock_diag.c
103854@@ -9,26 +9,33 @@
103855 #include <linux/inet_diag.h>
103856 #include <linux/sock_diag.h>
103857
103858-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
103859+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
103860 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
103861 static DEFINE_MUTEX(sock_diag_table_mutex);
103862
103863 int sock_diag_check_cookie(void *sk, __u32 *cookie)
103864 {
103865+#ifndef CONFIG_GRKERNSEC_HIDESYM
103866 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
103867 cookie[1] != INET_DIAG_NOCOOKIE) &&
103868 ((u32)(unsigned long)sk != cookie[0] ||
103869 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
103870 return -ESTALE;
103871 else
103872+#endif
103873 return 0;
103874 }
103875 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
103876
103877 void sock_diag_save_cookie(void *sk, __u32 *cookie)
103878 {
103879+#ifdef CONFIG_GRKERNSEC_HIDESYM
103880+ cookie[0] = 0;
103881+ cookie[1] = 0;
103882+#else
103883 cookie[0] = (u32)(unsigned long)sk;
103884 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
103885+#endif
103886 }
103887 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
103888
103889@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
103890 mutex_lock(&sock_diag_table_mutex);
103891 if (sock_diag_handlers[hndl->family])
103892 err = -EBUSY;
103893- else
103894+ else {
103895+ pax_open_kernel();
103896 sock_diag_handlers[hndl->family] = hndl;
103897+ pax_close_kernel();
103898+ }
103899 mutex_unlock(&sock_diag_table_mutex);
103900
103901 return err;
103902@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
103903
103904 mutex_lock(&sock_diag_table_mutex);
103905 BUG_ON(sock_diag_handlers[family] != hnld);
103906+ pax_open_kernel();
103907 sock_diag_handlers[family] = NULL;
103908+ pax_close_kernel();
103909 mutex_unlock(&sock_diag_table_mutex);
103910 }
103911 EXPORT_SYMBOL_GPL(sock_diag_unregister);
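
Besides the now-familiar pax_open_kernel() bracket around the __read_only handler table, the cookie change is an information-leak fix: the diag cookie is literally the socket's kernel address split across two u32s, so under CONFIG_GRKERNSEC_HIDESYM it is zeroed (and the staleness check skipped) rather than disclosing heap layout to userland. The odd-looking (x >> 31) >> 1 in the retained code is the portable spelling of a 32-bit right shift:

	#include <linux/types.h>

	/* On 32-bit, unsigned long is 32 bits wide and "x >> 32" is
	 * undefined behaviour (shift count equals the type width); the
	 * split shift is well defined and simply yields 0 there. */
	static inline u32 upper_half(unsigned long x)
	{
		return (u32)((x >> 31) >> 1);
	}
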
103912diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
103913index 8ce351f..2c388f7 100644
103914--- a/net/core/sysctl_net_core.c
103915+++ b/net/core/sysctl_net_core.c
103916@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
103917 {
103918 unsigned int orig_size, size;
103919 int ret, i;
103920- struct ctl_table tmp = {
103921+ ctl_table_no_const tmp = {
103922 .data = &size,
103923 .maxlen = sizeof(size),
103924 .mode = table->mode
103925@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
103926 void __user *buffer, size_t *lenp, loff_t *ppos)
103927 {
103928 char id[IFNAMSIZ];
103929- struct ctl_table tbl = {
103930+ ctl_table_no_const tbl = {
103931 .data = id,
103932 .maxlen = IFNAMSIZ,
103933 };
103934@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
103935 static int proc_do_rss_key(struct ctl_table *table, int write,
103936 void __user *buffer, size_t *lenp, loff_t *ppos)
103937 {
103938- struct ctl_table fake_table;
103939+ ctl_table_no_const fake_table;
103940 char buf[NETDEV_RSS_KEY_LEN * 3];
103941
103942 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
103943@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
103944 .mode = 0444,
103945 .proc_handler = proc_do_rss_key,
103946 },
103947-#ifdef CONFIG_BPF_JIT
103948+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
103949 {
103950 .procname = "bpf_jit_enable",
103951 .data = &bpf_jit_enable,
103952@@ -411,13 +411,12 @@ static struct ctl_table netns_core_table[] = {
103953
103954 static __net_init int sysctl_core_net_init(struct net *net)
103955 {
103956- struct ctl_table *tbl;
103957+ ctl_table_no_const *tbl = NULL;
103958
103959 net->core.sysctl_somaxconn = SOMAXCONN;
103960
103961- tbl = netns_core_table;
103962 if (!net_eq(net, &init_net)) {
103963- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
103964+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
103965 if (tbl == NULL)
103966 goto err_dup;
103967
103968@@ -427,17 +426,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
103969 if (net->user_ns != &init_user_ns) {
103970 tbl[0].procname = NULL;
103971 }
103972- }
103973-
103974- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
103975+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
103976+ } else
103977+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
103978 if (net->core.sysctl_hdr == NULL)
103979 goto err_reg;
103980
103981 return 0;
103982
103983 err_reg:
103984- if (tbl != netns_core_table)
103985- kfree(tbl);
103986+ kfree(tbl);
103987 err_dup:
103988 return -ENOMEM;
103989 }
103990@@ -452,7 +450,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
103991 kfree(tbl);
103992 }
103993
103994-static __net_initdata struct pernet_operations sysctl_core_ops = {
103995+static __net_initconst struct pernet_operations sysctl_core_ops = {
103996 .init = sysctl_core_net_init,
103997 .exit = sysctl_core_net_exit,
103998 };
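
The sysctl_net_core.c rework is the template for a pattern repeated for the rest of the patch: the static ctl_table stays const so the constify plugin can protect it, only non-init namespaces get a mutable kmemdup() copy (typed with the patch-provided ctl_table_no_const), init_net registers the pristine table directly, and the error path leans on kfree(NULL) being a no-op. The shape, sketched with illustrative names and assuming ctl_table_no_const from this patch:

	#include <linux/slab.h>
	#include <linux/sysctl.h>
	#include <linux/errno.h>
	#include <net/net_namespace.h>

	static struct ctl_table template_table[] = {
		{ }	/* terminator; real entries omitted in this sketch */
	};

	static int __net_init example_net_init(struct net *net)
	{
		ctl_table_no_const *tbl = NULL;	/* mutable-copy type from the patch */
		struct ctl_table_header *hdr;

		if (!net_eq(net, &init_net)) {
			tbl = kmemdup(template_table, sizeof(template_table),
				      GFP_KERNEL);
			if (!tbl)
				return -ENOMEM;
			/* per-netns fixups of tbl[i].data would go here */
			hdr = register_net_sysctl(net, "net/example", tbl);
		} else {
			hdr = register_net_sysctl(net, "net/example",
						  template_table);
		}
		if (!hdr) {
			kfree(tbl);	/* kfree(NULL) is a no-op */
			return -ENOMEM;
		}
		return 0;
	}
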
103999diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
104000index 8102286..a0c2755 100644
104001--- a/net/decnet/af_decnet.c
104002+++ b/net/decnet/af_decnet.c
104003@@ -466,6 +466,7 @@ static struct proto dn_proto = {
104004 .sysctl_rmem = sysctl_decnet_rmem,
104005 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
104006 .obj_size = sizeof(struct dn_sock),
104007+ .slab_flags = SLAB_USERCOPY,
104008 };
104009
104010 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
104011diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
104012index b2c26b0..41f803e 100644
104013--- a/net/decnet/dn_dev.c
104014+++ b/net/decnet/dn_dev.c
104015@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
104016 .extra1 = &min_t3,
104017 .extra2 = &max_t3
104018 },
104019- {0}
104020+ { }
104021 },
104022 };
104023
104024diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
104025index 5325b54..a0d4d69 100644
104026--- a/net/decnet/sysctl_net_decnet.c
104027+++ b/net/decnet/sysctl_net_decnet.c
104028@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
104029
104030 if (len > *lenp) len = *lenp;
104031
104032- if (copy_to_user(buffer, addr, len))
104033+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
104034 return -EFAULT;
104035
104036 *lenp = len;
104037@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
104038
104039 if (len > *lenp) len = *lenp;
104040
104041- if (copy_to_user(buffer, devname, len))
104042+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
104043 return -EFAULT;
104044
104045 *lenp = len;
104046diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
104047index a2c7e4c..3dc9f67 100644
104048--- a/net/hsr/hsr_netlink.c
104049+++ b/net/hsr/hsr_netlink.c
104050@@ -102,7 +102,7 @@ nla_put_failure:
104051 return -EMSGSIZE;
104052 }
104053
104054-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
104055+static struct rtnl_link_ops hsr_link_ops = {
104056 .kind = "hsr",
104057 .maxtype = IFLA_HSR_MAX,
104058 .policy = hsr_policy,
104059diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
104060index 055fbb7..c0dbe60 100644
104061--- a/net/ieee802154/6lowpan/core.c
104062+++ b/net/ieee802154/6lowpan/core.c
104063@@ -217,7 +217,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
104064 dev_put(real_dev);
104065 }
104066
104067-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
104068+static struct rtnl_link_ops lowpan_link_ops = {
104069 .kind = "lowpan",
104070 .priv_size = sizeof(struct lowpan_dev_info),
104071 .setup = lowpan_setup,
104072diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
104073index f46e4d1..30231f1 100644
104074--- a/net/ieee802154/6lowpan/reassembly.c
104075+++ b/net/ieee802154/6lowpan/reassembly.c
104076@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
104077
104078 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
104079 {
104080- struct ctl_table *table;
104081+ ctl_table_no_const *table = NULL;
104082 struct ctl_table_header *hdr;
104083 struct netns_ieee802154_lowpan *ieee802154_lowpan =
104084 net_ieee802154_lowpan(net);
104085
104086- table = lowpan_frags_ns_ctl_table;
104087 if (!net_eq(net, &init_net)) {
104088- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
104089+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
104090 GFP_KERNEL);
104091 if (table == NULL)
104092 goto err_alloc;
104093@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
104094 /* Don't export sysctls to unprivileged users */
104095 if (net->user_ns != &init_user_ns)
104096 table[0].procname = NULL;
104097- }
104098-
104099- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
104100+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
104101+ } else
104102+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
104103 if (hdr == NULL)
104104 goto err_reg;
104105
104106@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
104107 return 0;
104108
104109 err_reg:
104110- if (!net_eq(net, &init_net))
104111- kfree(table);
104112+ kfree(table);
104113 err_alloc:
104114 return -ENOMEM;
104115 }
104116diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
104117index d2e49ba..f78e8aa 100644
104118--- a/net/ipv4/af_inet.c
104119+++ b/net/ipv4/af_inet.c
104120@@ -1390,7 +1390,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
104121 return ip_recv_error(sk, msg, len, addr_len);
104122 #if IS_ENABLED(CONFIG_IPV6)
104123 if (sk->sk_family == AF_INET6)
104124- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
104125+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
104126 #endif
104127 return -EINVAL;
104128 }
104129diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
104130index 3a8985c..9d2a870 100644
104131--- a/net/ipv4/devinet.c
104132+++ b/net/ipv4/devinet.c
104133@@ -69,7 +69,8 @@
104134
104135 static struct ipv4_devconf ipv4_devconf = {
104136 .data = {
104137- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
104138+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
104139+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
104140 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
104141 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
104142 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
104143@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
104144
104145 static struct ipv4_devconf ipv4_devconf_dflt = {
104146 .data = {
104147- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
104148+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
104149+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
104150 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
104151 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
104152 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
104153@@ -1549,7 +1551,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
104154 idx = 0;
104155 head = &net->dev_index_head[h];
104156 rcu_read_lock();
104157- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
104158+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
104159 net->dev_base_seq;
104160 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104161 if (idx < s_idx)
104162@@ -1868,7 +1870,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
104163 idx = 0;
104164 head = &net->dev_index_head[h];
104165 rcu_read_lock();
104166- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
104167+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
104168 net->dev_base_seq;
104169 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104170 if (idx < s_idx)
104171@@ -2103,7 +2105,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
104172 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
104173 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
104174
104175-static struct devinet_sysctl_table {
104176+static const struct devinet_sysctl_table {
104177 struct ctl_table_header *sysctl_header;
104178 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
104179 } devinet_sysctl = {
104180@@ -2235,7 +2237,7 @@ static __net_init int devinet_init_net(struct net *net)
104181 int err;
104182 struct ipv4_devconf *all, *dflt;
104183 #ifdef CONFIG_SYSCTL
104184- struct ctl_table *tbl = ctl_forward_entry;
104185+ ctl_table_no_const *tbl = NULL;
104186 struct ctl_table_header *forw_hdr;
104187 #endif
104188
104189@@ -2253,7 +2255,7 @@ static __net_init int devinet_init_net(struct net *net)
104190 goto err_alloc_dflt;
104191
104192 #ifdef CONFIG_SYSCTL
104193- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
104194+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
104195 if (tbl == NULL)
104196 goto err_alloc_ctl;
104197
104198@@ -2273,7 +2275,10 @@ static __net_init int devinet_init_net(struct net *net)
104199 goto err_reg_dflt;
104200
104201 err = -ENOMEM;
104202- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
104203+ if (!net_eq(net, &init_net))
104204+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
104205+ else
104206+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
104207 if (forw_hdr == NULL)
104208 goto err_reg_ctl;
104209 net->ipv4.forw_hdr = forw_hdr;
104210@@ -2289,8 +2294,7 @@ err_reg_ctl:
104211 err_reg_dflt:
104212 __devinet_sysctl_unregister(all);
104213 err_reg_all:
104214- if (tbl != ctl_forward_entry)
104215- kfree(tbl);
104216+ kfree(tbl);
104217 err_alloc_ctl:
104218 #endif
104219 if (dflt != &ipv4_devconf_dflt)
104220diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
104221index 23b9b3e..60cf0c4 100644
104222--- a/net/ipv4/fib_frontend.c
104223+++ b/net/ipv4/fib_frontend.c
104224@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
104225 #ifdef CONFIG_IP_ROUTE_MULTIPATH
104226 fib_sync_up(dev);
104227 #endif
104228- atomic_inc(&net->ipv4.dev_addr_genid);
104229+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
104230 rt_cache_flush(dev_net(dev));
104231 break;
104232 case NETDEV_DOWN:
104233 fib_del_ifaddr(ifa, NULL);
104234- atomic_inc(&net->ipv4.dev_addr_genid);
104235+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
104236 if (ifa->ifa_dev->ifa_list == NULL) {
104237 /* Last address was deleted from this interface.
104238 * Disable IP.
104239@@ -1063,7 +1063,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
104240 #ifdef CONFIG_IP_ROUTE_MULTIPATH
104241 fib_sync_up(dev);
104242 #endif
104243- atomic_inc(&net->ipv4.dev_addr_genid);
104244+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
104245 rt_cache_flush(net);
104246 break;
104247 case NETDEV_DOWN:
104248diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
104249index 1e2090e..351a724 100644
104250--- a/net/ipv4/fib_semantics.c
104251+++ b/net/ipv4/fib_semantics.c
104252@@ -753,7 +753,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
104253 nh->nh_saddr = inet_select_addr(nh->nh_dev,
104254 nh->nh_gw,
104255 nh->nh_parent->fib_scope);
104256- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
104257+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
104258
104259 return nh->nh_saddr;
104260 }
104261diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
104262index ff069f6..335e752 100644
104263--- a/net/ipv4/fou.c
104264+++ b/net/ipv4/fou.c
104265@@ -771,12 +771,12 @@ EXPORT_SYMBOL(gue_build_header);
104266
104267 #ifdef CONFIG_NET_FOU_IP_TUNNELS
104268
104269-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
104270+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
104271 .encap_hlen = fou_encap_hlen,
104272 .build_header = fou_build_header,
104273 };
104274
104275-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
104276+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
104277 .encap_hlen = gue_encap_hlen,
104278 .build_header = gue_build_header,
104279 };
104280diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
104281index 9111a4e..3576905 100644
104282--- a/net/ipv4/inet_hashtables.c
104283+++ b/net/ipv4/inet_hashtables.c
104284@@ -18,6 +18,7 @@
104285 #include <linux/sched.h>
104286 #include <linux/slab.h>
104287 #include <linux/wait.h>
104288+#include <linux/security.h>
104289
104290 #include <net/inet_connection_sock.h>
104291 #include <net/inet_hashtables.h>
104292@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
104293 return inet_ehashfn(net, laddr, lport, faddr, fport);
104294 }
104295
104296+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
104297+
104298 /*
104299 * Allocate and initialize a new local port bind bucket.
104300 * The bindhash mutex for snum's hash chain must be held here.
104301@@ -554,6 +557,8 @@ ok:
104302 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
104303 spin_unlock(&head->lock);
104304
104305+ gr_update_task_in_ip_table(inet_sk(sk));
104306+
104307 if (tw) {
104308 inet_twsk_deschedule(tw, death_row);
104309 while (twrefcnt) {
104310diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
104311index 241afd7..31b95d5 100644
104312--- a/net/ipv4/inetpeer.c
104313+++ b/net/ipv4/inetpeer.c
104314@@ -461,7 +461,7 @@ relookup:
104315 if (p) {
104316 p->daddr = *daddr;
104317 atomic_set(&p->refcnt, 1);
104318- atomic_set(&p->rid, 0);
104319+ atomic_set_unchecked(&p->rid, 0);
104320 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
104321 p->rate_tokens = 0;
104322 /* 60*HZ is arbitrary, but chosen enough high so that the first
104323diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
104324index 145a50c..5dd8cc5 100644
104325--- a/net/ipv4/ip_fragment.c
104326+++ b/net/ipv4/ip_fragment.c
104327@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
104328 return 0;
104329
104330 start = qp->rid;
104331- end = atomic_inc_return(&peer->rid);
104332+ end = atomic_inc_return_unchecked(&peer->rid);
104333 qp->rid = end;
104334
104335 rc = qp->q.fragments && (end - start) > max;
104336@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
104337
104338 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
104339 {
104340- struct ctl_table *table;
104341+ ctl_table_no_const *table = NULL;
104342 struct ctl_table_header *hdr;
104343
104344- table = ip4_frags_ns_ctl_table;
104345 if (!net_eq(net, &init_net)) {
104346- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
104347+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
104348 if (table == NULL)
104349 goto err_alloc;
104350
104351@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
104352 /* Don't export sysctls to unprivileged users */
104353 if (net->user_ns != &init_user_ns)
104354 table[0].procname = NULL;
104355- }
104356+ hdr = register_net_sysctl(net, "net/ipv4", table);
104357+ } else
104358+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
104359
104360- hdr = register_net_sysctl(net, "net/ipv4", table);
104361 if (hdr == NULL)
104362 goto err_reg;
104363
104364@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
104365 return 0;
104366
104367 err_reg:
104368- if (!net_eq(net, &init_net))
104369- kfree(table);
104370+ kfree(table);
104371 err_alloc:
104372 return -ENOMEM;
104373 }
104374diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
104375index 6207275f..00323a2 100644
104376--- a/net/ipv4/ip_gre.c
104377+++ b/net/ipv4/ip_gre.c
104378@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
104379 module_param(log_ecn_error, bool, 0644);
104380 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
104381
104382-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
104383+static struct rtnl_link_ops ipgre_link_ops;
104384 static int ipgre_tunnel_init(struct net_device *dev);
104385
104386 static int ipgre_net_id __read_mostly;
104387@@ -817,7 +817,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
104388 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
104389 };
104390
104391-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
104392+static struct rtnl_link_ops ipgre_link_ops = {
104393 .kind = "gre",
104394 .maxtype = IFLA_GRE_MAX,
104395 .policy = ipgre_policy,
104396@@ -832,7 +832,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
104397 .get_link_net = ip_tunnel_get_link_net,
104398 };
104399
104400-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
104401+static struct rtnl_link_ops ipgre_tap_ops = {
104402 .kind = "gretap",
104403 .maxtype = IFLA_GRE_MAX,
104404 .policy = ipgre_policy,
104405diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
104406index 3d4da2c..40f9c29 100644
104407--- a/net/ipv4/ip_input.c
104408+++ b/net/ipv4/ip_input.c
104409@@ -147,6 +147,10 @@
104410 #include <linux/mroute.h>
104411 #include <linux/netlink.h>
104412
104413+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104414+extern int grsec_enable_blackhole;
104415+#endif
104416+
104417 /*
104418 * Process Router Attention IP option (RFC 2113)
104419 */
104420@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
104421 if (!raw) {
104422 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
104423 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
104424+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104425+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104426+#endif
104427 icmp_send(skb, ICMP_DEST_UNREACH,
104428 ICMP_PROT_UNREACH, 0);
104429 }
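
This ip_input.c hunk is the first of many GRKERNSEC_BLACKHOLE guards: wherever the stack would answer an unsolicited packet (an ICMP unreachable here, TCP resets later in the patch), the reply is suppressed unless the grsec_enable_blackhole sysctl is off or the packet arrived on loopback, so local diagnostics keep working while remote port and protocol scans get silence. The recurring shape, as a sketch:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/icmp.h>
	#include <net/icmp.h>

	#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	extern int grsec_enable_blackhole;
	#endif

	static void maybe_reject(struct sk_buff *skb)
	{
	#ifdef CONFIG_GRKERNSEC_BLACKHOLE
		/* silent toward remote probes; loopback still gets errors */
		if (grsec_enable_blackhole && !(skb->dev->flags & IFF_LOOPBACK))
			return;
	#endif
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	}
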
104430diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
104431index 5cd9927..8610b9f 100644
104432--- a/net/ipv4/ip_sockglue.c
104433+++ b/net/ipv4/ip_sockglue.c
104434@@ -1254,7 +1254,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
104435 len = min_t(unsigned int, len, opt->optlen);
104436 if (put_user(len, optlen))
104437 return -EFAULT;
104438- if (copy_to_user(optval, opt->__data, len))
104439+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
104440+ copy_to_user(optval, opt->__data, len))
104441 return -EFAULT;
104442 return 0;
104443 }
104444@@ -1388,7 +1389,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
104445 if (sk->sk_type != SOCK_STREAM)
104446 return -ENOPROTOOPT;
104447
104448- msg.msg_control = (__force void *) optval;
104449+ msg.msg_control = (__force_kernel void *) optval;
104450 msg.msg_controllen = len;
104451 msg.msg_flags = flags;
104452
104453diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
104454index 94efe14..1453fcc 100644
104455--- a/net/ipv4/ip_vti.c
104456+++ b/net/ipv4/ip_vti.c
104457@@ -45,7 +45,7 @@
104458 #include <net/net_namespace.h>
104459 #include <net/netns/generic.h>
104460
104461-static struct rtnl_link_ops vti_link_ops __read_mostly;
104462+static struct rtnl_link_ops vti_link_ops;
104463
104464 static int vti_net_id __read_mostly;
104465 static int vti_tunnel_init(struct net_device *dev);
104466@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
104467 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
104468 };
104469
104470-static struct rtnl_link_ops vti_link_ops __read_mostly = {
104471+static struct rtnl_link_ops vti_link_ops = {
104472 .kind = "vti",
104473 .maxtype = IFLA_VTI_MAX,
104474 .policy = vti_policy,
104475diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
104476index b26376e..fc3d733 100644
104477--- a/net/ipv4/ipconfig.c
104478+++ b/net/ipv4/ipconfig.c
104479@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
104480
104481 mm_segment_t oldfs = get_fs();
104482 set_fs(get_ds());
104483- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
104484+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
104485 set_fs(oldfs);
104486 return res;
104487 }
104488@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
104489
104490 mm_segment_t oldfs = get_fs();
104491 set_fs(get_ds());
104492- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
104493+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
104494 set_fs(oldfs);
104495 return res;
104496 }
104497@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
104498
104499 mm_segment_t oldfs = get_fs();
104500 set_fs(get_ds());
104501- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
104502+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
104503 set_fs(oldfs);
104504 return res;
104505 }
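
The ipconfig.c helpers call the regular ioctl handlers with kernel buffers, which only works inside a set_fs(get_ds()) bracket that widens the "user" segment; the patch merely re-annotates the casts as __force_user so sparse still records that the callee will treat the pointer as userland. The classic bracket, sketched (call_with_kernel_buf is an illustrative name):

	#include <linux/uaccess.h>

	static int call_with_kernel_buf(int (*handler)(void __user *arg),
					void *kbuf)
	{
		mm_segment_t oldfs = get_fs();
		int ret;

		set_fs(get_ds()); /* uaccess checks now admit kernel addresses */
		ret = handler((__force void __user *)kbuf);
		set_fs(oldfs);	  /* always restore the previous segment */
		return ret;
	}
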
104506diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
104507index 915d215..48d1db7 100644
104508--- a/net/ipv4/ipip.c
104509+++ b/net/ipv4/ipip.c
104510@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
104511 static int ipip_net_id __read_mostly;
104512
104513 static int ipip_tunnel_init(struct net_device *dev);
104514-static struct rtnl_link_ops ipip_link_ops __read_mostly;
104515+static struct rtnl_link_ops ipip_link_ops;
104516
104517 static int ipip_err(struct sk_buff *skb, u32 info)
104518 {
104519@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
104520 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
104521 };
104522
104523-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
104524+static struct rtnl_link_ops ipip_link_ops = {
104525 .kind = "ipip",
104526 .maxtype = IFLA_IPTUN_MAX,
104527 .policy = ipip_policy,
104528diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
104529index f95b6f9..2ee2097 100644
104530--- a/net/ipv4/netfilter/arp_tables.c
104531+++ b/net/ipv4/netfilter/arp_tables.c
104532@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
104533 #endif
104534
104535 static int get_info(struct net *net, void __user *user,
104536- const int *len, int compat)
104537+ int len, int compat)
104538 {
104539 char name[XT_TABLE_MAXNAMELEN];
104540 struct xt_table *t;
104541 int ret;
104542
104543- if (*len != sizeof(struct arpt_getinfo)) {
104544- duprintf("length %u != %Zu\n", *len,
104545+ if (len != sizeof(struct arpt_getinfo)) {
104546+ duprintf("length %u != %Zu\n", len,
104547 sizeof(struct arpt_getinfo));
104548 return -EINVAL;
104549 }
104550@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
104551 info.size = private->size;
104552 strcpy(info.name, name);
104553
104554- if (copy_to_user(user, &info, *len) != 0)
104555+ if (copy_to_user(user, &info, len) != 0)
104556 ret = -EFAULT;
104557 else
104558 ret = 0;
104559@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
104560
104561 switch (cmd) {
104562 case ARPT_SO_GET_INFO:
104563- ret = get_info(sock_net(sk), user, len, 1);
104564+ ret = get_info(sock_net(sk), user, *len, 1);
104565 break;
104566 case ARPT_SO_GET_ENTRIES:
104567 ret = compat_get_entries(sock_net(sk), user, len);
104568@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
104569
104570 switch (cmd) {
104571 case ARPT_SO_GET_INFO:
104572- ret = get_info(sock_net(sk), user, len, 0);
104573+ ret = get_info(sock_net(sk), user, *len, 0);
104574 break;
104575
104576 case ARPT_SO_GET_ENTRIES:
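
get_info() previously took const int *len and both checked and used *len through the pointer; the patch dereferences once at each call site and passes the length by value. Read this as single-fetch discipline: with exactly one read of the (ultimately userland-derived) length, the sizeof check and the later copy_to_user() are guaranteed to agree. That rationale is an inference here, not stated by the patch, but the general rule it illustrates is standard; struct req and fill_payload below are illustrative:

	#include <linux/uaccess.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	struct req {
		u32 len;
		u8 payload[64];
	};

	/* Fetch a user-controlled length ONCE into a local, validate the
	 * local, and use only the local: re-reading user memory between
	 * check and use is the classic double-fetch (TOCTOU) bug. */
	static int fill_payload(struct req __user *ureq,
				const void *buf, size_t bufsize)
	{
		u32 len;

		if (get_user(len, &ureq->len))
			return -EFAULT;
		if (len > sizeof(ureq->payload) || len > bufsize)
			return -EINVAL;
		return copy_to_user(ureq->payload, buf, len) ? -EFAULT : 0;
	}
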
104577diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
104578index cf5e82f..75a20f5 100644
104579--- a/net/ipv4/netfilter/ip_tables.c
104580+++ b/net/ipv4/netfilter/ip_tables.c
104581@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
104582 #endif
104583
104584 static int get_info(struct net *net, void __user *user,
104585- const int *len, int compat)
104586+ int len, int compat)
104587 {
104588 char name[XT_TABLE_MAXNAMELEN];
104589 struct xt_table *t;
104590 int ret;
104591
104592- if (*len != sizeof(struct ipt_getinfo)) {
104593- duprintf("length %u != %zu\n", *len,
104594+ if (len != sizeof(struct ipt_getinfo)) {
104595+ duprintf("length %u != %zu\n", len,
104596 sizeof(struct ipt_getinfo));
104597 return -EINVAL;
104598 }
104599@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
104600 info.size = private->size;
104601 strcpy(info.name, name);
104602
104603- if (copy_to_user(user, &info, *len) != 0)
104604+ if (copy_to_user(user, &info, len) != 0)
104605 ret = -EFAULT;
104606 else
104607 ret = 0;
104608@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104609
104610 switch (cmd) {
104611 case IPT_SO_GET_INFO:
104612- ret = get_info(sock_net(sk), user, len, 1);
104613+ ret = get_info(sock_net(sk), user, *len, 1);
104614 break;
104615 case IPT_SO_GET_ENTRIES:
104616 ret = compat_get_entries(sock_net(sk), user, len);
104617@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104618
104619 switch (cmd) {
104620 case IPT_SO_GET_INFO:
104621- ret = get_info(sock_net(sk), user, len, 0);
104622+ ret = get_info(sock_net(sk), user, *len, 0);
104623 break;
104624
104625 case IPT_SO_GET_ENTRIES:
104626diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
104627index e90f83a..3e6acca 100644
104628--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
104629+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
104630@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
104631 spin_lock_init(&cn->lock);
104632
104633 #ifdef CONFIG_PROC_FS
104634- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
104635+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
104636 if (!cn->procdir) {
104637 pr_err("Unable to proc dir entry\n");
104638 return -ENOMEM;
104639diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
104640index 787b0d6..ab6c0ba 100644
104641--- a/net/ipv4/ping.c
104642+++ b/net/ipv4/ping.c
104643@@ -59,7 +59,7 @@ struct ping_table {
104644 };
104645
104646 static struct ping_table ping_table;
104647-struct pingv6_ops pingv6_ops;
104648+struct pingv6_ops *pingv6_ops;
104649 EXPORT_SYMBOL_GPL(pingv6_ops);
104650
104651 static u16 ping_port_rover;
104652@@ -359,7 +359,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
104653 return -ENODEV;
104654 }
104655 }
104656- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
104657+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
104658 scoped);
104659 rcu_read_unlock();
104660
104661@@ -567,7 +567,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
104662 }
104663 #if IS_ENABLED(CONFIG_IPV6)
104664 } else if (skb->protocol == htons(ETH_P_IPV6)) {
104665- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
104666+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
104667 #endif
104668 }
104669
104670@@ -585,7 +585,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
104671 info, (u8 *)icmph);
104672 #if IS_ENABLED(CONFIG_IPV6)
104673 } else if (family == AF_INET6) {
104674- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
104675+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
104676 info, (u8 *)icmph);
104677 #endif
104678 }
104679@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
104680 }
104681
104682 if (inet6_sk(sk)->rxopt.all)
104683- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
104684+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
104685 if (skb->protocol == htons(ETH_P_IPV6) &&
104686 inet6_sk(sk)->rxopt.all)
104687- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
104688+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
104689 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
104690 ip_cmsg_recv(msg, skb);
104691 #endif
104692@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
104693 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
104694 0, sock_i_ino(sp),
104695 atomic_read(&sp->sk_refcnt), sp,
104696- atomic_read(&sp->sk_drops));
104697+ atomic_read_unchecked(&sp->sk_drops));
104698 }
104699
104700 static int ping_v4_seq_show(struct seq_file *seq, void *v)
104701diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
104702index f027a70..2e64edc 100644
104703--- a/net/ipv4/raw.c
104704+++ b/net/ipv4/raw.c
104705@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
104706 int raw_rcv(struct sock *sk, struct sk_buff *skb)
104707 {
104708 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
104709- atomic_inc(&sk->sk_drops);
104710+ atomic_inc_unchecked(&sk->sk_drops);
104711 kfree_skb(skb);
104712 return NET_RX_DROP;
104713 }
104714@@ -773,16 +773,20 @@ static int raw_init(struct sock *sk)
104715
104716 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
104717 {
104718+ struct icmp_filter filter;
104719+
104720 if (optlen > sizeof(struct icmp_filter))
104721 optlen = sizeof(struct icmp_filter);
104722- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
104723+ if (copy_from_user(&filter, optval, optlen))
104724 return -EFAULT;
104725+ raw_sk(sk)->filter = filter;
104726 return 0;
104727 }
104728
104729 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
104730 {
104731 int len, ret = -EFAULT;
104732+ struct icmp_filter filter;
104733
104734 if (get_user(len, optlen))
104735 goto out;
104736@@ -792,8 +796,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
104737 if (len > sizeof(struct icmp_filter))
104738 len = sizeof(struct icmp_filter);
104739 ret = -EFAULT;
104740- if (put_user(len, optlen) ||
104741- copy_to_user(optval, &raw_sk(sk)->filter, len))
104742+ filter = raw_sk(sk)->filter;
104743+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
104744 goto out;
104745 ret = 0;
104746 out: return ret;
104747@@ -1022,7 +1026,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
104748 0, 0L, 0,
104749 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
104750 0, sock_i_ino(sp),
104751- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
104752+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
104753 }
104754
104755 static int raw_seq_show(struct seq_file *seq, void *v)
104756diff --git a/net/ipv4/route.c b/net/ipv4/route.c
104757index 20fc020..3ba426f 100644
104758--- a/net/ipv4/route.c
104759+++ b/net/ipv4/route.c
104760@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
104761
104762 static int rt_cache_seq_open(struct inode *inode, struct file *file)
104763 {
104764- return seq_open(file, &rt_cache_seq_ops);
104765+ return seq_open_restrict(file, &rt_cache_seq_ops);
104766 }
104767
104768 static const struct file_operations rt_cache_seq_fops = {
104769@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
104770
104771 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
104772 {
104773- return seq_open(file, &rt_cpu_seq_ops);
104774+ return seq_open_restrict(file, &rt_cpu_seq_ops);
104775 }
104776
104777 static const struct file_operations rt_cpu_seq_fops = {
104778@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
104779
104780 static int rt_acct_proc_open(struct inode *inode, struct file *file)
104781 {
104782- return single_open(file, rt_acct_proc_show, NULL);
104783+ return single_open_restrict(file, rt_acct_proc_show, NULL);
104784 }
104785
104786 static const struct file_operations rt_acct_proc_fops = {
104787@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
104788
104789 #define IP_IDENTS_SZ 2048u
104790 struct ip_ident_bucket {
104791- atomic_t id;
104792+ atomic_unchecked_t id;
104793 u32 stamp32;
104794 };
104795
104796-static struct ip_ident_bucket *ip_idents __read_mostly;
104797+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
104798
104799 /* In order to protect privacy, we add a perturbation to identifiers
104800 * if one generator is seldom used. This makes hard for an attacker
104801@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
104802 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
104803 delta = prandom_u32_max(now - old);
104804
104805- return atomic_add_return(segs + delta, &bucket->id) - segs;
104806+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
104807 }
104808 EXPORT_SYMBOL(ip_idents_reserve);
104809
104810@@ -2639,34 +2639,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
104811 .maxlen = sizeof(int),
104812 .mode = 0200,
104813 .proc_handler = ipv4_sysctl_rtcache_flush,
104814+ .extra1 = &init_net,
104815 },
104816 { },
104817 };
104818
104819 static __net_init int sysctl_route_net_init(struct net *net)
104820 {
104821- struct ctl_table *tbl;
104822+ ctl_table_no_const *tbl = NULL;
104823
104824- tbl = ipv4_route_flush_table;
104825 if (!net_eq(net, &init_net)) {
104826- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
104827+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
104828 if (tbl == NULL)
104829 goto err_dup;
104830
104831 /* Don't export sysctls to unprivileged users */
104832 if (net->user_ns != &init_user_ns)
104833 tbl[0].procname = NULL;
104834- }
104835- tbl[0].extra1 = net;
104836+ tbl[0].extra1 = net;
104837+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
104838+ } else
104839+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
104840
104841- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
104842 if (net->ipv4.route_hdr == NULL)
104843 goto err_reg;
104844 return 0;
104845
104846 err_reg:
104847- if (tbl != ipv4_route_flush_table)
104848- kfree(tbl);
104849+ kfree(tbl);
104850 err_dup:
104851 return -ENOMEM;
104852 }
104853@@ -2689,8 +2689,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
104854
104855 static __net_init int rt_genid_init(struct net *net)
104856 {
104857- atomic_set(&net->ipv4.rt_genid, 0);
104858- atomic_set(&net->fnhe_genid, 0);
104859+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
104860+ atomic_set_unchecked(&net->fnhe_genid, 0);
104861 get_random_bytes(&net->ipv4.dev_addr_genid,
104862 sizeof(net->ipv4.dev_addr_genid));
104863 return 0;
104864@@ -2734,11 +2734,7 @@ int __init ip_rt_init(void)
104865 int rc = 0;
104866 int cpu;
104867
104868- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
104869- if (!ip_idents)
104870- panic("IP: failed to allocate ip_idents\n");
104871-
104872- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
104873+ prandom_bytes(ip_idents, sizeof(ip_idents));
104874
104875 for_each_possible_cpu(cpu) {
104876 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
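
Beyond the seq_open_restrict/single_open_restrict conversions (the restricted-/proc theme again) and the sysctl pattern from earlier, route.c turns ip_idents from a boot-time kmalloc() into a fixed static array: the allocation can no longer fail (the panic is gone), the table no longer borders attacker-influenced heap objects, and sizeof now covers the whole table, which the seeding uses directly. The bucket id also becomes atomic_unchecked_t, since IP-ID generators are meant to wrap. Sketch of the trade, names illustrative:

	#include <linux/random.h>
	#include <linux/types.h>

	#define IDENT_SZ 2048u

	struct ident_bucket {
		int id;		/* atomic_unchecked_t in the patch: wrap is fine */
		u32 stamp32;
	};

	/* Static storage: nothing to allocate or panic over at boot, no
	 * heap neighbours, and sizeof() means the whole table.  Cost: the
	 * ~16 KiB lives in the image permanently. */
	static struct ident_bucket idents[IDENT_SZ];

	static void idents_seed(void)
	{
		prandom_bytes(idents, sizeof(idents));
	}
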
104877diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
104878index d151539..5f5e247 100644
104879--- a/net/ipv4/sysctl_net_ipv4.c
104880+++ b/net/ipv4/sysctl_net_ipv4.c
104881@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
104882 container_of(table->data, struct net, ipv4.ip_local_ports.range);
104883 int ret;
104884 int range[2];
104885- struct ctl_table tmp = {
104886+ ctl_table_no_const tmp = {
104887 .data = &range,
104888 .maxlen = sizeof(range),
104889 .mode = table->mode,
104890@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
104891 int ret;
104892 gid_t urange[2];
104893 kgid_t low, high;
104894- struct ctl_table tmp = {
104895+ ctl_table_no_const tmp = {
104896 .data = &urange,
104897 .maxlen = sizeof(urange),
104898 .mode = table->mode,
104899@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
104900 void __user *buffer, size_t *lenp, loff_t *ppos)
104901 {
104902 char val[TCP_CA_NAME_MAX];
104903- struct ctl_table tbl = {
104904+ ctl_table_no_const tbl = {
104905 .data = val,
104906 .maxlen = TCP_CA_NAME_MAX,
104907 };
104908@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
104909 void __user *buffer, size_t *lenp,
104910 loff_t *ppos)
104911 {
104912- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
104913+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
104914 int ret;
104915
104916 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
104917@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
104918 void __user *buffer, size_t *lenp,
104919 loff_t *ppos)
104920 {
104921- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
104922+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
104923 int ret;
104924
104925 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
104926@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
104927 void __user *buffer, size_t *lenp,
104928 loff_t *ppos)
104929 {
104930- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
104931+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
104932 struct tcp_fastopen_context *ctxt;
104933 int ret;
104934 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
104935@@ -888,13 +888,12 @@ static struct ctl_table ipv4_net_table[] = {
104936
104937 static __net_init int ipv4_sysctl_init_net(struct net *net)
104938 {
104939- struct ctl_table *table;
104940+ ctl_table_no_const *table = NULL;
104941
104942- table = ipv4_net_table;
104943 if (!net_eq(net, &init_net)) {
104944 int i;
104945
104946- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
104947+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
104948 if (table == NULL)
104949 goto err_alloc;
104950
104951@@ -903,7 +902,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
104952 table[i].data += (void *)net - (void *)&init_net;
104953 }
104954
104955- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
104956+ if (!net_eq(net, &init_net))
104957+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
104958+ else
104959+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
104960 if (net->ipv4.ipv4_hdr == NULL)
104961 goto err_reg;
104962
104963diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
104964index 995a225..e1e9183 100644
104965--- a/net/ipv4/tcp.c
104966+++ b/net/ipv4/tcp.c
104967@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
104968
104969 /* Race breaker. If space is freed after
104970 * wspace test but before the flags are set,
104971- * IO signal will be lost.
104972+ * IO signal will be lost. Memory barrier
104973+ * pairs with the input side.
104974 */
104975+ smp_mb__after_atomic();
104976 if (sk_stream_is_writeable(sk))
104977 mask |= POLLOUT | POLLWRNORM;
104978 }
104979diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
104980index f501ac04..0c5a1b2 100644
104981--- a/net/ipv4/tcp_input.c
104982+++ b/net/ipv4/tcp_input.c
104983@@ -767,7 +767,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
104984 * without any lock. We want to make sure compiler wont store
104985 * intermediate values in this location.
104986 */
104987- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
104988+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
104989 sk->sk_max_pacing_rate);
104990 }
104991
104992@@ -4541,7 +4541,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
104993 * simplifies code)
104994 */
104995 static void
104996-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
104997+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
104998 struct sk_buff *head, struct sk_buff *tail,
104999 u32 start, u32 end)
105000 {
105001@@ -4799,6 +4799,8 @@ static void tcp_check_space(struct sock *sk)
105002 {
105003 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
105004 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
105005+ /* pairs with tcp_poll() */
105006+ smp_mb__after_atomic();
105007 if (sk->sk_socket &&
105008 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
105009 tcp_new_space(sk);
105010@@ -5525,6 +5527,7 @@ discard:
105011 tcp_paws_reject(&tp->rx_opt, 0))
105012 goto discard_and_undo;
105013
105014+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
105015 if (th->syn) {
105016 /* We see SYN without ACK. It is attempt of
105017 * simultaneous connect with crossed SYNs.
105018@@ -5575,6 +5578,7 @@ discard:
105019 goto discard;
105020 #endif
105021 }
105022+#endif
105023 /* "fifth, if neither of the SYN or RST bits is set then
105024 * drop the segment and return."
105025 */
105026@@ -5621,7 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
105027 goto discard;
105028
105029 if (th->syn) {
105030- if (th->fin)
105031+ if (th->fin || th->urg || th->psh)
105032 goto discard;
105033 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
105034 return 1;
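
The tcp.c and tcp_input.c barriers are two halves of one lost-wakeup fix: the poll side sets SOCK_NOSPACE and then re-tests writeable space, the input side frees space (clearing SOCK_QUEUE_SHRUNK) and then tests SOCK_NOSPACE, and each now places smp_mb__after_atomic() between its store and its load. With both barriers in place, the two sides cannot both miss each other's store, so either poll sees the space or the input path sees the waiter. The two-sided handshake in isolation; chan, WAITING and READY are illustrative:

	#include <linux/atomic.h>
	#include <linux/bitops.h>
	#include <linux/types.h>

	struct chan { unsigned long flags; };
	enum { WAITING, READY };

	static bool waiter_should_sleep(struct chan *c)
	{
		set_bit(WAITING, &c->flags);	/* store: about to sleep    */
		smp_mb__after_atomic();		/* pairs with waker barrier */
		return !test_bit(READY, &c->flags);  /* load: recheck       */
	}

	static bool waker_sees_waiter(struct chan *c)
	{
		set_bit(READY, &c->flags);	/* store: condition true     */
		smp_mb__after_atomic();		/* pairs with waiter barrier */
		return test_bit(WAITING, &c->flags); /* load: anyone there?  */
	}

	/* Store-buffering argument: if waiter_should_sleep() returned true
	 * (READY not seen), the waker's later load must observe WAITING. */
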
105035diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
105036index f1756ee..8908cb0 100644
105037--- a/net/ipv4/tcp_ipv4.c
105038+++ b/net/ipv4/tcp_ipv4.c
105039@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
105040 int sysctl_tcp_low_latency __read_mostly;
105041 EXPORT_SYMBOL(sysctl_tcp_low_latency);
105042
105043+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105044+extern int grsec_enable_blackhole;
105045+#endif
105046+
105047 #ifdef CONFIG_TCP_MD5SIG
105048 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
105049 __be32 daddr, __be32 saddr, const struct tcphdr *th);
105050@@ -1475,6 +1479,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
105051 return 0;
105052
105053 reset:
105054+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105055+ if (!grsec_enable_blackhole)
105056+#endif
105057 tcp_v4_send_reset(rsk, skb);
105058 discard:
105059 kfree_skb(skb);
105060@@ -1639,12 +1646,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
105061 TCP_SKB_CB(skb)->sacked = 0;
105062
105063 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
105064- if (!sk)
105065+ if (!sk) {
105066+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105067+ ret = 1;
105068+#endif
105069 goto no_tcp_socket;
105070-
105071+ }
105072 process:
105073- if (sk->sk_state == TCP_TIME_WAIT)
105074+ if (sk->sk_state == TCP_TIME_WAIT) {
105075+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105076+ ret = 2;
105077+#endif
105078 goto do_time_wait;
105079+ }
105080
105081 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
105082 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
105083@@ -1700,6 +1714,10 @@ csum_error:
105084 bad_packet:
105085 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
105086 } else {
105087+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105088+ if (!grsec_enable_blackhole || (ret == 1 &&
105089+ (skb->dev->flags & IFF_LOOPBACK)))
105090+#endif
105091 tcp_v4_send_reset(NULL, skb);
105092 }
105093
105094diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
105095index dd11ac7..c0872da 100644
105096--- a/net/ipv4/tcp_minisocks.c
105097+++ b/net/ipv4/tcp_minisocks.c
105098@@ -27,6 +27,10 @@
105099 #include <net/inet_common.h>
105100 #include <net/xfrm.h>
105101
105102+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105103+extern int grsec_enable_blackhole;
105104+#endif
105105+
105106 int sysctl_tcp_syncookies __read_mostly = 1;
105107 EXPORT_SYMBOL(sysctl_tcp_syncookies);
105108
105109@@ -785,7 +789,10 @@ embryonic_reset:
105110 * avoid becoming vulnerable to outside attack aiming at
105111 * resetting legit local connections.
105112 */
105113- req->rsk_ops->send_reset(sk, skb);
105114+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105115+ if (!grsec_enable_blackhole)
105116+#endif
105117+ req->rsk_ops->send_reset(sk, skb);
105118 } else if (fastopen) { /* received a valid RST pkt */
105119 reqsk_fastopen_remove(sk, req, true);
105120 tcp_reset(sk);
105121diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
105122index ebf5ff5..4d1ff32 100644
105123--- a/net/ipv4/tcp_probe.c
105124+++ b/net/ipv4/tcp_probe.c
105125@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
105126 if (cnt + width >= len)
105127 break;
105128
105129- if (copy_to_user(buf + cnt, tbuf, width))
105130+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
105131 return -EFAULT;
105132 cnt += width;
105133 }
105134diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
105135index 0732b78..a82bdc6 100644
105136--- a/net/ipv4/tcp_timer.c
105137+++ b/net/ipv4/tcp_timer.c
105138@@ -22,6 +22,10 @@
105139 #include <linux/gfp.h>
105140 #include <net/tcp.h>
105141
105142+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105143+extern int grsec_lastack_retries;
105144+#endif
105145+
105146 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
105147 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
105148 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
105149@@ -194,6 +198,13 @@ static int tcp_write_timeout(struct sock *sk)
105150 }
105151 }
105152
105153+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105154+ if ((sk->sk_state == TCP_LAST_ACK) &&
105155+ (grsec_lastack_retries > 0) &&
105156+ (grsec_lastack_retries < retry_until))
105157+ retry_until = grsec_lastack_retries;
105158+#endif
105159+
105160 if (retransmits_timed_out(sk, retry_until,
105161 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
105162 /* Has it gone just too far? */
105163diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
105164index 97ef1f8b..e446c33 100644
105165--- a/net/ipv4/udp.c
105166+++ b/net/ipv4/udp.c
105167@@ -87,6 +87,7 @@
105168 #include <linux/types.h>
105169 #include <linux/fcntl.h>
105170 #include <linux/module.h>
105171+#include <linux/security.h>
105172 #include <linux/socket.h>
105173 #include <linux/sockios.h>
105174 #include <linux/igmp.h>
105175@@ -114,6 +115,10 @@
105176 #include <net/busy_poll.h>
105177 #include "udp_impl.h"
105178
105179+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105180+extern int grsec_enable_blackhole;
105181+#endif
105182+
105183 struct udp_table udp_table __read_mostly;
105184 EXPORT_SYMBOL(udp_table);
105185
105186@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
105187 return true;
105188 }
105189
105190+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
105191+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
105192+
105193 /*
105194 * This routine is called by the ICMP module when it gets some
105195 * sort of error condition. If err < 0 then the socket should
105196@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
105197 dport = usin->sin_port;
105198 if (dport == 0)
105199 return -EINVAL;
105200+
105201+ err = gr_search_udp_sendmsg(sk, usin);
105202+ if (err)
105203+ return err;
105204 } else {
105205 if (sk->sk_state != TCP_ESTABLISHED)
105206 return -EDESTADDRREQ;
105207+
105208+ err = gr_search_udp_sendmsg(sk, NULL);
105209+ if (err)
105210+ return err;
105211+
105212 daddr = inet->inet_daddr;
105213 dport = inet->inet_dport;
105214 /* Open fast path for connected socket.
105215@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
105216 IS_UDPLITE(sk));
105217 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
105218 IS_UDPLITE(sk));
105219- atomic_inc(&sk->sk_drops);
105220+ atomic_inc_unchecked(&sk->sk_drops);
105221 __skb_unlink(skb, rcvq);
105222 __skb_queue_tail(&list_kill, skb);
105223 }
105224@@ -1275,6 +1292,10 @@ try_again:
105225 if (!skb)
105226 goto out;
105227
105228+ err = gr_search_udp_recvmsg(sk, skb);
105229+ if (err)
105230+ goto out_free;
105231+
105232 ulen = skb->len - sizeof(struct udphdr);
105233 copied = len;
105234 if (copied > ulen)
105235@@ -1307,7 +1328,7 @@ try_again:
105236 if (unlikely(err)) {
105237 trace_kfree_skb(skb, udp_recvmsg);
105238 if (!peeked) {
105239- atomic_inc(&sk->sk_drops);
105240+ atomic_inc_unchecked(&sk->sk_drops);
105241 UDP_INC_STATS_USER(sock_net(sk),
105242 UDP_MIB_INERRORS, is_udplite);
105243 }
105244@@ -1605,7 +1626,7 @@ csum_error:
105245 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
105246 drop:
105247 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
105248- atomic_inc(&sk->sk_drops);
105249+ atomic_inc_unchecked(&sk->sk_drops);
105250 kfree_skb(skb);
105251 return -1;
105252 }
105253@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
105254 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
105255
105256 if (!skb1) {
105257- atomic_inc(&sk->sk_drops);
105258+ atomic_inc_unchecked(&sk->sk_drops);
105259 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
105260 IS_UDPLITE(sk));
105261 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
105262@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
105263 goto csum_error;
105264
105265 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
105266+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105267+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
105268+#endif
105269 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
105270
105271 /*
105272@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
105273 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
105274 0, sock_i_ino(sp),
105275 atomic_read(&sp->sk_refcnt), sp,
105276- atomic_read(&sp->sk_drops));
105277+ atomic_read_unchecked(&sp->sk_drops));
105278 }
105279
105280 int udp4_seq_show(struct seq_file *seq, void *v)
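
Three independent changes meet in udp.c above: the gr_search_udp_{send,recv}msg() hooks give the RBAC engine a veto over UDP traffic, the sk_drops counter moves to the wrap-tolerant _unchecked atomics (see the note after the datagram.c hunk below), and the ICMP port-unreachable reply gains the same blackhole gate as TCP, with loopback explicitly exempted so local diagnostics keep working. The exemption logic on its own (IFF_LOOPBACK matches the flag bit in <linux/if.h>):

#include <stdio.h>
#include <stdbool.h>

#define IFF_LOOPBACK 0x8

static bool send_port_unreach(bool blackhole_on, unsigned int dev_flags)
{
	return !blackhole_on || (dev_flags & IFF_LOOPBACK);
}

int main(void)
{
	printf("%d\n", send_port_unreach(true, IFF_LOOPBACK)); /* 1: lo exempt */
	printf("%d\n", send_port_unreach(true, 0));            /* 0: suppressed */
	return 0;
}
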
105281diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
105282index 6156f68..d6ab46d 100644
105283--- a/net/ipv4/xfrm4_policy.c
105284+++ b/net/ipv4/xfrm4_policy.c
105285@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
105286 fl4->flowi4_tos = iph->tos;
105287 }
105288
105289-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
105290+static int xfrm4_garbage_collect(struct dst_ops *ops)
105291 {
105292 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
105293
105294- xfrm4_policy_afinfo.garbage_collect(net);
105295+ xfrm_garbage_collect_deferred(net);
105296 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
105297 }
105298
105299@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
105300
105301 static int __net_init xfrm4_net_init(struct net *net)
105302 {
105303- struct ctl_table *table;
105304+ ctl_table_no_const *table = NULL;
105305 struct ctl_table_header *hdr;
105306
105307- table = xfrm4_policy_table;
105308 if (!net_eq(net, &init_net)) {
105309- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
105310+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
105311 if (!table)
105312 goto err_alloc;
105313
105314 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
105315- }
105316-
105317- hdr = register_net_sysctl(net, "net/ipv4", table);
105318+ hdr = register_net_sysctl(net, "net/ipv4", table);
105319+ } else
105320+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
105321 if (!hdr)
105322 goto err_reg;
105323
105324@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
105325 return 0;
105326
105327 err_reg:
105328- if (!net_eq(net, &init_net))
105329- kfree(table);
105330+ kfree(table);
105331 err_alloc:
105332 return -ENOMEM;
105333 }
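
The xfrm4_net_init rewrite above is one instance of a pattern repeated below for nf_conntrack_reasm, ip6 reassembly and xfrm6: with grsecurity's constification in effect the static sysctl template is read-only, so only the kmemdup'd per-namespace copy may be edited, init_net registers the template untouched, and the error path relies on kfree(NULL) being a no-op. A compileable userspace model of the shape (malloc/memcpy stand in for kmemdup; the registration calls are elided):

#include <stdlib.h>
#include <string.h>

struct ctl_entry { const char *procname; int *data; };

static const struct ctl_entry tmpl[] = { { "gc_thresh", 0 }, { 0, 0 } };

static int per_ns_register(int is_init_ns, int *per_ns_value)
{
	struct ctl_entry *table = NULL;

	if (!is_init_ns) {
		table = malloc(sizeof(tmpl));
		if (!table)
			goto err;
		memcpy(table, tmpl, sizeof(tmpl));
		table[0].data = per_ns_value;  /* only the copy is edited */
		/* register_net_sysctl(net, "net/ipv4", table); */
	} else {
		/* register_net_sysctl(net, "net/ipv4", tmpl); template as-is */
	}
	return 0;
err:
	free(table);   /* NULL for init_net, so no net_eq() check needed */
	return -1;
}

int main(void) { int v = 0; return per_ns_register(0, &v); }
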
105334diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
105335index b603002..0de5c88 100644
105336--- a/net/ipv6/addrconf.c
105337+++ b/net/ipv6/addrconf.c
105338@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
105339 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
105340 .mtu6 = IPV6_MIN_MTU,
105341 .accept_ra = 1,
105342- .accept_redirects = 1,
105343+ .accept_redirects = 0,
105344 .autoconf = 1,
105345 .force_mld_version = 0,
105346 .mldv1_unsolicited_report_interval = 10 * HZ,
105347@@ -209,7 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
105348 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
105349 .mtu6 = IPV6_MIN_MTU,
105350 .accept_ra = 1,
105351- .accept_redirects = 1,
105352+ .accept_redirects = 0,
105353 .autoconf = 1,
105354 .force_mld_version = 0,
105355 .mldv1_unsolicited_report_interval = 10 * HZ,
105356@@ -607,7 +607,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
105357 idx = 0;
105358 head = &net->dev_index_head[h];
105359 rcu_read_lock();
105360- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
105361+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
105362 net->dev_base_seq;
105363 hlist_for_each_entry_rcu(dev, head, index_hlist) {
105364 if (idx < s_idx)
105365@@ -2438,7 +2438,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
105366 p.iph.ihl = 5;
105367 p.iph.protocol = IPPROTO_IPV6;
105368 p.iph.ttl = 64;
105369- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
105370+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
105371
105372 if (ops->ndo_do_ioctl) {
105373 mm_segment_t oldfs = get_fs();
105374@@ -3587,16 +3587,23 @@ static const struct file_operations if6_fops = {
105375 .release = seq_release_net,
105376 };
105377
105378+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
105379+extern void unregister_ipv6_seq_ops_addr(void);
105380+
105381 static int __net_init if6_proc_net_init(struct net *net)
105382 {
105383- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
105384+ register_ipv6_seq_ops_addr(&if6_seq_ops);
105385+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
105386+ unregister_ipv6_seq_ops_addr();
105387 return -ENOMEM;
105388+ }
105389 return 0;
105390 }
105391
105392 static void __net_exit if6_proc_net_exit(struct net *net)
105393 {
105394 remove_proc_entry("if_inet6", net->proc_net);
105395+ unregister_ipv6_seq_ops_addr();
105396 }
105397
105398 static struct pernet_operations if6_proc_net_ops = {
105399@@ -4215,7 +4222,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
105400 s_ip_idx = ip_idx = cb->args[2];
105401
105402 rcu_read_lock();
105403- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
105404+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
105405 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
105406 idx = 0;
105407 head = &net->dev_index_head[h];
105408@@ -4864,7 +4871,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
105409 rt_genid_bump_ipv6(net);
105410 break;
105411 }
105412- atomic_inc(&net->ipv6.dev_addr_genid);
105413+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
105414 }
105415
105416 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
105417@@ -4884,7 +4891,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
105418 int *valp = ctl->data;
105419 int val = *valp;
105420 loff_t pos = *ppos;
105421- struct ctl_table lctl;
105422+ ctl_table_no_const lctl;
105423 int ret;
105424
105425 /*
105426@@ -4909,7 +4916,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
105427 {
105428 struct inet6_dev *idev = ctl->extra1;
105429 int min_mtu = IPV6_MIN_MTU;
105430- struct ctl_table lctl;
105431+ ctl_table_no_const lctl;
105432
105433 lctl = *ctl;
105434 lctl.extra1 = &min_mtu;
105435@@ -4984,7 +4991,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
105436 int *valp = ctl->data;
105437 int val = *valp;
105438 loff_t pos = *ppos;
105439- struct ctl_table lctl;
105440+ ctl_table_no_const lctl;
105441 int ret;
105442
105443 /*
105444diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
105445index e8c4400..a4cd5da 100644
105446--- a/net/ipv6/af_inet6.c
105447+++ b/net/ipv6/af_inet6.c
105448@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
105449 net->ipv6.sysctl.icmpv6_time = 1*HZ;
105450 net->ipv6.sysctl.flowlabel_consistency = 1;
105451 net->ipv6.sysctl.auto_flowlabels = 0;
105452- atomic_set(&net->ipv6.fib6_sernum, 1);
105453+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
105454
105455 err = ipv6_init_mibs(net);
105456 if (err)
105457diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
105458index ace8dac..bd6942d 100644
105459--- a/net/ipv6/datagram.c
105460+++ b/net/ipv6/datagram.c
105461@@ -957,5 +957,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
105462 0,
105463 sock_i_ino(sp),
105464 atomic_read(&sp->sk_refcnt), sp,
105465- atomic_read(&sp->sk_drops));
105466+ atomic_read_unchecked(&sp->sk_drops));
105467 }
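
The one-line datagram.c change is representative of dozens in this section: under the PaX REFCOUNT feature, plain atomic_t increments are instrumented to detect overflow, because an overflowing reference count is an exploitable use-after-free primitive. Pure statistics such as sk_drops may legitimately wrap, so the patch moves them to the _unchecked variants, which keep ordinary modular arithmetic. A userspace model of the distinction (GCC atomic builtins; the real kernel variant traps rather than printing):

#include <stdio.h>
#include <limits.h>

typedef struct { int v; } atomic_t;            /* overflow-checked   */
typedef struct { int v; } atomic_unchecked_t;  /* wrap-tolerant stat */

static void atomic_inc(atomic_t *a)
{
	if (__atomic_fetch_add(&a->v, 1, __ATOMIC_RELAXED) == INT_MAX)
		fprintf(stderr, "refcount overflow detected\n");
}

static void atomic_inc_unchecked(atomic_unchecked_t *a)
{
	__atomic_fetch_add(&a->v, 1, __ATOMIC_RELAXED);  /* wrap is benign */
}

int main(void)
{
	atomic_t ref = { INT_MAX };
	atomic_unchecked_t drops = { INT_MAX };
	atomic_inc(&ref);              /* flagged */
	atomic_inc_unchecked(&drops);  /* silently wraps, by design */
	return 0;
}
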
105468diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
105469index a5e9519..16b7412 100644
105470--- a/net/ipv6/icmp.c
105471+++ b/net/ipv6/icmp.c
105472@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
105473
105474 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
105475 {
105476- struct ctl_table *table;
105477+ ctl_table_no_const *table;
105478
105479 table = kmemdup(ipv6_icmp_table_template,
105480 sizeof(ipv6_icmp_table_template),
105481diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
105482index 263ef41..88c7be8 100644
105483--- a/net/ipv6/ip6_fib.c
105484+++ b/net/ipv6/ip6_fib.c
105485@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
105486 int new, old;
105487
105488 do {
105489- old = atomic_read(&net->ipv6.fib6_sernum);
105490+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
105491 new = old < INT_MAX ? old + 1 : 1;
105492- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
105493+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
105494 old, new) != old);
105495 return new;
105496 }
105497diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
105498index bc28b7d..a08feea 100644
105499--- a/net/ipv6/ip6_gre.c
105500+++ b/net/ipv6/ip6_gre.c
105501@@ -71,8 +71,8 @@ struct ip6gre_net {
105502 struct net_device *fb_tunnel_dev;
105503 };
105504
105505-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
105506-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
105507+static struct rtnl_link_ops ip6gre_link_ops;
105508+static struct rtnl_link_ops ip6gre_tap_ops;
105509 static int ip6gre_tunnel_init(struct net_device *dev);
105510 static void ip6gre_tunnel_setup(struct net_device *dev);
105511 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
105512@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
105513 }
105514
105515
105516-static struct inet6_protocol ip6gre_protocol __read_mostly = {
105517+static struct inet6_protocol ip6gre_protocol = {
105518 .handler = ip6gre_rcv,
105519 .err_handler = ip6gre_err,
105520 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
105521@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
105522 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
105523 };
105524
105525-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
105526+static struct rtnl_link_ops ip6gre_link_ops = {
105527 .kind = "ip6gre",
105528 .maxtype = IFLA_GRE_MAX,
105529 .policy = ip6gre_policy,
105530@@ -1665,7 +1665,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
105531 .get_link_net = ip6_tnl_get_link_net,
105532 };
105533
105534-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
105535+static struct rtnl_link_ops ip6gre_tap_ops = {
105536 .kind = "ip6gretap",
105537 .maxtype = IFLA_GRE_MAX,
105538 .policy = ip6gre_policy,
105539diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
105540index ddd94ec..b7cfefb 100644
105541--- a/net/ipv6/ip6_tunnel.c
105542+++ b/net/ipv6/ip6_tunnel.c
105543@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
105544
105545 static int ip6_tnl_dev_init(struct net_device *dev);
105546 static void ip6_tnl_dev_setup(struct net_device *dev);
105547-static struct rtnl_link_ops ip6_link_ops __read_mostly;
105548+static struct rtnl_link_ops ip6_link_ops;
105549
105550 static int ip6_tnl_net_id __read_mostly;
105551 struct ip6_tnl_net {
105552@@ -1780,7 +1780,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
105553 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
105554 };
105555
105556-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
105557+static struct rtnl_link_ops ip6_link_ops = {
105558 .kind = "ip6tnl",
105559 .maxtype = IFLA_IPTUN_MAX,
105560 .policy = ip6_tnl_policy,
105561diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
105562index 5fb9e21..92bf04b 100644
105563--- a/net/ipv6/ip6_vti.c
105564+++ b/net/ipv6/ip6_vti.c
105565@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
105566
105567 static int vti6_dev_init(struct net_device *dev);
105568 static void vti6_dev_setup(struct net_device *dev);
105569-static struct rtnl_link_ops vti6_link_ops __read_mostly;
105570+static struct rtnl_link_ops vti6_link_ops;
105571
105572 static int vti6_net_id __read_mostly;
105573 struct vti6_net {
105574@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
105575 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
105576 };
105577
105578-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
105579+static struct rtnl_link_ops vti6_link_ops = {
105580 .kind = "vti6",
105581 .maxtype = IFLA_VTI_MAX,
105582 .policy = vti6_policy,
105583diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
105584index 8d766d9..dcdfea7 100644
105585--- a/net/ipv6/ipv6_sockglue.c
105586+++ b/net/ipv6/ipv6_sockglue.c
105587@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
105588 if (sk->sk_type != SOCK_STREAM)
105589 return -ENOPROTOOPT;
105590
105591- msg.msg_control = optval;
105592+ msg.msg_control = (void __force_kernel *)optval;
105593 msg.msg_controllen = len;
105594 msg.msg_flags = flags;
105595
105596diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
105597index bb00c6f..16c90d7 100644
105598--- a/net/ipv6/netfilter/ip6_tables.c
105599+++ b/net/ipv6/netfilter/ip6_tables.c
105600@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
105601 #endif
105602
105603 static int get_info(struct net *net, void __user *user,
105604- const int *len, int compat)
105605+ int len, int compat)
105606 {
105607 char name[XT_TABLE_MAXNAMELEN];
105608 struct xt_table *t;
105609 int ret;
105610
105611- if (*len != sizeof(struct ip6t_getinfo)) {
105612- duprintf("length %u != %zu\n", *len,
105613+ if (len != sizeof(struct ip6t_getinfo)) {
105614+ duprintf("length %u != %zu\n", len,
105615 sizeof(struct ip6t_getinfo));
105616 return -EINVAL;
105617 }
105618@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
105619 info.size = private->size;
105620 strcpy(info.name, name);
105621
105622- if (copy_to_user(user, &info, *len) != 0)
105623+ if (copy_to_user(user, &info, len) != 0)
105624 ret = -EFAULT;
105625 else
105626 ret = 0;
105627@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
105628
105629 switch (cmd) {
105630 case IP6T_SO_GET_INFO:
105631- ret = get_info(sock_net(sk), user, len, 1);
105632+ ret = get_info(sock_net(sk), user, *len, 1);
105633 break;
105634 case IP6T_SO_GET_ENTRIES:
105635 ret = compat_get_entries(sock_net(sk), user, len);
105636@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
105637
105638 switch (cmd) {
105639 case IP6T_SO_GET_INFO:
105640- ret = get_info(sock_net(sk), user, len, 0);
105641+ ret = get_info(sock_net(sk), user, *len, 0);
105642 break;
105643
105644 case IP6T_SO_GET_ENTRIES:
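
The get_info() change above converts the length argument from const int * to a plain int passed by value, with both callers dereferencing *len at the call site instead. The effect is that the length validated at the top of get_info() is necessarily the same value used for copy_to_user() at the bottom; no later re-read of the pointer can diverge from what was checked. The shape of that guarantee, as a userspace model (struct layout illustrative):

#include <stdio.h>
#include <string.h>

struct getinfo { char name[32]; unsigned int size; };

static int get_info(void *user, int len)         /* was: const int *len */
{
	struct getinfo info = { "filter", 4096 };

	if (len != (int)sizeof(struct getinfo))   /* one authoritative check */
		return -1;                        /* -EINVAL */
	memcpy(user, &info, (size_t)len);         /* same len, never re-read */
	return 0;
}

int main(void)
{
	struct getinfo out;
	printf("%d\n", get_info(&out, (int)sizeof(out)));   /* prints 0 */
	return 0;
}
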
105645diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
105646index 6f187c8..34b367f 100644
105647--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
105648+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
105649@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
105650
105651 static int nf_ct_frag6_sysctl_register(struct net *net)
105652 {
105653- struct ctl_table *table;
105654+ ctl_table_no_const *table = NULL;
105655 struct ctl_table_header *hdr;
105656
105657- table = nf_ct_frag6_sysctl_table;
105658 if (!net_eq(net, &init_net)) {
105659- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
105660+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
105661 GFP_KERNEL);
105662 if (table == NULL)
105663 goto err_alloc;
105664@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
105665 table[2].data = &net->nf_frag.frags.high_thresh;
105666 table[2].extra1 = &net->nf_frag.frags.low_thresh;
105667 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
105668- }
105669-
105670- hdr = register_net_sysctl(net, "net/netfilter", table);
105671+ hdr = register_net_sysctl(net, "net/netfilter", table);
105672+ } else
105673+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
105674 if (hdr == NULL)
105675 goto err_reg;
105676
105677@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
105678 return 0;
105679
105680 err_reg:
105681- if (!net_eq(net, &init_net))
105682- kfree(table);
105683+ kfree(table);
105684 err_alloc:
105685 return -ENOMEM;
105686 }
105687diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
105688index a2dfff6..1e52e6d 100644
105689--- a/net/ipv6/ping.c
105690+++ b/net/ipv6/ping.c
105691@@ -241,6 +241,24 @@ static struct pernet_operations ping_v6_net_ops = {
105692 };
105693 #endif
105694
105695+static struct pingv6_ops real_pingv6_ops = {
105696+ .ipv6_recv_error = ipv6_recv_error,
105697+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
105698+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
105699+ .icmpv6_err_convert = icmpv6_err_convert,
105700+ .ipv6_icmp_error = ipv6_icmp_error,
105701+ .ipv6_chk_addr = ipv6_chk_addr,
105702+};
105703+
105704+static struct pingv6_ops dummy_pingv6_ops = {
105705+ .ipv6_recv_error = dummy_ipv6_recv_error,
105706+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
105707+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
105708+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
105709+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
105710+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
105711+};
105712+
105713 int __init pingv6_init(void)
105714 {
105715 #ifdef CONFIG_PROC_FS
105716@@ -248,13 +266,7 @@ int __init pingv6_init(void)
105717 if (ret)
105718 return ret;
105719 #endif
105720- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
105721- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
105722- pingv6_ops.ip6_datagram_recv_specific_ctl =
105723- ip6_datagram_recv_specific_ctl;
105724- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
105725- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
105726- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
105727+ pingv6_ops = &real_pingv6_ops;
105728 return inet6_register_protosw(&pingv6_protosw);
105729 }
105730
105731@@ -263,14 +275,9 @@ int __init pingv6_init(void)
105732 */
105733 void pingv6_exit(void)
105734 {
105735- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
105736- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
105737- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
105738- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
105739- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
105740- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
105741 #ifdef CONFIG_PROC_FS
105742 unregister_pernet_subsys(&ping_v6_net_ops);
105743 #endif
105744+ pingv6_ops = &dummy_pingv6_ops;
105745 inet6_unregister_protosw(&pingv6_protosw);
105746 }
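
pingv6_init/pingv6_exit above stop poking six function pointers into a writable global one at a time; instead two fully-populated static ops tables exist for the module's whole lifetime and a single pointer is swapped between them. Under constification the tables can then live in read-only memory, and there is no window in which pingv6_ops is half real, half dummy. The idea in miniature:

#include <stdio.h>

struct ops { void (*handler)(void); };

static void real_handler(void)  { puts("real");  }
static void dummy_handler(void) { puts("dummy"); }

static const struct ops real_ops  = { real_handler  };
static const struct ops dummy_ops = { dummy_handler };

static const struct ops *cur_ops = &dummy_ops;

int main(void)
{
	cur_ops = &real_ops;    /* pingv6_init(): one pointer store */
	cur_ops->handler();
	cur_ops = &dummy_ops;   /* pingv6_exit(): swap back         */
	cur_ops->handler();
	return 0;
}
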
105747diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
105748index 679253d0..70b653c 100644
105749--- a/net/ipv6/proc.c
105750+++ b/net/ipv6/proc.c
105751@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
105752 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
105753 goto proc_snmp6_fail;
105754
105755- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
105756+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
105757 if (!net->mib.proc_net_devsnmp6)
105758 goto proc_dev_snmp6_fail;
105759 return 0;
105760diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
105761index dae7f1a..783b20d 100644
105762--- a/net/ipv6/raw.c
105763+++ b/net/ipv6/raw.c
105764@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
105765 {
105766 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
105767 skb_checksum_complete(skb)) {
105768- atomic_inc(&sk->sk_drops);
105769+ atomic_inc_unchecked(&sk->sk_drops);
105770 kfree_skb(skb);
105771 return NET_RX_DROP;
105772 }
105773@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
105774 struct raw6_sock *rp = raw6_sk(sk);
105775
105776 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
105777- atomic_inc(&sk->sk_drops);
105778+ atomic_inc_unchecked(&sk->sk_drops);
105779 kfree_skb(skb);
105780 return NET_RX_DROP;
105781 }
105782@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
105783
105784 if (inet->hdrincl) {
105785 if (skb_checksum_complete(skb)) {
105786- atomic_inc(&sk->sk_drops);
105787+ atomic_inc_unchecked(&sk->sk_drops);
105788 kfree_skb(skb);
105789 return NET_RX_DROP;
105790 }
105791@@ -609,7 +609,7 @@ out:
105792 return err;
105793 }
105794
105795-static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
105796+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, unsigned int length,
105797 struct flowi6 *fl6, struct dst_entry **dstp,
105798 unsigned int flags)
105799 {
105800@@ -915,12 +915,15 @@ do_confirm:
105801 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
105802 char __user *optval, int optlen)
105803 {
105804+ struct icmp6_filter filter;
105805+
105806 switch (optname) {
105807 case ICMPV6_FILTER:
105808 if (optlen > sizeof(struct icmp6_filter))
105809 optlen = sizeof(struct icmp6_filter);
105810- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
105811+ if (copy_from_user(&filter, optval, optlen))
105812 return -EFAULT;
105813+ raw6_sk(sk)->filter = filter;
105814 return 0;
105815 default:
105816 return -ENOPROTOOPT;
105817@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
105818 char __user *optval, int __user *optlen)
105819 {
105820 int len;
105821+ struct icmp6_filter filter;
105822
105823 switch (optname) {
105824 case ICMPV6_FILTER:
105825@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
105826 len = sizeof(struct icmp6_filter);
105827 if (put_user(len, optlen))
105828 return -EFAULT;
105829- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
105830+ filter = raw6_sk(sk)->filter;
105831+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
105832 return -EFAULT;
105833 return 0;
105834 default:
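
Both ICMPv6-filter paths in raw.c now stage the data through a stack copy: on set, a copy_from_user() that faults partway can no longer leave raw6_sk(sk)->filter half-overwritten, because the socket state is assigned only after the copy fully succeeds; on get, the added len check refuses to copy out more bytes than the local buffer holds. The pattern, modeled in userspace (memcpy stands in for the user-copy helpers):

#include <string.h>

struct icmp6_filter { unsigned int data[8]; };

static struct icmp6_filter sk_filter;   /* stands in for raw6_sk(sk)->filter */

static int set_filter(const void *user, size_t optlen)
{
	struct icmp6_filter tmp;

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	memcpy(&tmp, user, optlen);   /* models copy_from_user() */
	sk_filter = tmp;              /* commit only after full success */
	return 0;
}

static int get_filter(void *user, size_t len)
{
	struct icmp6_filter tmp = sk_filter;

	if (len > sizeof(tmp))        /* the added bounds check */
		return -1;            /* -EFAULT */
	memcpy(user, &tmp, len);
	return 0;
}

int main(void)
{
	struct icmp6_filter f = { { 0 } };
	set_filter(&f, sizeof(f));
	return get_filter(&f, sizeof(f));
}
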
105835diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
105836index d7d70e6..bd5e9fc 100644
105837--- a/net/ipv6/reassembly.c
105838+++ b/net/ipv6/reassembly.c
105839@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
105840
105841 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
105842 {
105843- struct ctl_table *table;
105844+ ctl_table_no_const *table = NULL;
105845 struct ctl_table_header *hdr;
105846
105847- table = ip6_frags_ns_ctl_table;
105848 if (!net_eq(net, &init_net)) {
105849- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
105850+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
105851 if (table == NULL)
105852 goto err_alloc;
105853
105854@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
105855 /* Don't export sysctls to unprivileged users */
105856 if (net->user_ns != &init_user_ns)
105857 table[0].procname = NULL;
105858- }
105859+ hdr = register_net_sysctl(net, "net/ipv6", table);
105860+ } else
105861+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
105862
105863- hdr = register_net_sysctl(net, "net/ipv6", table);
105864 if (hdr == NULL)
105865 goto err_reg;
105866
105867@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
105868 return 0;
105869
105870 err_reg:
105871- if (!net_eq(net, &init_net))
105872- kfree(table);
105873+ kfree(table);
105874 err_alloc:
105875 return -ENOMEM;
105876 }
105877diff --git a/net/ipv6/route.c b/net/ipv6/route.c
105878index 4688bd4..584453d 100644
105879--- a/net/ipv6/route.c
105880+++ b/net/ipv6/route.c
105881@@ -3029,7 +3029,7 @@ struct ctl_table ipv6_route_table_template[] = {
105882
105883 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
105884 {
105885- struct ctl_table *table;
105886+ ctl_table_no_const *table;
105887
105888 table = kmemdup(ipv6_route_table_template,
105889 sizeof(ipv6_route_table_template),
105890diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
105891index e4cbd57..02b1aaa 100644
105892--- a/net/ipv6/sit.c
105893+++ b/net/ipv6/sit.c
105894@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
105895 static void ipip6_dev_free(struct net_device *dev);
105896 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
105897 __be32 *v4dst);
105898-static struct rtnl_link_ops sit_link_ops __read_mostly;
105899+static struct rtnl_link_ops sit_link_ops;
105900
105901 static int sit_net_id __read_mostly;
105902 struct sit_net {
105903@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
105904 unregister_netdevice_queue(dev, head);
105905 }
105906
105907-static struct rtnl_link_ops sit_link_ops __read_mostly = {
105908+static struct rtnl_link_ops sit_link_ops = {
105909 .kind = "sit",
105910 .maxtype = IFLA_IPTUN_MAX,
105911 .policy = ipip6_policy,
105912diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
105913index c5c10fa..2577d51 100644
105914--- a/net/ipv6/sysctl_net_ipv6.c
105915+++ b/net/ipv6/sysctl_net_ipv6.c
105916@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
105917
105918 static int __net_init ipv6_sysctl_net_init(struct net *net)
105919 {
105920- struct ctl_table *ipv6_table;
105921+ ctl_table_no_const *ipv6_table;
105922 struct ctl_table *ipv6_route_table;
105923 struct ctl_table *ipv6_icmp_table;
105924 int err;
105925diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
105926index 1f5e622..8387d90 100644
105927--- a/net/ipv6/tcp_ipv6.c
105928+++ b/net/ipv6/tcp_ipv6.c
105929@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
105930 }
105931 }
105932
105933+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105934+extern int grsec_enable_blackhole;
105935+#endif
105936+
105937 static void tcp_v6_hash(struct sock *sk)
105938 {
105939 if (sk->sk_state != TCP_CLOSE) {
105940@@ -1345,6 +1349,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
105941 return 0;
105942
105943 reset:
105944+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105945+ if (!grsec_enable_blackhole)
105946+#endif
105947 tcp_v6_send_reset(sk, skb);
105948 discard:
105949 if (opt_skb)
105950@@ -1454,12 +1461,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
105951
105952 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
105953 inet6_iif(skb));
105954- if (!sk)
105955+ if (!sk) {
105956+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105957+ ret = 1;
105958+#endif
105959 goto no_tcp_socket;
105960+ }
105961
105962 process:
105963- if (sk->sk_state == TCP_TIME_WAIT)
105964+ if (sk->sk_state == TCP_TIME_WAIT) {
105965+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105966+ ret = 2;
105967+#endif
105968 goto do_time_wait;
105969+ }
105970
105971 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
105972 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
105973@@ -1510,6 +1525,10 @@ csum_error:
105974 bad_packet:
105975 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
105976 } else {
105977+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105978+ if (!grsec_enable_blackhole || (ret == 1 &&
105979+ (skb->dev->flags & IFF_LOOPBACK)))
105980+#endif
105981 tcp_v6_send_reset(NULL, skb);
105982 }
105983
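
tcp_v6_rcv above mirrors the IPv4 logic earlier in the patch: ret records why the packet fell through (1 = no matching socket, 2 = time-wait) so the final gate can still answer loopback probes for unbound ports with an RST while blackholing everything that arrived on a real interface. The decision in isolation:

#include <stdio.h>
#include <stdbool.h>

/* ret == 1: no socket matched; ret == 2: time-wait fall-through */
static bool send_reset(bool blackhole_on, int ret, bool dev_is_loopback)
{
	return !blackhole_on || (ret == 1 && dev_is_loopback);
}

int main(void)
{
	printf("%d\n", send_reset(true, 1, true));   /* 1: local probe, RST */
	printf("%d\n", send_reset(true, 1, false));  /* 0: suppressed       */
	return 0;
}
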
105984diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
105985index d048d46..bf141c3 100644
105986--- a/net/ipv6/udp.c
105987+++ b/net/ipv6/udp.c
105988@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
105989 udp_ipv6_hash_secret + net_hash_mix(net));
105990 }
105991
105992+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105993+extern int grsec_enable_blackhole;
105994+#endif
105995+
105996 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
105997 {
105998 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
105999@@ -448,7 +452,7 @@ try_again:
106000 if (unlikely(err)) {
106001 trace_kfree_skb(skb, udpv6_recvmsg);
106002 if (!peeked) {
106003- atomic_inc(&sk->sk_drops);
106004+ atomic_inc_unchecked(&sk->sk_drops);
106005 if (is_udp4)
106006 UDP_INC_STATS_USER(sock_net(sk),
106007 UDP_MIB_INERRORS,
106008@@ -714,7 +718,7 @@ csum_error:
106009 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
106010 drop:
106011 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
106012- atomic_inc(&sk->sk_drops);
106013+ atomic_inc_unchecked(&sk->sk_drops);
106014 kfree_skb(skb);
106015 return -1;
106016 }
106017@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
106018 if (likely(skb1 == NULL))
106019 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
106020 if (!skb1) {
106021- atomic_inc(&sk->sk_drops);
106022+ atomic_inc_unchecked(&sk->sk_drops);
106023 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
106024 IS_UDPLITE(sk));
106025 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
106026@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
106027 goto csum_error;
106028
106029 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
106030+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
106031+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
106032+#endif
106033 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
106034
106035 kfree_skb(skb);
106036diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
106037index 8d2d01b4..313511e 100644
106038--- a/net/ipv6/xfrm6_policy.c
106039+++ b/net/ipv6/xfrm6_policy.c
106040@@ -224,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
106041 }
106042 }
106043
106044-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
106045+static int xfrm6_garbage_collect(struct dst_ops *ops)
106046 {
106047 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
106048
106049- xfrm6_policy_afinfo.garbage_collect(net);
106050+ xfrm_garbage_collect_deferred(net);
106051 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
106052 }
106053
106054@@ -341,19 +341,19 @@ static struct ctl_table xfrm6_policy_table[] = {
106055
106056 static int __net_init xfrm6_net_init(struct net *net)
106057 {
106058- struct ctl_table *table;
106059+ ctl_table_no_const *table = NULL;
106060 struct ctl_table_header *hdr;
106061
106062- table = xfrm6_policy_table;
106063 if (!net_eq(net, &init_net)) {
106064- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
106065+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
106066 if (!table)
106067 goto err_alloc;
106068
106069 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
106070- }
106071+ hdr = register_net_sysctl(net, "net/ipv6", table);
106072+ } else
106073+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
106074
106075- hdr = register_net_sysctl(net, "net/ipv6", table);
106076 if (!hdr)
106077 goto err_reg;
106078
106079@@ -361,8 +361,7 @@ static int __net_init xfrm6_net_init(struct net *net)
106080 return 0;
106081
106082 err_reg:
106083- if (!net_eq(net, &init_net))
106084- kfree(table);
106085+ kfree(table);
106086 err_alloc:
106087 return -ENOMEM;
106088 }
106089diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
106090index c1d247e..9e5949d 100644
106091--- a/net/ipx/ipx_proc.c
106092+++ b/net/ipx/ipx_proc.c
106093@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
106094 struct proc_dir_entry *p;
106095 int rc = -ENOMEM;
106096
106097- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
106098+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
106099
106100 if (!ipx_proc_dir)
106101 goto out;
106102diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
106103index 683346d..cb0e12d 100644
106104--- a/net/irda/ircomm/ircomm_tty.c
106105+++ b/net/irda/ircomm/ircomm_tty.c
106106@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
106107 add_wait_queue(&port->open_wait, &wait);
106108
106109 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
106110- __FILE__, __LINE__, tty->driver->name, port->count);
106111+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
106112
106113 spin_lock_irqsave(&port->lock, flags);
106114- port->count--;
106115+ atomic_dec(&port->count);
106116 port->blocked_open++;
106117 spin_unlock_irqrestore(&port->lock, flags);
106118
106119@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
106120 }
106121
106122 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
106123- __FILE__, __LINE__, tty->driver->name, port->count);
106124+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
106125
106126 schedule();
106127 }
106128@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
106129
106130 spin_lock_irqsave(&port->lock, flags);
106131 if (!tty_hung_up_p(filp))
106132- port->count++;
106133+ atomic_inc(&port->count);
106134 port->blocked_open--;
106135 spin_unlock_irqrestore(&port->lock, flags);
106136
106137 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
106138- __FILE__, __LINE__, tty->driver->name, port->count);
106139+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
106140
106141 if (!retval)
106142 port->flags |= ASYNC_NORMAL_ACTIVE;
106143@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
106144
106145 /* ++ is not atomic, so this should be protected - Jean II */
106146 spin_lock_irqsave(&self->port.lock, flags);
106147- self->port.count++;
106148+ atomic_inc(&self->port.count);
106149 spin_unlock_irqrestore(&self->port.lock, flags);
106150 tty_port_tty_set(&self->port, tty);
106151
106152 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
106153- self->line, self->port.count);
106154+ self->line, atomic_read(&self->port.count));
106155
106156 /* Not really used by us, but lets do it anyway */
106157 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
106158@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
106159 tty_kref_put(port->tty);
106160 }
106161 port->tty = NULL;
106162- port->count = 0;
106163+ atomic_set(&port->count, 0);
106164 spin_unlock_irqrestore(&port->lock, flags);
106165
106166 wake_up_interruptible(&port->open_wait);
106167@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
106168 seq_putc(m, '\n');
106169
106170 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
106171- seq_printf(m, "Open count: %d\n", self->port.count);
106172+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
106173 seq_printf(m, "Max data size: %d\n", self->max_data_size);
106174 seq_printf(m, "Max header size: %d\n", self->max_header_size);
106175
106176diff --git a/net/irda/irproc.c b/net/irda/irproc.c
106177index b9ac598..f88cc56 100644
106178--- a/net/irda/irproc.c
106179+++ b/net/irda/irproc.c
106180@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
106181 {
106182 int i;
106183
106184- proc_irda = proc_mkdir("irda", init_net.proc_net);
106185+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
106186 if (proc_irda == NULL)
106187 return;
106188
106189diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
106190index 53d9311..cbaf99f 100644
106191--- a/net/iucv/af_iucv.c
106192+++ b/net/iucv/af_iucv.c
106193@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
106194 {
106195 char name[12];
106196
106197- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
106198+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
106199 while (__iucv_get_sock_by_name(name)) {
106200 sprintf(name, "%08x",
106201- atomic_inc_return(&iucv_sk_list.autobind_name));
106202+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
106203 }
106204 memcpy(iucv->src_name, name, 8);
106205 }
106206diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
106207index 2a6a1fd..6c112b0 100644
106208--- a/net/iucv/iucv.c
106209+++ b/net/iucv/iucv.c
106210@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
106211 return NOTIFY_OK;
106212 }
106213
106214-static struct notifier_block __refdata iucv_cpu_notifier = {
106215+static struct notifier_block iucv_cpu_notifier = {
106216 .notifier_call = iucv_cpu_notify,
106217 };
106218
106219diff --git a/net/key/af_key.c b/net/key/af_key.c
106220index f8ac939..1e189bf 100644
106221--- a/net/key/af_key.c
106222+++ b/net/key/af_key.c
106223@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
106224 static u32 get_acqseq(void)
106225 {
106226 u32 res;
106227- static atomic_t acqseq;
106228+ static atomic_unchecked_t acqseq;
106229
106230 do {
106231- res = atomic_inc_return(&acqseq);
106232+ res = atomic_inc_return_unchecked(&acqseq);
106233 } while (!res);
106234 return res;
106235 }
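
get_acqseq() in af_key.c is a textbook candidate for the _unchecked conversion: the counter is a wrapping sequence-number generator, not a reference count, and the do/while already handles the wrap by skipping the reserved value 0. Equivalent standalone logic:

#include <stdio.h>

static unsigned int get_acqseq(void)
{
	static unsigned int acqseq;   /* models atomic_unchecked_t */
	unsigned int res;

	do {
		res = __atomic_add_fetch(&acqseq, 1, __ATOMIC_RELAXED);
	} while (!res);               /* 0 is reserved; skip it on wrap */
	return res;
}

int main(void)
{
	printf("%u %u\n", get_acqseq(), get_acqseq());   /* prints 1 2 */
	return 0;
}
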
106236diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
106237index 781b3a2..73a7434 100644
106238--- a/net/l2tp/l2tp_eth.c
106239+++ b/net/l2tp/l2tp_eth.c
106240@@ -42,12 +42,12 @@ struct l2tp_eth {
106241 struct sock *tunnel_sock;
106242 struct l2tp_session *session;
106243 struct list_head list;
106244- atomic_long_t tx_bytes;
106245- atomic_long_t tx_packets;
106246- atomic_long_t tx_dropped;
106247- atomic_long_t rx_bytes;
106248- atomic_long_t rx_packets;
106249- atomic_long_t rx_errors;
106250+ atomic_long_unchecked_t tx_bytes;
106251+ atomic_long_unchecked_t tx_packets;
106252+ atomic_long_unchecked_t tx_dropped;
106253+ atomic_long_unchecked_t rx_bytes;
106254+ atomic_long_unchecked_t rx_packets;
106255+ atomic_long_unchecked_t rx_errors;
106256 };
106257
106258 /* via l2tp_session_priv() */
106259@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
106260 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
106261
106262 if (likely(ret == NET_XMIT_SUCCESS)) {
106263- atomic_long_add(len, &priv->tx_bytes);
106264- atomic_long_inc(&priv->tx_packets);
106265+ atomic_long_add_unchecked(len, &priv->tx_bytes);
106266+ atomic_long_inc_unchecked(&priv->tx_packets);
106267 } else {
106268- atomic_long_inc(&priv->tx_dropped);
106269+ atomic_long_inc_unchecked(&priv->tx_dropped);
106270 }
106271 return NETDEV_TX_OK;
106272 }
106273@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
106274 {
106275 struct l2tp_eth *priv = netdev_priv(dev);
106276
106277- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
106278- stats->tx_packets = atomic_long_read(&priv->tx_packets);
106279- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
106280- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
106281- stats->rx_packets = atomic_long_read(&priv->rx_packets);
106282- stats->rx_errors = atomic_long_read(&priv->rx_errors);
106283+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
106284+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
106285+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
106286+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
106287+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
106288+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
106289 return stats;
106290 }
106291
106292@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
106293 nf_reset(skb);
106294
106295 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
106296- atomic_long_inc(&priv->rx_packets);
106297- atomic_long_add(data_len, &priv->rx_bytes);
106298+ atomic_long_inc_unchecked(&priv->rx_packets);
106299+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
106300 } else {
106301- atomic_long_inc(&priv->rx_errors);
106302+ atomic_long_inc_unchecked(&priv->rx_errors);
106303 }
106304 return;
106305
106306 error:
106307- atomic_long_inc(&priv->rx_errors);
106308+ atomic_long_inc_unchecked(&priv->rx_errors);
106309 kfree_skb(skb);
106310 }
106311
106312diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
106313index 05dfc8aa..df6cfd7 100644
106314--- a/net/l2tp/l2tp_ip.c
106315+++ b/net/l2tp/l2tp_ip.c
106316@@ -608,7 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
106317 .ops = &l2tp_ip_ops,
106318 };
106319
106320-static struct net_protocol l2tp_ip_protocol __read_mostly = {
106321+static const struct net_protocol l2tp_ip_protocol = {
106322 .handler = l2tp_ip_recv,
106323 .netns_ok = 1,
106324 };
106325diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
106326index 8611f1b..bc60a2d 100644
106327--- a/net/l2tp/l2tp_ip6.c
106328+++ b/net/l2tp/l2tp_ip6.c
106329@@ -757,7 +757,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
106330 .ops = &l2tp_ip6_ops,
106331 };
106332
106333-static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
106334+static const struct inet6_protocol l2tp_ip6_protocol = {
106335 .handler = l2tp_ip6_recv,
106336 };
106337
106338diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
106339index 1a3c7e0..80f8b0c 100644
106340--- a/net/llc/llc_proc.c
106341+++ b/net/llc/llc_proc.c
106342@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
106343 int rc = -ENOMEM;
106344 struct proc_dir_entry *p;
106345
106346- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
106347+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
106348 if (!llc_proc_dir)
106349 goto out;
106350
106351diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
106352index dd4ff36..3462997 100644
106353--- a/net/mac80211/cfg.c
106354+++ b/net/mac80211/cfg.c
106355@@ -581,7 +581,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
106356 ret = ieee80211_vif_use_channel(sdata, chandef,
106357 IEEE80211_CHANCTX_EXCLUSIVE);
106358 }
106359- } else if (local->open_count == local->monitors) {
106360+ } else if (local_read(&local->open_count) == local->monitors) {
106361 local->_oper_chandef = *chandef;
106362 ieee80211_hw_config(local, 0);
106363 }
106364@@ -3468,7 +3468,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
106365 else
106366 local->probe_req_reg--;
106367
106368- if (!local->open_count)
106369+ if (!local_read(&local->open_count))
106370 break;
106371
106372 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
106373@@ -3603,8 +3603,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
106374 if (chanctx_conf) {
106375 *chandef = sdata->vif.bss_conf.chandef;
106376 ret = 0;
106377- } else if (local->open_count > 0 &&
106378- local->open_count == local->monitors &&
106379+ } else if (local_read(&local->open_count) > 0 &&
106380+ local_read(&local->open_count) == local->monitors &&
106381 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
106382 if (local->use_chanctx)
106383 *chandef = local->monitor_chandef;
106384diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
106385index 8d53d65..a4ac794 100644
106386--- a/net/mac80211/ieee80211_i.h
106387+++ b/net/mac80211/ieee80211_i.h
106388@@ -29,6 +29,7 @@
106389 #include <net/ieee80211_radiotap.h>
106390 #include <net/cfg80211.h>
106391 #include <net/mac80211.h>
106392+#include <asm/local.h>
106393 #include "key.h"
106394 #include "sta_info.h"
106395 #include "debug.h"
106396@@ -1126,7 +1127,7 @@ struct ieee80211_local {
106397 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
106398 spinlock_t queue_stop_reason_lock;
106399
106400- int open_count;
106401+ local_t open_count;
106402 int monitors, cooked_mntrs;
106403 /* number of interfaces with corresponding FIF_ flags */
106404 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
106405diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
106406index 81a2751..c06a026 100644
106407--- a/net/mac80211/iface.c
106408+++ b/net/mac80211/iface.c
106409@@ -544,7 +544,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106410 break;
106411 }
106412
106413- if (local->open_count == 0) {
106414+ if (local_read(&local->open_count) == 0) {
106415 res = drv_start(local);
106416 if (res)
106417 goto err_del_bss;
106418@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106419 res = drv_add_interface(local, sdata);
106420 if (res)
106421 goto err_stop;
106422- } else if (local->monitors == 0 && local->open_count == 0) {
106423+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
106424 res = ieee80211_add_virtual_monitor(local);
106425 if (res)
106426 goto err_stop;
106427@@ -701,7 +701,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106428 atomic_inc(&local->iff_promiscs);
106429
106430 if (coming_up)
106431- local->open_count++;
106432+ local_inc(&local->open_count);
106433
106434 if (hw_reconf_flags)
106435 ieee80211_hw_config(local, hw_reconf_flags);
106436@@ -739,7 +739,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106437 err_del_interface:
106438 drv_remove_interface(local, sdata);
106439 err_stop:
106440- if (!local->open_count)
106441+ if (!local_read(&local->open_count))
106442 drv_stop(local);
106443 err_del_bss:
106444 sdata->bss = NULL;
106445@@ -907,7 +907,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106446 }
106447
106448 if (going_down)
106449- local->open_count--;
106450+ local_dec(&local->open_count);
106451
106452 switch (sdata->vif.type) {
106453 case NL80211_IFTYPE_AP_VLAN:
106454@@ -969,7 +969,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106455 }
106456 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
106457
106458- if (local->open_count == 0)
106459+ if (local_read(&local->open_count) == 0)
106460 ieee80211_clear_tx_pending(local);
106461
106462 /*
106463@@ -1012,7 +1012,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106464 if (cancel_scan)
106465 flush_delayed_work(&local->scan_work);
106466
106467- if (local->open_count == 0) {
106468+ if (local_read(&local->open_count) == 0) {
106469 ieee80211_stop_device(local);
106470
106471 /* no reconfiguring after stop! */
106472@@ -1023,7 +1023,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106473 ieee80211_configure_filter(local);
106474 ieee80211_hw_config(local, hw_reconf_flags);
106475
106476- if (local->monitors == local->open_count)
106477+ if (local->monitors == local_read(&local->open_count))
106478 ieee80211_add_virtual_monitor(local);
106479 }
106480
106481diff --git a/net/mac80211/main.c b/net/mac80211/main.c
106482index 5e09d35..e2fdbe2 100644
106483--- a/net/mac80211/main.c
106484+++ b/net/mac80211/main.c
106485@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
106486 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
106487 IEEE80211_CONF_CHANGE_POWER);
106488
106489- if (changed && local->open_count) {
106490+ if (changed && local_read(&local->open_count)) {
106491 ret = drv_config(local, changed);
106492 /*
106493 * Goal:
106494diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
106495index ca405b6..6cc8bee 100644
106496--- a/net/mac80211/pm.c
106497+++ b/net/mac80211/pm.c
106498@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
106499 struct ieee80211_sub_if_data *sdata;
106500 struct sta_info *sta;
106501
106502- if (!local->open_count)
106503+ if (!local_read(&local->open_count))
106504 goto suspend;
106505
106506 ieee80211_scan_cancel(local);
106507@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
106508 cancel_work_sync(&local->dynamic_ps_enable_work);
106509 del_timer_sync(&local->dynamic_ps_timer);
106510
106511- local->wowlan = wowlan && local->open_count;
106512+ local->wowlan = wowlan && local_read(&local->open_count);
106513 if (local->wowlan) {
106514 int err = drv_suspend(local, wowlan);
106515 if (err < 0) {
106516@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
106517 WARN_ON(!list_empty(&local->chanctx_list));
106518
106519 /* stop hardware - this must stop RX */
106520- if (local->open_count)
106521+ if (local_read(&local->open_count))
106522 ieee80211_stop_device(local);
106523
106524 suspend:
106525diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
106526index d53355b..21f583a 100644
106527--- a/net/mac80211/rate.c
106528+++ b/net/mac80211/rate.c
106529@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
106530
106531 ASSERT_RTNL();
106532
106533- if (local->open_count)
106534+ if (local_read(&local->open_count))
106535 return -EBUSY;
106536
106537 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
106538diff --git a/net/mac80211/util.c b/net/mac80211/util.c
106539index 747bdcf..eb2b981 100644
106540--- a/net/mac80211/util.c
106541+++ b/net/mac80211/util.c
106542@@ -1741,7 +1741,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106543 bool sched_scan_stopped = false;
106544
106545 /* nothing to do if HW shouldn't run */
106546- if (!local->open_count)
106547+ if (!local_read(&local->open_count))
106548 goto wake_up;
106549
106550 #ifdef CONFIG_PM
106551@@ -1993,7 +1993,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106552 local->in_reconfig = false;
106553 barrier();
106554
106555- if (local->monitors == local->open_count && local->monitors > 0)
106556+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
106557 ieee80211_add_virtual_monitor(local);
106558
106559 /*
106560@@ -2048,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106561 * If this is for hw restart things are still running.
106562 * We may want to change that later, however.
106563 */
106564- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
106565+ if (local_read(&local->open_count) && (!local->suspended || reconfig_due_to_wowlan))
106566 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
106567
106568 if (!local->suspended)
106569@@ -2072,7 +2072,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106570 flush_delayed_work(&local->scan_work);
106571 }
106572
106573- if (local->open_count && !reconfig_due_to_wowlan)
106574+ if (local_read(&local->open_count) && !reconfig_due_to_wowlan)
106575 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
106576
106577 list_for_each_entry(sdata, &local->interfaces, list) {
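
Across cfg.c, iface.c, main.c, pm.c, rate.c and util.c, mac80211's open_count turns from a bare int into a local_t, with every access rewritten through local_read()/local_inc()/local_dec(). The counter is touched from several paths, and a single-word atomic read-modify-write removes any reliance on an outer lock for the bare increment. A userspace approximation of the accessors (<asm/local.h> stand-ins; relaxed ordering suffices for a counter):

typedef struct { long v; } local_t;

static inline long local_read(const local_t *l)
{ return __atomic_load_n(&l->v, __ATOMIC_RELAXED); }
static inline void local_inc(local_t *l)
{ __atomic_fetch_add(&l->v, 1, __ATOMIC_RELAXED); }
static inline void local_dec(local_t *l)
{ __atomic_fetch_sub(&l->v, 1, __ATOMIC_RELAXED); }

int main(void)
{
	local_t open_count = { 0 };

	local_inc(&open_count);            /* ieee80211_do_open()     */
	if (local_read(&open_count) == 0)  /* replaces bare int reads */
		return 1;
	local_dec(&open_count);            /* ieee80211_do_stop()     */
	return 0;
}
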
106578diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
106579index b02660f..c0f791c 100644
106580--- a/net/netfilter/Kconfig
106581+++ b/net/netfilter/Kconfig
106582@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
106583
106584 To compile it as a module, choose M here. If unsure, say N.
106585
106586+config NETFILTER_XT_MATCH_GRADM
106587+ tristate '"gradm" match support'
106588+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
106589+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
106590+ ---help---
106591+	  The gradm match allows matching on whether grsecurity RBAC is
106592+	  enabled. It is useful when iptables rules are applied early during
106593+	  boot to prevent connections to the machine (except from a trusted
106594+	  host) while the RBAC system is still disabled.
106595+
106596 config NETFILTER_XT_MATCH_HASHLIMIT
106597 tristate '"hashlimit" match support'
106598 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
106599diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
106600index 89f73a9..e4e5bd9 100644
106601--- a/net/netfilter/Makefile
106602+++ b/net/netfilter/Makefile
106603@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
106604 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
106605 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
106606 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
106607+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
106608 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
106609 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
106610 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
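
The Kconfig and Makefile hunks above wire up a new xt_gradm match (its xt_gradm.c source is not part of this excerpt), letting early-boot iptables rules key on whether the RBAC system is up. As rough orientation only, a hypothetical minimal xtables match of that shape might look as follows; the matchinfo layout and the gr_acl_is_enabled() linkage are assumptions, not the verbatim grsecurity source:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

/* hypothetical matchinfo: a single inversion flag, as most matches carry */
struct xt_gradm_mtinfo {
	__u16 flags;
	__u16 invflags;
};

extern int gr_acl_is_enabled(void);   /* assumed RBAC-engine export */

static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_gradm_mtinfo *info = par->matchinfo;

	/* match when RBAC is enabled, flipped by the inversion flag */
	return (gr_acl_is_enabled() ? true : false) ^ !!info->invflags;
}

static struct xt_match gradm_mt_reg = {
	.name      = "gradm",
	.revision  = 0,
	.family    = NFPROTO_UNSPEC,
	.match     = gradm_mt,
	.matchsize = sizeof(struct xt_gradm_mtinfo),
	.me        = THIS_MODULE,
};

static int __init gradm_mt_init(void)
{
	return xt_register_match(&gradm_mt_reg);
}

static void __exit gradm_mt_exit(void)
{
	xt_unregister_match(&gradm_mt_reg);
}

module_init(gradm_mt_init);
module_exit(gradm_mt_exit);
MODULE_LICENSE("GPL");
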
106611diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
106612index d259da3..6a32b2c 100644
106613--- a/net/netfilter/ipset/ip_set_core.c
106614+++ b/net/netfilter/ipset/ip_set_core.c
106615@@ -1952,7 +1952,7 @@ done:
106616 return ret;
106617 }
106618
106619-static struct nf_sockopt_ops so_set __read_mostly = {
106620+static struct nf_sockopt_ops so_set = {
106621 .pf = PF_INET,
106622 .get_optmin = SO_IP_SET,
106623 .get_optmax = SO_IP_SET + 1,
106624diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
106625index b0f7b62..0541842 100644
106626--- a/net/netfilter/ipvs/ip_vs_conn.c
106627+++ b/net/netfilter/ipvs/ip_vs_conn.c
106628@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
106629 /* Increase the refcnt counter of the dest */
106630 ip_vs_dest_hold(dest);
106631
106632- conn_flags = atomic_read(&dest->conn_flags);
106633+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
106634 if (cp->protocol != IPPROTO_UDP)
106635 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
106636 flags = cp->flags;
106637@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
106638
106639 cp->control = NULL;
106640 atomic_set(&cp->n_control, 0);
106641- atomic_set(&cp->in_pkts, 0);
106642+ atomic_set_unchecked(&cp->in_pkts, 0);
106643
106644 cp->packet_xmit = NULL;
106645 cp->app = NULL;
106646@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
106647
106648 /* Don't drop the entry if its number of incoming packets is not
106649 located in [0, 8] */
106650- i = atomic_read(&cp->in_pkts);
106651+ i = atomic_read_unchecked(&cp->in_pkts);
106652 if (i > 8 || i < 0) return 0;
106653
106654 if (!todrop_rate[i]) return 0;
106655diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
106656index b87ca32..76c7799 100644
106657--- a/net/netfilter/ipvs/ip_vs_core.c
106658+++ b/net/netfilter/ipvs/ip_vs_core.c
106659@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
106660 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
106661 /* do not touch skb anymore */
106662
106663- atomic_inc(&cp->in_pkts);
106664+ atomic_inc_unchecked(&cp->in_pkts);
106665 ip_vs_conn_put(cp);
106666 return ret;
106667 }
106668@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
106669 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
106670 pkts = sysctl_sync_threshold(ipvs);
106671 else
106672- pkts = atomic_add_return(1, &cp->in_pkts);
106673+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
106674
106675 if (ipvs->sync_state & IP_VS_STATE_MASTER)
106676 ip_vs_sync_conn(net, cp, pkts);
106677diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
106678index ed99448..3ba6cad 100644
106679--- a/net/netfilter/ipvs/ip_vs_ctl.c
106680+++ b/net/netfilter/ipvs/ip_vs_ctl.c
106681@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
106682 */
106683 ip_vs_rs_hash(ipvs, dest);
106684 }
106685- atomic_set(&dest->conn_flags, conn_flags);
106686+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
106687
106688 /* bind the service */
106689 old_svc = rcu_dereference_protected(dest->svc, 1);
106690@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
106691 * align with netns init in ip_vs_control_net_init()
106692 */
106693
106694-static struct ctl_table vs_vars[] = {
106695+static ctl_table_no_const vs_vars[] __read_only = {
106696 {
106697 .procname = "amemthresh",
106698 .maxlen = sizeof(int),
106699@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
106700 " %-7s %-6d %-10d %-10d\n",
106701 &dest->addr.in6,
106702 ntohs(dest->port),
106703- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
106704+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
106705 atomic_read(&dest->weight),
106706 atomic_read(&dest->activeconns),
106707 atomic_read(&dest->inactconns));
106708@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
106709 "%-7s %-6d %-10d %-10d\n",
106710 ntohl(dest->addr.ip),
106711 ntohs(dest->port),
106712- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
106713+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
106714 atomic_read(&dest->weight),
106715 atomic_read(&dest->activeconns),
106716 atomic_read(&dest->inactconns));
106717@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
106718
106719 entry.addr = dest->addr.ip;
106720 entry.port = dest->port;
106721- entry.conn_flags = atomic_read(&dest->conn_flags);
106722+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
106723 entry.weight = atomic_read(&dest->weight);
106724 entry.u_threshold = dest->u_threshold;
106725 entry.l_threshold = dest->l_threshold;
106726@@ -3040,7 +3040,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
106727 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
106728 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
106729 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
106730- (atomic_read(&dest->conn_flags) &
106731+ (atomic_read_unchecked(&dest->conn_flags) &
106732 IP_VS_CONN_F_FWD_MASK)) ||
106733 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
106734 atomic_read(&dest->weight)) ||
106735@@ -3675,7 +3675,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
106736 {
106737 int idx;
106738 struct netns_ipvs *ipvs = net_ipvs(net);
106739- struct ctl_table *tbl;
106740+ ctl_table_no_const *tbl;
106741
106742 atomic_set(&ipvs->dropentry, 0);
106743 spin_lock_init(&ipvs->dropentry_lock);
106744diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
106745index 127f140..553d652 100644
106746--- a/net/netfilter/ipvs/ip_vs_lblc.c
106747+++ b/net/netfilter/ipvs/ip_vs_lblc.c
106748@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
106749 * IPVS LBLC sysctl table
106750 */
106751 #ifdef CONFIG_SYSCTL
106752-static struct ctl_table vs_vars_table[] = {
106753+static ctl_table_no_const vs_vars_table[] __read_only = {
106754 {
106755 .procname = "lblc_expiration",
106756 .data = NULL,
106757diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
106758index 2229d2d..b32b785 100644
106759--- a/net/netfilter/ipvs/ip_vs_lblcr.c
106760+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
106761@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
106762 * IPVS LBLCR sysctl table
106763 */
106764
106765-static struct ctl_table vs_vars_table[] = {
106766+static ctl_table_no_const vs_vars_table[] __read_only = {
106767 {
106768 .procname = "lblcr_expiration",
106769 .data = NULL,
106770diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
106771index d93ceeb..4556144 100644
106772--- a/net/netfilter/ipvs/ip_vs_sync.c
106773+++ b/net/netfilter/ipvs/ip_vs_sync.c
106774@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
106775 cp = cp->control;
106776 if (cp) {
106777 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
106778- pkts = atomic_add_return(1, &cp->in_pkts);
106779+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
106780 else
106781 pkts = sysctl_sync_threshold(ipvs);
106782 ip_vs_sync_conn(net, cp->control, pkts);
106783@@ -771,7 +771,7 @@ control:
106784 if (!cp)
106785 return;
106786 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
106787- pkts = atomic_add_return(1, &cp->in_pkts);
106788+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
106789 else
106790 pkts = sysctl_sync_threshold(ipvs);
106791 goto sloop;
106792@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
106793
106794 if (opt)
106795 memcpy(&cp->in_seq, opt, sizeof(*opt));
106796- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
106797+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
106798 cp->state = state;
106799 cp->old_state = cp->state;
106800 /*
106801diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
106802index 3aedbda..6a63567 100644
106803--- a/net/netfilter/ipvs/ip_vs_xmit.c
106804+++ b/net/netfilter/ipvs/ip_vs_xmit.c
106805@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
106806 else
106807 rc = NF_ACCEPT;
106808 /* do not touch skb anymore */
106809- atomic_inc(&cp->in_pkts);
106810+ atomic_inc_unchecked(&cp->in_pkts);
106811 goto out;
106812 }
106813
106814@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
106815 else
106816 rc = NF_ACCEPT;
106817 /* do not touch skb anymore */
106818- atomic_inc(&cp->in_pkts);
106819+ atomic_inc_unchecked(&cp->in_pkts);
106820 goto out;
106821 }
106822
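The ip_vs hunks above all apply one recurring grsecurity/PaX transformation: fields such as cp->in_pkts (a packet counter) and dest->conn_flags (a flags word) never act as reference counts, so overflowing or wrapping them is harmless, and they are moved to the unchecked atomic type that PaX's REFCOUNT overflow detection deliberately ignores. A rough sketch of the shape these helpers take when the hardening is compiled out (an assumption about form, not the verbatim PaX definitions):

typedef struct {
	int counter;
} atomic_unchecked_t;	/* same layout as atomic_t */

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return ACCESS_ONCE(v->counter);
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	v->counter = i;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* no overflow trap: wrapping a statistics counter is benign */
	atomic_inc((atomic_t *)v);
}

static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
	return atomic_add_return(i, (atomic_t *)v);
}

With REFCOUNT enabled, the ordinary atomic_inc()/atomic_add_return() gain overflow detection, which is why counters that may legitimately saturate have to be opted out via this separate type.
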
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index a4b5e2a..13b1de3 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_acct_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
 			GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad86..18c984c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1733,6 +1733,10 @@ void nf_conntrack_init_end(void)
 #define DYING_NULLS_VAL ((1<<30)+1)
 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
+#endif
+
 int nf_conntrack_init_net(struct net *net)
 {
 	int ret = -ENOMEM;
@@ -1758,7 +1762,11 @@ int nf_conntrack_init_net(struct net *net)
 	if (!net->ct.stat)
 		goto err_pcpu_lists;
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
+#else
 	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+#endif
 	if (!net->ct.slabname)
 		goto err_slabname;
 
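The nf_conntrack_core.c change just above closes a kernel-address disclosure: the per-netns slab cache was named with the struct net pointer itself ("nf_conntrack_%p"), and slab cache names are visible to readers of /proc/slabinfo. With GRKERNSEC_HIDESYM the name is built from an opaque sequence number instead. Condensed into a single hypothetical helper (the function name is invented; the logic restates the hunk):

static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);

static char *conntrack_cache_name(struct net *net)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	/* opaque per-namespace id: reveals nothing about kernel addresses */
	return kasprintf(GFP_KERNEL, "nf_conntrack_%08x",
			 atomic_inc_return_unchecked(&conntrack_cache_id));
#else
	/* embeds the struct net address in a name readable via /proc/slabinfo */
	return kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
#endif
}
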
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 4e78c57..ec8fb74 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_event_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
 			GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index bd9d315..989947e 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
 
 static int nf_conntrack_helper_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
 			GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index b65d586..beec902 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
 
 static void
 nf_ct_unregister_sysctl(struct ctl_table_header **header,
-			struct ctl_table **table,
+			ctl_table_no_const **table,
 			unsigned int users)
 {
 	if (users > 0)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index fc823fa..8311af3 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
 
 static int nf_conntrack_standalone_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
 			GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 7a394df..bd91a8a 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
 			GFP_KERNEL);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 675d12c..b36e825 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -386,7 +386,7 @@ static const struct file_operations nflog_file_ops = {
 
 #ifdef CONFIG_SYSCTL
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
 
 static int nf_log_proc_dostring(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -417,13 +417,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
 		rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
 		mutex_unlock(&nf_log_mutex);
 	} else {
+		ctl_table_no_const nf_log_table = *table;
+
 		mutex_lock(&nf_log_mutex);
 		logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
 		if (!logger)
-			table->data = "NONE";
+			nf_log_table.data = "NONE";
 		else
-			table->data = logger->name;
-		r = proc_dostring(table, write, buffer, lenp, ppos);
+			nf_log_table.data = logger->name;
+		r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
 		mutex_unlock(&nf_log_mutex);
 	}
 
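Since nf_log_sysctl_table is now declared __read_only, nf_log_proc_dostring() can no longer stash the current logger name in table->data at runtime; the hunk above instead takes a stack copy of the ctl_table, points the copy's .data at the name, and feeds the copy to proc_dostring(). The same idiom as a standalone helper (the name ro_table_dostring is invented here for illustration):

static int ro_table_dostring(const struct ctl_table *ro_table, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos,
			     const char *value)
{
	struct ctl_table tmp = *ro_table;	/* mutable stack copy */

	tmp.data = (void *)value;	/* only the copy is ever written */
	return proc_dostring(&tmp, write, buffer, lenp, ppos);
}
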
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index c68c1e5..8b5d670 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
 		}
 	}
 
-	list_add(&reg->list, &nf_sockopts);
+	pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
 out:
 	mutex_unlock(&nf_sockopt_mutex);
 	return ret;
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 {
 	mutex_lock(&nf_sockopt_mutex);
-	list_del(&reg->list);
+	pax_list_del((struct list_head *)&reg->list);
 	mutex_unlock(&nf_sockopt_mutex);
 }
 EXPORT_SYMBOL(nf_unregister_sockopt);
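The nf_sockopt.c hunk reflects PaX constification: nf_sockopt_ops instances end up in read-only memory, so the plain list primitives would fault when rewriting their embedded list pointers. The pax_list_add()/pax_list_del() wrappers used above open a temporary write window around the update. Their approximate shape (an assumption; the real helpers also perform list-integrity checks):

void pax_list_add(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();	/* lift write protection on read-only data */
	list_add(new, head);
	pax_close_kernel();	/* restore it */
}

void pax_list_del(struct list_head *entry)
{
	pax_open_kernel();
	list_del(entry);
	pax_close_kernel();
}
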
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 11d85b3..7fcc420 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
 struct nfnl_log_net {
 	spinlock_t instances_lock;
 	struct hlist_head instance_table[INSTANCE_BUCKETS];
-	atomic_t global_seq;
+	atomic_unchecked_t global_seq;
 };
 
 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
 	/* global sequence number */
 	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
 	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
-			 htonl(atomic_inc_return(&log->global_seq))))
+			 htonl(atomic_inc_return_unchecked(&log->global_seq))))
 		goto nla_put_failure;
 
 	if (data_len) {
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 65f3e2b..2e9d6a0 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -317,14 +317,7 @@ static void nft_match_eval(const struct nft_expr *expr,
 		return;
 	}
 
-	switch(ret) {
-	case true:
-		data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-		break;
-	case false:
-		data[NFT_REG_VERDICT].verdict = NFT_BREAK;
-		break;
-	}
+	data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
 }
 
 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
new file mode 100644
index 0000000..c566332
--- /dev/null
+++ b/net/netfilter/xt_gradm.c
@@ -0,0 +1,51 @@
+/*
+ * gradm match for netfilter
107032